1 /* $OpenBSD: if_ix.c,v 1.218 2024/10/04 05:22:10 yasuoka Exp $ */
2
3 /******************************************************************************
4
5 Copyright (c) 2001-2013, Intel Corporation
6 All rights reserved.
7
8 Redistribution and use in source and binary forms, with or without
9 modification, are permitted provided that the following conditions are met:
10
11 1. Redistributions of source code must retain the above copyright notice,
12 this list of conditions and the following disclaimer.
13
14 2. Redistributions in binary form must reproduce the above copyright
15 notice, this list of conditions and the following disclaimer in the
16 documentation and/or other materials provided with the distribution.
17
18 3. Neither the name of the Intel Corporation nor the names of its
19 contributors may be used to endorse or promote products derived from
20 this software without specific prior written permission.
21
22 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32 POSSIBILITY OF SUCH DAMAGE.
33
34 ******************************************************************************/
35 /* FreeBSD: src/sys/dev/ixgbe/ixgbe.c 251964 Jun 18 21:28:19 2013 UTC */
36
37 #include <dev/pci/if_ix.h>
38 #include <dev/pci/ixgbe_type.h>
39
40 /*
41 * Our TCP/IP Stack is unable to handle packets greater than MAXMCLBYTES.
42 * This interface is unable to handle packets greater than IXGBE_TSO_SIZE.
43 */
44 CTASSERT(MAXMCLBYTES <= IXGBE_TSO_SIZE);
45
46 /*********************************************************************
47 * Driver version
48 *********************************************************************/
49 /* char ixgbe_driver_version[] = "2.5.13"; */
50
51 /*********************************************************************
52 * PCI Device ID Table
53 *
54 * Used by probe to select the devices to attach to
55 *********************************************************************/
56
57 const struct pci_matchid ixgbe_devices[] = {
58 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598 },
59 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598_BX },
60 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598AF_DUAL },
61 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598AF },
62 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598AT },
63 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598AT2 },
64 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598AT_DUAL },
65 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598EB_CX4 },
66 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598EB_CX4_DUAL },
67 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598EB_XF_LR },
68 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598EB_SFP },
69 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598_SR_DUAL_EM },
70 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598_DA_DUAL },
71 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599_KX4 },
72 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599_KX4_MEZZ },
73 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599_XAUI },
74 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599_COMBO_BP },
75 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599_BPLANE_FCOE },
76 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599_CX4 },
77 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599_T3_LOM },
78 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599_SFP },
79 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599_SFP_EM },
80 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599_SFP_SF_QP },
81 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599_SFP_SF2 },
82 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599_SFP_FCOE },
83 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599EN_SFP },
84 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599_QSFP_SF_QP },
85 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X540T },
86 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X540T1 },
87 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X550T },
88 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X550T1 },
89 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X550EM_X_KX4 },
90 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X550EM_X_KR },
91 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X550EM_X_SFP },
92 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X550EM_X_10G_T },
93 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X550EM_X_1G_T },
94 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X550EM_A_KR },
95 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X550EM_A_KR_L },
96 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X550EM_A_SFP_N },
97 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X550EM_A_SFP },
98 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X550EM_A_SGMII },
99 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X550EM_A_SGMII_L },
100 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X550EM_A_10G_T },
101 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X550EM_A_1G_T },
102 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X550EM_A_1G_T_L }
103 };
104
105 /*********************************************************************
106 * Function prototypes
107 *********************************************************************/
108 int ixgbe_probe(struct device *, void *, void *);
109 void ixgbe_attach(struct device *, struct device *, void *);
110 int ixgbe_detach(struct device *, int);
111 int ixgbe_activate(struct device *, int);
112 void ixgbe_start(struct ifqueue *);
113 int ixgbe_ioctl(struct ifnet *, u_long, caddr_t);
114 int ixgbe_rxrinfo(struct ix_softc *, struct if_rxrinfo *);
115 int ixgbe_get_sffpage(struct ix_softc *, struct if_sffpage *);
116 void ixgbe_watchdog(struct ifnet *);
117 void ixgbe_init(void *);
118 void ixgbe_stop(void *);
119 void ixgbe_media_status(struct ifnet *, struct ifmediareq *);
120 int ixgbe_media_change(struct ifnet *);
121 void ixgbe_identify_hardware(struct ix_softc *);
122 int ixgbe_allocate_pci_resources(struct ix_softc *);
123 int ixgbe_allocate_legacy(struct ix_softc *);
124 int ixgbe_allocate_msix(struct ix_softc *);
125 void ixgbe_setup_msix(struct ix_softc *);
126 int ixgbe_allocate_queues(struct ix_softc *);
127 void ixgbe_free_pci_resources(struct ix_softc *);
128 void ixgbe_setup_interface(struct ix_softc *);
129 void ixgbe_config_gpie(struct ix_softc *);
130 void ixgbe_config_delay_values(struct ix_softc *);
131 void ixgbe_add_media_types(struct ix_softc *);
132 void ixgbe_config_link(struct ix_softc *);
133
134 int ixgbe_allocate_transmit_buffers(struct ix_txring *);
135 int ixgbe_setup_transmit_structures(struct ix_softc *);
136 int ixgbe_setup_transmit_ring(struct ix_txring *);
137 void ixgbe_initialize_transmit_units(struct ix_softc *);
138 void ixgbe_free_transmit_structures(struct ix_softc *);
139 void ixgbe_free_transmit_buffers(struct ix_txring *);
140
141 int ixgbe_allocate_receive_buffers(struct ix_rxring *);
142 int ixgbe_setup_receive_structures(struct ix_softc *);
143 int ixgbe_setup_receive_ring(struct ix_rxring *);
144 void ixgbe_initialize_receive_units(struct ix_softc *);
145 void ixgbe_free_receive_structures(struct ix_softc *);
146 void ixgbe_free_receive_buffers(struct ix_rxring *);
147 void ixgbe_initialize_rss_mapping(struct ix_softc *);
148 int ixgbe_rxfill(struct ix_rxring *);
149 void ixgbe_rxrefill(void *);
150
151 int ixgbe_intr(struct ix_softc *sc);
152 void ixgbe_enable_intr(struct ix_softc *);
153 void ixgbe_disable_intr(struct ix_softc *);
154 int ixgbe_txeof(struct ix_txring *);
155 int ixgbe_rxeof(struct ix_rxring *);
156 void ixgbe_rx_offload(uint32_t, uint16_t, struct mbuf *);
157 void ixgbe_iff(struct ix_softc *);
158 void ixgbe_map_queue_statistics(struct ix_softc *);
159 void ixgbe_update_link_status(struct ix_softc *);
160 int ixgbe_get_buf(struct ix_rxring *, int);
161 int ixgbe_encap(struct ix_txring *, struct mbuf *);
162 int ixgbe_dma_malloc(struct ix_softc *, bus_size_t,
163 struct ixgbe_dma_alloc *, int);
164 void ixgbe_dma_free(struct ix_softc *, struct ixgbe_dma_alloc *);
165 static int
166 ixgbe_tx_ctx_setup(struct ix_txring *, struct mbuf *, uint32_t *,
167 uint32_t *);
168 void ixgbe_set_ivar(struct ix_softc *, uint8_t, uint8_t, int8_t);
169 void ixgbe_configure_ivars(struct ix_softc *);
170 uint8_t *ixgbe_mc_array_itr(struct ixgbe_hw *, uint8_t **, uint32_t *);
171
172 void ixgbe_setup_vlan_hw_support(struct ix_softc *);
173
174 /* Support for pluggable optic modules */
175 void ixgbe_handle_mod(struct ix_softc *);
176 void ixgbe_handle_msf(struct ix_softc *);
177 void ixgbe_handle_phy(struct ix_softc *);
178
179 /* Legacy (single vector) interrupt handler */
180 int ixgbe_legacy_intr(void *);
181 void ixgbe_enable_queue(struct ix_softc *, uint32_t);
182 void ixgbe_enable_queues(struct ix_softc *);
183 void ixgbe_disable_queue(struct ix_softc *, uint32_t);
184
185 /* MSI-X (multiple vector) interrupt handlers */
186 int ixgbe_link_intr(void *);
187 int ixgbe_queue_intr(void *);
188
189 #if NKSTAT > 0
190 static void ix_kstats(struct ix_softc *);
191 static void ix_rxq_kstats(struct ix_softc *, struct ix_rxring *);
192 static void ix_txq_kstats(struct ix_softc *, struct ix_txring *);
193 static void ix_kstats_tick(void *);
194 #endif
195
196 /*********************************************************************
197 * OpenBSD Device Interface Entry Points
198 *********************************************************************/
199
200 struct cfdriver ix_cd = {
201 NULL, "ix", DV_IFNET
202 };
203
204 const struct cfattach ix_ca = {
205 sizeof(struct ix_softc), ixgbe_probe, ixgbe_attach, ixgbe_detach,
206 ixgbe_activate
207 };
208
209 int ixgbe_smart_speed = ixgbe_smart_speed_on;
210 int ixgbe_enable_msix = 1;
211
212 /*********************************************************************
213 * Device identification routine
214 *
215 * ixgbe_probe determines whether the driver should attach to the
216 * adapter, based on the PCI vendor/device ID of the adapter.
217 *
218 * return nonzero if the adapter is supported, 0 otherwise
219 *********************************************************************/
220
221 int
222 ixgbe_probe(struct device *parent, void *match, void *aux)
223 {
224 INIT_DEBUGOUT("ixgbe_probe: begin");
225
226 return (pci_matchbyid((struct pci_attach_args *)aux, ixgbe_devices,
227 nitems(ixgbe_devices)));
228 }
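
/*
 * Illustration only (not compiled): the probe above conceptually reduces
 * to a linear scan of ixgbe_devices[], as sketched below.  This is not
 * the actual pci_matchbyid(9) implementation.
 */
#if 0
static int
ix_match_sketch(struct pci_attach_args *pa)
{
	size_t i;

	for (i = 0; i < nitems(ixgbe_devices); i++) {
		if (PCI_VENDOR(pa->pa_id) == ixgbe_devices[i].pm_vid &&
		    PCI_PRODUCT(pa->pa_id) == ixgbe_devices[i].pm_pid)
			return (1);	/* supported adapter, attach */
	}
	return (0);			/* not one of ours */
}
#endif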
229
230 /*********************************************************************
231 * Device initialization routine
232 *
233 * The attach entry point is called when the driver is being loaded.
234 * This routine identifies the type of hardware, allocates all resources
235 * and initializes the hardware.
236 *
237 * This routine does not return a value; errors are reported to the console.
238 *********************************************************************/
239
240 void
241 ixgbe_attach(struct device *parent, struct device *self, void *aux)
242 {
243 struct pci_attach_args *pa = (struct pci_attach_args *)aux;
244 struct ix_softc *sc = (struct ix_softc *)self;
245 int error = 0;
246 uint16_t csum;
247 uint32_t ctrl_ext;
248 struct ixgbe_hw *hw = &sc->hw;
249
250 INIT_DEBUGOUT("ixgbe_attach: begin");
251
252 sc->osdep.os_sc = sc;
253 sc->osdep.os_pa = *pa;
254
255 rw_init(&sc->sfflock, "ixsff");
256
257 #if NKSTAT > 0
258 ix_kstats(sc);
259 #endif
260
261 /* Determine hardware revision */
262 ixgbe_identify_hardware(sc);
263
264 /* Set the default numbers of TX and RX descriptors */
265 sc->num_tx_desc = DEFAULT_TXD;
266 sc->num_rx_desc = DEFAULT_RXD;
267
268 /* Do base PCI setup - map BAR0 */
269 if (ixgbe_allocate_pci_resources(sc))
270 goto err_out;
271
272 /* Allocate our TX/RX Queues */
273 if (ixgbe_allocate_queues(sc))
274 goto err_out;
275
276 /* Allocate multicast array memory. */
277 sc->mta = mallocarray(IXGBE_ETH_LENGTH_OF_ADDRESS,
278 MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
279 if (sc->mta == NULL) {
280 printf(": Can not allocate multicast setup array\n");
281 goto err_late;
282 }
283
284 /* Initialize the shared code */
285 error = ixgbe_init_shared_code(hw);
286 if (error) {
287 printf(": Unable to initialize the shared code\n");
288 goto err_late;
289 }
290
291 /* Make sure we have a good EEPROM before we read from it */
292 if (sc->hw.eeprom.ops.validate_checksum(&sc->hw, &csum) < 0) {
293 printf(": The EEPROM Checksum Is Not Valid\n");
294 goto err_late;
295 }
296
297 error = ixgbe_init_hw(hw);
298 if (error == IXGBE_ERR_EEPROM_VERSION) {
299 printf(": This device is a pre-production adapter/"
300 "LOM. Please be aware there may be issues associated "
301 "with your hardware.\nIf you are experiencing problems "
302 "please contact your Intel or hardware representative "
303 "who provided you with this hardware.\n");
304 } else if (error && (error != IXGBE_ERR_SFP_NOT_PRESENT &&
305 error != IXGBE_ERR_SFP_NOT_SUPPORTED)) {
306 printf(": Hardware Initialization Failure\n");
307 goto err_late;
308 }
309
310 bcopy(sc->hw.mac.addr, sc->arpcom.ac_enaddr,
311 IXGBE_ETH_LENGTH_OF_ADDRESS);
312
313 if (sc->sc_intrmap)
314 error = ixgbe_allocate_msix(sc);
315 else
316 error = ixgbe_allocate_legacy(sc);
317 if (error)
318 goto err_late;
319
320 /* Enable the optics for 82599 SFP+ fiber */
321 if (sc->hw.mac.ops.enable_tx_laser)
322 sc->hw.mac.ops.enable_tx_laser(&sc->hw);
323
324 /* Enable power to the phy */
325 if (hw->phy.ops.set_phy_power)
326 hw->phy.ops.set_phy_power(&sc->hw, TRUE);
327
328 /* Setup OS specific network interface */
329 ixgbe_setup_interface(sc);
330
331 /* Get the PCI-E bus info and determine LAN ID */
332 hw->mac.ops.get_bus_info(hw);
333
334 /* Set an initial default flow control value */
335 sc->fc = ixgbe_fc_full;
336
337 /* let hardware know driver is loaded */
338 ctrl_ext = IXGBE_READ_REG(&sc->hw, IXGBE_CTRL_EXT);
339 ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
340 IXGBE_WRITE_REG(&sc->hw, IXGBE_CTRL_EXT, ctrl_ext);
341
342 printf(", address %s\n", ether_sprintf(sc->hw.mac.addr));
343
344 INIT_DEBUGOUT("ixgbe_attach: end");
345 return;
346
347 err_late:
348 ixgbe_free_transmit_structures(sc);
349 ixgbe_free_receive_structures(sc);
350 err_out:
351 ixgbe_free_pci_resources(sc);
352 free(sc->mta, M_DEVBUF, IXGBE_ETH_LENGTH_OF_ADDRESS *
353 MAX_NUM_MULTICAST_ADDRESSES);
354 }
355
356 /*********************************************************************
357 * Device removal routine
358 *
359 * The detach entry point is called when the driver is being removed.
360 * This routine stops the adapter and deallocates all the resources
361 * that were allocated for driver operation.
362 *
363 * return 0 on success, positive on failure
364 *********************************************************************/
365
366 int
367 ixgbe_detach(struct device *self, int flags)
368 {
369 struct ix_softc *sc = (struct ix_softc *)self;
370 struct ifnet *ifp = &sc->arpcom.ac_if;
371 uint32_t ctrl_ext;
372
373 INIT_DEBUGOUT("ixgbe_detach: begin");
374
375 ixgbe_stop(sc);
376
377 /* let hardware know driver is unloading */
378 ctrl_ext = IXGBE_READ_REG(&sc->hw, IXGBE_CTRL_EXT);
379 ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
380 IXGBE_WRITE_REG(&sc->hw, IXGBE_CTRL_EXT, ctrl_ext);
381
382 ether_ifdetach(ifp);
383 if_detach(ifp);
384
385 ixgbe_free_pci_resources(sc);
386
387 ixgbe_free_transmit_structures(sc);
388 ixgbe_free_receive_structures(sc);
389 free(sc->mta, M_DEVBUF, IXGBE_ETH_LENGTH_OF_ADDRESS *
390 MAX_NUM_MULTICAST_ADDRESSES);
391
392 /* XXX kstat */
393
394 return (0);
395 }
396
397 int
398 ixgbe_activate(struct device *self, int act)
399 {
400 struct ix_softc *sc = (struct ix_softc *)self;
401 struct ifnet *ifp = &sc->arpcom.ac_if;
402 struct ixgbe_hw *hw = &sc->hw;
403 uint32_t ctrl_ext;
404
405 switch (act) {
406 case DVACT_QUIESCE:
407 if (ifp->if_flags & IFF_RUNNING)
408 ixgbe_stop(sc);
409 break;
410 case DVACT_RESUME:
411 ixgbe_init_hw(hw);
412
413 /* Enable the optics for 82599 SFP+ fiber */
414 if (sc->hw.mac.ops.enable_tx_laser)
415 sc->hw.mac.ops.enable_tx_laser(&sc->hw);
416
417 /* Enable power to the phy */
418 if (hw->phy.ops.set_phy_power)
419 hw->phy.ops.set_phy_power(&sc->hw, TRUE);
420
421 /* Get the PCI-E bus info and determine LAN ID */
422 hw->mac.ops.get_bus_info(hw);
423
424 /* let hardware know driver is loaded */
425 ctrl_ext = IXGBE_READ_REG(&sc->hw, IXGBE_CTRL_EXT);
426 ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
427 IXGBE_WRITE_REG(&sc->hw, IXGBE_CTRL_EXT, ctrl_ext);
428
429 if (ifp->if_flags & IFF_UP)
430 ixgbe_init(sc);
431 break;
432 }
433 return (0);
434 }
435
436 /*********************************************************************
437 * Transmit entry point
438 *
439 * ixgbe_start is called by the stack to initiate a transmit.
440 * The driver will remain in this routine as long as there are
441 * packets to transmit and transmit resources are available.
442 * If transmit resources are not available, the queue is marked
443 * oactive and the stack holds packets until descriptors are reclaimed.
444 **********************************************************************/
445
446 void
447 ixgbe_start(struct ifqueue *ifq)
448 {
449 struct ifnet *ifp = ifq->ifq_if;
450 struct ix_softc *sc = ifp->if_softc;
451 struct ix_txring *txr = ifq->ifq_softc;
452 struct mbuf *m_head;
453 unsigned int head, free, used;
454 int post = 0;
455
456 if (!sc->link_up)
457 return;
458
459 head = txr->next_avail_desc;
460 free = txr->next_to_clean;
461 if (free <= head)
462 free += sc->num_tx_desc;
463 free -= head;
464
465 membar_consumer();
466
467 bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
468 0, txr->txdma.dma_map->dm_mapsize,
469 BUS_DMASYNC_POSTWRITE);
470
471 for (;;) {
472 /* Check that we have the minimal number of TX descriptors. */
473 if (free <= IXGBE_TX_OP_THRESHOLD) {
474 ifq_set_oactive(ifq);
475 break;
476 }
477
478 m_head = ifq_dequeue(ifq);
479 if (m_head == NULL)
480 break;
481
482 used = ixgbe_encap(txr, m_head);
483 if (used == 0) {
484 m_freem(m_head);
485 continue;
486 }
487
488 free -= used;
489
490 #if NBPFILTER > 0
491 if (ifp->if_bpf)
492 bpf_mtap_ether(ifp->if_bpf, m_head, BPF_DIRECTION_OUT);
493 #endif
494
495 /* Set timeout in case hardware has problems transmitting */
496 txr->watchdog_timer = IXGBE_TX_TIMEOUT;
497 ifp->if_timer = IXGBE_TX_TIMEOUT;
498
499 post = 1;
500 }
501
502 bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
503 0, txr->txdma.dma_map->dm_mapsize,
504 BUS_DMASYNC_PREWRITE);
505
506 /*
507 * Advance the Transmit Descriptor Tail (Tdt), this tells the
508 * hardware that this frame is available to transmit.
509 */
510 if (post)
511 IXGBE_WRITE_REG(&sc->hw, txr->tail, txr->next_avail_desc);
512 }
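
/*
 * Illustration only (not compiled): the head/free arithmetic at the top
 * of ixgbe_start() is the usual circular-ring accounting, shown here as
 * a standalone sketch.
 */
#if 0
static unsigned int
ix_txring_free_sketch(unsigned int prod, unsigned int cons,
    unsigned int ndesc)
{
	unsigned int free;

	/*
	 * "cons" (next_to_clean) chases "prod" (next_avail_desc) around
	 * the ring; when the consumer index is at or behind the producer
	 * it has wrapped, so add the ring size before subtracting.
	 */
	free = cons;
	if (free <= prod)
		free += ndesc;
	free -= prod;

	return (free);	/* descriptors available for ixgbe_encap() */
}
#endif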
513
514 /*********************************************************************
515 * Ioctl entry point
516 *
517 * ixgbe_ioctl is called when the user wants to configure the
518 * interface.
519 *
520 * return 0 on success, positive on failure
521 **********************************************************************/
522
523 int
524 ixgbe_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
525 {
526 struct ix_softc *sc = ifp->if_softc;
527 struct ifreq *ifr = (struct ifreq *) data;
528 int s, error = 0;
529
530 s = splnet();
531
532 switch (command) {
533 case SIOCSIFADDR:
534 IOCTL_DEBUGOUT("ioctl: SIOCxIFADDR (Get/Set Interface Addr)");
535 ifp->if_flags |= IFF_UP;
536 if (!(ifp->if_flags & IFF_RUNNING))
537 ixgbe_init(sc);
538 break;
539
540 case SIOCSIFFLAGS:
541 IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
542 if (ifp->if_flags & IFF_UP) {
543 if (ifp->if_flags & IFF_RUNNING)
544 error = ENETRESET;
545 else
546 ixgbe_init(sc);
547 } else {
548 if (ifp->if_flags & IFF_RUNNING)
549 ixgbe_stop(sc);
550 }
551 break;
552
553 case SIOCSIFMEDIA:
554 case SIOCGIFMEDIA:
555 IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
556 error = ifmedia_ioctl(ifp, ifr, &sc->media, command);
557 break;
558
559 case SIOCGIFRXR:
560 error = ixgbe_rxrinfo(sc, (struct if_rxrinfo *)ifr->ifr_data);
561 break;
562
563 case SIOCGIFSFFPAGE:
564 error = rw_enter(&sc->sfflock, RW_WRITE|RW_INTR);
565 if (error != 0)
566 break;
567
568 error = ixgbe_get_sffpage(sc, (struct if_sffpage *)data);
569 rw_exit(&sc->sfflock);
570 break;
571
572 default:
573 error = ether_ioctl(ifp, &sc->arpcom, command, data);
574 }
575
576 if (error == ENETRESET) {
577 if (ifp->if_flags & IFF_RUNNING) {
578 ixgbe_disable_intr(sc);
579 ixgbe_iff(sc);
580 ixgbe_enable_intr(sc);
581 ixgbe_enable_queues(sc);
582 }
583 error = 0;
584 }
585
586 splx(s);
587 return (error);
588 }
589
590 int
591 ixgbe_get_sffpage(struct ix_softc *sc, struct if_sffpage *sff)
592 {
593 struct ixgbe_hw *hw = &sc->hw;
594 uint32_t swfw_mask = hw->phy.phy_semaphore_mask;
595 uint8_t page;
596 size_t i;
597 int error = EIO;
598
599 if (hw->phy.type == ixgbe_phy_fw)
600 return (ENODEV);
601
602 if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
603 return (EBUSY); /* XXX */
604
605 if (sff->sff_addr == IFSFF_ADDR_EEPROM) {
606 if (hw->phy.ops.read_i2c_byte_unlocked(hw, 127,
607 IFSFF_ADDR_EEPROM, &page))
608 goto error;
609 if (page != sff->sff_page &&
610 hw->phy.ops.write_i2c_byte_unlocked(hw, 127,
611 IFSFF_ADDR_EEPROM, sff->sff_page))
612 goto error;
613 }
614
615 for (i = 0; i < sizeof(sff->sff_data); i++) {
616 if (hw->phy.ops.read_i2c_byte_unlocked(hw, i,
617 sff->sff_addr, &sff->sff_data[i]))
618 goto error;
619 }
620
621 if (sff->sff_addr == IFSFF_ADDR_EEPROM) {
622 if (page != sff->sff_page &&
623 hw->phy.ops.write_i2c_byte_unlocked(hw, 127,
624 IFSFF_ADDR_EEPROM, page))
625 goto error;
626 }
627
628 error = 0;
629 error:
630 hw->mac.ops.release_swfw_sync(hw, swfw_mask);
631 return (error);
632 }
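
/*
 * Illustration only (not compiled): SIOCGIFSFFPAGE is how ifconfig(8)
 * dumps SFP transceiver EEPROM pages.  A rough sketch of a userland
 * caller follows; it assumes the usual if_sffpage layout (sff_ifname,
 * sff_addr, sff_page, sff_data) from <net/if.h>.
 */
#if 0
static int
ix_read_sff_sketch(int s, const char *ifname, uint8_t page,
    struct if_sffpage *sff)
{
	memset(sff, 0, sizeof(*sff));
	strlcpy(sff->sff_ifname, ifname, sizeof(sff->sff_ifname));
	sff->sff_addr = IFSFF_ADDR_EEPROM;	/* i2c address 0xa0 */
	sff->sff_page = page;

	/* the driver fills sff->sff_data[] with the selected EEPROM page */
	return (ioctl(s, SIOCGIFSFFPAGE, sff));
}
#endif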
633
634 int
635 ixgbe_rxrinfo(struct ix_softc *sc, struct if_rxrinfo *ifri)
636 {
637 struct if_rxring_info *ifr, ifr1;
638 struct ix_rxring *rxr;
639 int error, i;
640 u_int n = 0;
641
642 if (sc->num_queues > 1) {
643 ifr = mallocarray(sc->num_queues, sizeof(*ifr), M_DEVBUF,
644 M_WAITOK | M_ZERO);
645 } else
646 ifr = &ifr1;
647
648 for (i = 0; i < sc->num_queues; i++) {
649 rxr = &sc->rx_rings[i];
650 ifr[n].ifr_size = MCLBYTES;
651 snprintf(ifr[n].ifr_name, sizeof(ifr[n].ifr_name), "%d", i);
652 ifr[n].ifr_info = rxr->rx_ring;
653 n++;
654 }
655
656 error = if_rxr_info_ioctl(ifri, sc->num_queues, ifr);
657
658 if (sc->num_queues > 1)
659 free(ifr, M_DEVBUF, sc->num_queues * sizeof(*ifr));
660 return (error);
661 }
662
663 /*********************************************************************
664 * Watchdog entry point
665 *
666 **********************************************************************/
667
668 void
669 ixgbe_watchdog(struct ifnet * ifp)
670 {
671 struct ix_softc *sc = (struct ix_softc *)ifp->if_softc;
672 struct ix_txring *txr = sc->tx_rings;
673 struct ixgbe_hw *hw = &sc->hw;
674 int tx_hang = FALSE;
675 int i;
676
677 /*
678 * The timer is set to 5 every time ixgbe_start() queues a packet.
679 * Anytime all descriptors are clean the timer is set to 0.
680 */
681 for (i = 0; i < sc->num_queues; i++, txr++) {
682 if (txr->watchdog_timer == 0 || --txr->watchdog_timer)
683 continue;
684 else {
685 tx_hang = TRUE;
686 break;
687 }
688 }
689 if (tx_hang == FALSE)
690 return;
691
692 /*
693 * If we are in this routine because of pause frames, then don't
694 * reset the hardware.
695 */
696 if (!(IXGBE_READ_REG(hw, IXGBE_TFCS) & IXGBE_TFCS_TXON)) {
697 for (i = 0; i < sc->num_queues; i++, txr++)
698 txr->watchdog_timer = IXGBE_TX_TIMEOUT;
699 ifp->if_timer = IXGBE_TX_TIMEOUT;
700 return;
701 }
702
703
704 printf("%s: Watchdog timeout -- resetting\n", ifp->if_xname);
705 for (i = 0; i < sc->num_queues; i++, txr++) {
706 printf("%s: Queue(%d) tdh = %d, hw tdt = %d\n", ifp->if_xname, i,
707 IXGBE_READ_REG(hw, IXGBE_TDH(i)),
708 IXGBE_READ_REG(hw, sc->tx_rings[i].tail));
709 printf("%s: TX(%d) Next TX to Clean = %d\n", ifp->if_xname,
710 i, txr->next_to_clean);
711 }
712 ifp->if_flags &= ~IFF_RUNNING;
713
714 ixgbe_init(sc);
715 }
716
717 /*********************************************************************
718 * Init entry point
719 *
720 * This routine is used in two ways. It is used by the stack as
721 * init entry point in network interface structure. It is also used
722 * by the driver as a hw/sw initialization routine to get to a
723 * consistent state.
724 *
725 * This routine does not return a value.
726 **********************************************************************/
727 #define IXGBE_MHADD_MFS_SHIFT 16
728
729 void
730 ixgbe_init(void *arg)
731 {
732 struct ix_softc *sc = (struct ix_softc *)arg;
733 struct ifnet *ifp = &sc->arpcom.ac_if;
734 struct ix_rxring *rxr = sc->rx_rings;
735 uint32_t k, txdctl, rxdctl, rxctrl, mhadd, itr;
736 int i, s, err;
737
738 INIT_DEBUGOUT("ixgbe_init: begin");
739
740 s = splnet();
741
742 ixgbe_stop(sc);
743
744 /* reprogram the RAR[0] in case user changed it. */
745 ixgbe_set_rar(&sc->hw, 0, sc->hw.mac.addr, 0, IXGBE_RAH_AV);
746
747 /* Get the latest mac address, User can use a LAA */
748 bcopy(sc->arpcom.ac_enaddr, sc->hw.mac.addr,
749 IXGBE_ETH_LENGTH_OF_ADDRESS);
750 ixgbe_set_rar(&sc->hw, 0, sc->hw.mac.addr, 0, 1);
751 sc->hw.addr_ctrl.rar_used_count = 1;
752
753 /* Prepare transmit descriptors and buffers */
754 if (ixgbe_setup_transmit_structures(sc)) {
755 printf("%s: Could not setup transmit structures\n",
756 ifp->if_xname);
757 ixgbe_stop(sc);
758 splx(s);
759 return;
760 }
761
762 ixgbe_init_hw(&sc->hw);
763 ixgbe_initialize_transmit_units(sc);
764
765 /* Use 2k clusters, even for jumbo frames */
766 sc->rx_mbuf_sz = MCLBYTES + ETHER_ALIGN;
767
768 /* Prepare receive descriptors and buffers */
769 if (ixgbe_setup_receive_structures(sc)) {
770 printf("%s: Could not setup receive structures\n",
771 ifp->if_xname);
772 ixgbe_stop(sc);
773 splx(s);
774 return;
775 }
776
777 /* Configure RX settings */
778 ixgbe_initialize_receive_units(sc);
779
780 /* Enable SDP & MSIX interrupts based on adapter */
781 ixgbe_config_gpie(sc);
782
783 /* Program promiscuous mode and multicast filters. */
784 ixgbe_iff(sc);
785
786 /* Set MRU size */
787 mhadd = IXGBE_READ_REG(&sc->hw, IXGBE_MHADD);
788 mhadd &= ~IXGBE_MHADD_MFS_MASK;
789 mhadd |= sc->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
790 IXGBE_WRITE_REG(&sc->hw, IXGBE_MHADD, mhadd);
791
792 /* Now enable all the queues */
793 for (i = 0; i < sc->num_queues; i++) {
794 txdctl = IXGBE_READ_REG(&sc->hw, IXGBE_TXDCTL(i));
795 txdctl |= IXGBE_TXDCTL_ENABLE;
796 /* Set WTHRESH to 8, burst writeback */
797 txdctl |= (8 << 16);
798 /*
799 * When the internal queue falls below PTHRESH (16),
800 * start prefetching as long as there are at least
801 * HTHRESH (1) buffers ready.
802 */
803 txdctl |= (16 << 0) | (1 << 8);
804 IXGBE_WRITE_REG(&sc->hw, IXGBE_TXDCTL(i), txdctl);
805 }
806
807 for (i = 0; i < sc->num_queues; i++) {
808 rxdctl = IXGBE_READ_REG(&sc->hw, IXGBE_RXDCTL(i));
809 if (sc->hw.mac.type == ixgbe_mac_82598EB) {
810 /*
811 * PTHRESH = 21
812 * HTHRESH = 4
813 * WTHRESH = 8
814 */
815 rxdctl &= ~0x3FFFFF;
816 rxdctl |= 0x080420;
817 }
818 rxdctl |= IXGBE_RXDCTL_ENABLE;
819 IXGBE_WRITE_REG(&sc->hw, IXGBE_RXDCTL(i), rxdctl);
820 for (k = 0; k < 10; k++) {
821 if (IXGBE_READ_REG(&sc->hw, IXGBE_RXDCTL(i)) &
822 IXGBE_RXDCTL_ENABLE)
823 break;
824 else
825 msec_delay(1);
826 }
827 IXGBE_WRITE_FLUSH(&sc->hw);
828 IXGBE_WRITE_REG(&sc->hw, rxr[i].tail, rxr->last_desc_filled);
829 }
830
831 /* Set up VLAN support and filter */
832 ixgbe_setup_vlan_hw_support(sc);
833
834 /* Enable Receive engine */
835 rxctrl = IXGBE_READ_REG(&sc->hw, IXGBE_RXCTRL);
836 if (sc->hw.mac.type == ixgbe_mac_82598EB)
837 rxctrl |= IXGBE_RXCTRL_DMBYPS;
838 rxctrl |= IXGBE_RXCTRL_RXEN;
839 sc->hw.mac.ops.enable_rx_dma(&sc->hw, rxctrl);
840
841 /* Set up MSI/X routing */
842 if (sc->sc_intrmap) {
843 ixgbe_configure_ivars(sc);
844 /* Set up auto-mask */
845 if (sc->hw.mac.type == ixgbe_mac_82598EB)
846 IXGBE_WRITE_REG(&sc->hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
847 else {
848 IXGBE_WRITE_REG(&sc->hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
849 IXGBE_WRITE_REG(&sc->hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
850 }
851 } else { /* Simple settings for Legacy/MSI */
852 ixgbe_set_ivar(sc, 0, 0, 0);
853 ixgbe_set_ivar(sc, 0, 0, 1);
854 IXGBE_WRITE_REG(&sc->hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
855 }
856
857 /* Check on any SFP devices that need to be kick-started */
858 if (sc->hw.phy.type == ixgbe_phy_none) {
859 err = sc->hw.phy.ops.identify(&sc->hw);
860 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
861 printf("Unsupported SFP+ module type was detected.\n");
862 splx(s);
863 return;
864 }
865 }
866
867 /* Setup interrupt moderation */
868 itr = (4000000 / IXGBE_INTS_PER_SEC) & 0xff8;
869 if (sc->hw.mac.type != ixgbe_mac_82598EB)
870 itr |= IXGBE_EITR_LLI_MOD | IXGBE_EITR_CNT_WDIS;
871 IXGBE_WRITE_REG(&sc->hw, IXGBE_EITR(0), itr);
872
873 if (sc->sc_intrmap) {
874 /* Set moderation on the Link interrupt */
875 IXGBE_WRITE_REG(&sc->hw, IXGBE_EITR(sc->linkvec),
876 IXGBE_LINK_ITR);
877 }
878
879 /* Enable power to the phy */
880 if (sc->hw.phy.ops.set_phy_power)
881 sc->hw.phy.ops.set_phy_power(&sc->hw, TRUE);
882
883 /* Config/Enable Link */
884 ixgbe_config_link(sc);
885
886 /* Hardware Packet Buffer & Flow Control setup */
887 ixgbe_config_delay_values(sc);
888
889 /* Initialize the FC settings */
890 sc->hw.mac.ops.start_hw(&sc->hw);
891
892 /* And now turn on interrupts */
893 ixgbe_enable_intr(sc);
894 ixgbe_enable_queues(sc);
895
896 /* Now inform the stack we're ready */
897 ifp->if_flags |= IFF_RUNNING;
898 for (i = 0; i < sc->num_queues; i++)
899 ifq_clr_oactive(ifp->if_ifqs[i]);
900
901 #if NKSTAT > 0
902 ix_kstats_tick(sc);
903 #endif
904
905 splx(s);
906 }
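
/*
 * Illustration only (not compiled): the magic shifts used when programming
 * TXDCTL above pack three prefetch thresholds into one register.  The
 * IX_*_SHIFT names below are local to this sketch; the field positions are
 * the ones used by ixgbe_init().
 */
#if 0
#define IX_TXDCTL_PTHRESH_SHIFT	 0	/* prefetch threshold */
#define IX_TXDCTL_HTHRESH_SHIFT	 8	/* host threshold */
#define IX_TXDCTL_WTHRESH_SHIFT	16	/* write-back threshold */

static uint32_t
ix_txdctl_sketch(uint32_t txdctl)
{
	txdctl |= 16 << IX_TXDCTL_PTHRESH_SHIFT;	/* PTHRESH = 16 */
	txdctl |= 1 << IX_TXDCTL_HTHRESH_SHIFT;		/* HTHRESH = 1 */
	txdctl |= 8 << IX_TXDCTL_WTHRESH_SHIFT;		/* WTHRESH = 8 */
	txdctl |= IXGBE_TXDCTL_ENABLE;

	return (txdctl);	/* same value ixgbe_init() writes per queue */
}
#endif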
907
908 void
909 ixgbe_config_gpie(struct ix_softc *sc)
910 {
911 struct ixgbe_hw *hw = &sc->hw;
912 uint32_t gpie;
913
914 gpie = IXGBE_READ_REG(&sc->hw, IXGBE_GPIE);
915
916 /* Fan Failure Interrupt */
917 if (hw->device_id == IXGBE_DEV_ID_82598AT)
918 gpie |= IXGBE_SDP1_GPIEN;
919
920 if (sc->hw.mac.type == ixgbe_mac_82599EB) {
921 /* Add for Module detection */
922 gpie |= IXGBE_SDP2_GPIEN;
923
924 /* Media ready */
925 if (hw->device_id != IXGBE_DEV_ID_82599_QSFP_SF_QP)
926 gpie |= IXGBE_SDP1_GPIEN;
927
928 /*
929 * Set LL interval to max to reduce the number of low latency
930 * interrupts hitting the card when the ring is getting full.
931 */
932 gpie |= 0xf << IXGBE_GPIE_LLI_DELAY_SHIFT;
933 }
934
935 if (sc->hw.mac.type == ixgbe_mac_X540 ||
936 sc->hw.mac.type == ixgbe_mac_X550EM_x ||
937 sc->hw.mac.type == ixgbe_mac_X550EM_a) {
938 /*
939 * Thermal Failure Detection (X540)
940 * Link Detection (X552 SFP+, X552/X557-AT)
941 */
942 gpie |= IXGBE_SDP0_GPIEN_X540;
943
944 /*
945 * Set LL interval to max to reduce the number of low latency
946 * interrupts hitting the card when the ring is getting full.
947 */
948 gpie |= 0xf << IXGBE_GPIE_LLI_DELAY_SHIFT;
949 }
950
951 if (sc->sc_intrmap) {
952 /* Enable Enhanced MSIX mode */
953 gpie |= IXGBE_GPIE_MSIX_MODE;
954 gpie |= IXGBE_GPIE_EIAME | IXGBE_GPIE_PBA_SUPPORT |
955 IXGBE_GPIE_OCD;
956 }
957
958 IXGBE_WRITE_REG(&sc->hw, IXGBE_GPIE, gpie);
959 }
960
961 /*
962 * Requires sc->max_frame_size to be set.
963 */
964 void
965 ixgbe_config_delay_values(struct ix_softc *sc)
966 {
967 struct ixgbe_hw *hw = &sc->hw;
968 uint32_t rxpb, frame, size, tmp;
969
970 frame = sc->max_frame_size;
971
972 /* Calculate High Water */
973 switch (hw->mac.type) {
974 case ixgbe_mac_X540:
975 case ixgbe_mac_X550:
976 case ixgbe_mac_X550EM_x:
977 case ixgbe_mac_X550EM_a:
978 tmp = IXGBE_DV_X540(frame, frame);
979 break;
980 default:
981 tmp = IXGBE_DV(frame, frame);
982 break;
983 }
984 size = IXGBE_BT2KB(tmp);
985 rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
986 hw->fc.high_water[0] = rxpb - size;
987
988 /* Now calculate Low Water */
989 switch (hw->mac.type) {
990 case ixgbe_mac_X540:
991 case ixgbe_mac_X550:
992 case ixgbe_mac_X550EM_x:
993 case ixgbe_mac_X550EM_a:
994 tmp = IXGBE_LOW_DV_X540(frame);
995 break;
996 default:
997 tmp = IXGBE_LOW_DV(frame);
998 break;
999 }
1000 hw->fc.low_water[0] = IXGBE_BT2KB(tmp);
1001
1002 hw->fc.requested_mode = sc->fc;
1003 hw->fc.pause_time = IXGBE_FC_PAUSE;
1004 hw->fc.send_xon = TRUE;
1005 }
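
/*
 * Roughly: high_water[0] is the packet-buffer fill level (in KB) at which
 * the MAC starts sending XOFF pause frames, low_water[0] the level at
 * which it may send XON again, pause_time the pause quanta advertised in
 * each XOFF frame, and send_xon requests an explicit XON once the buffer
 * drains below the low water mark.
 */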
1006
1007 /*
1008 * MSI-X queue interrupt enable/disable helpers
1009 */
1010 void
1011 ixgbe_enable_queue(struct ix_softc *sc, uint32_t vector)
1012 {
1013 uint64_t queue = 1ULL << vector;
1014 uint32_t mask;
1015
1016 if (sc->hw.mac.type == ixgbe_mac_82598EB) {
1017 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
1018 IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMS, mask);
1019 } else {
1020 mask = (queue & 0xFFFFFFFF);
1021 if (mask)
1022 IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMS_EX(0), mask);
1023 mask = (queue >> 32);
1024 if (mask)
1025 IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMS_EX(1), mask);
1026 }
1027 }
1028
1029 void
1030 ixgbe_enable_queues(struct ix_softc *sc)
1031 {
1032 struct ix_queue *que;
1033 int i;
1034
1035 for (i = 0, que = sc->queues; i < sc->num_queues; i++, que++)
1036 ixgbe_enable_queue(sc, que->msix);
1037 }
1038
1039 void
1040 ixgbe_disable_queue(struct ix_softc *sc, uint32_t vector)
1041 {
1042 uint64_t queue = 1ULL << vector;
1043 uint32_t mask;
1044
1045 if (sc->hw.mac.type == ixgbe_mac_82598EB) {
1046 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
1047 IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC, mask);
1048 } else {
1049 mask = (queue & 0xFFFFFFFF);
1050 if (mask)
1051 IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC_EX(0), mask);
1052 mask = (queue >> 32);
1053 if (mask)
1054 IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC_EX(1), mask);
1055 }
1056 }
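
/*
 * On 82599 and later the per-queue interrupt mask is 64 bits wide and is
 * split across EIMS_EX(0)/EIMS_EX(1) (likewise EIMC_EX), which is why the
 * helpers above build a 64-bit "queue" bit and then peel off the low and
 * high 32-bit halves.  Worked example (not compiled):
 */
#if 0
	uint64_t queue = 1ULL << 35;		/* MSI-X vector 35 */
	uint32_t lo = queue & 0xFFFFFFFF;	/* 0 -> EIMS_EX(0) untouched */
	uint32_t hi = queue >> 32;		/* bit 3 -> set in EIMS_EX(1) */
#endif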
1057
1058 /*
1059 * MSIX Interrupt Handlers
1060 */
1061 int
1062 ixgbe_link_intr(void *vsc)
1063 {
1064 struct ix_softc *sc = (struct ix_softc *)vsc;
1065
1066 return ixgbe_intr(sc);
1067 }
1068
1069 int
1070 ixgbe_queue_intr(void *vque)
1071 {
1072 struct ix_queue *que = vque;
1073 struct ix_softc *sc = que->sc;
1074 struct ifnet *ifp = &sc->arpcom.ac_if;
1075 struct ix_rxring *rxr = que->rxr;
1076 struct ix_txring *txr = que->txr;
1077
1078 if (ISSET(ifp->if_flags, IFF_RUNNING)) {
1079 ixgbe_rxeof(rxr);
1080 ixgbe_txeof(txr);
1081 ixgbe_rxrefill(rxr);
1082 }
1083
1084 ixgbe_enable_queue(sc, que->msix);
1085
1086 return (1);
1087 }
1088
1089 /*********************************************************************
1090 *
1091 * Legacy Interrupt Service routine
1092 *
1093 **********************************************************************/
1094
1095 int
1096 ixgbe_legacy_intr(void *arg)
1097 {
1098 struct ix_softc *sc = (struct ix_softc *)arg;
1099 struct ifnet *ifp = &sc->arpcom.ac_if;
1100 struct ix_rxring *rxr = sc->rx_rings;
1101 struct ix_txring *txr = sc->tx_rings;
1102 int rv;
1103
1104 rv = ixgbe_intr(sc);
1105 if (rv == 0) {
1106 return (0);
1107 }
1108
1109 if (ISSET(ifp->if_flags, IFF_RUNNING)) {
1110 ixgbe_rxeof(rxr);
1111 ixgbe_txeof(txr);
1112 ixgbe_rxrefill(rxr);
1113 }
1114
1115 ixgbe_enable_queues(sc);
1116 return (rv);
1117 }
1118
1119 int
1120 ixgbe_intr(struct ix_softc *sc)
1121 {
1122 struct ifnet *ifp = &sc->arpcom.ac_if;
1123 struct ixgbe_hw *hw = &sc->hw;
1124 uint32_t reg_eicr, mod_mask, msf_mask;
1125
1126 if (sc->sc_intrmap) {
1127 /* Pause other interrupts */
1128 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_OTHER);
1129 /* First get the cause */
1130 reg_eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
1131 /* Mask out the queue bits so the write below does not clear them */
1132 reg_eicr &= ~IXGBE_EICR_RTX_QUEUE;
1133 /* Clear interrupt with write */
1134 IXGBE_WRITE_REG(hw, IXGBE_EICR, reg_eicr);
1135 } else {
1136 reg_eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
1137 if (reg_eicr == 0) {
1138 ixgbe_enable_intr(sc);
1139 ixgbe_enable_queues(sc);
1140 return (0);
1141 }
1142 }
1143
1144 /* Link status change */
1145 if (reg_eicr & IXGBE_EICR_LSC) {
1146 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
1147 KERNEL_LOCK();
1148 ixgbe_update_link_status(sc);
1149 KERNEL_UNLOCK();
1150 }
1151
1152 if (hw->mac.type != ixgbe_mac_82598EB) {
1153 if (reg_eicr & IXGBE_EICR_ECC) {
1154 printf("%s: CRITICAL: ECC ERROR!! "
1155 "Please Reboot!!\n", sc->dev.dv_xname);
1156 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
1157 }
1158 /* Check for over temp condition */
1159 if (reg_eicr & IXGBE_EICR_TS) {
1160 printf("%s: CRITICAL: OVER TEMP!! "
1161 "PHY IS SHUT DOWN!!\n", ifp->if_xname);
1162 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS);
1163 }
1164 }
1165
1166 /* Pluggable optics-related interrupt */
1167 if (ixgbe_is_sfp(hw)) {
1168 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP) {
1169 mod_mask = IXGBE_EICR_GPI_SDP0_X540;
1170 msf_mask = IXGBE_EICR_GPI_SDP1_X540;
1171 } else if (hw->mac.type == ixgbe_mac_X540 ||
1172 hw->mac.type == ixgbe_mac_X550 ||
1173 hw->mac.type == ixgbe_mac_X550EM_x) {
1174 mod_mask = IXGBE_EICR_GPI_SDP2_X540;
1175 msf_mask = IXGBE_EICR_GPI_SDP1_X540;
1176 } else {
1177 mod_mask = IXGBE_EICR_GPI_SDP2;
1178 msf_mask = IXGBE_EICR_GPI_SDP1;
1179 }
1180 if (reg_eicr & mod_mask) {
1181 /* Clear the interrupt */
1182 IXGBE_WRITE_REG(hw, IXGBE_EICR, mod_mask);
1183 KERNEL_LOCK();
1184 ixgbe_handle_mod(sc);
1185 KERNEL_UNLOCK();
1186 } else if ((hw->phy.media_type != ixgbe_media_type_copper) &&
1187 (reg_eicr & msf_mask)) {
1188 /* Clear the interrupt */
1189 IXGBE_WRITE_REG(hw, IXGBE_EICR, msf_mask);
1190 KERNEL_LOCK();
1191 ixgbe_handle_msf(sc);
1192 KERNEL_UNLOCK();
1193 }
1194 }
1195
1196 /* Check for fan failure */
1197 if ((hw->device_id == IXGBE_DEV_ID_82598AT) &&
1198 (reg_eicr & IXGBE_EICR_GPI_SDP1)) {
1199 printf("%s: CRITICAL: FAN FAILURE!! "
1200 "REPLACE IMMEDIATELY!!\n", ifp->if_xname);
1201 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
1202 }
1203
1204 /* External PHY interrupt */
1205 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
1206 (reg_eicr & IXGBE_EICR_GPI_SDP0_X540)) {
1207 /* Clear the interrupt */
1208 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540);
1209 KERNEL_LOCK();
1210 ixgbe_handle_phy(sc);
1211 KERNEL_UNLOCK();
1212 }
1213
1214 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
1215
1216 return (1);
1217 }
1218
1219 /*********************************************************************
1220 *
1221 * Media Ioctl callback
1222 *
1223 * This routine is called whenever the user queries the status of
1224 * the interface using ifconfig.
1225 *
1226 **********************************************************************/
1227 void
1228 ixgbe_media_status(struct ifnet * ifp, struct ifmediareq *ifmr)
1229 {
1230 struct ix_softc *sc = ifp->if_softc;
1231 uint64_t layer;
1232
1233 ifmr->ifm_active = IFM_ETHER;
1234 ifmr->ifm_status = IFM_AVALID;
1235
1236 INIT_DEBUGOUT("ixgbe_media_status: begin");
1237 ixgbe_update_link_status(sc);
1238
1239 if (!LINK_STATE_IS_UP(ifp->if_link_state))
1240 return;
1241
1242 ifmr->ifm_status |= IFM_ACTIVE;
1243 layer = sc->phy_layer;
1244
1245 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
1246 layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
1247 layer & IXGBE_PHYSICAL_LAYER_100BASE_TX ||
1248 layer & IXGBE_PHYSICAL_LAYER_10BASE_T) {
1249 switch (sc->link_speed) {
1250 case IXGBE_LINK_SPEED_10GB_FULL:
1251 ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
1252 break;
1253 case IXGBE_LINK_SPEED_1GB_FULL:
1254 ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
1255 break;
1256 case IXGBE_LINK_SPEED_100_FULL:
1257 ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
1258 break;
1259 case IXGBE_LINK_SPEED_10_FULL:
1260 ifmr->ifm_active |= IFM_10_T | IFM_FDX;
1261 break;
1262 }
1263 }
1264 if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
1265 layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA) {
1266 switch (sc->link_speed) {
1267 case IXGBE_LINK_SPEED_10GB_FULL:
1268 ifmr->ifm_active |= IFM_10G_SFP_CU | IFM_FDX;
1269 break;
1270 }
1271 }
1272 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) {
1273 switch (sc->link_speed) {
1274 case IXGBE_LINK_SPEED_10GB_FULL:
1275 ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
1276 break;
1277 case IXGBE_LINK_SPEED_1GB_FULL:
1278 ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
1279 break;
1280 }
1281 }
1282 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR ||
1283 layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX) {
1284 switch (sc->link_speed) {
1285 case IXGBE_LINK_SPEED_10GB_FULL:
1286 ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
1287 break;
1288 case IXGBE_LINK_SPEED_1GB_FULL:
1289 ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
1290 break;
1291 }
1292 }
1293 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4) {
1294 switch (sc->link_speed) {
1295 case IXGBE_LINK_SPEED_10GB_FULL:
1296 ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
1297 break;
1298 }
1299 }
1300 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
1301 switch (sc->link_speed) {
1302 case IXGBE_LINK_SPEED_10GB_FULL:
1303 ifmr->ifm_active |= IFM_10G_KR | IFM_FDX;
1304 break;
1305 case IXGBE_LINK_SPEED_2_5GB_FULL:
1306 ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
1307 break;
1308 case IXGBE_LINK_SPEED_1GB_FULL:
1309 ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
1310 break;
1311 }
1312 } else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
1313 layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
1314 layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
1315 switch (sc->link_speed) {
1316 case IXGBE_LINK_SPEED_10GB_FULL:
1317 ifmr->ifm_active |= IFM_10G_KX4 | IFM_FDX;
1318 break;
1319 case IXGBE_LINK_SPEED_2_5GB_FULL:
1320 ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
1321 break;
1322 case IXGBE_LINK_SPEED_1GB_FULL:
1323 ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
1324 break;
1325 }
1326 }
1327
1328 switch (sc->hw.fc.current_mode) {
1329 case ixgbe_fc_tx_pause:
1330 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
1331 break;
1332 case ixgbe_fc_rx_pause:
1333 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
1334 break;
1335 case ixgbe_fc_full:
1336 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE |
1337 IFM_ETH_TXPAUSE;
1338 break;
1339 default:
1340 ifmr->ifm_active &= ~(IFM_FLOW | IFM_ETH_RXPAUSE |
1341 IFM_ETH_TXPAUSE);
1342 break;
1343 }
1344 }
1345
1346 /*********************************************************************
1347 *
1348 * Media Ioctl callback
1349 *
1350 * This routine is called when the user changes speed/duplex using
1351 * the media/mediaopt options with ifconfig.
1352 *
1353 **********************************************************************/
1354 int
1355 ixgbe_media_change(struct ifnet *ifp)
1356 {
1357 struct ix_softc *sc = ifp->if_softc;
1358 struct ixgbe_hw *hw = &sc->hw;
1359 struct ifmedia *ifm = &sc->media;
1360 ixgbe_link_speed speed = 0;
1361
1362 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1363 return (EINVAL);
1364
1365 if (hw->phy.media_type == ixgbe_media_type_backplane)
1366 return (ENODEV);
1367
1368 switch (IFM_SUBTYPE(ifm->ifm_media)) {
1369 case IFM_AUTO:
1370 case IFM_10G_T:
1371 speed |= IXGBE_LINK_SPEED_100_FULL;
1372 speed |= IXGBE_LINK_SPEED_1GB_FULL;
1373 speed |= IXGBE_LINK_SPEED_10GB_FULL;
1374 break;
1375 case IFM_10G_SR:
1376 case IFM_10G_KR:
1377 case IFM_10G_LR:
1378 case IFM_10G_LRM:
1379 case IFM_10G_CX4:
1380 case IFM_10G_KX4:
1381 speed |= IXGBE_LINK_SPEED_1GB_FULL;
1382 speed |= IXGBE_LINK_SPEED_10GB_FULL;
1383 break;
1384 case IFM_10G_SFP_CU:
1385 speed |= IXGBE_LINK_SPEED_10GB_FULL;
1386 break;
1387 case IFM_1000_T:
1388 speed |= IXGBE_LINK_SPEED_100_FULL;
1389 speed |= IXGBE_LINK_SPEED_1GB_FULL;
1390 break;
1391 case IFM_1000_LX:
1392 case IFM_1000_SX:
1393 case IFM_1000_CX:
1394 case IFM_1000_KX:
1395 speed |= IXGBE_LINK_SPEED_1GB_FULL;
1396 break;
1397 case IFM_100_TX:
1398 speed |= IXGBE_LINK_SPEED_100_FULL;
1399 break;
1400 case IFM_10_T:
1401 speed |= IXGBE_LINK_SPEED_10_FULL;
1402 break;
1403 default:
1404 return (EINVAL);
1405 }
1406
1407 hw->mac.autotry_restart = TRUE;
1408 hw->mac.ops.setup_link(hw, speed, TRUE);
1409
1410 return (0);
1411 }
1412
1413 /*********************************************************************
1414 *
1415 * This routine maps the mbufs to tx descriptors, allowing the
1416 * TX engine to transmit the packets.
1417 * - returns the number of descriptors used, or 0 on failure
1418 *
1419 **********************************************************************/
1420
1421 int
1422 ixgbe_encap(struct ix_txring *txr, struct mbuf *m_head)
1423 {
1424 struct ix_softc *sc = txr->sc;
1425 uint32_t olinfo_status = 0, cmd_type_len;
1426 int i, j, ntxc;
1427 int first, last = 0;
1428 bus_dmamap_t map;
1429 struct ixgbe_tx_buf *txbuf;
1430 union ixgbe_adv_tx_desc *txd = NULL;
1431
1432 /* Basic descriptor defines */
1433 cmd_type_len = (IXGBE_ADVTXD_DTYP_DATA |
1434 IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT);
1435
1436 /*
1437 * Important to capture the first descriptor
1438 * used because it will contain the index of
1439 * the one we tell the hardware to report back
1440 */
1441 first = txr->next_avail_desc;
1442 txbuf = &txr->tx_buffers[first];
1443 map = txbuf->map;
1444
1445 /*
1446 * Set the appropriate offload context;
1447 * this will become the first descriptor.
1448 */
1449 ntxc = ixgbe_tx_ctx_setup(txr, m_head, &cmd_type_len, &olinfo_status);
1450 if (ntxc == -1)
1451 goto xmit_fail;
1452
1453 /*
1454 * Map the packet for DMA.
1455 */
1456 switch (bus_dmamap_load_mbuf(txr->txdma.dma_tag, map,
1457 m_head, BUS_DMA_NOWAIT)) {
1458 case 0:
1459 break;
1460 case EFBIG:
1461 if (m_defrag(m_head, M_NOWAIT) == 0 &&
1462 bus_dmamap_load_mbuf(txr->txdma.dma_tag, map,
1463 m_head, BUS_DMA_NOWAIT) == 0)
1464 break;
1465 /* FALLTHROUGH */
1466 default:
1467 return (0);
1468 }
1469
1470 i = txr->next_avail_desc + ntxc;
1471 if (i >= sc->num_tx_desc)
1472 i -= sc->num_tx_desc;
1473
1474 for (j = 0; j < map->dm_nsegs; j++) {
1475 txd = &txr->tx_base[i];
1476
1477 txd->read.buffer_addr = htole64(map->dm_segs[j].ds_addr);
1478 txd->read.cmd_type_len = htole32(txr->txd_cmd |
1479 cmd_type_len | map->dm_segs[j].ds_len);
1480 txd->read.olinfo_status = htole32(olinfo_status);
1481 last = i; /* descriptor that will get completion IRQ */
1482
1483 if (++i == sc->num_tx_desc)
1484 i = 0;
1485 }
1486
1487 txd->read.cmd_type_len |=
1488 htole32(IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS);
1489
1490 bus_dmamap_sync(txr->txdma.dma_tag, map, 0, map->dm_mapsize,
1491 BUS_DMASYNC_PREWRITE);
1492
1493 /* Set the index of the descriptor that will be marked done */
1494 txbuf->m_head = m_head;
1495 txbuf->eop_index = last;
1496
1497 membar_producer();
1498
1499 txr->next_avail_desc = i;
1500
1501 return (ntxc + j);
1502
1503 xmit_fail:
1504 bus_dmamap_unload(txr->txdma.dma_tag, txbuf->map);
1505 return (0);
1506 }
1507
1508 void
1509 ixgbe_iff(struct ix_softc *sc)
1510 {
1511 struct ifnet *ifp = &sc->arpcom.ac_if;
1512 struct arpcom *ac = &sc->arpcom;
1513 uint32_t fctrl;
1514 uint8_t *mta;
1515 uint8_t *update_ptr;
1516 struct ether_multi *enm;
1517 struct ether_multistep step;
1518 int mcnt = 0;
1519
1520 IOCTL_DEBUGOUT("ixgbe_iff: begin");
1521
1522 mta = sc->mta;
1523 bzero(mta, sizeof(uint8_t) * IXGBE_ETH_LENGTH_OF_ADDRESS *
1524 MAX_NUM_MULTICAST_ADDRESSES);
1525
1526 fctrl = IXGBE_READ_REG(&sc->hw, IXGBE_FCTRL);
1527 fctrl &= ~(IXGBE_FCTRL_MPE | IXGBE_FCTRL_UPE);
1528 ifp->if_flags &= ~IFF_ALLMULTI;
1529
1530 if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0 ||
1531 ac->ac_multicnt > MAX_NUM_MULTICAST_ADDRESSES) {
1532 ifp->if_flags |= IFF_ALLMULTI;
1533 fctrl |= IXGBE_FCTRL_MPE;
1534 if (ifp->if_flags & IFF_PROMISC)
1535 fctrl |= IXGBE_FCTRL_UPE;
1536 } else {
1537 ETHER_FIRST_MULTI(step, &sc->arpcom, enm);
1538 while (enm != NULL) {
1539 bcopy(enm->enm_addrlo,
1540 &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
1541 IXGBE_ETH_LENGTH_OF_ADDRESS);
1542 mcnt++;
1543
1544 ETHER_NEXT_MULTI(step, enm);
1545 }
1546
1547 update_ptr = mta;
1548 sc->hw.mac.ops.update_mc_addr_list(&sc->hw, update_ptr, mcnt,
1549 ixgbe_mc_array_itr, TRUE);
1550 }
1551
1552 IXGBE_WRITE_REG(&sc->hw, IXGBE_FCTRL, fctrl);
1553 }
1554
1555 /*
1556 * This is an iterator function needed by the multicast shared code.
1557 * It simply feeds the shared code routine the addresses collected in
1558 * ixgbe_iff()'s array, one at a time.
1559 */
1560 uint8_t *
1561 ixgbe_mc_array_itr(struct ixgbe_hw *hw, uint8_t **update_ptr, uint32_t *vmdq)
1562 {
1563 uint8_t *addr = *update_ptr;
1564 uint8_t *newptr;
1565 *vmdq = 0;
1566
1567 newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
1568 *update_ptr = newptr;
1569 return addr;
1570 }
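
/*
 * Illustration only (not compiled): the shared code walks the flat
 * address array roughly as sketched below, calling the iterator once per
 * address.  This is not the actual ixgbe_update_mc_addr_list().
 */
#if 0
static void
ix_walk_mc_sketch(struct ixgbe_hw *hw, uint8_t *mta, uint32_t mcnt)
{
	uint8_t *update_ptr = mta;
	uint32_t vmdq, i;

	for (i = 0; i < mcnt; i++) {
		uint8_t *addr = ixgbe_mc_array_itr(hw, &update_ptr, &vmdq);
		/* ...hash "addr" into the multicast table array... */
	}
}
#endif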
1571
1572 void
1573 ixgbe_update_link_status(struct ix_softc *sc)
1574 {
1575 struct ifnet *ifp = &sc->arpcom.ac_if;
1576 int link_state = LINK_STATE_DOWN;
1577
1578 splassert(IPL_NET);
1579 KERNEL_ASSERT_LOCKED();
1580
1581 ixgbe_check_link(&sc->hw, &sc->link_speed, &sc->link_up, 0);
1582
1583 ifp->if_baudrate = 0;
1584 if (sc->link_up) {
1585 link_state = LINK_STATE_FULL_DUPLEX;
1586
1587 switch (sc->link_speed) {
1588 case IXGBE_LINK_SPEED_UNKNOWN:
1589 ifp->if_baudrate = 0;
1590 break;
1591 case IXGBE_LINK_SPEED_100_FULL:
1592 ifp->if_baudrate = IF_Mbps(100);
1593 break;
1594 case IXGBE_LINK_SPEED_1GB_FULL:
1595 ifp->if_baudrate = IF_Gbps(1);
1596 break;
1597 case IXGBE_LINK_SPEED_10GB_FULL:
1598 ifp->if_baudrate = IF_Gbps(10);
1599 break;
1600 }
1601
1602 /* Update any Flow Control changes */
1603 sc->hw.mac.ops.fc_enable(&sc->hw);
1604 }
1605 if (ifp->if_link_state != link_state) {
1606 ifp->if_link_state = link_state;
1607 if_link_state_change(ifp);
1608 }
1609 }
1610
1611
1612 /*********************************************************************
1613 *
1614 * This routine disables all traffic on the adapter by issuing a
1615 * global reset on the MAC and deallocates TX/RX buffers.
1616 *
1617 **********************************************************************/
1618
1619 void
1620 ixgbe_stop(void *arg)
1621 {
1622 struct ix_softc *sc = arg;
1623 struct ifnet *ifp = &sc->arpcom.ac_if;
1624 int i;
1625
1626 /* Tell the stack that the interface is no longer active */
1627 ifp->if_flags &= ~IFF_RUNNING;
1628
1629 #if NKSTAT > 0
1630 timeout_del(&sc->sc_kstat_tmo);
1631 #endif
1632 ifp->if_timer = 0;
1633
1634 INIT_DEBUGOUT("ixgbe_stop: begin\n");
1635 ixgbe_disable_intr(sc);
1636
1637 sc->hw.mac.ops.reset_hw(&sc->hw);
1638 sc->hw.adapter_stopped = FALSE;
1639 sc->hw.mac.ops.stop_adapter(&sc->hw);
1640 if (sc->hw.mac.type == ixgbe_mac_82599EB)
1641 sc->hw.mac.ops.stop_mac_link_on_d3(&sc->hw);
1642 /* Turn off the laser */
1643 if (sc->hw.mac.ops.disable_tx_laser)
1644 sc->hw.mac.ops.disable_tx_laser(&sc->hw);
1645
1646 /* reprogram the RAR[0] in case user changed it. */
1647 ixgbe_set_rar(&sc->hw, 0, sc->hw.mac.addr, 0, IXGBE_RAH_AV);
1648
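/*
 * The barriers below make sure any interrupt handler or ifq start
 * routine still running on another CPU has finished before the
 * rings are torn down.
 */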
1649 intr_barrier(sc->tag);
1650 for (i = 0; i < sc->num_queues; i++) {
1651 struct ifqueue *ifq = ifp->if_ifqs[i];
1652 ifq_barrier(ifq);
1653 ifq_clr_oactive(ifq);
1654
1655 if (sc->queues[i].tag != NULL)
1656 intr_barrier(sc->queues[i].tag);
1657 timeout_del(&sc->rx_rings[i].rx_refill);
1658 }
1659
1660 KASSERT((ifp->if_flags & IFF_RUNNING) == 0);
1661
1662 /* Should we really clear all structures on stop? */
1663 ixgbe_free_transmit_structures(sc);
1664 ixgbe_free_receive_structures(sc);
1665
1666 ixgbe_update_link_status(sc);
1667 }
1668
1669
1670 /*********************************************************************
1671 *
1672 * Determine hardware revision.
1673 *
1674 **********************************************************************/
1675 void
1676 ixgbe_identify_hardware(struct ix_softc *sc)
1677 {
1678 struct ixgbe_osdep *os = &sc->osdep;
1679 struct pci_attach_args *pa = &os->os_pa;
1680 uint32_t reg;
1681
1682 /* Save off the information about this board */
1683 sc->hw.vendor_id = PCI_VENDOR(pa->pa_id);
1684 sc->hw.device_id = PCI_PRODUCT(pa->pa_id);
1685
1686 reg = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG);
1687 sc->hw.revision_id = PCI_REVISION(reg);
1688
1689 reg = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
1690 sc->hw.subsystem_vendor_id = PCI_VENDOR(reg);
1691 sc->hw.subsystem_device_id = PCI_PRODUCT(reg);
1692
1693 /* We need this here to set the num_segs below */
1694 ixgbe_set_mac_type(&sc->hw);
1695
1696 /* Pick up the 82599 and VF settings */
1697 if (sc->hw.mac.type != ixgbe_mac_82598EB)
1698 sc->hw.phy.smart_speed = ixgbe_smart_speed;
1699 sc->num_segs = IXGBE_82599_SCATTER;
1700 }
1701
1702 /*********************************************************************
1703 *
1704 * Setup the Legacy or MSI Interrupt handler
1705 *
1706 **********************************************************************/
1707 int
1708 ixgbe_allocate_legacy(struct ix_softc *sc)
1709 {
1710 struct ixgbe_osdep *os = &sc->osdep;
1711 struct pci_attach_args *pa = &os->os_pa;
1712 const char *intrstr = NULL;
1713 pci_chipset_tag_t pc = pa->pa_pc;
1714 pci_intr_handle_t ih;
1715
1716 /* We allocate a single interrupt resource */
1717 if (pci_intr_map_msi(pa, &ih) != 0 &&
1718 pci_intr_map(pa, &ih) != 0) {
1719 printf(": couldn't map interrupt\n");
1720 return (ENXIO);
1721 }
1722
1723 #if 0
1724 /* XXX */
1725 /* Tasklets for Link, SFP and Multispeed Fiber */
1726 TASK_INIT(&sc->link_task, 0, ixgbe_handle_link, sc);
1727 TASK_INIT(&sc->mod_task, 0, ixgbe_handle_mod, sc);
1728 TASK_INIT(&sc->msf_task, 0, ixgbe_handle_msf, sc);
1729 #endif
1730
1731 intrstr = pci_intr_string(pc, ih);
1732 sc->tag = pci_intr_establish(pc, ih, IPL_NET | IPL_MPSAFE,
1733 ixgbe_legacy_intr, sc, sc->dev.dv_xname);
1734 if (sc->tag == NULL) {
1735 printf(": couldn't establish interrupt");
1736 if (intrstr != NULL)
1737 printf(" at %s", intrstr);
1738 printf("\n");
1739 return (ENXIO);
1740 }
1741 printf(": %s", intrstr);
1742
1743 /* For simplicity in the handlers */
1744 sc->que_mask = IXGBE_EIMS_ENABLE_MASK;
1745
1746 return (0);
1747 }
1748
1749 /*********************************************************************
1750 *
1751 * Setup the MSI-X Interrupt handlers
1752 *
1753 **********************************************************************/
1754 int
1755 ixgbe_allocate_msix(struct ix_softc *sc)
1756 {
1757 struct ixgbe_osdep *os = &sc->osdep;
1758 struct pci_attach_args *pa = &os->os_pa;
1759 int i = 0, error = 0;
1760 struct ix_queue *que;
1761 pci_intr_handle_t ih;
1762
1763 for (i = 0, que = sc->queues; i < sc->num_queues; i++, que++) {
1764 if (pci_intr_map_msix(pa, i, &ih)) {
1765 printf("ixgbe_allocate_msix: "
1766 "pci_intr_map_msix vec %d failed\n", i);
1767 error = ENOMEM;
1768 goto fail;
1769 }
1770
1771 que->tag = pci_intr_establish_cpu(pa->pa_pc, ih,
1772 IPL_NET | IPL_MPSAFE, intrmap_cpu(sc->sc_intrmap, i),
1773 ixgbe_queue_intr, que, que->name);
1774 if (que->tag == NULL) {
1775 printf("ixgbe_allocate_msix: "
1776 "pci_intr_establish vec %d failed\n", i);
1777 error = ENOMEM;
1778 goto fail;
1779 }
1780
1781 que->msix = i;
1782 }
1783
1784 /* The last MSI-X vector handles link status/control */
1785 if (pci_intr_map_msix(pa, i, &ih)) {
1786 printf("ixgbe_allocate_msix: "
1787 "pci_intr_map_msix link vector failed\n");
1788 error = ENOMEM;
1789 goto fail;
1790 }
1791
1792 sc->tag = pci_intr_establish(pa->pa_pc, ih, IPL_NET | IPL_MPSAFE,
1793 ixgbe_link_intr, sc, sc->dev.dv_xname);
1794 if (sc->tag == NULL) {
1795 printf("ixgbe_allocate_msix: "
1796 "pci_intr_establish link vector failed\n");
1797 error = ENOMEM;
1798 goto fail;
1799 }
1800 sc->linkvec = i;
1801 printf(", %s, %d queue%s", pci_intr_string(pa->pa_pc, ih),
1802 i, (i > 1) ? "s" : "");
1803
1804 return (0);
1805 fail:
1806 for (que = sc->queues; i > 0; i--, que++) {
1807 if (que->tag == NULL)
1808 continue;
1809 pci_intr_disestablish(pa->pa_pc, que->tag);
1810 que->tag = NULL;
1811 }
1812
1813 return (error);
1814 }
1815
1816 void
1817 ixgbe_setup_msix(struct ix_softc *sc)
1818 {
1819 struct ixgbe_osdep *os = &sc->osdep;
1820 struct pci_attach_args *pa = &os->os_pa;
1821 int nmsix;
1822 unsigned int maxq;
1823
1824 if (!ixgbe_enable_msix)
1825 return;
1826
1827 nmsix = pci_intr_msix_count(pa);
1828 if (nmsix <= 1)
1829 return;
1830
1831 /* give one vector to events */
1832 nmsix--;
1833
1834 /* XXX the number of queues is limited to what we can keep stats on */
1835 maxq = (sc->hw.mac.type == ixgbe_mac_82598EB) ? 8 : 16;
1836
1837 sc->sc_intrmap = intrmap_create(&sc->dev, nmsix, maxq, 0);
1838 sc->num_queues = intrmap_count(sc->sc_intrmap);
1839 }
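
/*
 * Illustration (a sketch, not driver code): together with
 * ixgbe_allocate_msix() above, this produces a vector layout of
 *
 *	vector 0 .. num_queues-1	queue interrupts (one per ix_queue)
 *	vector num_queues		link/admin events (sc->linkvec)
 *
 * e.g. a device advertising 16 MSI-X vectors on an 8-CPU machine will
 * typically end up with 8 queue vectors plus the link vector, since
 * intrmap(9) limits the queue count to the available CPUs.
 */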
1840
1841 int
1842 ixgbe_allocate_pci_resources(struct ix_softc *sc)
1843 {
1844 struct ixgbe_osdep *os = &sc->osdep;
1845 struct pci_attach_args *pa = &os->os_pa;
1846 int val;
1847
1848 val = pci_conf_read(pa->pa_pc, pa->pa_tag, PCIR_BAR(0));
1849 if (PCI_MAPREG_TYPE(val) != PCI_MAPREG_TYPE_MEM) {
1850 printf(": mmba is not mem space\n");
1851 return (ENXIO);
1852 }
1853
1854 if (pci_mapreg_map(pa, PCIR_BAR(0), PCI_MAPREG_MEM_TYPE(val), 0,
1855 &os->os_memt, &os->os_memh, &os->os_membase, &os->os_memsize, 0)) {
1856 printf(": cannot find mem space\n");
1857 return (ENXIO);
1858 }
1859 sc->hw.hw_addr = (uint8_t *)os->os_membase;
1860
1861 /* Legacy defaults */
1862 sc->num_queues = 1;
1863 sc->hw.back = os;
1864
1865 /* Now set up MSI-X; this determines the number of vectors and queues we use. */
1866 ixgbe_setup_msix(sc);
1867
1868 return (0);
1869 }
1870
1871 void
1872 ixgbe_free_pci_resources(struct ix_softc * sc)
1873 {
1874 struct ixgbe_osdep *os = &sc->osdep;
1875 struct pci_attach_args *pa = &os->os_pa;
1876 struct ix_queue *que = sc->queues;
1877 int i;
1878
1879 /* Release all msix queue resources: */
1880 for (i = 0; i < sc->num_queues; i++, que++) {
1881 if (que->tag)
1882 pci_intr_disestablish(pa->pa_pc, que->tag);
1883 que->tag = NULL;
1884 }
1885
1886 if (sc->tag)
1887 pci_intr_disestablish(pa->pa_pc, sc->tag);
1888 sc->tag = NULL;
1889 if (os->os_membase != 0)
1890 bus_space_unmap(os->os_memt, os->os_memh, os->os_memsize);
1891 os->os_membase = 0;
1892 }
1893
1894 /*********************************************************************
1895 *
1896 * Setup networking device structure and register an interface.
1897 *
1898 **********************************************************************/
1899 void
1900 ixgbe_setup_interface(struct ix_softc *sc)
1901 {
1902 struct ifnet *ifp = &sc->arpcom.ac_if;
1903 int i;
1904
1905 strlcpy(ifp->if_xname, sc->dev.dv_xname, IFNAMSIZ);
1906 ifp->if_softc = sc;
1907 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1908 ifp->if_xflags = IFXF_MPSAFE;
1909 ifp->if_ioctl = ixgbe_ioctl;
1910 ifp->if_qstart = ixgbe_start;
1911 ifp->if_timer = 0;
1912 ifp->if_watchdog = ixgbe_watchdog;
1913 ifp->if_hardmtu = IXGBE_MAX_FRAME_SIZE -
1914 ETHER_HDR_LEN - ETHER_CRC_LEN;
1915 ifq_init_maxlen(&ifp->if_snd, sc->num_tx_desc - 1);
1916
1917 ifp->if_capabilities = IFCAP_VLAN_MTU;
1918
1919 #if NVLAN > 0
1920 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
1921 #endif
1922
1923 ifp->if_capabilities |= IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;
1924 ifp->if_capabilities |= IFCAP_CSUM_TCPv6 | IFCAP_CSUM_UDPv6;
1925 ifp->if_capabilities |= IFCAP_CSUM_IPv4;
1926
1927 ifp->if_capabilities |= IFCAP_TSOv4 | IFCAP_TSOv6;
1928 if (sc->hw.mac.type != ixgbe_mac_82598EB) {
1929 #ifndef __sparc64__
1930 ifp->if_xflags |= IFXF_LRO;
1931 #endif
1932 ifp->if_capabilities |= IFCAP_LRO;
1933 }
1934
1935 /*
1936 * Specify the media types supported by this sc and register
1937 * callbacks to update media and link information
1938 */
1939 ifmedia_init(&sc->media, IFM_IMASK, ixgbe_media_change,
1940 ixgbe_media_status);
1941 ixgbe_add_media_types(sc);
1942 ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);
1943
1944 if_attach(ifp);
1945 ether_ifattach(ifp);
1946
1947 if_attach_queues(ifp, sc->num_queues);
1948 if_attach_iqueues(ifp, sc->num_queues);
1949 for (i = 0; i < sc->num_queues; i++) {
1950 struct ifqueue *ifq = ifp->if_ifqs[i];
1951 struct ifiqueue *ifiq = ifp->if_iqs[i];
1952 struct ix_txring *txr = &sc->tx_rings[i];
1953 struct ix_rxring *rxr = &sc->rx_rings[i];
1954
1955 ifq->ifq_softc = txr;
1956 txr->ifq = ifq;
1957
1958 ifiq->ifiq_softc = rxr;
1959 rxr->ifiq = ifiq;
1960
1961 #if NKSTAT > 0
1962 ix_txq_kstats(sc, txr);
1963 ix_rxq_kstats(sc, rxr);
1964 #endif
1965 }
1966
1967 sc->max_frame_size = IXGBE_MAX_FRAME_SIZE;
1968 }
1969
1970 void
1971 ixgbe_add_media_types(struct ix_softc *sc)
1972 {
1973 struct ixgbe_hw *hw = &sc->hw;
1974 uint64_t layer;
1975
1976 sc->phy_layer = hw->mac.ops.get_supported_physical_layer(hw);
1977 layer = sc->phy_layer;
1978
1979 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T)
1980 ifmedia_add(&sc->media, IFM_ETHER | IFM_10G_T, 0, NULL);
1981 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T)
1982 ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_T, 0, NULL);
1983 if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
1984 ifmedia_add(&sc->media, IFM_ETHER | IFM_100_TX, 0, NULL);
1985 if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
1986 layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
1987 ifmedia_add(&sc->media, IFM_ETHER | IFM_10G_SFP_CU, 0, NULL);
1988 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) {
1989 ifmedia_add(&sc->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
1990 if (hw->phy.multispeed_fiber)
1991 ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_LX, 0,
1992 NULL);
1993 }
1994 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
1995 ifmedia_add(&sc->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
1996 if (hw->phy.multispeed_fiber)
1997 ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_SX, 0,
1998 NULL);
1999 } else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
2000 ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
2001 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
2002 ifmedia_add(&sc->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
2003 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
2004 ifmedia_add(&sc->media, IFM_ETHER | IFM_10G_KR, 0, NULL);
2005 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4)
2006 ifmedia_add(&sc->media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
2007 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
2008 ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_KX, 0, NULL);
2009 if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX)
2010 ifmedia_add(&sc->media, IFM_ETHER | IFM_2500_KX, 0, NULL);
2011
2012 if (hw->device_id == IXGBE_DEV_ID_82598AT) {
2013 ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_T | IFM_FDX, 0,
2014 NULL);
2015 ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_T, 0, NULL);
2016 }
2017
2018 ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
2019 }
2020
2021 void
2022 ixgbe_config_link(struct ix_softc *sc)
2023 {
2024 uint32_t autoneg, err = 0;
2025 bool negotiate;
2026
2027 if (ixgbe_is_sfp(&sc->hw)) {
2028 if (sc->hw.phy.multispeed_fiber) {
2029 sc->hw.mac.ops.setup_sfp(&sc->hw);
2030 if (sc->hw.mac.ops.enable_tx_laser)
2031 sc->hw.mac.ops.enable_tx_laser(&sc->hw);
2032 ixgbe_handle_msf(sc);
2033 } else
2034 ixgbe_handle_mod(sc);
2035 } else {
2036 if (sc->hw.mac.ops.check_link)
2037 err = sc->hw.mac.ops.check_link(&sc->hw, &autoneg,
2038 &sc->link_up, FALSE);
2039 if (err)
2040 return;
2041 autoneg = sc->hw.phy.autoneg_advertised;
2042 if ((!autoneg) && (sc->hw.mac.ops.get_link_capabilities))
2043 err = sc->hw.mac.ops.get_link_capabilities(&sc->hw,
2044 &autoneg, &negotiate);
2045 if (err)
2046 return;
2047 if (sc->hw.mac.ops.setup_link)
2048 sc->hw.mac.ops.setup_link(&sc->hw,
2049 autoneg, sc->link_up);
2050 }
2051 }
2052
2053 /********************************************************************
2054 * Manage DMA'able memory.
2055 *******************************************************************/
2056 int
2057 ixgbe_dma_malloc(struct ix_softc *sc, bus_size_t size,
2058 struct ixgbe_dma_alloc *dma, int mapflags)
2059 {
2060 struct ifnet *ifp = &sc->arpcom.ac_if;
2061 struct ixgbe_osdep *os = &sc->osdep;
2062 int r;
2063
2064 dma->dma_tag = os->os_pa.pa_dmat;
2065 r = bus_dmamap_create(dma->dma_tag, size, 1,
2066 size, 0, BUS_DMA_NOWAIT, &dma->dma_map);
2067 if (r != 0) {
2068 printf("%s: ixgbe_dma_malloc: bus_dmamap_create failed; "
2069 "error %u\n", ifp->if_xname, r);
2070 goto fail_0;
2071 }
2072
2073 r = bus_dmamem_alloc(dma->dma_tag, size, PAGE_SIZE, 0, &dma->dma_seg,
2074 1, &dma->dma_nseg, BUS_DMA_NOWAIT);
2075 if (r != 0) {
2076 printf("%s: ixgbe_dma_malloc: bus_dmamem_alloc failed; "
2077 "error %u\n", ifp->if_xname, r);
2078 goto fail_1;
2079 }
2080
2081 r = bus_dmamem_map(dma->dma_tag, &dma->dma_seg, dma->dma_nseg, size,
2082 &dma->dma_vaddr, BUS_DMA_NOWAIT);
2083 if (r != 0) {
2084 printf("%s: ixgbe_dma_malloc: bus_dmamem_map failed; "
2085 "error %u\n", ifp->if_xname, r);
2086 goto fail_2;
2087 }
2088
2089 r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
2090 size, NULL, mapflags | BUS_DMA_NOWAIT);
2091 if (r != 0) {
2092 printf("%s: ixgbe_dma_malloc: bus_dmamap_load failed; "
2093 "error %u\n", ifp->if_xname, r);
2094 goto fail_3;
2095 }
2096
2097 dma->dma_size = size;
2098 return (0);
2099 fail_3:
2100 bus_dmamem_unmap(dma->dma_tag, dma->dma_vaddr, size);
2101 fail_2:
2102 bus_dmamem_free(dma->dma_tag, &dma->dma_seg, dma->dma_nseg);
2103 fail_1:
2104 bus_dmamap_destroy(dma->dma_tag, dma->dma_map);
2105 fail_0:
2106 dma->dma_map = NULL;
2107 dma->dma_tag = NULL;
2108 return (r);
2109 }
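
/*
 * Usage sketch (illustrative only; "d", "ring" and "hwaddr" are assumed
 * names): callers pair ixgbe_dma_malloc() with ixgbe_dma_free() and reach
 * the memory through the returned handle, as ixgbe_allocate_queues() below
 * does for the descriptor rings:
 *
 *	struct ixgbe_dma_alloc d;
 *
 *	if (ixgbe_dma_malloc(sc, size, &d, BUS_DMA_NOWAIT) != 0)
 *		return (ENOMEM);
 *	ring = (union ixgbe_adv_tx_desc *)d.dma_vaddr;
 *	hwaddr = d.dma_map->dm_segs[0].ds_addr;
 *	...
 *	ixgbe_dma_free(sc, &d);
 */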
2110
2111 void
2112 ixgbe_dma_free(struct ix_softc *sc, struct ixgbe_dma_alloc *dma)
2113 {
2114 if (dma->dma_tag == NULL)
2115 return;
2116
2117 if (dma->dma_map != NULL) {
2118 bus_dmamap_sync(dma->dma_tag, dma->dma_map, 0,
2119 dma->dma_map->dm_mapsize,
2120 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2121 bus_dmamap_unload(dma->dma_tag, dma->dma_map);
2122 bus_dmamem_unmap(dma->dma_tag, dma->dma_vaddr, dma->dma_size);
2123 bus_dmamem_free(dma->dma_tag, &dma->dma_seg, dma->dma_nseg);
2124 bus_dmamap_destroy(dma->dma_tag, dma->dma_map);
2125 dma->dma_map = NULL;
2126 }
2127 }
2128
2129
2130 /*********************************************************************
2131 *
2132 * Allocate memory for the transmit and receive rings, and then
2133 * the descriptors associated with each, called only once at attach.
2134 *
2135 **********************************************************************/
2136 int
2137 ixgbe_allocate_queues(struct ix_softc *sc)
2138 {
2139 struct ifnet *ifp = &sc->arpcom.ac_if;
2140 struct ix_queue *que;
2141 struct ix_txring *txr;
2142 struct ix_rxring *rxr;
2143 int rsize, tsize;
2144 int txconf = 0, rxconf = 0, i;
2145
2146 /* First allocate the top level queue structs */
2147 if (!(sc->queues = mallocarray(sc->num_queues,
2148 sizeof(struct ix_queue), M_DEVBUF, M_NOWAIT | M_ZERO))) {
2149 printf("%s: Unable to allocate queue memory\n", ifp->if_xname);
2150 goto fail;
2151 }
2152
2153 /* Then allocate the TX ring struct memory */
2154 if (!(sc->tx_rings = mallocarray(sc->num_queues,
2155 sizeof(struct ix_txring), M_DEVBUF, M_NOWAIT | M_ZERO))) {
2156 printf("%s: Unable to allocate TX ring memory\n", ifp->if_xname);
2157 goto fail;
2158 }
2159
2160 /* Next allocate the RX */
2161 if (!(sc->rx_rings = mallocarray(sc->num_queues,
2162 sizeof(struct ix_rxring), M_DEVBUF, M_NOWAIT | M_ZERO))) {
2163 printf("%s: Unable to allocate RX ring memory\n", ifp->if_xname);
2164 goto rx_fail;
2165 }
2166
2167 /* For the ring itself */
2168 tsize = roundup2(sc->num_tx_desc *
2169 sizeof(union ixgbe_adv_tx_desc), DBA_ALIGN);
2170
2171 /*
2172 * Now set up the TX queues, txconf is needed to handle the
2173 * possibility that things fail midcourse and we need to
2174 * undo memory gracefully
2175 */
2176 for (i = 0; i < sc->num_queues; i++, txconf++) {
2177 /* Set up some basics */
2178 txr = &sc->tx_rings[i];
2179 txr->sc = sc;
2180 txr->me = i;
2181
2182 if (ixgbe_dma_malloc(sc, tsize,
2183 &txr->txdma, BUS_DMA_NOWAIT)) {
2184 printf("%s: Unable to allocate TX Descriptor memory\n",
2185 ifp->if_xname);
2186 goto err_tx_desc;
2187 }
2188 txr->tx_base = (union ixgbe_adv_tx_desc *)txr->txdma.dma_vaddr;
2189 bzero((void *)txr->tx_base, tsize);
2190 }
2191
2192 /*
2193 * Next the RX queues...
2194 */
2195 rsize = roundup2(sc->num_rx_desc *
2196 sizeof(union ixgbe_adv_rx_desc), 4096);
2197 for (i = 0; i < sc->num_queues; i++, rxconf++) {
2198 rxr = &sc->rx_rings[i];
2199 /* Set up some basics */
2200 rxr->sc = sc;
2201 rxr->me = i;
2202 timeout_set(&rxr->rx_refill, ixgbe_rxrefill, rxr);
2203
2204 if (ixgbe_dma_malloc(sc, rsize,
2205 &rxr->rxdma, BUS_DMA_NOWAIT)) {
2206 printf("%s: Unable to allocate RxDescriptor memory\n",
2207 ifp->if_xname);
2208 goto err_rx_desc;
2209 }
2210 rxr->rx_base = (union ixgbe_adv_rx_desc *)rxr->rxdma.dma_vaddr;
2211 bzero((void *)rxr->rx_base, rsize);
2212 }
2213
2214 /*
2215 * Finally set up the queue holding structs
2216 */
2217 for (i = 0; i < sc->num_queues; i++) {
2218 que = &sc->queues[i];
2219 que->sc = sc;
2220 que->txr = &sc->tx_rings[i];
2221 que->rxr = &sc->rx_rings[i];
2222 snprintf(que->name, sizeof(que->name), "%s:%d",
2223 sc->dev.dv_xname, i);
2224 }
2225
2226 return (0);
2227
2228 err_rx_desc:
2229 for (rxr = sc->rx_rings; rxconf > 0; rxr++, rxconf--)
2230 ixgbe_dma_free(sc, &rxr->rxdma);
2231 err_tx_desc:
2232 for (txr = sc->tx_rings; txconf > 0; txr++, txconf--)
2233 ixgbe_dma_free(sc, &txr->txdma);
2234 free(sc->rx_rings, M_DEVBUF, sc->num_queues * sizeof(struct ix_rxring));
2235 sc->rx_rings = NULL;
2236 rx_fail:
2237 free(sc->tx_rings, M_DEVBUF, sc->num_queues * sizeof(struct ix_txring));
2238 sc->tx_rings = NULL;
2239 fail:
2240 return (ENOMEM);
2241 }
2242
2243 /*********************************************************************
2244 *
2245 * Allocate memory for tx_buffer structures. The tx_buffer stores all
2246 * the information needed to transmit a packet on the wire. This is
2247 * called only once at attach, setup is done every reset.
2248 *
2249 **********************************************************************/
2250 int
2251 ixgbe_allocate_transmit_buffers(struct ix_txring *txr)
2252 {
2253 struct ix_softc *sc = txr->sc;
2254 struct ifnet *ifp = &sc->arpcom.ac_if;
2255 struct ixgbe_tx_buf *txbuf;
2256 int error, i;
2257
2258 if (!(txr->tx_buffers = mallocarray(sc->num_tx_desc,
2259 sizeof(struct ixgbe_tx_buf), M_DEVBUF, M_NOWAIT | M_ZERO))) {
2260 printf("%s: Unable to allocate tx_buffer memory\n",
2261 ifp->if_xname);
2262 error = ENOMEM;
2263 goto fail;
2264 }
2265 txr->txtag = txr->txdma.dma_tag;
2266
2267 /* Create the descriptor buffer dma maps */
2268 for (i = 0; i < sc->num_tx_desc; i++) {
2269 txbuf = &txr->tx_buffers[i];
2270 error = bus_dmamap_create(txr->txdma.dma_tag, MAXMCLBYTES,
2271 sc->num_segs, PAGE_SIZE, 0,
2272 BUS_DMA_NOWAIT, &txbuf->map);
2273
2274 if (error != 0) {
2275 printf("%s: Unable to create TX DMA map\n",
2276 ifp->if_xname);
2277 goto fail;
2278 }
2279 }
2280
2281 return 0;
2282 fail:
2283 return (error);
2284 }
2285
2286 /*********************************************************************
2287 *
2288 * Initialize a transmit ring.
2289 *
2290 **********************************************************************/
2291 int
2292 ixgbe_setup_transmit_ring(struct ix_txring *txr)
2293 {
2294 struct ix_softc *sc = txr->sc;
2295 int error;
2296
2297 /* Now allocate transmit buffers for the ring */
2298 if ((error = ixgbe_allocate_transmit_buffers(txr)) != 0)
2299 return (error);
2300
2301 /* Clear the old ring contents */
2302 bzero((void *)txr->tx_base,
2303 (sizeof(union ixgbe_adv_tx_desc)) * sc->num_tx_desc);
2304
2305 /* Reset indices */
2306 txr->next_avail_desc = 0;
2307 txr->next_to_clean = 0;
2308
2309 bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
2310 0, txr->txdma.dma_map->dm_mapsize,
2311 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2312
2313 return (0);
2314 }
2315
2316 /*********************************************************************
2317 *
2318 * Initialize all transmit rings.
2319 *
2320 **********************************************************************/
2321 int
2322 ixgbe_setup_transmit_structures(struct ix_softc *sc)
2323 {
2324 struct ix_txring *txr = sc->tx_rings;
2325 int i, error;
2326
2327 for (i = 0; i < sc->num_queues; i++, txr++) {
2328 if ((error = ixgbe_setup_transmit_ring(txr)) != 0)
2329 goto fail;
2330 }
2331
2332 return (0);
2333 fail:
2334 ixgbe_free_transmit_structures(sc);
2335 return (error);
2336 }
2337
2338 /*********************************************************************
2339 *
2340 * Enable transmit unit.
2341 *
2342 **********************************************************************/
2343 void
2344 ixgbe_initialize_transmit_units(struct ix_softc *sc)
2345 {
2346 struct ifnet *ifp = &sc->arpcom.ac_if;
2347 struct ix_txring *txr;
2348 struct ixgbe_hw *hw = &sc->hw;
2349 int i;
2350 uint64_t tdba;
2351 uint32_t txctrl;
2352 uint32_t hlreg;
2353
2354 /* Setup the Base and Length of the Tx Descriptor Ring */
2355
2356 for (i = 0; i < sc->num_queues; i++) {
2357 txr = &sc->tx_rings[i];
2358
2359 /* Setup descriptor base address */
2360 tdba = txr->txdma.dma_map->dm_segs[0].ds_addr;
2361 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(i),
2362 (tdba & 0x00000000ffffffffULL));
2363 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(i), (tdba >> 32));
2364 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(i),
2365 sc->num_tx_desc * sizeof(struct ixgbe_legacy_tx_desc));
2366
2367 /* Set Tx Tail register */
2368 txr->tail = IXGBE_TDT(i);
2369
2370 /* Setup the HW Tx Head and Tail descriptor pointers */
2371 IXGBE_WRITE_REG(hw, IXGBE_TDH(i), 0);
2372 IXGBE_WRITE_REG(hw, txr->tail, 0);
2373
2374 /* Setup Transmit Descriptor Cmd Settings */
2375 txr->txd_cmd = IXGBE_TXD_CMD_IFCS;
2376 txr->queue_status = IXGBE_QUEUE_IDLE;
2377 txr->watchdog_timer = 0;
2378
2379 /* Disable Head Writeback */
2380 switch (hw->mac.type) {
2381 case ixgbe_mac_82598EB:
2382 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
2383 break;
2384 case ixgbe_mac_82599EB:
2385 case ixgbe_mac_X540:
2386 default:
2387 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
2388 break;
2389 }
2390 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
2391 switch (hw->mac.type) {
2392 case ixgbe_mac_82598EB:
2393 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), txctrl);
2394 break;
2395 case ixgbe_mac_82599EB:
2396 case ixgbe_mac_X540:
2397 default:
2398 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), txctrl);
2399 break;
2400 }
2401 }
2402 ifp->if_timer = 0;
2403
2404 if (hw->mac.type != ixgbe_mac_82598EB) {
2405 uint32_t dmatxctl, rttdcs;
2406 dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
2407 dmatxctl |= IXGBE_DMATXCTL_TE;
2408 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
2409 /* Disable arbiter to set MTQC */
2410 rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
2411 rttdcs |= IXGBE_RTTDCS_ARBDIS;
2412 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
2413 IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB);
2414 rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
2415 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
2416 }
2417
2418 /* Enable TCP/UDP padding when using TSO */
2419 hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
2420 hlreg |= IXGBE_HLREG0_TXPADEN;
2421 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
2422 }
2423
2424 /*********************************************************************
2425 *
2426 * Free all transmit rings.
2427 *
2428 **********************************************************************/
2429 void
2430 ixgbe_free_transmit_structures(struct ix_softc *sc)
2431 {
2432 struct ix_txring *txr = sc->tx_rings;
2433 int i;
2434
2435 for (i = 0; i < sc->num_queues; i++, txr++)
2436 ixgbe_free_transmit_buffers(txr);
2437 }
2438
2439 /*********************************************************************
2440 *
2441 * Free transmit ring related data structures.
2442 *
2443 **********************************************************************/
2444 void
2445 ixgbe_free_transmit_buffers(struct ix_txring *txr)
2446 {
2447 struct ix_softc *sc = txr->sc;
2448 struct ixgbe_tx_buf *tx_buffer;
2449 int i;
2450
2451 INIT_DEBUGOUT("free_transmit_ring: begin");
2452
2453 if (txr->tx_buffers == NULL)
2454 return;
2455
2456 tx_buffer = txr->tx_buffers;
2457 for (i = 0; i < sc->num_tx_desc; i++, tx_buffer++) {
2458 if (tx_buffer->map != NULL && tx_buffer->map->dm_nsegs > 0) {
2459 bus_dmamap_sync(txr->txdma.dma_tag, tx_buffer->map,
2460 0, tx_buffer->map->dm_mapsize,
2461 BUS_DMASYNC_POSTWRITE);
2462 bus_dmamap_unload(txr->txdma.dma_tag,
2463 tx_buffer->map);
2464 }
2465 if (tx_buffer->m_head != NULL) {
2466 m_freem(tx_buffer->m_head);
2467 tx_buffer->m_head = NULL;
2468 }
2469 if (tx_buffer->map != NULL) {
2470 bus_dmamap_destroy(txr->txdma.dma_tag,
2471 tx_buffer->map);
2472 tx_buffer->map = NULL;
2473 }
2474 }
2475
2476 if (txr->tx_buffers != NULL)
2477 free(txr->tx_buffers, M_DEVBUF,
2478 sc->num_tx_desc * sizeof(struct ixgbe_tx_buf));
2479 txr->tx_buffers = NULL;
2480 txr->txtag = NULL;
2481 }
2482
2483 /*********************************************************************
2484 *
2485 * Advanced Context Descriptor setup for VLAN or CSUM
2486 *
2487 **********************************************************************/
2488
2489 static inline int
2490 ixgbe_tx_offload(struct mbuf *mp, uint32_t *vlan_macip_lens,
2491 uint32_t *type_tucmd_mlhl, uint32_t *olinfo_status, uint32_t *cmd_type_len,
2492 uint32_t *mss_l4len_idx)
2493 {
2494 struct ether_extracted ext;
2495 int offload = 0;
2496
2497 ether_extract_headers(mp, &ext);
2498
2499 *vlan_macip_lens |= (sizeof(*ext.eh) << IXGBE_ADVTXD_MACLEN_SHIFT);
2500
2501 if (ext.ip4) {
2502 if (ISSET(mp->m_pkthdr.csum_flags, M_IPV4_CSUM_OUT)) {
2503 *olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8;
2504 offload = 1;
2505 }
2506
2507 *type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
2508 #ifdef INET6
2509 } else if (ext.ip6) {
2510 *type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6;
2511 #endif
2512 } else {
2513 if (mp->m_pkthdr.csum_flags & M_TCP_TSO)
2514 tcpstat_inc(tcps_outbadtso);
2515 return offload;
2516 }
2517
2518 *vlan_macip_lens |= ext.iphlen;
2519
2520 if (ext.tcp) {
2521 *type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
2522 if (ISSET(mp->m_pkthdr.csum_flags, M_TCP_CSUM_OUT)) {
2523 *olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
2524 offload = 1;
2525 }
2526 } else if (ext.udp) {
2527 *type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP;
2528 if (ISSET(mp->m_pkthdr.csum_flags, M_UDP_CSUM_OUT)) {
2529 *olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
2530 offload = 1;
2531 }
2532 }
2533
2534 if (mp->m_pkthdr.csum_flags & M_TCP_TSO) {
2535 if (ext.tcp && mp->m_pkthdr.ph_mss > 0) {
2536 uint32_t hdrlen, thlen, paylen, outlen;
2537
2538 thlen = ext.tcphlen;
2539
2540 outlen = mp->m_pkthdr.ph_mss;
2541 *mss_l4len_idx |= outlen << IXGBE_ADVTXD_MSS_SHIFT;
2542 *mss_l4len_idx |= thlen << IXGBE_ADVTXD_L4LEN_SHIFT;
2543
2544 hdrlen = sizeof(*ext.eh) + ext.iphlen + thlen;
2545 paylen = mp->m_pkthdr.len - hdrlen;
2546 CLR(*olinfo_status, IXGBE_ADVTXD_PAYLEN_MASK
2547 << IXGBE_ADVTXD_PAYLEN_SHIFT);
2548 *olinfo_status |= paylen << IXGBE_ADVTXD_PAYLEN_SHIFT;
2549
2550 *cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
2551 offload = 1;
2552
2553 tcpstat_add(tcps_outpkttso,
2554 (paylen + outlen - 1) / outlen);
2555 } else
2556 tcpstat_inc(tcps_outbadtso);
2557 }
2558
2559 return offload;
2560 }
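
/*
 * Worked example (illustrative, values assumed): a TCP/IPv4 TSO packet
 * with a 14-byte Ethernet header, 20-byte IP header, 20-byte TCP header
 * and ph_mss = 1460 leaves the function above with
 *
 *	mss_l4len_idx = (1460 << IXGBE_ADVTXD_MSS_SHIFT) |
 *	    (20 << IXGBE_ADVTXD_L4LEN_SHIFT);
 *	paylen = m_pkthdr.len - (14 + 20 + 20);
 *	*cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
 *
 * so the hardware segments paylen bytes of TCP payload into MSS-sized
 * frames and fixes up the IP and TCP headers of each one.
 */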
2561
2562 static int
2563 ixgbe_tx_ctx_setup(struct ix_txring *txr, struct mbuf *mp,
2564 uint32_t *cmd_type_len, uint32_t *olinfo_status)
2565 {
2566 struct ixgbe_adv_tx_context_desc *TXD;
2567 struct ixgbe_tx_buf *tx_buffer;
2568 uint32_t vlan_macip_lens = 0, type_tucmd_mlhl = 0;
2569 uint32_t mss_l4len_idx = 0;
2570 int ctxd = txr->next_avail_desc;
2571 int offload = 0;
2572
2573 /* Indicate the whole packet as payload when not doing TSO */
2574 *olinfo_status |= mp->m_pkthdr.len << IXGBE_ADVTXD_PAYLEN_SHIFT;
2575
2576 #if NVLAN > 0
2577 if (ISSET(mp->m_flags, M_VLANTAG)) {
2578 uint32_t vtag = mp->m_pkthdr.ether_vtag;
2579 vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
2580 *cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;
2581 offload |= 1;
2582 }
2583 #endif
2584
2585 offload |= ixgbe_tx_offload(mp, &vlan_macip_lens, &type_tucmd_mlhl,
2586 olinfo_status, cmd_type_len, &mss_l4len_idx);
2587
2588 if (!offload)
2589 return (0);
2590
2591 TXD = (struct ixgbe_adv_tx_context_desc *)&txr->tx_base[ctxd];
2592 tx_buffer = &txr->tx_buffers[ctxd];
2593
2594 type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
2595
2596 /* Now copy bits into descriptor */
2597 TXD->vlan_macip_lens = htole32(vlan_macip_lens);
2598 TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl);
2599 TXD->seqnum_seed = htole32(0);
2600 TXD->mss_l4len_idx = htole32(mss_l4len_idx);
2601
2602 tx_buffer->m_head = NULL;
2603 tx_buffer->eop_index = -1;
2604
2605 return (1);
2606 }
2607
2608 /**********************************************************************
2609 *
2610 * Examine each tx_buffer in the used queue. If the hardware is done
2611 * processing the packet then free associated resources. The
2612 * tx_buffer is put back on the free queue.
2613 *
2614 **********************************************************************/
2615 int
2616 ixgbe_txeof(struct ix_txring *txr)
2617 {
2618 struct ix_softc *sc = txr->sc;
2619 struct ifqueue *ifq = txr->ifq;
2620 struct ifnet *ifp = &sc->arpcom.ac_if;
2621 unsigned int head, tail, last;
2622 struct ixgbe_tx_buf *tx_buffer;
2623 struct ixgbe_legacy_tx_desc *tx_desc;
2624
2625 if (!ISSET(ifp->if_flags, IFF_RUNNING))
2626 return FALSE;
2627
2628 head = txr->next_avail_desc;
2629 tail = txr->next_to_clean;
2630
2631 membar_consumer();
2632
2633 if (head == tail)
2634 return (FALSE);
2635
2636 bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
2637 0, txr->txdma.dma_map->dm_mapsize,
2638 BUS_DMASYNC_POSTREAD);
2639
2640 for (;;) {
2641 tx_buffer = &txr->tx_buffers[tail];
2642 last = tx_buffer->eop_index;
2643 tx_desc = (struct ixgbe_legacy_tx_desc *)&txr->tx_base[last];
2644
2645 if (!ISSET(tx_desc->upper.fields.status, IXGBE_TXD_STAT_DD))
2646 break;
2647
2648 bus_dmamap_sync(txr->txdma.dma_tag, tx_buffer->map,
2649 0, tx_buffer->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
2650 bus_dmamap_unload(txr->txdma.dma_tag, tx_buffer->map);
2651 m_freem(tx_buffer->m_head);
2652
2653 tx_buffer->m_head = NULL;
2654 tx_buffer->eop_index = -1;
2655
2656 tail = last + 1;
2657 if (tail == sc->num_tx_desc)
2658 tail = 0;
2659 if (head == tail) {
2660 /* All clean, turn off the timer */
2661 ifp->if_timer = 0;
2662 break;
2663 }
2664 }
2665
2666 bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
2667 0, txr->txdma.dma_map->dm_mapsize,
2668 BUS_DMASYNC_PREREAD);
2669
2670 membar_producer();
2671
2672 txr->next_to_clean = tail;
2673
2674 if (ifq_is_oactive(ifq))
2675 ifq_restart(ifq);
2676
2677 return TRUE;
2678 }
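
/*
 * Note on the indices used above: next_avail_desc is advanced only by
 * the transmit (producer) path and next_to_clean only here (the
 * consumer), so the ring is empty when they are equal.  The
 * membar_consumer() orders the DD-bit reads after the fetch of the
 * producer index, and membar_producer() orders the cleanup before the
 * new next_to_clean is published.
 */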
2679
2680 /*********************************************************************
2681 *
2682 * Get a buffer from system mbuf buffer pool.
2683 *
2684 **********************************************************************/
2685 int
2686 ixgbe_get_buf(struct ix_rxring *rxr, int i)
2687 {
2688 struct ix_softc *sc = rxr->sc;
2689 struct ixgbe_rx_buf *rxbuf;
2690 struct mbuf *mp;
2691 int error;
2692 union ixgbe_adv_rx_desc *rxdesc;
2693
2694 rxbuf = &rxr->rx_buffers[i];
2695 rxdesc = &rxr->rx_base[i];
2696 if (rxbuf->buf) {
2697 printf("%s: ixgbe_get_buf: slot %d already has an mbuf\n",
2698 sc->dev.dv_xname, i);
2699 return (ENOBUFS);
2700 }
2701
2702 /* a cluster is needed in any case, so preallocate it up front */
2703 mp = MCLGETL(NULL, M_DONTWAIT, sc->rx_mbuf_sz);
2704 if (!mp)
2705 return (ENOBUFS);
2706
2707 mp->m_data += (mp->m_ext.ext_size - sc->rx_mbuf_sz);
2708 mp->m_len = mp->m_pkthdr.len = sc->rx_mbuf_sz;
2709
2710 error = bus_dmamap_load_mbuf(rxr->rxdma.dma_tag, rxbuf->map,
2711 mp, BUS_DMA_NOWAIT);
2712 if (error) {
2713 m_freem(mp);
2714 return (error);
2715 }
2716
2717 bus_dmamap_sync(rxr->rxdma.dma_tag, rxbuf->map,
2718 0, rxbuf->map->dm_mapsize, BUS_DMASYNC_PREREAD);
2719 rxbuf->buf = mp;
2720
2721 rxdesc->read.pkt_addr = htole64(rxbuf->map->dm_segs[0].ds_addr);
2722
2723 return (0);
2724 }
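
/*
 * Note: the receive DMA maps are created with a single 16k segment in
 * ixgbe_allocate_receive_buffers() below, so dm_segs[0] above always
 * covers the whole cluster and is the only address that has to be
 * written into the descriptor.
 */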
2725
2726 /*********************************************************************
2727 *
2728 * Allocate memory for rx_buffer structures. Since we use one
2729 * rx_buffer per received packet, the maximum number of rx_buffer's
2730 * that we'll need is equal to the number of receive descriptors
2731 * that we've allocated.
2732 *
2733 **********************************************************************/
2734 int
2735 ixgbe_allocate_receive_buffers(struct ix_rxring *rxr)
2736 {
2737 struct ix_softc *sc = rxr->sc;
2738 struct ifnet *ifp = &sc->arpcom.ac_if;
2739 struct ixgbe_rx_buf *rxbuf;
2740 int i, error;
2741
2742 if (!(rxr->rx_buffers = mallocarray(sc->num_rx_desc,
2743 sizeof(struct ixgbe_rx_buf), M_DEVBUF, M_NOWAIT | M_ZERO))) {
2744 printf("%s: Unable to allocate rx_buffer memory\n",
2745 ifp->if_xname);
2746 error = ENOMEM;
2747 goto fail;
2748 }
2749
2750 rxbuf = rxr->rx_buffers;
2751 for (i = 0; i < sc->num_rx_desc; i++, rxbuf++) {
2752 error = bus_dmamap_create(rxr->rxdma.dma_tag, 16 * 1024, 1,
2753 16 * 1024, 0, BUS_DMA_NOWAIT, &rxbuf->map);
2754 if (error) {
2755 printf("%s: Unable to create Pack DMA map\n",
2756 ifp->if_xname);
2757 goto fail;
2758 }
2759 }
2760 bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map, 0,
2761 rxr->rxdma.dma_map->dm_mapsize,
2762 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2763
2764 return (0);
2765
2766 fail:
2767 return (error);
2768 }
2769
2770 /*********************************************************************
2771 *
2772 * Initialize a receive ring and its buffers.
2773 *
2774 **********************************************************************/
2775 int
2776 ixgbe_setup_receive_ring(struct ix_rxring *rxr)
2777 {
2778 struct ix_softc *sc = rxr->sc;
2779 struct ifnet *ifp = &sc->arpcom.ac_if;
2780 int rsize, error;
2781
2782 rsize = roundup2(sc->num_rx_desc *
2783 sizeof(union ixgbe_adv_rx_desc), 4096);
2784 /* Clear the ring contents */
2785 bzero((void *)rxr->rx_base, rsize);
2786
2787 if ((error = ixgbe_allocate_receive_buffers(rxr)) != 0)
2788 return (error);
2789
2790 /* Setup our descriptor indices */
2791 rxr->next_to_check = 0;
2792 rxr->last_desc_filled = sc->num_rx_desc - 1;
2793
2794 if_rxr_init(&rxr->rx_ring, 2 * ((ifp->if_hardmtu / MCLBYTES) + 1),
2795 sc->num_rx_desc - 1);
2796
2797 ixgbe_rxfill(rxr);
2798 if (if_rxr_inuse(&rxr->rx_ring) == 0) {
2799 printf("%s: unable to fill any rx descriptors\n",
2800 sc->dev.dv_xname);
2801 return (ENOBUFS);
2802 }
2803
2804 return (0);
2805 }
2806
2807 int
2808 ixgbe_rxfill(struct ix_rxring *rxr)
2809 {
2810 struct ix_softc *sc = rxr->sc;
2811 int post = 0;
2812 u_int slots;
2813 int i;
2814
2815 bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
2816 0, rxr->rxdma.dma_map->dm_mapsize,
2817 BUS_DMASYNC_POSTWRITE);
2818
2819 i = rxr->last_desc_filled;
2820 for (slots = if_rxr_get(&rxr->rx_ring, sc->num_rx_desc);
2821 slots > 0; slots--) {
2822 if (++i == sc->num_rx_desc)
2823 i = 0;
2824
2825 if (ixgbe_get_buf(rxr, i) != 0)
2826 break;
2827
2828 rxr->last_desc_filled = i;
2829 post = 1;
2830 }
2831
2832 bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
2833 0, rxr->rxdma.dma_map->dm_mapsize,
2834 BUS_DMASYNC_PREWRITE);
2835
2836 if_rxr_put(&rxr->rx_ring, slots);
2837
2838 return (post);
2839 }
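
/*
 * if_rxr accounting sketch (illustrative numbers): if_rxr_get() reserves
 * up to num_rx_desc slots, one slot is consumed per successful
 * ixgbe_get_buf(), and if_rxr_put() returns whatever was not used; a
 * reservation of 64 slots with 60 buffers filled hands 4 back.
 */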
2840
2841 void
2842 ixgbe_rxrefill(void *xrxr)
2843 {
2844 struct ix_rxring *rxr = xrxr;
2845 struct ix_softc *sc = rxr->sc;
2846
2847 if (ixgbe_rxfill(rxr)) {
2848 /* Advance the Rx Queue "Tail Pointer" */
2849 IXGBE_WRITE_REG(&sc->hw, rxr->tail, rxr->last_desc_filled);
2850 } else if (if_rxr_inuse(&rxr->rx_ring) == 0)
2851 timeout_add(&rxr->rx_refill, 1);
2852
2853 }
2854
2855 /*********************************************************************
2856 *
2857 * Initialize all receive rings.
2858 *
2859 **********************************************************************/
2860 int
2861 ixgbe_setup_receive_structures(struct ix_softc *sc)
2862 {
2863 struct ix_rxring *rxr = sc->rx_rings;
2864 int i;
2865
2866 for (i = 0; i < sc->num_queues; i++, rxr++)
2867 if (ixgbe_setup_receive_ring(rxr))
2868 goto fail;
2869
2870 return (0);
2871 fail:
2872 ixgbe_free_receive_structures(sc);
2873 return (ENOBUFS);
2874 }
2875
2876 /*********************************************************************
2877 *
2878 * Setup receive registers and features.
2879 *
2880 **********************************************************************/
2881 #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
2882
2883 void
2884 ixgbe_initialize_receive_units(struct ix_softc *sc)
2885 {
2886 struct ifnet *ifp = &sc->arpcom.ac_if;
2887 struct ix_rxring *rxr = sc->rx_rings;
2888 struct ixgbe_hw *hw = &sc->hw;
2889 uint32_t bufsz, fctrl, srrctl, rxcsum, rdrxctl;
2890 uint32_t hlreg;
2891 int i;
2892
2893 /*
2894 * Make sure receives are disabled while
2895 * setting up the descriptor ring
2896 */
2897 ixgbe_disable_rx(hw);
2898
2899 /* Enable broadcasts */
2900 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
2901 fctrl |= IXGBE_FCTRL_BAM;
2902 if (sc->hw.mac.type == ixgbe_mac_82598EB) {
2903 fctrl |= IXGBE_FCTRL_DPF;
2904 fctrl |= IXGBE_FCTRL_PMCF;
2905 }
2906 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
2907
2908 hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
2909 /* Always enable jumbo frame reception */
2910 hlreg |= IXGBE_HLREG0_JUMBOEN;
2911 /* Always enable CRC stripping */
2912 hlreg |= IXGBE_HLREG0_RXCRCSTRP;
2913 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
2914
2915 if (ISSET(ifp->if_xflags, IFXF_LRO)) {
2916 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
2917
2918 /* This field has to be set to zero. */
2919 rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
2920
2921 /* RSC Coalescing on ACK Change */
2922 rdrxctl |= IXGBE_RDRXCTL_RSCACKC;
2923 rdrxctl |= IXGBE_RDRXCTL_FCOE_WRFIX;
2924
2925 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
2926 }
2927
2928 bufsz = (sc->rx_mbuf_sz - ETHER_ALIGN) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
2929
2930 for (i = 0; i < sc->num_queues; i++, rxr++) {
2931 uint64_t rdba = rxr->rxdma.dma_map->dm_segs[0].ds_addr;
2932
2933 /* Setup the Base and Length of the Rx Descriptor Ring */
2934 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(i),
2935 (rdba & 0x00000000ffffffffULL));
2936 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(i), (rdba >> 32));
2937 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(i),
2938 sc->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
2939
2940 /* Set up the SRRCTL register */
2941 srrctl = bufsz | IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
2942 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(i), srrctl);
2943
2944 /* Capture Rx Tail index */
2945 rxr->tail = IXGBE_RDT(i);
2946
2947 if (ISSET(ifp->if_xflags, IFXF_LRO)) {
2948 rdrxctl = IXGBE_READ_REG(&sc->hw, IXGBE_RSCCTL(i));
2949
2950 /* Enable Receive Side Coalescing */
2951 rdrxctl |= IXGBE_RSCCTL_RSCEN;
2952 rdrxctl |= IXGBE_RSCCTL_MAXDESC_16;
2953
2954 IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(i), rdrxctl);
2955 }
2956
2957 /* Setup the HW Rx Head and Tail Descriptor Pointers */
2958 IXGBE_WRITE_REG(hw, IXGBE_RDH(i), 0);
2959 IXGBE_WRITE_REG(hw, rxr->tail, 0);
2960 }
2961
2962 if (sc->hw.mac.type != ixgbe_mac_82598EB) {
2963 uint32_t psrtype = IXGBE_PSRTYPE_TCPHDR |
2964 IXGBE_PSRTYPE_UDPHDR |
2965 IXGBE_PSRTYPE_IPV4HDR |
2966 IXGBE_PSRTYPE_IPV6HDR;
2967 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
2968 }
2969
2970 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
2971 rxcsum &= ~IXGBE_RXCSUM_PCSD;
2972
2973 ixgbe_initialize_rss_mapping(sc);
2974
2975 /* Setup RSS */
2976 if (sc->num_queues > 1) {
2977 /* RSS and RX IPP Checksum are mutually exclusive */
2978 rxcsum |= IXGBE_RXCSUM_PCSD;
2979 }
2980
2981 /* Map QPRC/QPRDC/QPTC on a per queue basis */
2982 ixgbe_map_queue_statistics(sc);
2983
2984 /* This is useful for calculating UDP/IP fragment checksums */
2985 if (!(rxcsum & IXGBE_RXCSUM_PCSD))
2986 rxcsum |= IXGBE_RXCSUM_IPPCSE;
2987
2988 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
2989 }
2990
2991 void
2992 ixgbe_initialize_rss_mapping(struct ix_softc *sc)
2993 {
2994 struct ixgbe_hw *hw = &sc->hw;
2995 uint32_t reta = 0, mrqc, rss_key[10];
2996 int i, j, queue_id, table_size, index_mult;
2997
2998 /* set up random bits */
2999 stoeplitz_to_key(&rss_key, sizeof(rss_key));
3000
3001 /* Set multiplier for RETA setup and table size based on MAC */
3002 index_mult = 0x1;
3003 table_size = 128;
3004 switch (sc->hw.mac.type) {
3005 case ixgbe_mac_82598EB:
3006 index_mult = 0x11;
3007 break;
3008 case ixgbe_mac_X550:
3009 case ixgbe_mac_X550EM_x:
3010 case ixgbe_mac_X550EM_a:
3011 table_size = 512;
3012 break;
3013 default:
3014 break;
3015 }
3016
3017 /* Set up the redirection table */
3018 for (i = 0, j = 0; i < table_size; i++, j++) {
3019 if (j == sc->num_queues) j = 0;
3020 queue_id = (j * index_mult);
3021 /*
3022 * The low 8 bits are for hash value (n+0);
3023 * The next 8 bits are for hash value (n+1), etc.
3024 */
3025 reta = reta >> 8;
3026 reta = reta | ( ((uint32_t) queue_id) << 24);
3027 if ((i & 3) == 3) {
3028 if (i < 128)
3029 IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
3030 else
3031 IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
3032 reta);
3033 reta = 0;
3034 }
3035 }
3036
3037 /* Now fill our hash function seeds */
3038 for (i = 0; i < 10; i++)
3039 IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);
3040
3041 /*
3042 * Disable UDP - IP fragments aren't currently being handled
3043 * and so we end up with a mix of 2-tuple and 4-tuple
3044 * traffic.
3045 */
3046 mrqc = IXGBE_MRQC_RSSEN
3047 | IXGBE_MRQC_RSS_FIELD_IPV4
3048 | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
3049 | IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP
3050 | IXGBE_MRQC_RSS_FIELD_IPV6_EX
3051 | IXGBE_MRQC_RSS_FIELD_IPV6
3052 | IXGBE_MRQC_RSS_FIELD_IPV6_TCP
3053 ;
3054 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
3055 }
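
/*
 * RETA packing example (illustrative): the shift/or loop above fills
 * each 32-bit RETA register with four 8-bit queue indices, low byte
 * first.  With num_queues = 4 and index_mult = 1 the table repeats
 * 0,1,2,3, so every RETA register is written as 0x03020100 across the
 * 128 entries (512 on X550-class MACs).
 */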
3056
3057 /*********************************************************************
3058 *
3059 * Free all receive rings.
3060 *
3061 **********************************************************************/
3062 void
3063 ixgbe_free_receive_structures(struct ix_softc *sc)
3064 {
3065 struct ix_rxring *rxr;
3066 int i;
3067
3068 for (i = 0, rxr = sc->rx_rings; i < sc->num_queues; i++, rxr++)
3069 if_rxr_init(&rxr->rx_ring, 0, 0);
3070
3071 for (i = 0, rxr = sc->rx_rings; i < sc->num_queues; i++, rxr++)
3072 ixgbe_free_receive_buffers(rxr);
3073 }
3074
3075 /*********************************************************************
3076 *
3077 * Free receive ring data structures
3078 *
3079 **********************************************************************/
3080 void
3081 ixgbe_free_receive_buffers(struct ix_rxring *rxr)
3082 {
3083 struct ix_softc *sc;
3084 struct ixgbe_rx_buf *rxbuf;
3085 int i;
3086
3087 sc = rxr->sc;
3088 if (rxr->rx_buffers != NULL) {
3089 for (i = 0; i < sc->num_rx_desc; i++) {
3090 rxbuf = &rxr->rx_buffers[i];
3091 if (rxbuf->buf != NULL) {
3092 bus_dmamap_sync(rxr->rxdma.dma_tag, rxbuf->map,
3093 0, rxbuf->map->dm_mapsize,
3094 BUS_DMASYNC_POSTREAD);
3095 bus_dmamap_unload(rxr->rxdma.dma_tag,
3096 rxbuf->map);
3097 m_freem(rxbuf->buf);
3098 rxbuf->buf = NULL;
3099 }
3100 if (rxbuf->map != NULL) {
3101 bus_dmamap_destroy(rxr->rxdma.dma_tag,
3102 rxbuf->map);
3103 rxbuf->map = NULL;
3104 }
3105 }
3106 free(rxr->rx_buffers, M_DEVBUF,
3107 sc->num_rx_desc * sizeof(struct ixgbe_rx_buf));
3108 rxr->rx_buffers = NULL;
3109 }
3110 }
3111
3112 /*********************************************************************
3113 *
3114 * This routine executes in interrupt context. It replenishes
3115 * the mbufs in the descriptor and sends data which has been
3116 * dma'ed into host memory to upper layer.
3117 *
3118 *********************************************************************/
3119 int
3120 ixgbe_rxeof(struct ix_rxring *rxr)
3121 {
3122 struct ix_softc *sc = rxr->sc;
3123 struct ifnet *ifp = &sc->arpcom.ac_if;
3124 struct mbuf_list ml = MBUF_LIST_INITIALIZER();
3125 struct mbuf *mp, *sendmp;
3126 uint8_t eop = 0;
3127 uint16_t len, vtag;
3128 uint32_t staterr = 0;
3129 struct ixgbe_rx_buf *rxbuf, *nxbuf;
3130 union ixgbe_adv_rx_desc *rxdesc;
3131 size_t dsize = sizeof(union ixgbe_adv_rx_desc);
3132 int i, nextp, rsccnt;
3133
3134 if (!ISSET(ifp->if_flags, IFF_RUNNING))
3135 return FALSE;
3136
3137 i = rxr->next_to_check;
3138 while (if_rxr_inuse(&rxr->rx_ring) > 0) {
3139 uint32_t hash;
3140 uint16_t hashtype;
3141
3142 bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
3143 dsize * i, dsize, BUS_DMASYNC_POSTREAD);
3144
3145 rxdesc = &rxr->rx_base[i];
3146 staterr = letoh32(rxdesc->wb.upper.status_error);
3147 if (!ISSET(staterr, IXGBE_RXD_STAT_DD)) {
3148 bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
3149 dsize * i, dsize,
3150 BUS_DMASYNC_PREREAD);
3151 break;
3152 }
3153
3154 /* Zero out the receive descriptors status */
3155 rxdesc->wb.upper.status_error = 0;
3156 rxbuf = &rxr->rx_buffers[i];
3157
3158 /* pull the mbuf off the ring */
3159 bus_dmamap_sync(rxr->rxdma.dma_tag, rxbuf->map, 0,
3160 rxbuf->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
3161 bus_dmamap_unload(rxr->rxdma.dma_tag, rxbuf->map);
3162
3163 mp = rxbuf->buf;
3164 len = letoh16(rxdesc->wb.upper.length);
3165 vtag = letoh16(rxdesc->wb.upper.vlan);
3166 eop = ((staterr & IXGBE_RXD_STAT_EOP) != 0);
3167 hash = lemtoh32(&rxdesc->wb.lower.hi_dword.rss);
3168 hashtype =
3169 lemtoh16(&rxdesc->wb.lower.lo_dword.hs_rss.pkt_info) &
3170 IXGBE_RXDADV_RSSTYPE_MASK;
3171 rsccnt = lemtoh32(&rxdesc->wb.lower.lo_dword.data) &
3172 IXGBE_RXDADV_RSCCNT_MASK;
3173 rsccnt >>= IXGBE_RXDADV_RSCCNT_SHIFT;
3174
3175 if (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) {
3176 if (rxbuf->fmp) {
3177 m_freem(rxbuf->fmp);
3178 } else {
3179 m_freem(mp);
3180 }
3181 rxbuf->fmp = NULL;
3182 rxbuf->buf = NULL;
3183 goto next_desc;
3184 }
3185
3186 if (mp == NULL) {
3187 panic("%s: ixgbe_rxeof: NULL mbuf in slot %d "
3188 "(nrx %d, filled %d)", sc->dev.dv_xname,
3189 i, if_rxr_inuse(&rxr->rx_ring),
3190 rxr->last_desc_filled);
3191 }
3192
3193 if (!eop) {
3194 /*
3195 * Figure out the next descriptor of this frame.
3196 */
3197 if (rsccnt) {
3198 nextp = staterr & IXGBE_RXDADV_NEXTP_MASK;
3199 nextp >>= IXGBE_RXDADV_NEXTP_SHIFT;
3200 } else {
3201 nextp = i + 1;
3202 }
3203 if (nextp == sc->num_rx_desc)
3204 nextp = 0;
3205 nxbuf = &rxr->rx_buffers[nextp];
3206 /* prefetch(nxbuf); */
3207 }
3208
3209 /*
3210 * Rather than using the fmp/lmp global pointers
3211 * we now keep the head of a packet chain in the
3212 * buffer struct and pass this along from one
3213 * descriptor to the next, until we get EOP.
3214 */
3215 mp->m_len = len;
3216 /*
3217 * See if there is a stored head
3218 * that determines what we are
3219 */
3220 sendmp = rxbuf->fmp;
3221 rxbuf->buf = rxbuf->fmp = NULL;
3222
3223 if (sendmp == NULL) {
3224 /* first desc of a non-ps chain */
3225 sendmp = mp;
3226 sendmp->m_pkthdr.len = 0;
3227 sendmp->m_pkthdr.ph_mss = 0;
3228 } else {
3229 mp->m_flags &= ~M_PKTHDR;
3230 }
3231 sendmp->m_pkthdr.len += mp->m_len;
3232 /*
3233 * This function iterates over interleaved descriptors.
3234 * Thus, we reuse ph_mss as global segment counter per
3235 * TCP connection, instead of introducing a new variable
3236 * in m_pkthdr.
3237 */
3238 if (rsccnt)
3239 sendmp->m_pkthdr.ph_mss += rsccnt - 1;
3240
3241 /* Pass the head pointer on */
3242 if (eop == 0) {
3243 nxbuf->fmp = sendmp;
3244 sendmp = NULL;
3245 mp->m_next = nxbuf->buf;
3246 } else { /* Sending this frame? */
3247 ixgbe_rx_offload(staterr, vtag, sendmp);
3248
3249 if (hashtype != IXGBE_RXDADV_RSSTYPE_NONE) {
3250 sendmp->m_pkthdr.ph_flowid = hash;
3251 SET(sendmp->m_pkthdr.csum_flags, M_FLOWID);
3252 }
3253
3254 ml_enqueue(&ml, sendmp);
3255 }
3256 next_desc:
3257 if_rxr_put(&rxr->rx_ring, 1);
3258 bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
3259 dsize * i, dsize,
3260 BUS_DMASYNC_PREREAD);
3261
3262 /* Advance our pointers to the next descriptor. */
3263 if (++i == sc->num_rx_desc)
3264 i = 0;
3265 }
3266 rxr->next_to_check = i;
3267
3268 if (ifiq_input(rxr->ifiq, &ml))
3269 if_rxr_livelocked(&rxr->rx_ring);
3270
3271 if (!(staterr & IXGBE_RXD_STAT_DD))
3272 return FALSE;
3273
3274 return TRUE;
3275 }
3276
3277 /*********************************************************************
3278 *
3279 * Check VLAN indication from hardware and inform the stack about the
3280 * annotated TAG.
3281 *
3282 * Verify that the hardware indicated that the checksum is valid.
3283 * Inform the stack about the status of checksum so that stack
3284 * doesn't spend time verifying the checksum.
3285 *
3286 * Propagate TCP LRO packet from hardware to the stack with MSS annotation.
3287 *
3288 *********************************************************************/
3289 void
3290 ixgbe_rx_offload(uint32_t staterr, uint16_t vtag, struct mbuf *m)
3291 {
3292 uint16_t status = (uint16_t) staterr;
3293 uint8_t errors = (uint8_t) (staterr >> 24);
3294 int16_t pkts;
3295
3296 /*
3297 * VLAN Offload
3298 */
3299
3300 #if NVLAN > 0
3301 if (ISSET(staterr, IXGBE_RXD_STAT_VP)) {
3302 m->m_pkthdr.ether_vtag = vtag;
3303 SET(m->m_flags, M_VLANTAG);
3304 }
3305 #endif
3306
3307 /*
3308 * Checksum Offload
3309 */
3310
3311 if (ISSET(status, IXGBE_RXD_STAT_IPCS)) {
3312 if (ISSET(errors, IXGBE_RXD_ERR_IPE))
3313 SET(m->m_pkthdr.csum_flags, M_IPV4_CSUM_IN_BAD);
3314 else
3315 SET(m->m_pkthdr.csum_flags, M_IPV4_CSUM_IN_OK);
3316 }
3317 if (ISSET(status, IXGBE_RXD_STAT_L4CS) &&
3318 !ISSET(status, IXGBE_RXD_STAT_UDPCS)) {
3319 if (ISSET(errors, IXGBE_RXD_ERR_TCPE)) {
3320 /* on some hardware IPv6 + TCP + Bad is broken */
3321 if (ISSET(status, IXGBE_RXD_STAT_IPCS))
3322 SET(m->m_pkthdr.csum_flags, M_TCP_CSUM_IN_BAD);
3323 } else
3324 SET(m->m_pkthdr.csum_flags, M_TCP_CSUM_IN_OK);
3325 }
3326 if (ISSET(status, IXGBE_RXD_STAT_L4CS) &&
3327 ISSET(status, IXGBE_RXD_STAT_UDPCS)) {
3328 if (ISSET(errors, IXGBE_RXD_ERR_TCPE))
3329 SET(m->m_pkthdr.csum_flags, M_UDP_CSUM_IN_BAD);
3330 else
3331 SET(m->m_pkthdr.csum_flags, M_UDP_CSUM_IN_OK);
3332 }
3333
3334 /*
3335 * TCP Large Receive Offload
3336 */
3337
3338 pkts = m->m_pkthdr.ph_mss;
3339 m->m_pkthdr.ph_mss = 0;
3340
3341 if (pkts > 1) {
3342 struct ether_extracted ext;
3343 uint32_t paylen;
3344
3345 /*
3346 * Calculate the payload size:
3347 *
3348 * The packet length returned by the NIC (m->m_pkthdr.len)
3349 * can contain padding, which we don't want to count in to the
3350 * payload size. Therefore, we calculate the real payload size
3351 * based on the total ip length field (ext.iplen).
3352 */
3353 ether_extract_headers(m, &ext);
3354 paylen = ext.iplen;
3355 if (ext.ip4 || ext.ip6)
3356 paylen -= ext.iphlen;
3357 if (ext.tcp) {
3358 paylen -= ext.tcphlen;
3359 tcpstat_inc(tcps_inhwlro);
3360 tcpstat_add(tcps_inpktlro, pkts);
3361 } else {
3362 tcpstat_inc(tcps_inbadlro);
3363 }
3364
3365 /*
3366 * If this packet might be forwarded, we have to mark it as TSO,
3367 * set a correct MSS, and recalculate the TCP checksum.
3368 */
3369 if (ext.tcp && paylen >= pkts) {
3370 SET(m->m_pkthdr.csum_flags, M_TCP_TSO);
3371 m->m_pkthdr.ph_mss = paylen / pkts;
3372 }
3373 if (ext.tcp && ISSET(m->m_pkthdr.csum_flags, M_TCP_CSUM_IN_OK))
3374 SET(m->m_pkthdr.csum_flags, M_TCP_CSUM_OUT);
3375 }
3376 }
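
/*
 * LRO example (illustrative, values assumed): a frame coalesced from 3
 * wire packets (pkts = 3) with ext.iplen = 4380, ext.iphlen = 20 and
 * ext.tcphlen = 20 gives paylen = 4340, so ph_mss becomes 4340 / 3 =
 * 1446 and M_TCP_TSO is set in case the packet is later forwarded and
 * has to be re-segmented on output.
 */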
3377
3378 void
3379 ixgbe_setup_vlan_hw_support(struct ix_softc *sc)
3380 {
3381 uint32_t ctrl;
3382 int i;
3383
3384 /*
3385 * A soft reset zero's out the VFTA, so
3386 * we need to repopulate it now.
3387 */
3388 for (i = 0; i < IXGBE_VFTA_SIZE; i++) {
3389 if (sc->shadow_vfta[i] != 0)
3390 IXGBE_WRITE_REG(&sc->hw, IXGBE_VFTA(i),
3391 sc->shadow_vfta[i]);
3392 }
3393
3394 ctrl = IXGBE_READ_REG(&sc->hw, IXGBE_VLNCTRL);
3395 #if 0
3396 /* Enable the Filter Table if enabled */
3397 if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) {
3398 ctrl &= ~IXGBE_VLNCTRL_CFIEN;
3399 ctrl |= IXGBE_VLNCTRL_VFE;
3400 }
3401 #endif
3402 if (sc->hw.mac.type == ixgbe_mac_82598EB)
3403 ctrl |= IXGBE_VLNCTRL_VME;
3404 IXGBE_WRITE_REG(&sc->hw, IXGBE_VLNCTRL, ctrl);
3405
3406 /* On 82599 the VLAN enable is per/queue in RXDCTL */
3407 if (sc->hw.mac.type != ixgbe_mac_82598EB) {
3408 for (i = 0; i < sc->num_queues; i++) {
3409 ctrl = IXGBE_READ_REG(&sc->hw, IXGBE_RXDCTL(i));
3410 ctrl |= IXGBE_RXDCTL_VME;
3411 IXGBE_WRITE_REG(&sc->hw, IXGBE_RXDCTL(i), ctrl);
3412 }
3413 }
3414 }
3415
3416 void
3417 ixgbe_enable_intr(struct ix_softc *sc)
3418 {
3419 struct ixgbe_hw *hw = &sc->hw;
3420 uint32_t mask, fwsm;
3421
3422 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
3423 /* Enable Fan Failure detection */
3424 if (hw->device_id == IXGBE_DEV_ID_82598AT)
3425 mask |= IXGBE_EIMS_GPI_SDP1;
3426
3427 switch (sc->hw.mac.type) {
3428 case ixgbe_mac_82599EB:
3429 mask |= IXGBE_EIMS_ECC;
3430 /* Temperature sensor on some adapters */
3431 mask |= IXGBE_EIMS_GPI_SDP0;
3432 /* SFP+ (RX_LOS_N & MOD_ABS_N) */
3433 mask |= IXGBE_EIMS_GPI_SDP1;
3434 mask |= IXGBE_EIMS_GPI_SDP2;
3435 break;
3436 case ixgbe_mac_X540:
3437 mask |= IXGBE_EIMS_ECC;
3438 /* Detect if Thermal Sensor is enabled */
3439 fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
3440 if (fwsm & IXGBE_FWSM_TS_ENABLED)
3441 mask |= IXGBE_EIMS_TS;
3442 break;
3443 case ixgbe_mac_X550:
3444 case ixgbe_mac_X550EM_x:
3445 case ixgbe_mac_X550EM_a:
3446 mask |= IXGBE_EIMS_ECC;
3447 /* MAC thermal sensor is automatically enabled */
3448 mask |= IXGBE_EIMS_TS;
3449 /* Some devices use SDP0 for important information */
3450 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
3451 hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
3452 mask |= IXGBE_EIMS_GPI_SDP0_X540;
3453 default:
3454 break;
3455 }
3456
3457 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
3458
3459 /* With MSI-X we use auto clear */
3460 if (sc->sc_intrmap) {
3461 mask = IXGBE_EIMS_ENABLE_MASK;
3462 /* Don't autoclear Link */
3463 mask &= ~IXGBE_EIMS_OTHER;
3464 mask &= ~IXGBE_EIMS_LSC;
3465 IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
3466 }
3467
3468 IXGBE_WRITE_FLUSH(hw);
3469 }
3470
3471 void
3472 ixgbe_disable_intr(struct ix_softc *sc)
3473 {
3474 if (sc->sc_intrmap)
3475 IXGBE_WRITE_REG(&sc->hw, IXGBE_EIAC, 0);
3476 if (sc->hw.mac.type == ixgbe_mac_82598EB) {
3477 IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC, ~0);
3478 } else {
3479 IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC, 0xFFFF0000);
3480 IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC_EX(0), ~0);
3481 IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC_EX(1), ~0);
3482 }
3483 IXGBE_WRITE_FLUSH(&sc->hw);
3484 }
3485
3486 uint16_t
3487 ixgbe_read_pci_cfg(struct ixgbe_hw *hw, uint32_t reg)
3488 {
3489 struct pci_attach_args *pa;
3490 uint32_t value;
3491 int high = 0;
3492
3493 if (reg & 0x2) {
3494 high = 1;
3495 reg &= ~0x2;
3496 }
3497 pa = &((struct ixgbe_osdep *)hw->back)->os_pa;
3498 value = pci_conf_read(pa->pa_pc, pa->pa_tag, reg);
3499
3500 if (high)
3501 value >>= 16;
3502
3503 return (value & 0xffff);
3504 }
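
/*
 * Example (illustrative): a 16-bit read at config offset 0xA2 becomes a
 * 32-bit pci_conf_read() at 0xA0 and returns its upper half;
 * ixgbe_write_pci_cfg() below does the matching read-modify-write so
 * the other half of the dword is preserved.
 */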
3505
3506 void
3507 ixgbe_write_pci_cfg(struct ixgbe_hw *hw, uint32_t reg, uint16_t value)
3508 {
3509 struct pci_attach_args *pa;
3510 uint32_t rv;
3511 int high = 0;
3512
3513 /* Need to do read/mask/write... because 16 vs 32 bit!!! */
3514 if (reg & 0x2) {
3515 high = 1;
3516 reg &= ~0x2;
3517 }
3518 pa = &((struct ixgbe_osdep *)hw->back)->os_pa;
3519 rv = pci_conf_read(pa->pa_pc, pa->pa_tag, reg);
3520 if (!high)
3521 rv = (rv & 0xffff0000) | value;
3522 else
3523 rv = (rv & 0xffff) | ((uint32_t)value << 16);
3524 pci_conf_write(pa->pa_pc, pa->pa_tag, reg, rv);
3525 }
3526
3527 /*
3528 * Setup the correct IVAR register for a particular MSIX interrupt
3529 * (yes this is all very magic and confusing :)
3530 * - entry is the register array entry
3531 * - vector is the MSIX vector for this queue
3532 * - type is RX/TX/MISC
3533 */
3534 void
3535 ixgbe_set_ivar(struct ix_softc *sc, uint8_t entry, uint8_t vector, int8_t type)
3536 {
3537 struct ixgbe_hw *hw = &sc->hw;
3538 uint32_t ivar, index;
3539
3540 vector |= IXGBE_IVAR_ALLOC_VAL;
3541
3542 switch (hw->mac.type) {
3543
3544 case ixgbe_mac_82598EB:
3545 if (type == -1)
3546 entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
3547 else
3548 entry += (type * 64);
3549 index = (entry >> 2) & 0x1F;
3550 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
3551 ivar &= ~((uint32_t)0xFF << (8 * (entry & 0x3)));
3552 ivar |= ((uint32_t)vector << (8 * (entry & 0x3)));
3553 IXGBE_WRITE_REG(&sc->hw, IXGBE_IVAR(index), ivar);
3554 break;
3555
3556 case ixgbe_mac_82599EB:
3557 case ixgbe_mac_X540:
3558 case ixgbe_mac_X550:
3559 case ixgbe_mac_X550EM_x:
3560 case ixgbe_mac_X550EM_a:
3561 if (type == -1) { /* MISC IVAR */
3562 index = (entry & 1) * 8;
3563 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
3564 ivar &= ~((uint32_t)0xFF << index);
3565 ivar |= ((uint32_t)vector << index);
3566 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
3567 } else { /* RX/TX IVARS */
3568 index = (16 * (entry & 1)) + (8 * type);
3569 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
3570 ivar &= ~((uint32_t)0xFF << index);
3571 ivar |= ((uint32_t)vector << index);
3572 IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
3573 }
3574
3575 default:
3576 break;
3577 }
3578 }
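
/*
 * Worked example (illustrative): on 82599-class MACs, mapping the RX
 * half of queue 3 (entry = 3, type = 0) to MSI-X vector 3 computes
 * index = 16 * (3 & 1) + 8 * 0 = 16, so bits 23:16 of IVAR(1) are
 * loaded with 3 | IXGBE_IVAR_ALLOC_VAL.
 */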
3579
3580 void
3581 ixgbe_configure_ivars(struct ix_softc *sc)
3582 {
3583 struct ix_queue *que = sc->queues;
3584 uint32_t newitr;
3585 int i;
3586
3587 newitr = (4000000 / IXGBE_INTS_PER_SEC) & 0x0FF8;
3588
3589 for (i = 0; i < sc->num_queues; i++, que++) {
3590 /* First the RX queue entry */
3591 ixgbe_set_ivar(sc, i, que->msix, 0);
3592 /* ... and the TX */
3593 ixgbe_set_ivar(sc, i, que->msix, 1);
3594 /* Set an Initial EITR value */
3595 IXGBE_WRITE_REG(&sc->hw,
3596 IXGBE_EITR(que->msix), newitr);
3597 }
3598
3599 /* For the Link interrupt */
3600 ixgbe_set_ivar(sc, 1, sc->linkvec, -1);
3601 }
3602
3603 /*
3604 * SFP module interrupts handler
3605 */
3606 void
3607 ixgbe_handle_mod(struct ix_softc *sc)
3608 {
3609 struct ixgbe_hw *hw = &sc->hw;
3610 uint32_t err;
3611
3612 err = hw->phy.ops.identify_sfp(hw);
3613 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3614 printf("%s: Unsupported SFP+ module type was detected!\n",
3615 sc->dev.dv_xname);
3616 return;
3617 }
3618 err = hw->mac.ops.setup_sfp(hw);
3619 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3620 printf("%s: Setup failure - unsupported SFP+ module type!\n",
3621 sc->dev.dv_xname);
3622 return;
3623 }
3624
3625 ixgbe_handle_msf(sc);
3626 }
3627
3628
3629 /*
3630 * MSF (multispeed fiber) interrupts handler
3631 */
3632 void
3633 ixgbe_handle_msf(struct ix_softc *sc)
3634 {
3635 struct ixgbe_hw *hw = &sc->hw;
3636 uint32_t autoneg;
3637 bool negotiate;
3638
3639 autoneg = hw->phy.autoneg_advertised;
3640 if ((!autoneg) && (hw->mac.ops.get_link_capabilities)) {
3641 if (hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate))
3642 return;
3643 }
3644 if (hw->mac.ops.setup_link)
3645 hw->mac.ops.setup_link(hw, autoneg, TRUE);
3646
3647 ifmedia_delete_instance(&sc->media, IFM_INST_ANY);
3648 ixgbe_add_media_types(sc);
3649 ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);
3650 }
3651
3652 /*
3653 * External PHY interrupts handler
3654 */
3655 void
3656 ixgbe_handle_phy(struct ix_softc *sc)
3657 {
3658 struct ixgbe_hw *hw = &sc->hw;
3659 int error;
3660
3661 error = hw->phy.ops.handle_lasi(hw);
3662 if (error == IXGBE_ERR_OVERTEMP)
3663 printf("%s: CRITICAL: EXTERNAL PHY OVER TEMP!! "
3664 " PHY will downshift to lower power state!\n",
3665 sc->dev.dv_xname);
3666 else if (error)
3667 printf("%s: Error handling LASI interrupt: %d\n",
3668 sc->dev.dv_xname, error);
3669
3670 }
3671
3672 #if NKSTAT > 0
3673 enum ix_counter_idx {
3674 ix_counter_crcerrs,
3675 ix_counter_lxontxc,
3676 ix_counter_lxonrxc,
3677 ix_counter_lxofftxc,
3678 ix_counter_lxoffrxc,
3679 ix_counter_prc64,
3680 ix_counter_prc127,
3681 ix_counter_prc255,
3682 ix_counter_prc511,
3683 ix_counter_prc1023,
3684 ix_counter_prc1522,
3685 ix_counter_gptc,
3686 ix_counter_gorc,
3687 ix_counter_gotc,
3688 ix_counter_ruc,
3689 ix_counter_rfc,
3690 ix_counter_roc,
3691 ix_counter_rjc,
3692 ix_counter_tor,
3693 ix_counter_tpr,
3694 ix_counter_tpt,
3695 ix_counter_gprc,
3696 ix_counter_bprc,
3697 ix_counter_mprc,
3698 ix_counter_ptc64,
3699 ix_counter_ptc127,
3700 ix_counter_ptc255,
3701 ix_counter_ptc511,
3702 ix_counter_ptc1023,
3703 ix_counter_ptc1522,
3704 ix_counter_mptc,
3705 ix_counter_bptc,
3706
3707 ix_counter_num,
3708 };
3709
3710 CTASSERT(KSTAT_KV_U_PACKETS <= 0xff);
3711 CTASSERT(KSTAT_KV_U_BYTES <= 0xff);
3712
3713 struct ix_counter {
3714 char name[KSTAT_KV_NAMELEN];
3715 uint32_t reg;
3716 uint8_t width;
3717 uint8_t unit;
3718 };
3719
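/*
 * MAC statistics registers harvested into the "ix-stats" kstat.
 * A reg of 0 marks a counter whose register offset differs by MAC
 * type (the link XON/XOFF RX counters); those are read explicitly
 * in ix_kstats_read().  A width greater than 32 marks a counter
 * split across a low/high register pair, read via ix_read36() on
 * MACs newer than 82598.
 */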
3720 static const struct ix_counter ix_counters[ix_counter_num] = {
3721 [ix_counter_crcerrs] = { "crc errs", IXGBE_CRCERRS, 32,
3722 KSTAT_KV_U_PACKETS },
3723 [ix_counter_lxontxc] = { "tx link xon", IXGBE_LXONTXC, 32,
3724 KSTAT_KV_U_PACKETS },
3725 [ix_counter_lxonrxc] = { "rx link xon", 0, 32,
3726 KSTAT_KV_U_PACKETS },
3727 [ix_counter_lxofftxc] = { "tx link xoff", IXGBE_LXOFFTXC, 32,
3728 KSTAT_KV_U_PACKETS },
3729 [ix_counter_lxoffrxc] = { "rx link xoff", 0, 32,
3730 KSTAT_KV_U_PACKETS },
3731 [ix_counter_prc64] = { "rx 64B", IXGBE_PRC64, 32,
3732 KSTAT_KV_U_PACKETS },
3733 [ix_counter_prc127] = { "rx 65-127B", IXGBE_PRC127, 32,
3734 KSTAT_KV_U_PACKETS },
3735 [ix_counter_prc255] = { "rx 128-255B", IXGBE_PRC255, 32,
3736 KSTAT_KV_U_PACKETS },
3737 [ix_counter_prc511] = { "rx 256-511B", IXGBE_PRC511, 32,
3738 KSTAT_KV_U_PACKETS },
3739 [ix_counter_prc1023] = { "rx 512-1023B", IXGBE_PRC1023, 32,
3740 KSTAT_KV_U_PACKETS },
3741 [ix_counter_prc1522] = { "rx 1024-maxB", IXGBE_PRC1522, 32,
3742 KSTAT_KV_U_PACKETS },
3743 [ix_counter_gptc] = { "tx good", IXGBE_GPTC, 32,
3744 KSTAT_KV_U_PACKETS },
3745 [ix_counter_gorc] = { "rx good", IXGBE_GORCL, 36,
3746 KSTAT_KV_U_BYTES },
3747 [ix_counter_gotc] = { "tx good", IXGBE_GOTCL, 36,
3748 KSTAT_KV_U_BYTES },
3749 [ix_counter_ruc] = { "rx undersize", IXGBE_RUC, 32,
3750 KSTAT_KV_U_PACKETS },
3751 [ix_counter_rfc] = { "rx fragment", IXGBE_RFC, 32,
3752 KSTAT_KV_U_PACKETS },
3753 [ix_counter_roc] = { "rx oversize", IXGBE_ROC, 32,
3754 KSTAT_KV_U_PACKETS },
3755 [ix_counter_rjc] = { "rx jabber", IXGBE_RJC, 32,
3756 KSTAT_KV_U_PACKETS },
3757 [ix_counter_tor] = { "rx total", IXGBE_TORL, 36,
3758 KSTAT_KV_U_BYTES },
3759 [ix_counter_tpr] = { "rx total", IXGBE_TPR, 32,
3760 KSTAT_KV_U_PACKETS },
3761 [ix_counter_tpt] = { "tx total", IXGBE_TPT, 32,
3762 KSTAT_KV_U_PACKETS },
3763 [ix_counter_gprc] = { "rx good", IXGBE_GPRC, 32,
3764 KSTAT_KV_U_PACKETS },
3765 [ix_counter_bprc] = { "rx bcast", IXGBE_BPRC, 32,
3766 KSTAT_KV_U_PACKETS },
3767 [ix_counter_mprc] = { "rx mcast", IXGBE_MPRC, 32,
3768 KSTAT_KV_U_PACKETS },
3769 [ix_counter_ptc64] = { "tx 64B", IXGBE_PTC64, 32,
3770 KSTAT_KV_U_PACKETS },
3771 [ix_counter_ptc127] = { "tx 65-127B", IXGBE_PTC127, 32,
3772 KSTAT_KV_U_PACKETS },
3773 [ix_counter_ptc255] = { "tx 128-255B", IXGBE_PTC255, 32,
3774 KSTAT_KV_U_PACKETS },
3775 [ix_counter_ptc511] = { "tx 256-511B", IXGBE_PTC511, 32,
3776 KSTAT_KV_U_PACKETS },
3777 [ix_counter_ptc1023] = { "tx 512-1023B", IXGBE_PTC1023, 32,
3778 KSTAT_KV_U_PACKETS },
3779 [ix_counter_ptc1522] = { "tx 1024-maxB", IXGBE_PTC1522, 32,
3780 KSTAT_KV_U_PACKETS },
3781 [ix_counter_mptc] = { "tx mcast", IXGBE_MPTC, 32,
3782 KSTAT_KV_U_PACKETS },
3783 [ix_counter_bptc] = { "tx bcast", IXGBE_BPTC, 32,
3784 KSTAT_KV_U_PACKETS },
3785 };
3786
3787 struct ix_rxq_kstats {
3788 struct kstat_kv qprc;
3789 struct kstat_kv qbrc;
3790 struct kstat_kv qprdc;
3791 };
3792
3793 static const struct ix_rxq_kstats ix_rxq_kstats_tpl = {
3794 KSTAT_KV_UNIT_INITIALIZER("packets",
3795 KSTAT_KV_T_COUNTER64, KSTAT_KV_U_PACKETS),
3796 KSTAT_KV_UNIT_INITIALIZER("bytes",
3797 KSTAT_KV_T_COUNTER64, KSTAT_KV_U_BYTES),
3798 KSTAT_KV_UNIT_INITIALIZER("qdrops",
3799 KSTAT_KV_T_COUNTER64, KSTAT_KV_U_PACKETS),
3800 };
3801
3802 struct ix_txq_kstats {
3803 struct kstat_kv qptc;
3804 struct kstat_kv qbtc;
3805 };
3806
3807 static const struct ix_txq_kstats ix_txq_kstats_tpl = {
3808 KSTAT_KV_UNIT_INITIALIZER("packets",
3809 KSTAT_KV_T_COUNTER64, KSTAT_KV_U_PACKETS),
3810 KSTAT_KV_UNIT_INITIALIZER("bytes",
3811 KSTAT_KV_T_COUNTER64, KSTAT_KV_U_BYTES),
3812 };
3813
3814 static int ix_kstats_read(struct kstat *ks);
3815 static int ix_rxq_kstats_read(struct kstat *ks);
3816 static int ix_txq_kstats_read(struct kstat *ks);
3817
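/*
 * Create the per-device "ix-stats" kstat: one 64-bit kv per entry in
 * ix_counters, protected by sc_kstat_mtx and refreshed by the
 * self-rearming ix_kstats_tick() timeout.
 */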
3818 static void
3819 ix_kstats(struct ix_softc *sc)
3820 {
3821 struct kstat *ks;
3822 struct kstat_kv *kvs;
3823 unsigned int i;
3824
3825 mtx_init(&sc->sc_kstat_mtx, IPL_SOFTCLOCK);
3826 timeout_set(&sc->sc_kstat_tmo, ix_kstats_tick, sc);
3827
3828 ks = kstat_create(sc->dev.dv_xname, 0, "ix-stats", 0,
3829 KSTAT_T_KV, 0);
3830 if (ks == NULL)
3831 return;
3832
3833 kvs = mallocarray(nitems(ix_counters), sizeof(*kvs),
3834 M_DEVBUF, M_WAITOK|M_ZERO);
3835
3836 for (i = 0; i < nitems(ix_counters); i++) {
3837 const struct ix_counter *ixc = &ix_counters[i];
3838
3839 kstat_kv_unit_init(&kvs[i], ixc->name,
3840 KSTAT_KV_T_COUNTER64, ixc->unit);
3841 }
3842
3843 kstat_set_mutex(ks, &sc->sc_kstat_mtx);
3844 ks->ks_softc = sc;
3845 ks->ks_data = kvs;
3846 ks->ks_datalen = nitems(ix_counters) * sizeof(*kvs);
3847 ks->ks_read = ix_kstats_read;
3848
3849 sc->sc_kstat = ks;
3850 kstat_install(ks);
3851 }
3852
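/*
 * Create an "ix-rxq" kstat for one RX ring, exporting per-queue
 * packet, byte and drop counters under the shared kstat mutex.
 */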
3853 static void
3854 ix_rxq_kstats(struct ix_softc *sc, struct ix_rxring *rxr)
3855 {
3856 struct ix_rxq_kstats *stats;
3857 struct kstat *ks;
3858
3859 ks = kstat_create(sc->dev.dv_xname, 0, "ix-rxq", rxr->me,
3860 KSTAT_T_KV, 0);
3861 if (ks == NULL)
3862 return;
3863
3864 stats = malloc(sizeof(*stats), M_DEVBUF, M_WAITOK|M_ZERO);
3865 *stats = ix_rxq_kstats_tpl;
3866
3867 kstat_set_mutex(ks, &sc->sc_kstat_mtx);
3868 ks->ks_softc = rxr;
3869 ks->ks_data = stats;
3870 ks->ks_datalen = sizeof(*stats);
3871 ks->ks_read = ix_rxq_kstats_read;
3872
3873 rxr->kstat = ks;
3874 kstat_install(ks);
3875 }
3876
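/*
 * Create an "ix-txq" kstat for one TX ring, exporting per-queue
 * packet and byte counters under the shared kstat mutex.
 */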
3877 static void
3878 ix_txq_kstats(struct ix_softc *sc, struct ix_txring *txr)
3879 {
3880 struct ix_txq_kstats *stats;
3881 struct kstat *ks;
3882
3883 ks = kstat_create(sc->dev.dv_xname, 0, "ix-txq", txr->me,
3884 KSTAT_T_KV, 0);
3885 if (ks == NULL)
3886 return;
3887
3888 stats = malloc(sizeof(*stats), M_DEVBUF, M_WAITOK|M_ZERO);
3889 *stats = ix_txq_kstats_tpl;
3890
3891 kstat_set_mutex(ks, &sc->sc_kstat_mtx);
3892 ks->ks_softc = txr;
3893 ks->ks_data = stats;
3894 ks->ks_datalen = sizeof(*stats);
3895 ks->ks_read = ix_txq_kstats_read;
3896
3897 txr->kstat = ks;
3898 kstat_install(ks);
3899 }
3900
3901 /**********************************************************************
3902 *
3903 * Update the board statistics counters.
3904 *
3905 **********************************************************************/
3906
3907 static void
3908 ix_kstats_tick(void *arg)
3909 {
3910 struct ix_softc *sc = arg;
3911 int i;
3912
3913 timeout_add_sec(&sc->sc_kstat_tmo, 1);
3914
3915 mtx_enter(&sc->sc_kstat_mtx);
3916 ix_kstats_read(sc->sc_kstat);
3917 for (i = 0; i < sc->num_queues; i++) {
3918 ix_rxq_kstats_read(sc->rx_rings[i].kstat);
3919 ix_txq_kstats_read(sc->tx_rings[i].kstat);
3920 }
3921 mtx_leave(&sc->sc_kstat_mtx);
3922 }
3923
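/*
 * Read one of the 36-bit statistics counters that the hardware splits
 * across a 32-bit low register and a high register holding the top
 * four bits.
 */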
3924 static uint64_t
3925 ix_read36(struct ixgbe_hw *hw, bus_size_t loreg, bus_size_t hireg)
3926 {
3927 uint64_t lo, hi;
3928
3929 lo = IXGBE_READ_REG(hw, loreg);
3930 hi = IXGBE_READ_REG(hw, hireg);
3931
3932 return (((hi & 0xf) << 32) | lo);
3933 }
3934
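/*
 * Accumulate the MAC statistics registers into the kstat data.  The
 * hardware counters are presumed clear-on-read, hence the += updates;
 * the link XON/XOFF RX counters live at different offsets on 82598
 * versus later MACs and are handled after the table walk.
 */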
3935 static int
3936 ix_kstats_read(struct kstat *ks)
3937 {
3938 struct ix_softc *sc = ks->ks_softc;
3939 struct kstat_kv *kvs = ks->ks_data;
3940 struct ixgbe_hw *hw = &sc->hw;
3941 unsigned int i;
3942
3943 for (i = 0; i < nitems(ix_counters); i++) {
3944 const struct ix_counter *ixc = &ix_counters[i];
3945 uint32_t reg = ixc->reg;
3946 uint64_t v;
3947
3948 if (reg == 0)
3949 continue;
3950
3951 if (ixc->width > 32) {
3952 if (sc->hw.mac.type == ixgbe_mac_82598EB)
3953 v = IXGBE_READ_REG(hw, reg + 4);
3954 else
3955 v = ix_read36(hw, reg, reg + 4);
3956 } else
3957 v = IXGBE_READ_REG(hw, reg);
3958
3959 kstat_kv_u64(&kvs[i]) += v;
3960 }
3961
3962 /* handle the exceptions */
3963 if (sc->hw.mac.type == ixgbe_mac_82598EB) {
3964 kstat_kv_u64(&kvs[ix_counter_lxonrxc]) +=
3965 IXGBE_READ_REG(hw, IXGBE_LXONRXC);
3966 kstat_kv_u64(&kvs[ix_counter_lxoffrxc]) +=
3967 IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
3968 } else {
3969 kstat_kv_u64(&kvs[ix_counter_lxonrxc]) +=
3970 IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
3971 kstat_kv_u64(&kvs[ix_counter_lxoffrxc]) +=
3972 IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
3973 }
3974
3975 getnanouptime(&ks->ks_updated);
3976
3977 return (0);
3978 }
3979
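/*
 * Accumulate the per-queue RX counters.  82598 keeps the drop count
 * in RNBC and a 32-bit byte count in QBRC; later MACs use QPRDC and
 * a split 36-bit QBRC_L/QBRC_H pair.
 */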
3980 int
3981 ix_rxq_kstats_read(struct kstat *ks)
3982 {
3983 struct ix_rxq_kstats *stats = ks->ks_data;
3984 struct ix_rxring *rxr = ks->ks_softc;
3985 struct ix_softc *sc = rxr->sc;
3986 struct ixgbe_hw *hw = &sc->hw;
3987 uint32_t i = rxr->me;
3988
3989 kstat_kv_u64(&stats->qprc) += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
3990 if (sc->hw.mac.type == ixgbe_mac_82598EB) {
3991 kstat_kv_u64(&stats->qprdc) +=
3992 IXGBE_READ_REG(hw, IXGBE_RNBC(i));
3993 kstat_kv_u64(&stats->qbrc) +=
3994 IXGBE_READ_REG(hw, IXGBE_QBRC(i));
3995 } else {
3996 kstat_kv_u64(&stats->qprdc) +=
3997 IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
3998 kstat_kv_u64(&stats->qbrc) +=
3999 ix_read36(hw, IXGBE_QBRC_L(i), IXGBE_QBRC_H(i));
4000 }
4001
4002 getnanouptime(&ks->ks_updated);
4003
4004 return (0);
4005 }
4006
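/*
 * Accumulate the per-queue TX counters; QBTC is a single 32-bit
 * register on 82598 and a split 36-bit QBTC_L/QBTC_H pair elsewhere.
 */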
4007 int
4008 ix_txq_kstats_read(struct kstat *ks)
4009 {
4010 struct ix_txq_kstats *stats = ks->ks_data;
4011 struct ix_txring *txr = ks->ks_softc;
4012 struct ix_softc *sc = txr->sc;
4013 struct ixgbe_hw *hw = &sc->hw;
4014 uint32_t i = txr->me;
4015
4016 kstat_kv_u64(&stats->qptc) += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
4017 if (sc->hw.mac.type == ixgbe_mac_82598EB) {
4018 kstat_kv_u64(&stats->qbtc) +=
4019 IXGBE_READ_REG(hw, IXGBE_QBTC(i));
4020 } else {
4021 kstat_kv_u64(&stats->qbtc) +=
4022 ix_read36(hw, IXGBE_QBTC_L(i), IXGBE_QBTC_H(i));
4023 }
4024
4025 getnanouptime(&ks->ks_updated);
4026
4027 return (0);
4028 }
4029 #endif /* NKSTAT > 0 */
4030
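/*
 * Program the RQSMR/TQSM mapping registers so the 16 per-queue
 * statistics counters follow queues 0-15; all remaining queues are
 * lumped into counter 0.
 */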
4031 void
4032 ixgbe_map_queue_statistics(struct ix_softc *sc)
4033 {
4034 int i;
4035 uint32_t r;
4036
4037 for (i = 0; i < 32; i++) {
4038 /*
4039 * Queues 0-15 are mapped 1:1
4040 * Queue 0 -> Counter 0
4041 * Queue 1 -> Counter 1
4042 * Queue 2 -> Counter 2....
4043 * Queues 16-127 are mapped to Counter 0
4044 */
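/*
 * Each RQSMR/TQSM register packs four queue-to-counter mappings,
 * one byte per queue; e.g. for i == 1 the value written is
 * 0x07060504, which points queues 4-7 at counters 4-7.
 */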
4045 if (i < 4) {
4046 r = (i * 4 + 0);
4047 r |= (i * 4 + 1) << 8;
4048 r |= (i * 4 + 2) << 16;
4049 r |= (i * 4 + 3) << 24;
4050 } else
4051 r = 0;
4052
4053 IXGBE_WRITE_REG(&sc->hw, IXGBE_RQSMR(i), r);
4054 IXGBE_WRITE_REG(&sc->hw, IXGBE_TQSM(i), r);
4055 }
4056 }
4057