xref: /openbsd/sys/dev/pci/if_igc.c (revision 0f9891f1)
1 /*	$OpenBSD: if_igc.c,v 1.25 2024/05/24 06:02:53 jsg Exp $	*/
2 /*-
3  * SPDX-License-Identifier: BSD-2-Clause
4  *
5  * Copyright (c) 2016 Nicole Graziano <nicole@nextbsd.org>
6  * All rights reserved.
7  * Copyright (c) 2021 Rubicon Communications, LLC (Netgate)
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28  * SUCH DAMAGE.
29  */
30 
31 #include "bpfilter.h"
32 #include "vlan.h"
33 #include "kstat.h"
34 
35 #include <sys/param.h>
36 #include <sys/systm.h>
37 #include <sys/sockio.h>
38 #include <sys/mbuf.h>
39 #include <sys/malloc.h>
40 #include <sys/socket.h>
41 #include <sys/device.h>
42 #include <sys/endian.h>
43 #include <sys/intrmap.h>
44 #include <sys/kstat.h>
45 
46 #include <net/if.h>
47 #include <net/if_media.h>
48 #include <net/route.h>
49 #include <net/toeplitz.h>
50 
51 #include <netinet/in.h>
52 #include <netinet/if_ether.h>
53 #include <netinet/tcp.h>
54 #include <netinet/tcp_timer.h>
55 #include <netinet/tcp_var.h>
56 
57 #if NBPFILTER > 0
58 #include <net/bpf.h>
59 #endif
60 
61 #include <machine/bus.h>
62 #include <machine/intr.h>
63 
64 #include <dev/pci/pcivar.h>
65 #include <dev/pci/pcireg.h>
66 #include <dev/pci/pcidevs.h>
67 #include <dev/pci/if_igc.h>
68 #include <dev/pci/igc_hw.h>
69 
70 const struct pci_matchid igc_devices[] = {
71 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I220_V },
72 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I221_V },
73 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I225_BLANK_NVM },
74 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I225_I },
75 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I225_IT },
76 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I225_K },
77 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I225_K2 },
78 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I225_LM },
79 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I225_LMVP },
80 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I225_V },
81 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I226_BLANK_NVM },
82 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I226_IT },
83 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I226_LM },
84 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I226_K },
85 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I226_V }
86 };
87 
88 /*********************************************************************
89  *  Function Prototypes
90  *********************************************************************/
91 int	igc_match(struct device *, void *, void *);
92 void	igc_attach(struct device *, struct device *, void *);
93 int	igc_detach(struct device *, int);
94 
95 void	igc_identify_hardware(struct igc_softc *);
96 int	igc_allocate_pci_resources(struct igc_softc *);
97 int	igc_allocate_queues(struct igc_softc *);
98 void	igc_free_pci_resources(struct igc_softc *);
99 void	igc_reset(struct igc_softc *);
100 void	igc_init_dmac(struct igc_softc *, uint32_t);
101 int	igc_allocate_msix(struct igc_softc *);
102 void	igc_setup_msix(struct igc_softc *);
103 int	igc_dma_malloc(struct igc_softc *, bus_size_t, struct igc_dma_alloc *);
104 void	igc_dma_free(struct igc_softc *, struct igc_dma_alloc *);
105 void	igc_setup_interface(struct igc_softc *);
106 
107 void	igc_init(void *);
108 void	igc_start(struct ifqueue *);
109 int	igc_txeof(struct igc_txring *);
110 void	igc_stop(struct igc_softc *);
111 int	igc_ioctl(struct ifnet *, u_long, caddr_t);
112 int	igc_rxrinfo(struct igc_softc *, struct if_rxrinfo *);
113 int	igc_rxfill(struct igc_rxring *);
114 void	igc_rxrefill(void *);
115 int	igc_rxeof(struct igc_rxring *);
116 void	igc_rx_checksum(uint32_t, struct mbuf *, uint32_t);
117 void	igc_watchdog(struct ifnet *);
118 void	igc_media_status(struct ifnet *, struct ifmediareq *);
119 int	igc_media_change(struct ifnet *);
120 void	igc_iff(struct igc_softc *);
121 void	igc_update_link_status(struct igc_softc *);
122 int	igc_get_buf(struct igc_rxring *, int);
123 int	igc_tx_ctx_setup(struct igc_txring *, struct mbuf *, int, uint32_t *,
124 	    uint32_t *);
125 
126 void	igc_configure_queues(struct igc_softc *);
127 void	igc_set_queues(struct igc_softc *, uint32_t, uint32_t, int);
128 void	igc_enable_queue(struct igc_softc *, uint32_t);
129 void	igc_enable_intr(struct igc_softc *);
130 void	igc_disable_intr(struct igc_softc *);
131 int	igc_intr_link(void *);
132 int	igc_intr_queue(void *);
133 
134 int	igc_allocate_transmit_buffers(struct igc_txring *);
135 int	igc_setup_transmit_structures(struct igc_softc *);
136 int	igc_setup_transmit_ring(struct igc_txring *);
137 void	igc_initialize_transmit_unit(struct igc_softc *);
138 void	igc_free_transmit_structures(struct igc_softc *);
139 void	igc_free_transmit_buffers(struct igc_txring *);
140 int	igc_allocate_receive_buffers(struct igc_rxring *);
141 int	igc_setup_receive_structures(struct igc_softc *);
142 int	igc_setup_receive_ring(struct igc_rxring *);
143 void	igc_initialize_receive_unit(struct igc_softc *);
144 void	igc_free_receive_structures(struct igc_softc *);
145 void	igc_free_receive_buffers(struct igc_rxring *);
146 void	igc_initialize_rss_mapping(struct igc_softc *);
147 
148 void	igc_get_hw_control(struct igc_softc *);
149 void	igc_release_hw_control(struct igc_softc *);
150 int	igc_is_valid_ether_addr(uint8_t *);
151 
152 #if NKSTAT > 0
153 void	igc_kstat_attach(struct igc_softc *);
154 #endif
155 
156 /*********************************************************************
157  *  OpenBSD Device Interface Entry Points
158  *********************************************************************/
159 
160 struct cfdriver igc_cd = {
161 	NULL, "igc", DV_IFNET
162 };
163 
164 const struct cfattach igc_ca = {
165 	sizeof(struct igc_softc), igc_match, igc_attach, igc_detach
166 };
167 
168 /*********************************************************************
169  *  Device identification routine
170  *
170  *  igc_match determines if the driver should be loaded for an
171  *  adapter, based on the PCI vendor/device id of the adapter.
173  *
174  *  return 0 on success, positive on failure
175  *********************************************************************/
176 int
177 igc_match(struct device *parent, void *match, void *aux)
178 {
179 	return pci_matchbyid((struct pci_attach_args *)aux, igc_devices,
180 	    nitems(igc_devices));
181 }
182 
183 /*********************************************************************
184  *  Device initialization routine
185  *
186  *  The attach entry point is called when the driver is being loaded.
187  *  This routine identifies the type of hardware, allocates all resources
188  *  and initializes the hardware.
189  *
190  *  return 0 on success, positive on failure
191  *********************************************************************/
192 void
193 igc_attach(struct device *parent, struct device *self, void *aux)
194 {
195 	struct pci_attach_args *pa = (struct pci_attach_args *)aux;
196 	struct igc_softc *sc = (struct igc_softc *)self;
197 	struct igc_hw *hw = &sc->hw;
198 
199 	sc->osdep.os_sc = sc;
200 	sc->osdep.os_pa = *pa;
201 
202 	/* Determine hardware and mac info */
203 	igc_identify_hardware(sc);
204 
205 	sc->num_tx_desc = IGC_DEFAULT_TXD;
206 	sc->num_rx_desc = IGC_DEFAULT_RXD;
207 
208 	/* Setup PCI resources */
209 	if (igc_allocate_pci_resources(sc))
210 		goto err_pci;
211 
212 	/* Allocate TX/RX queues */
213 	if (igc_allocate_queues(sc))
214 		goto err_pci;
215 
216 	/* Do shared code initialization */
217 	if (igc_setup_init_funcs(hw, true)) {
218 		printf(": Setup of shared code failed\n");
219 		goto err_pci;
220 	}
221 
222 	hw->mac.autoneg = DO_AUTO_NEG;
223 	hw->phy.autoneg_wait_to_complete = false;
224 	hw->phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
225 
226 	/* Copper options. */
227 	if (hw->phy.media_type == igc_media_type_copper)
228 		hw->phy.mdix = AUTO_ALL_MODES;
229 
230 	/* Set the max frame size. */
231 	sc->hw.mac.max_frame_size = 9234;
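	/*
	 * 9234 bytes is a 9216 byte jumbo MTU plus ETHER_HDR_LEN (14)
	 * and ETHER_CRC_LEN (4); igc_setup_interface() subtracts the
	 * header and CRC again when it derives if_hardmtu.
	 */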
232 
233 	/* Allocate multicast array memory. */
234 	sc->mta = mallocarray(ETHER_ADDR_LEN, MAX_NUM_MULTICAST_ADDRESSES,
235 	    M_DEVBUF, M_NOWAIT);
236 	if (sc->mta == NULL) {
237 		printf(": Can not allocate multicast setup array\n");
238 		goto err_late;
239 	}
240 
241 	/* Check SOL/IDER usage. */
242 	if (igc_check_reset_block(hw))
243 		printf(": PHY reset is blocked due to SOL/IDER session\n");
244 
245 	/* Disable Energy Efficient Ethernet. */
246 	sc->hw.dev_spec._i225.eee_disable = true;
247 
248 	igc_reset_hw(hw);
249 
250 	/* Make sure we have a good EEPROM before we read from it. */
251 	if (igc_validate_nvm_checksum(hw) < 0) {
252 		/*
253 		 * Some PCI-E parts fail the first check due to
254 		 * the link being in sleep state; call it again and
255 		 * if it fails a second time it's a real issue.
256 		 */
257 		if (igc_validate_nvm_checksum(hw) < 0) {
258 			printf(": The EEPROM checksum is not valid\n");
259 			goto err_late;
260 		}
261 	}
262 
263 	/* Copy the permanent MAC address out of the EEPROM. */
264 	if (igc_read_mac_addr(hw) < 0) {
265 		printf(": EEPROM read error while reading MAC address\n");
266 		goto err_late;
267 	}
268 
269 	if (!igc_is_valid_ether_addr(hw->mac.addr)) {
270 		printf(": Invalid MAC address\n");
271 		goto err_late;
272 	}
273 
274 	memcpy(sc->sc_ac.ac_enaddr, sc->hw.mac.addr, ETHER_ADDR_LEN);
275 
276 	if (igc_allocate_msix(sc))
277 		goto err_late;
278 
279 	/* Setup OS specific network interface. */
280 	igc_setup_interface(sc);
281 
282 	igc_reset(sc);
283 	hw->mac.get_link_status = true;
284 	igc_update_link_status(sc);
285 
286 	/* The driver can now take control from firmware. */
287 	igc_get_hw_control(sc);
288 
289 	printf(", address %s\n", ether_sprintf(sc->hw.mac.addr));
290 
291 #if NKSTAT > 0
292 	igc_kstat_attach(sc);
293 #endif
294 	return;
295 
296 err_late:
297 	igc_release_hw_control(sc);
298 err_pci:
299 	igc_free_pci_resources(sc);
300 	free(sc->mta, M_DEVBUF, ETHER_ADDR_LEN * MAX_NUM_MULTICAST_ADDRESSES);
301 }
302 
303 /*********************************************************************
304  *  Device removal routine
305  *
306  *  The detach entry point is called when the driver is being removed.
307  *  This routine stops the adapter and deallocates all the resources
308  *  that were allocated for driver operation.
309  *
310  *  return 0 on success, positive on failure
311  *********************************************************************/
312 int
313 igc_detach(struct device *self, int flags)
314 {
315 	struct igc_softc *sc = (struct igc_softc *)self;
316 	struct ifnet *ifp = &sc->sc_ac.ac_if;
317 
318 	igc_stop(sc);
319 
320 	igc_phy_hw_reset(&sc->hw);
321 	igc_release_hw_control(sc);
322 
323 	ether_ifdetach(ifp);
324 	if_detach(ifp);
325 
326 	igc_free_pci_resources(sc);
327 
328 	igc_free_transmit_structures(sc);
329 	igc_free_receive_structures(sc);
330 	free(sc->mta, M_DEVBUF, ETHER_ADDR_LEN * MAX_NUM_MULTICAST_ADDRESSES);
331 
332 	return 0;
333 }
334 
335 void
336 igc_identify_hardware(struct igc_softc *sc)
337 {
338 	struct igc_osdep *os = &sc->osdep;
339 	struct pci_attach_args *pa = &os->os_pa;
340 
341 	/* Save off the information about this board. */
342 	sc->hw.device_id = PCI_PRODUCT(pa->pa_id);
343 
344 	/* Do shared code init and setup. */
345 	if (igc_set_mac_type(&sc->hw)) {
346 		printf(": Setup init failure\n");
347 		return;
348 	}
349 }
350 
351 int
352 igc_allocate_pci_resources(struct igc_softc *sc)
353 {
354 	struct igc_osdep *os = &sc->osdep;
355 	struct pci_attach_args *pa = &os->os_pa;
356 	pcireg_t memtype;
357 
358 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, IGC_PCIREG);
359 	if (pci_mapreg_map(pa, IGC_PCIREG, memtype, 0, &os->os_memt,
360 	    &os->os_memh, &os->os_membase, &os->os_memsize, 0)) {
361 		printf(": unable to map registers\n");
362 		return ENXIO;
363 	}
364 	sc->hw.hw_addr = (uint8_t *)os->os_membase;
365 	sc->hw.back = os;
366 
367 	igc_setup_msix(sc);
368 
369 	return 0;
370 }
371 
372 int
373 igc_allocate_queues(struct igc_softc *sc)
374 {
375 	struct igc_queue *iq;
376 	struct igc_txring *txr;
377 	struct igc_rxring *rxr;
378 	int i, rsize, rxconf, tsize, txconf;
379 
380 	/* Allocate the top level queue structs. */
381 	sc->queues = mallocarray(sc->sc_nqueues, sizeof(struct igc_queue),
382 	    M_DEVBUF, M_NOWAIT | M_ZERO);
383 	if (sc->queues == NULL) {
384 		printf("%s: unable to allocate queue\n", DEVNAME(sc));
385 		goto fail;
386 	}
387 
388 	/* Allocate the TX ring. */
389 	sc->tx_rings = mallocarray(sc->sc_nqueues, sizeof(struct igc_txring),
390 	    M_DEVBUF, M_NOWAIT | M_ZERO);
391 	if (sc->tx_rings == NULL) {
392 		printf("%s: unable to allocate TX ring\n", DEVNAME(sc));
393 		goto fail;
394 	}
395 
396 	/* Allocate the RX ring. */
397 	sc->rx_rings = mallocarray(sc->sc_nqueues, sizeof(struct igc_rxring),
398 	    M_DEVBUF, M_NOWAIT | M_ZERO);
399 	if (sc->rx_rings == NULL) {
400 		printf("%s: unable to allocate RX ring\n", DEVNAME(sc));
401 		goto rx_fail;
402 	}
403 
404 	txconf = rxconf = 0;
405 
406 	/* Set up the TX queues. */
407 	tsize = roundup2(sc->num_tx_desc * sizeof(union igc_adv_tx_desc),
408 	    IGC_DBA_ALIGN);
409 	for (i = 0; i < sc->sc_nqueues; i++, txconf++) {
410 		txr = &sc->tx_rings[i];
411 		txr->sc = sc;
412 		txr->me = i;
413 
414 		if (igc_dma_malloc(sc, tsize, &txr->txdma)) {
415 			printf("%s: unable to allocate TX descriptor\n",
416 			    DEVNAME(sc));
417 			goto err_tx_desc;
418 		}
419 		txr->tx_base = (union igc_adv_tx_desc *)txr->txdma.dma_vaddr;
420 		bzero((void *)txr->tx_base, tsize);
421 	}
422 
423 	/* Set up the RX queues. */
424 	rsize = roundup2(sc->num_rx_desc * sizeof(union igc_adv_rx_desc),
425 	    IGC_DBA_ALIGN);
426 	for (i = 0; i < sc->sc_nqueues; i++, rxconf++) {
427 		rxr = &sc->rx_rings[i];
428 		rxr->sc = sc;
429 		rxr->me = i;
430 		timeout_set(&rxr->rx_refill, igc_rxrefill, rxr);
431 
432 		if (igc_dma_malloc(sc, rsize, &rxr->rxdma)) {
433 			printf("%s: unable to allocate RX descriptor\n",
434 			    DEVNAME(sc));
435 			goto err_rx_desc;
436 		}
437 		rxr->rx_base = (union igc_adv_rx_desc *)rxr->rxdma.dma_vaddr;
438 		bzero((void *)rxr->rx_base, rsize);
439 	}
440 
441 	/* Set up the queue holding structs. */
442 	for (i = 0; i < sc->sc_nqueues; i++) {
443 		iq = &sc->queues[i];
444 		iq->sc = sc;
445 		iq->txr = &sc->tx_rings[i];
446 		iq->rxr = &sc->rx_rings[i];
447 		snprintf(iq->name, sizeof(iq->name), "%s:%d", DEVNAME(sc), i);
448 	}
449 
450 	return 0;
451 
452 err_rx_desc:
453 	for (rxr = sc->rx_rings; rxconf > 0; rxr++, rxconf--)
454 		igc_dma_free(sc, &rxr->rxdma);
455 err_tx_desc:
456 	for (txr = sc->tx_rings; txconf > 0; txr++, txconf--)
457 		igc_dma_free(sc, &txr->txdma);
458 	free(sc->rx_rings, M_DEVBUF,
459 	    sc->sc_nqueues * sizeof(struct igc_rxring));
460 	sc->rx_rings = NULL;
461 rx_fail:
462 	free(sc->tx_rings, M_DEVBUF,
463 	    sc->sc_nqueues * sizeof(struct igc_txring));
464 	sc->tx_rings = NULL;
465 fail:
466 	return ENOMEM;
467 }
468 
469 void
470 igc_free_pci_resources(struct igc_softc *sc)
471 {
472 	struct igc_osdep *os = &sc->osdep;
473 	struct pci_attach_args *pa = &os->os_pa;
474 	struct igc_queue *iq = sc->queues;
475 	int i;
476 
477 	/* Release all msix queue resources. */
478 	for (i = 0; i < sc->sc_nqueues; i++, iq++) {
479 		if (iq->tag)
480 			pci_intr_disestablish(pa->pa_pc, iq->tag);
481 		iq->tag = NULL;
482 	}
483 
484 	if (sc->tag)
485 		pci_intr_disestablish(pa->pa_pc, sc->tag);
486 	sc->tag = NULL;
487 	if (os->os_membase != 0)
488 		bus_space_unmap(os->os_memt, os->os_memh, os->os_memsize);
489 	os->os_membase = 0;
490 }
491 
492 /*********************************************************************
493  *
494  *  Initialize the hardware to a configuration as specified by the
495  *  adapter structure.
496  *
497  **********************************************************************/
498 void
499 igc_reset(struct igc_softc *sc)
500 {
501 	struct igc_hw *hw = &sc->hw;
502 	uint32_t pba;
503 	uint16_t rx_buffer_size;
504 
505 	/* Let the firmware know the OS is in control */
506 	igc_get_hw_control(sc);
507 
508 	/*
509 	 * Packet Buffer Allocation (PBA)
510 	 * Writing PBA sets the receive portion of the buffer;
511 	 * the remainder is used for the transmit buffer.
512 	 */
513 	pba = IGC_PBA_34K;
514 
515 	/*
516 	 * These parameters control the automatic generation (Tx) and
517 	 * response (Rx) to Ethernet PAUSE frames.
518 	 * - High water mark should allow for at least two frames to be
519 	 *   received after sending an XOFF.
520 	 * - Low water mark works best when it is very near the high water mark.
521 	 *   This allows the receiver to restart by sending XON when it has
522 	 *   drained a bit. Here we use an arbitrary value of 1500 which will
523 	 *   restart after one full frame is pulled from the buffer. There
524 	 *   could be several smaller frames in the buffer and if so they will
525 	 *   not trigger the XON until their total number reduces the buffer
526 	 *   by 1500.
527 	 * - The pause time is fairly large at 1000 x 512ns = 512 usec.
528 	 */
529 	rx_buffer_size = (pba & 0xffff) << 10;
530 	hw->fc.high_water = rx_buffer_size -
531 	    roundup2(sc->hw.mac.max_frame_size, 1024);
532 	/* 16-byte granularity */
533 	hw->fc.low_water = hw->fc.high_water - 16;
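	/*
	 * Illustrative numbers, assuming IGC_PBA_34K is 0x22 (34KB):
	 * rx_buffer_size = 34 * 1024 = 34816 bytes, the 9234 byte max
	 * frame rounds up to 10240, so high_water = 24576 and
	 * low_water = 24560.
	 */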
534 
535 	if (sc->fc) /* locally set flow control value? */
536 		hw->fc.requested_mode = sc->fc;
537 	else
538 		hw->fc.requested_mode = igc_fc_full;
539 
540 	hw->fc.pause_time = IGC_FC_PAUSE_TIME;
541 
542 	hw->fc.send_xon = true;
543 
544 	/* Issue a global reset */
545 	igc_reset_hw(hw);
546 	IGC_WRITE_REG(hw, IGC_WUC, 0);
547 
548 	/* and a re-init */
549 	if (igc_init_hw(hw) < 0) {
550 		printf(": Hardware Initialization Failed\n");
551 		return;
552 	}
553 
554 	/* Setup DMA Coalescing */
555 	igc_init_dmac(sc, pba);
556 
557 	IGC_WRITE_REG(hw, IGC_VET, ETHERTYPE_VLAN);
558 	igc_get_phy_info(hw);
559 	igc_check_for_link(hw);
560 }
561 
562 /*********************************************************************
563  *
564  *  Initialize the DMA Coalescing feature
565  *
566  **********************************************************************/
567 void
568 igc_init_dmac(struct igc_softc *sc, uint32_t pba)
569 {
570 	struct igc_hw *hw = &sc->hw;
571 	uint32_t dmac, reg = ~IGC_DMACR_DMAC_EN;
572 	uint16_t hwm, max_frame_size;
573 	int status;
574 
575 	max_frame_size = sc->hw.mac.max_frame_size;
576 
577 	if (sc->dmac == 0) { /* Disabling it */
578 		IGC_WRITE_REG(hw, IGC_DMACR, reg);
579 		return;
580 	} else
581 		printf(": DMA Coalescing enabled\n");
582 
583 	/* Set starting threshold */
584 	IGC_WRITE_REG(hw, IGC_DMCTXTH, 0);
585 
586 	hwm = 64 * pba - max_frame_size / 16;
587 	if (hwm < 64 * (pba - 6))
588 		hwm = 64 * (pba - 6);
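	/*
	 * hwm is expressed in 16 byte units (64 per KB of packet
	 * buffer): one max frame below the top of the buffer, but
	 * never more than 6KB below it.
	 */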
589 	reg = IGC_READ_REG(hw, IGC_FCRTC);
590 	reg &= ~IGC_FCRTC_RTH_COAL_MASK;
591 	reg |= ((hwm << IGC_FCRTC_RTH_COAL_SHIFT)
592 		& IGC_FCRTC_RTH_COAL_MASK);
593 	IGC_WRITE_REG(hw, IGC_FCRTC, reg);
594 
595 	dmac = pba - max_frame_size / 512;
596 	if (dmac < pba - 10)
597 		dmac = pba - 10;
598 	reg = IGC_READ_REG(hw, IGC_DMACR);
599 	reg &= ~IGC_DMACR_DMACTHR_MASK;
600 	reg |= ((dmac << IGC_DMACR_DMACTHR_SHIFT)
601 		& IGC_DMACR_DMACTHR_MASK);
602 
603 	/* transition to L0x or L1 if available..*/
604 	reg |= (IGC_DMACR_DMAC_EN | IGC_DMACR_DMAC_LX_MASK);
605 
606 	/* Check whether this is a 2.5Gb backplane connection
607 	 * before configuring the watchdog timer.  On 2.5Gb
608 	 * links the timer counts msec values in 12.8usec
609 	 * intervals, while on other links it counts msec
610 	 * values in 32usec intervals.
611 	 */
612 	status = IGC_READ_REG(hw, IGC_STATUS);
613 	if ((status & IGC_STATUS_2P5_SKU) &&
614 	    (!(status & IGC_STATUS_2P5_SKU_OVER)))
615 		reg |= ((sc->dmac * 5) >> 6);
616 	else
617 		reg |= (sc->dmac >> 5);
618 
619 	IGC_WRITE_REG(hw, IGC_DMACR, reg);
620 
621 	IGC_WRITE_REG(hw, IGC_DMCRTRH, 0);
622 
623 	/* Set the interval before transition */
624 	reg = IGC_READ_REG(hw, IGC_DMCTLX);
625 	reg |= IGC_DMCTLX_DCFLUSH_DIS;
626 
627 	/*
628 	** in 2.5Gb connection, TTLX unit is 0.4 usec
629 	** which is 0x4*2 = 0xA. But delay is still 4 usec
630 	*/
631 	status = IGC_READ_REG(hw, IGC_STATUS);
632 	if ((status & IGC_STATUS_2P5_SKU) &&
633 	    (!(status & IGC_STATUS_2P5_SKU_OVER)))
634 		reg |= 0xA;
635 	else
636 		reg |= 0x4;
637 
638 	IGC_WRITE_REG(hw, IGC_DMCTLX, reg);
639 
640 	/* free space in tx packet buffer to wake from DMA coal */
641 	IGC_WRITE_REG(hw, IGC_DMCTXTH, (IGC_TXPBSIZE -
642 	    (2 * max_frame_size)) >> 6);
643 
644 	/* make low power state decision controlled by DMA coal */
645 	reg = IGC_READ_REG(hw, IGC_PCIEMISC);
646 	reg &= ~IGC_PCIEMISC_LX_DECISION;
647 	IGC_WRITE_REG(hw, IGC_PCIEMISC, reg);
648 }
649 
650 int
651 igc_allocate_msix(struct igc_softc *sc)
652 {
653 	struct igc_osdep *os = &sc->osdep;
654 	struct pci_attach_args *pa = &os->os_pa;
655 	struct igc_queue *iq;
656 	pci_intr_handle_t ih;
657 	int i, error = 0;
658 
659 	for (i = 0, iq = sc->queues; i < sc->sc_nqueues; i++, iq++) {
660 		if (pci_intr_map_msix(pa, i, &ih)) {
661 			printf("%s: unable to map msi-x vector %d\n",
662 			    DEVNAME(sc), i);
663 			error = ENOMEM;
664 			goto fail;
665 		}
666 
667 		iq->tag = pci_intr_establish_cpu(pa->pa_pc, ih,
668 		    IPL_NET | IPL_MPSAFE, intrmap_cpu(sc->sc_intrmap, i),
669 		    igc_intr_queue, iq, iq->name);
670 		if (iq->tag == NULL) {
671 			printf("%s: unable to establish interrupt %d\n",
672 			    DEVNAME(sc), i);
673 			error = ENOMEM;
674 			goto fail;
675 		}
676 
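		/*
		 * Remember the vector number and its EIMS bit: each
		 * queue owns one bit in the EIMS/EIMC mask registers,
		 * which igc_configure_queues() collects and
		 * igc_enable_queue() uses to re-arm the vector.
		 */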
677 		iq->msix = i;
678 		iq->eims = 1 << i;
679 	}
680 
681 	/* Now the link status/control last MSI-X vector. */
682 	if (pci_intr_map_msix(pa, i, &ih)) {
683 		printf("%s: unable to map link vector\n", DEVNAME(sc));
684 		error = ENOMEM;
685 		goto fail;
686 	}
687 
688 	sc->tag = pci_intr_establish(pa->pa_pc, ih, IPL_NET | IPL_MPSAFE,
689 	    igc_intr_link, sc, sc->sc_dev.dv_xname);
690 	if (sc->tag == NULL) {
691 		printf("%s: unable to establish link interrupt\n", DEVNAME(sc));
692 		error = ENOMEM;
693 		goto fail;
694 	}
695 
696 	sc->linkvec = i;
697 	printf(", %s, %d queue%s", pci_intr_string(pa->pa_pc, ih),
698 	    i, (i > 1) ? "s" : "");
699 
700 	return 0;
701 fail:
702 	for (iq = sc->queues; i > 0; i--, iq++) {
703 		if (iq->tag == NULL)
704 			continue;
705 		pci_intr_disestablish(pa->pa_pc, iq->tag);
706 		iq->tag = NULL;
707 	}
708 
709 	return error;
710 }
711 
712 void
713 igc_setup_msix(struct igc_softc *sc)
714 {
715 	struct igc_osdep *os = &sc->osdep;
716 	struct pci_attach_args *pa = &os->os_pa;
717 	int nmsix;
718 
719 	nmsix = pci_intr_msix_count(pa);
720 	if (nmsix <= 1)
721 		printf(": not enough msi-x vectors\n");
722 
723 	/* Give one vector to events. */
724 	nmsix--;
725 
726 	sc->sc_intrmap = intrmap_create(&sc->sc_dev, nmsix, IGC_MAX_VECTORS,
727 	    INTRMAP_POWEROF2);
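	/*
	 * The remaining vectors are spread over the CPUs by intrmap;
	 * the resulting count (a power of two, capped at
	 * IGC_MAX_VECTORS) becomes the number of TX/RX queue pairs.
	 */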
728 	sc->sc_nqueues = intrmap_count(sc->sc_intrmap);
729 }
730 
731 int
732 igc_dma_malloc(struct igc_softc *sc, bus_size_t size, struct igc_dma_alloc *dma)
733 {
734 	struct igc_osdep *os = &sc->osdep;
735 
736 	dma->dma_tag = os->os_pa.pa_dmat;
737 
738 	if (bus_dmamap_create(dma->dma_tag, size, 1, size, 0, BUS_DMA_NOWAIT,
739 	    &dma->dma_map))
740 		return 1;
741 	if (bus_dmamem_alloc(dma->dma_tag, size, PAGE_SIZE, 0, &dma->dma_seg,
742 	    1, &dma->dma_nseg, BUS_DMA_NOWAIT))
743 		goto destroy;
744 	if (bus_dmamem_map(dma->dma_tag, &dma->dma_seg, dma->dma_nseg, size,
745 	    &dma->dma_vaddr, BUS_DMA_NOWAIT | BUS_DMA_COHERENT))
746 		goto free;
747 	if (bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr, size,
748 	    NULL, BUS_DMA_NOWAIT))
749 		goto unmap;
750 
751 	dma->dma_size = size;
752 
753 	return 0;
754 unmap:
755 	bus_dmamem_unmap(dma->dma_tag, dma->dma_vaddr, size);
756 free:
757 	bus_dmamem_free(dma->dma_tag, &dma->dma_seg, dma->dma_nseg);
758 destroy:
759 	bus_dmamap_destroy(dma->dma_tag, dma->dma_map);
760 	dma->dma_map = NULL;
761 	dma->dma_tag = NULL;
762 	return 1;
763 }
764 
765 void
766 igc_dma_free(struct igc_softc *sc, struct igc_dma_alloc *dma)
767 {
768 	if (dma->dma_tag == NULL)
769 		return;
770 
771 	if (dma->dma_map != NULL) {
772 		bus_dmamap_sync(dma->dma_tag, dma->dma_map, 0,
773 		    dma->dma_map->dm_mapsize,
774 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
775 		bus_dmamap_unload(dma->dma_tag, dma->dma_map);
776 		bus_dmamem_unmap(dma->dma_tag, dma->dma_vaddr, dma->dma_size);
777 		bus_dmamem_free(dma->dma_tag, &dma->dma_seg, dma->dma_nseg);
778 		bus_dmamap_destroy(dma->dma_tag, dma->dma_map);
779 		dma->dma_map = NULL;
780 	}
781 }
782 
783 /*********************************************************************
784  *
785  *  Setup networking device structure and register an interface.
786  *
787  **********************************************************************/
788 void
789 igc_setup_interface(struct igc_softc *sc)
790 {
791 	struct ifnet *ifp = &sc->sc_ac.ac_if;
792 	int i;
793 
794 	ifp->if_softc = sc;
795 	strlcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);
796 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
797 	ifp->if_xflags = IFXF_MPSAFE;
798 	ifp->if_ioctl = igc_ioctl;
799 	ifp->if_qstart = igc_start;
800 	ifp->if_watchdog = igc_watchdog;
801 	ifp->if_hardmtu = sc->hw.mac.max_frame_size - ETHER_HDR_LEN -
802 	    ETHER_CRC_LEN;
803 	ifq_init_maxlen(&ifp->if_snd, sc->num_tx_desc - 1);
804 
805 	ifp->if_capabilities = IFCAP_VLAN_MTU;
806 
807 #if NVLAN > 0
808 	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
809 #endif
810 
811 	ifp->if_capabilities |= IFCAP_CSUM_IPv4;
812 	ifp->if_capabilities |= IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;
813 	ifp->if_capabilities |= IFCAP_CSUM_TCPv6 | IFCAP_CSUM_UDPv6;
814 	ifp->if_capabilities |= IFCAP_TSOv4 | IFCAP_TSOv6;
815 
816 	/* Initialize ifmedia structures. */
817 	ifmedia_init(&sc->media, IFM_IMASK, igc_media_change, igc_media_status);
818 	ifmedia_add(&sc->media, IFM_ETHER | IFM_10_T, 0, NULL);
819 	ifmedia_add(&sc->media, IFM_ETHER | IFM_10_T | IFM_FDX, 0, NULL);
820 	ifmedia_add(&sc->media, IFM_ETHER | IFM_100_TX, 0, NULL);
821 	ifmedia_add(&sc->media, IFM_ETHER | IFM_100_TX | IFM_FDX, 0, NULL);
822 	ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
823 	ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_T, 0, NULL);
824 	ifmedia_add(&sc->media, IFM_ETHER | IFM_2500_T, 0, NULL);
825 
826 	ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
827 	ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);
828 
829 	if_attach(ifp);
830 	ether_ifattach(ifp);
831 
832 	if_attach_queues(ifp, sc->sc_nqueues);
833 	if_attach_iqueues(ifp, sc->sc_nqueues);
834 	for (i = 0; i < sc->sc_nqueues; i++) {
835 		struct ifqueue *ifq = ifp->if_ifqs[i];
836 		struct ifiqueue *ifiq = ifp->if_iqs[i];
837 		struct igc_txring *txr = &sc->tx_rings[i];
838 		struct igc_rxring *rxr = &sc->rx_rings[i];
839 
840 		ifq->ifq_softc = txr;
841 		txr->ifq = ifq;
842 
843 		ifiq->ifiq_softc = rxr;
844 		rxr->ifiq = ifiq;
845 	}
846 }
847 
848 void
849 igc_init(void *arg)
850 {
851 	struct igc_softc *sc = (struct igc_softc *)arg;
852 	struct ifnet *ifp = &sc->sc_ac.ac_if;
853 	struct igc_rxring *rxr;
854 	uint32_t ctrl = 0;
855 	int i, s;
856 
857 	s = splnet();
858 
859 	igc_stop(sc);
860 
861 	/* Get the latest mac address, user can use a LAA. */
862 	bcopy(sc->sc_ac.ac_enaddr, sc->hw.mac.addr, ETHER_ADDR_LEN);
863 
864 	/* Put the address into the receive address array. */
865 	igc_rar_set(&sc->hw, sc->hw.mac.addr, 0);
866 
867 	/* Initialize the hardware. */
868 	igc_reset(sc);
869 	igc_update_link_status(sc);
870 
871 	/* Setup VLAN support, basic and offload if available. */
872 	IGC_WRITE_REG(&sc->hw, IGC_VET, ETHERTYPE_VLAN);
873 
874 	/* Prepare transmit descriptors and buffers. */
875 	if (igc_setup_transmit_structures(sc)) {
876 		printf("%s: Could not setup transmit structures\n",
877 		    DEVNAME(sc));
878 		igc_stop(sc);
879 		splx(s);
880 		return;
881 	}
882 	igc_initialize_transmit_unit(sc);
883 
884 	sc->rx_mbuf_sz = MCLBYTES + ETHER_ALIGN;
885 	/* Prepare receive descriptors and buffers. */
886 	if (igc_setup_receive_structures(sc)) {
887 		printf("%s: Could not setup receive structures\n",
888 		    DEVNAME(sc));
889 		igc_stop(sc);
890 		splx(s);
891 		return;
892 	}
893 	igc_initialize_receive_unit(sc);
894 
895 	if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) {
896 		ctrl = IGC_READ_REG(&sc->hw, IGC_CTRL);
897 		ctrl |= IGC_CTRL_VME;
898 		IGC_WRITE_REG(&sc->hw, IGC_CTRL, ctrl);
899 	}
900 
901 	/* Setup multicast table. */
902 	igc_iff(sc);
903 
904 	igc_clear_hw_cntrs_base_generic(&sc->hw);
905 
906 	igc_configure_queues(sc);
907 
908 	/* This clears any pending interrupts */
909 	IGC_READ_REG(&sc->hw, IGC_ICR);
910 	IGC_WRITE_REG(&sc->hw, IGC_ICS, IGC_ICS_LSC);
911 
912 	/* The driver can now take control from firmware. */
913 	igc_get_hw_control(sc);
914 
915 	/* Set Energy Efficient Ethernet. */
916 	igc_set_eee_i225(&sc->hw, true, true, true);
917 
918 	for (i = 0; i < sc->sc_nqueues; i++) {
919 		rxr = &sc->rx_rings[i];
920 		igc_rxfill(rxr);
921 		if (if_rxr_inuse(&rxr->rx_ring) == 0) {
922 			printf("%s: Unable to fill any rx descriptors\n",
923 			    DEVNAME(sc));
924 			igc_stop(sc);
925 			splx(s);
926 		}
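		/*
		 * Writing RDT (the receive tail) hands the freshly
		 * filled descriptors to the hardware; the value points
		 * one slot past the last descriptor made available.
		 */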
927 		IGC_WRITE_REG(&sc->hw, IGC_RDT(i),
928 		    (rxr->last_desc_filled + 1) % sc->num_rx_desc);
929 	}
930 
931 	igc_enable_intr(sc);
932 
933 	ifp->if_flags |= IFF_RUNNING;
934 	for (i = 0; i < sc->sc_nqueues; i++)
935 		ifq_clr_oactive(ifp->if_ifqs[i]);
936 
937 	splx(s);
938 }
939 
940 static inline int
941 igc_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m)
942 {
943 	int error;
944 
945 	error = bus_dmamap_load_mbuf(dmat, map, m,
946 	    BUS_DMA_STREAMING | BUS_DMA_NOWAIT);
947 	if (error != EFBIG)
948 		return (error);
949 
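	/*
	 * EFBIG means the mbuf chain needs more segments than the DMA
	 * map allows: compact the chain with m_defrag() and retry the
	 * load once.
	 */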
950 	error = m_defrag(m, M_DONTWAIT);
951 	if (error != 0)
952 		return (error);
953 
954 	return (bus_dmamap_load_mbuf(dmat, map, m,
955 	    BUS_DMA_STREAMING | BUS_DMA_NOWAIT));
956 }
957 
958 void
959 igc_start(struct ifqueue *ifq)
960 {
961 	struct ifnet *ifp = ifq->ifq_if;
962 	struct igc_softc *sc = ifp->if_softc;
963 	struct igc_txring *txr = ifq->ifq_softc;
964 	union igc_adv_tx_desc *txdesc;
965 	struct igc_tx_buf *txbuf;
966 	bus_dmamap_t map;
967 	struct mbuf *m;
968 	unsigned int prod, free, last, i;
969 	unsigned int mask;
970 	uint32_t cmd_type_len;
971 	uint32_t olinfo_status;
972 	int post = 0;
973 #if NBPFILTER > 0
974 	caddr_t if_bpf;
975 #endif
976 
977 	if (!sc->link_active) {
978 		ifq_purge(ifq);
979 		return;
980 	}
981 
982 	prod = txr->next_avail_desc;
983 	free = txr->next_to_clean;
984 	if (free <= prod)
985 		free += sc->num_tx_desc;
986 	free -= prod;
987 
988 	bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, 0,
989 	    txr->txdma.dma_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
990 
991 	mask = sc->num_tx_desc - 1;
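	/*
	 * The ring size is a power of two, so "prod & mask" wraps the
	 * producer index; "free" is the distance from the producer back
	 * to the consumer (next_to_clean), i.e. the number of
	 * descriptors still available for new packets.
	 */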
992 
993 	for (;;) {
994 		if (free <= IGC_MAX_SCATTER + 1) {
995 			ifq_set_oactive(ifq);
996 			break;
997 		}
998 
999 		m = ifq_dequeue(ifq);
1000 		if (m == NULL)
1001 			break;
1002 
1003 		txbuf = &txr->tx_buffers[prod];
1004 		map = txbuf->map;
1005 
1006 		if (igc_load_mbuf(txr->txdma.dma_tag, map, m) != 0) {
1007 			ifq->ifq_errors++;
1008 			m_freem(m);
1009 			continue;
1010 		}
1011 
1012 		olinfo_status = m->m_pkthdr.len << IGC_ADVTXD_PAYLEN_SHIFT;
1013 
1014 		bus_dmamap_sync(txr->txdma.dma_tag, map, 0,
1015 		    map->dm_mapsize, BUS_DMASYNC_PREWRITE);
1016 
1017 		cmd_type_len = IGC_ADVTXD_DCMD_IFCS | IGC_ADVTXD_DTYP_DATA |
1018 		    IGC_ADVTXD_DCMD_DEXT;
1019 
1020 		if (igc_tx_ctx_setup(txr, m, prod, &cmd_type_len,
1021 		    &olinfo_status)) {
1022 			/* Consume the first descriptor */
1023 			prod++;
1024 			prod &= mask;
1025 			free--;
1026 		}
1027 
1028 		for (i = 0; i < map->dm_nsegs; i++) {
1029 			txdesc = &txr->tx_base[prod];
1030 
1031 			CLR(cmd_type_len, IGC_ADVTXD_DTALEN_MASK);
1032 			cmd_type_len |= map->dm_segs[i].ds_len;
1033 			if (i == map->dm_nsegs - 1)
1034 				cmd_type_len |= IGC_ADVTXD_DCMD_EOP |
1035 				    IGC_ADVTXD_DCMD_RS;
1036 
1037 			htolem64(&txdesc->read.buffer_addr,
1038 			    map->dm_segs[i].ds_addr);
1039 			htolem32(&txdesc->read.cmd_type_len, cmd_type_len);
1040 			htolem32(&txdesc->read.olinfo_status, olinfo_status);
1041 
1042 			last = prod;
1043 
1044 			prod++;
1045 			prod &= mask;
1046 		}
1047 
1048 		txbuf->m_head = m;
1049 		txbuf->eop_index = last;
1050 
1051 #if NBPFILTER > 0
1052 		if_bpf = ifp->if_bpf;
1053 		if (if_bpf)
1054 			bpf_mtap_ether(if_bpf, m, BPF_DIRECTION_OUT);
1055 #endif
1056 
1057 		free -= i;
1058 		post = 1;
1059 	}
1060 
1061 	bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, 0,
1062 	    txr->txdma.dma_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
1063 
1064 	if (post) {
1065 		txr->next_avail_desc = prod;
1066 		IGC_WRITE_REG(&sc->hw, IGC_TDT(txr->me), prod);
1067 	}
1068 }
1069 
1070 int
1071 igc_txeof(struct igc_txring *txr)
1072 {
1073 	struct igc_softc *sc = txr->sc;
1074 	struct ifqueue *ifq = txr->ifq;
1075 	union igc_adv_tx_desc *txdesc;
1076 	struct igc_tx_buf *txbuf;
1077 	bus_dmamap_t map;
1078 	unsigned int cons, prod, last;
1079 	unsigned int mask;
1080 	int done = 0;
1081 
1082 	prod = txr->next_avail_desc;
1083 	cons = txr->next_to_clean;
1084 
1085 	if (cons == prod)
1086 		return (0);
1087 
1088 	bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, 0,
1089 	    txr->txdma.dma_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1090 
1091 	mask = sc->num_tx_desc - 1;
1092 
1093 	do {
1094 		txbuf = &txr->tx_buffers[cons];
1095 		last = txbuf->eop_index;
1096 		txdesc = &txr->tx_base[last];
1097 
1098 		if (!(txdesc->wb.status & htole32(IGC_TXD_STAT_DD)))
1099 			break;
1100 
1101 		map = txbuf->map;
1102 
1103 		bus_dmamap_sync(txr->txdma.dma_tag, map, 0, map->dm_mapsize,
1104 		    BUS_DMASYNC_POSTWRITE);
1105 		bus_dmamap_unload(txr->txdma.dma_tag, map);
1106 		m_freem(txbuf->m_head);
1107 
1108 		txbuf->m_head = NULL;
1109 		txbuf->eop_index = -1;
1110 
1111 		cons = last + 1;
1112 		cons &= mask;
1113 
1114 		done = 1;
1115 	} while (cons != prod);
1116 
1117 	bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, 0,
1118 	    txr->txdma.dma_map->dm_mapsize, BUS_DMASYNC_PREREAD);
1119 
1120 	txr->next_to_clean = cons;
1121 
1122 	if (ifq_is_oactive(ifq))
1123 		ifq_restart(ifq);
1124 
1125 	return (done);
1126 }
1127 
1128 /*********************************************************************
1129  *
1130  *  This routine disables all traffic on the adapter by issuing a
1131  *  global reset on the MAC.
1132  *
1133  **********************************************************************/
1134 void
1135 igc_stop(struct igc_softc *sc)
1136 {
1137 	struct ifnet *ifp = &sc->sc_ac.ac_if;
1138 	int i;
1139 
1140 	/* Tell the stack that the interface is no longer active. */
1141 	ifp->if_flags &= ~IFF_RUNNING;
1142 
1143 	igc_disable_intr(sc);
1144 
1145 	igc_reset_hw(&sc->hw);
1146 	IGC_WRITE_REG(&sc->hw, IGC_WUC, 0);
1147 
1148 	intr_barrier(sc->tag);
1149 	for (i = 0; i < sc->sc_nqueues; i++) {
1150 		struct ifqueue *ifq = ifp->if_ifqs[i];
1151 		ifq_barrier(ifq);
1152 		ifq_clr_oactive(ifq);
1153 
1154 		if (sc->queues[i].tag != NULL)
1155 			intr_barrier(sc->queues[i].tag);
1156 		timeout_del(&sc->rx_rings[i].rx_refill);
1157 	}
1158 
1159 	igc_free_transmit_structures(sc);
1160 	igc_free_receive_structures(sc);
1161 
1162 	igc_update_link_status(sc);
1163 }
1164 
1165 /*********************************************************************
1166  *  Ioctl entry point
1167  *
1168  *  igc_ioctl is called when the user wants to configure the
1169  *  interface.
1170  *
1171  *  return 0 on success, positive on failure
1172  **********************************************************************/
1173 int
1174 igc_ioctl(struct ifnet * ifp, u_long cmd, caddr_t data)
1175 {
1176 	struct igc_softc *sc = ifp->if_softc;
1177 	struct ifreq *ifr = (struct ifreq *)data;
1178 	int s, error = 0;
1179 
1180 	s = splnet();
1181 
1182 	switch (cmd) {
1183 	case SIOCSIFADDR:
1184 		ifp->if_flags |= IFF_UP;
1185 		if (!(ifp->if_flags & IFF_RUNNING))
1186 			igc_init(sc);
1187 		break;
1188 	case SIOCSIFFLAGS:
1189 		if (ifp->if_flags & IFF_UP) {
1190 			if (ifp->if_flags & IFF_RUNNING)
1191 				error = ENETRESET;
1192 			else
1193 				igc_init(sc);
1194 		} else {
1195 			if (ifp->if_flags & IFF_RUNNING)
1196 				igc_stop(sc);
1197 		}
1198 		break;
1199 	case SIOCSIFMEDIA:
1200 	case SIOCGIFMEDIA:
1201 		error = ifmedia_ioctl(ifp, ifr, &sc->media, cmd);
1202 		break;
1203 	case SIOCGIFRXR:
1204 		error = igc_rxrinfo(sc, (struct if_rxrinfo *)ifr->ifr_data);
1205 		break;
1206 	default:
1207 		error = ether_ioctl(ifp, &sc->sc_ac, cmd, data);
1208 	}
1209 
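	/*
	 * ENETRESET means the multicast filter (or promiscuous state)
	 * changed while the interface is running: reprogram the receive
	 * filter with interrupts blocked rather than doing a full
	 * reinit.
	 */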
1210 	if (error == ENETRESET) {
1211 		if (ifp->if_flags & IFF_RUNNING) {
1212 			igc_disable_intr(sc);
1213 			igc_iff(sc);
1214 			igc_enable_intr(sc);
1215 		}
1216 		error = 0;
1217 	}
1218 
1219 	splx(s);
1220 	return error;
1221 }
1222 
1223 int
1224 igc_rxrinfo(struct igc_softc *sc, struct if_rxrinfo *ifri)
1225 {
1226 	struct if_rxring_info *ifr;
1227 	struct igc_rxring *rxr;
1228 	int error, i, n = 0;
1229 
1230 	ifr = mallocarray(sc->sc_nqueues, sizeof(*ifr), M_DEVBUF,
1231 	    M_WAITOK | M_ZERO);
1232 
1233 	for (i = 0; i < sc->sc_nqueues; i++) {
1234 		rxr = &sc->rx_rings[i];
1235 		ifr[n].ifr_size = MCLBYTES;
1236 		snprintf(ifr[n].ifr_name, sizeof(ifr[n].ifr_name), "%d", i);
1237 		ifr[n].ifr_info = rxr->rx_ring;
1238 		n++;
1239 	}
1240 
1241 	error = if_rxr_info_ioctl(ifri, sc->sc_nqueues, ifr);
1242 	free(ifr, M_DEVBUF, sc->sc_nqueues * sizeof(*ifr));
1243 
1244 	return error;
1245 }
1246 
1247 int
1248 igc_rxfill(struct igc_rxring *rxr)
1249 {
1250 	struct igc_softc *sc = rxr->sc;
1251 	int i, post = 0;
1252 	u_int slots;
1253 
1254 	bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map, 0,
1255 	    rxr->rxdma.dma_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1256 
1257 	i = rxr->last_desc_filled;
1258 	for (slots = if_rxr_get(&rxr->rx_ring, sc->num_rx_desc); slots > 0;
1259 	    slots--) {
1260 		if (++i == sc->num_rx_desc)
1261 			i = 0;
1262 
1263 		if (igc_get_buf(rxr, i) != 0)
1264 			break;
1265 
1266 		rxr->last_desc_filled = i;
1267 		post = 1;
1268 	}
1269 
1270 	bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map, 0,
1271 	    rxr->rxdma.dma_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
1272 
1273 	if_rxr_put(&rxr->rx_ring, slots);
1274 
1275 	return post;
1276 }
1277 
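/*
 * If igc_rxfill() could not post a single descriptor and the ring is
 * completely empty, no RX interrupt will arrive to trigger another
 * refill, so the refill is retried from a timeout instead.
 */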
1278 void
1279 igc_rxrefill(void *xrxr)
1280 {
1281 	struct igc_rxring *rxr = xrxr;
1282 	struct igc_softc *sc = rxr->sc;
1283 
1284 	if (igc_rxfill(rxr)) {
1285 		IGC_WRITE_REG(&sc->hw, IGC_RDT(rxr->me),
1286 		    (rxr->last_desc_filled + 1) % sc->num_rx_desc);
1287 	}
1288 	else if (if_rxr_inuse(&rxr->rx_ring) == 0)
1289 		timeout_add(&rxr->rx_refill, 1);
1290 }
1291 
1292 /*********************************************************************
1293  *
1294  *  This routine executes in interrupt context. It replenishes
1295  *  the mbufs in the descriptor ring and sends data which has been
1296  *  dma'ed into host memory up to the upper layer.
1297  *
1298  *********************************************************************/
1299 int
1300 igc_rxeof(struct igc_rxring *rxr)
1301 {
1302 	struct igc_softc *sc = rxr->sc;
1303 	struct ifnet *ifp = &sc->sc_ac.ac_if;
1304 	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
1305 	struct mbuf *mp, *m;
1306 	struct igc_rx_buf *rxbuf, *nxbuf;
1307 	union igc_adv_rx_desc *rxdesc;
1308 	uint32_t ptype, staterr = 0;
1309 	uint16_t len, vtag;
1310 	uint8_t eop = 0;
1311 	int i, nextp;
1312 
1313 	if (!ISSET(ifp->if_flags, IFF_RUNNING))
1314 		return 0;
1315 
1316 	i = rxr->next_to_check;
1317 	while (if_rxr_inuse(&rxr->rx_ring) > 0) {
1318 		uint32_t hash;
1319 		uint16_t hashtype;
1320 
1321 		bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
1322 		    i * sizeof(union igc_adv_rx_desc),
1323 		    sizeof(union igc_adv_rx_desc), BUS_DMASYNC_POSTREAD);
1324 
1325 		rxdesc = &rxr->rx_base[i];
1326 		staterr = letoh32(rxdesc->wb.upper.status_error);
1327 		if (!ISSET(staterr, IGC_RXD_STAT_DD)) {
1328 			bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
1329 			    i * sizeof(union igc_adv_rx_desc),
1330 			    sizeof(union igc_adv_rx_desc), BUS_DMASYNC_PREREAD);
1331 			break;
1332 		}
1333 
1334 		/* Zero out the receive descriptors status. */
1335 		rxdesc->wb.upper.status_error = 0;
1336 		rxbuf = &rxr->rx_buffers[i];
1337 
1338 		/* Pull the mbuf off the ring. */
1339 		bus_dmamap_sync(rxr->rxdma.dma_tag, rxbuf->map, 0,
1340 		    rxbuf->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1341 		bus_dmamap_unload(rxr->rxdma.dma_tag, rxbuf->map);
1342 
1343 		mp = rxbuf->buf;
1344 		len = letoh16(rxdesc->wb.upper.length);
1345 		vtag = letoh16(rxdesc->wb.upper.vlan);
1346 		eop = ((staterr & IGC_RXD_STAT_EOP) == IGC_RXD_STAT_EOP);
1347 		ptype = letoh32(rxdesc->wb.lower.lo_dword.data) &
1348 		    IGC_PKTTYPE_MASK;
1349 		hash = letoh32(rxdesc->wb.lower.hi_dword.rss);
1350 		hashtype = le16toh(rxdesc->wb.lower.lo_dword.hs_rss.pkt_info) &
1351 		    IGC_RXDADV_RSSTYPE_MASK;
1352 
1353 		if (staterr & IGC_RXDEXT_STATERR_RXE) {
1354 			if (rxbuf->fmp) {
1355 				m_freem(rxbuf->fmp);
1356 				rxbuf->fmp = NULL;
1357 			}
1358 
1359 			m_freem(mp);
1360 			rxbuf->buf = NULL;
1361 			goto next_desc;
1362 		}
1363 
1364 		if (mp == NULL) {
1365 			panic("%s: igc_rxeof: NULL mbuf in slot %d "
1366 			    "(nrx %d, filled %d)", DEVNAME(sc), i,
1367 			    if_rxr_inuse(&rxr->rx_ring), rxr->last_desc_filled);
1368 		}
1369 
1370 		if (!eop) {
1371 			/*
1372 			 * Figure out the next descriptor of this frame.
1373 			 */
1374 			nextp = i + 1;
1375 			if (nextp == sc->num_rx_desc)
1376 				nextp = 0;
1377 			nxbuf = &rxr->rx_buffers[nextp];
1378 			/* prefetch(nxbuf); */
1379 		}
1380 
1381 		mp->m_len = len;
1382 
1383 		m = rxbuf->fmp;
1384 		rxbuf->buf = rxbuf->fmp = NULL;
1385 
1386 		if (m != NULL)
1387 			m->m_pkthdr.len += mp->m_len;
1388 		else {
1389 			m = mp;
1390 			m->m_pkthdr.len = mp->m_len;
1391 #if NVLAN > 0
1392 			if (staterr & IGC_RXD_STAT_VP) {
1393 				m->m_pkthdr.ether_vtag = vtag;
1394 				m->m_flags |= M_VLANTAG;
1395 			}
1396 #endif
1397 		}
1398 
1399 		/* Pass the head pointer on */
1400 		if (eop == 0) {
1401 			nxbuf->fmp = m;
1402 			m = NULL;
1403 			mp->m_next = nxbuf->buf;
1404 		} else {
1405 			igc_rx_checksum(staterr, m, ptype);
1406 
1407 			if (hashtype != IGC_RXDADV_RSSTYPE_NONE) {
1408 				m->m_pkthdr.ph_flowid = hash;
1409 				SET(m->m_pkthdr.csum_flags, M_FLOWID);
1410 			}
1411 
1412 			ml_enqueue(&ml, m);
1413 		}
1414 next_desc:
1415 		if_rxr_put(&rxr->rx_ring, 1);
1416 		bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
1417 		    i * sizeof(union igc_adv_rx_desc),
1418 		    sizeof(union igc_adv_rx_desc), BUS_DMASYNC_PREREAD);
1419 
1420 		/* Advance our pointers to the next descriptor. */
1421 		if (++i == sc->num_rx_desc)
1422 			i = 0;
1423 	}
1424 	rxr->next_to_check = i;
1425 
1426 	if (ifiq_input(rxr->ifiq, &ml))
1427 		if_rxr_livelocked(&rxr->rx_ring);
1428 
1429 	if (!(staterr & IGC_RXD_STAT_DD))
1430 		return 0;
1431 
1432 	return 1;
1433 }
1434 
1435 /*********************************************************************
1436  *
1437  *  Verify that the hardware indicated that the checksum is valid.
1438  *  Inform the stack about the status of the checksum so that the stack
1439  *  doesn't spend time verifying the checksum.
1440  *
1441  *********************************************************************/
1442 void
1443 igc_rx_checksum(uint32_t staterr, struct mbuf *m, uint32_t ptype)
1444 {
1445 	uint16_t status = (uint16_t)staterr;
1446 	uint8_t errors = (uint8_t)(staterr >> 24);
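	/*
	 * staterr packs the descriptor status bits in 15:0 and the
	 * error bits in 31:24, hence the two casts above.
	 */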
1447 
1448 	if (status & IGC_RXD_STAT_IPCS) {
1449 		if (!(errors & IGC_RXD_ERR_IPE)) {
1450 			/* IP Checksum Good */
1451 			m->m_pkthdr.csum_flags = M_IPV4_CSUM_IN_OK;
1452 		} else
1453 			m->m_pkthdr.csum_flags = 0;
1454 	}
1455 
1456 	if (status & (IGC_RXD_STAT_TCPCS | IGC_RXD_STAT_UDPCS)) {
1457 		if (!(errors & IGC_RXD_ERR_TCPE))
1458 			m->m_pkthdr.csum_flags |=
1459 			    M_TCP_CSUM_IN_OK | M_UDP_CSUM_IN_OK;
1460 	}
1461 }
1462 
1463 void
1464 igc_watchdog(struct ifnet * ifp)
1465 {
1466 }
1467 
1468 /*********************************************************************
1469  *
1470  *  Media Ioctl callback
1471  *
1472  *  This routine is called whenever the user queries the status of
1473  *  the interface using ifconfig.
1474  *
1475  **********************************************************************/
1476 void
1477 igc_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1478 {
1479 	struct igc_softc *sc = ifp->if_softc;
1480 
1481 	igc_update_link_status(sc);
1482 
1483 	ifmr->ifm_status = IFM_AVALID;
1484 	ifmr->ifm_active = IFM_ETHER;
1485 
1486 	if (!sc->link_active) {
1487 		ifmr->ifm_active |= IFM_NONE;
1488 		return;
1489 	}
1490 
1491 	ifmr->ifm_status |= IFM_ACTIVE;
1492 
1493 	switch (sc->link_speed) {
1494 	case 10:
1495 		ifmr->ifm_active |= IFM_10_T;
1496 		break;
1497 	case 100:
1498 		ifmr->ifm_active |= IFM_100_TX;
1499 		break;
1500 	case 1000:
1501 		ifmr->ifm_active |= IFM_1000_T;
1502 		break;
1503 	case 2500:
1504 		ifmr->ifm_active |= IFM_2500_T;
1505 		break;
1506 	}
1507 
1508 	if (sc->link_duplex == FULL_DUPLEX)
1509 		ifmr->ifm_active |= IFM_FDX;
1510 	else
1511 		ifmr->ifm_active |= IFM_HDX;
1512 
1513 	switch (sc->hw.fc.current_mode) {
1514 	case igc_fc_tx_pause:
1515 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
1516 		break;
1517 	case igc_fc_rx_pause:
1518 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
1519 		break;
1520 	case igc_fc_full:
1521 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE |
1522 		    IFM_ETH_TXPAUSE;
1523 		break;
1524 	default:
1525 		ifmr->ifm_active &= ~(IFM_FLOW | IFM_ETH_RXPAUSE |
1526 		    IFM_ETH_TXPAUSE);
1527 		break;
1528 	}
1529 }
1530 
1531 /*********************************************************************
1532  *
1533  *  Media Ioctl callback
1534  *
1535  *  This routine is called when the user changes speed/duplex using
1536  *  media/mediaopt options with ifconfig.
1537  *
1538  **********************************************************************/
1539 int
1540 igc_media_change(struct ifnet *ifp)
1541 {
1542 	struct igc_softc *sc = ifp->if_softc;
1543 	struct ifmedia *ifm = &sc->media;
1544 
1545 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1546 		return (EINVAL);
1547 
1548 	sc->hw.mac.autoneg = DO_AUTO_NEG;
1549 
1550 	switch (IFM_SUBTYPE(ifm->ifm_media)) {
1551 	case IFM_AUTO:
1552 		sc->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
1553 		break;
1554 	case IFM_2500_T:
1555 		sc->hw.phy.autoneg_advertised = ADVERTISE_2500_FULL;
1556 		break;
1557 	case IFM_1000_T:
1558 		sc->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
1559 		break;
1560 	case IFM_100_TX:
1561 		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
1562 			sc->hw.phy.autoneg_advertised = ADVERTISE_100_FULL;
1563 		else
1564 			sc->hw.phy.autoneg_advertised = ADVERTISE_100_HALF;
1565 		break;
1566 	case IFM_10_T:
1567 		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
1568 			sc->hw.phy.autoneg_advertised = ADVERTISE_10_FULL;
1569 		else
1570 			sc->hw.phy.autoneg_advertised = ADVERTISE_10_HALF;
1571 		break;
1572 	default:
1573 		return EINVAL;
1574 	}
1575 
1576 	igc_init(sc);
1577 
1578 	return 0;
1579 }
1580 
1581 void
1582 igc_iff(struct igc_softc *sc)
1583 {
1584 	struct ifnet *ifp = &sc->sc_ac.ac_if;
1585 	struct arpcom *ac = &sc->sc_ac;
1586 	struct ether_multi *enm;
1587 	struct ether_multistep step;
1588 	uint32_t reg_rctl = 0;
1589 	uint8_t *mta;
1590 	int mcnt = 0;
1591 
1592 	mta = sc->mta;
1593 	bzero(mta, sizeof(uint8_t) * ETHER_ADDR_LEN *
1594 	    MAX_NUM_MULTICAST_ADDRESSES);
1595 
1596 	reg_rctl = IGC_READ_REG(&sc->hw, IGC_RCTL);
1597 	reg_rctl &= ~(IGC_RCTL_UPE | IGC_RCTL_MPE);
1598 	ifp->if_flags &= ~IFF_ALLMULTI;
1599 
1600 	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0 ||
1601 	    ac->ac_multicnt > MAX_NUM_MULTICAST_ADDRESSES) {
1602 		ifp->if_flags |= IFF_ALLMULTI;
1603 		reg_rctl |= IGC_RCTL_MPE;
1604 		if (ifp->if_flags & IFF_PROMISC)
1605 			reg_rctl |= IGC_RCTL_UPE;
1606 	} else {
1607 		ETHER_FIRST_MULTI(step, ac, enm);
1608 		while (enm != NULL) {
1609 			bcopy(enm->enm_addrlo,
1610 			    &mta[mcnt * ETHER_ADDR_LEN], ETHER_ADDR_LEN);
1611 			mcnt++;
1612 
1613 			ETHER_NEXT_MULTI(step, enm);
1614 		}
1615 
1616 		igc_update_mc_addr_list(&sc->hw, mta, mcnt);
1617 	}
1618 
1619 	IGC_WRITE_REG(&sc->hw, IGC_RCTL, reg_rctl);
1620 }
1621 
1622 void
1623 igc_update_link_status(struct igc_softc *sc)
1624 {
1625 	struct ifnet *ifp = &sc->sc_ac.ac_if;
1626 	struct igc_hw *hw = &sc->hw;
1627 	int link_state;
1628 
1629 	if (hw->mac.get_link_status == true)
1630 		igc_check_for_link(hw);
1631 
1632 	if (IGC_READ_REG(&sc->hw, IGC_STATUS) & IGC_STATUS_LU) {
1633 		if (sc->link_active == 0) {
1634 			igc_get_speed_and_duplex(hw, &sc->link_speed,
1635 			    &sc->link_duplex);
1636 			sc->link_active = 1;
1637 			ifp->if_baudrate = IF_Mbps(sc->link_speed);
1638 		}
1639 		link_state = (sc->link_duplex == FULL_DUPLEX) ?
1640 		    LINK_STATE_FULL_DUPLEX : LINK_STATE_HALF_DUPLEX;
1641 	} else {
1642 		if (sc->link_active == 1) {
1643 			ifp->if_baudrate = sc->link_speed = 0;
1644 			sc->link_duplex = 0;
1645 			sc->link_active = 0;
1646 		}
1647 		link_state = LINK_STATE_DOWN;
1648 	}
1649 	if (ifp->if_link_state != link_state) {
1650 		ifp->if_link_state = link_state;
1651 		if_link_state_change(ifp);
1652 	}
1653 }
1654 
1655 /*********************************************************************
1656  *
1657  *  Get a buffer from system mbuf buffer pool.
1658  *
1659  **********************************************************************/
1660 int
1661 igc_get_buf(struct igc_rxring *rxr, int i)
1662 {
1663 	struct igc_softc *sc = rxr->sc;
1664 	struct igc_rx_buf *rxbuf;
1665 	struct mbuf *m;
1666 	union igc_adv_rx_desc *rxdesc;
1667 	int error;
1668 
1669 	rxbuf = &rxr->rx_buffers[i];
1670 	rxdesc = &rxr->rx_base[i];
1671 	if (rxbuf->buf) {
1672 		printf("%s: slot %d already has an mbuf\n", DEVNAME(sc), i);
1673 		return ENOBUFS;
1674 	}
1675 
1676 	m = MCLGETL(NULL, M_DONTWAIT, sc->rx_mbuf_sz);
1677 	if (!m)
1678 		return ENOBUFS;
1679 
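	/*
	 * Push the data pointer towards the end of the cluster so that
	 * exactly rx_mbuf_sz bytes remain; the ETHER_ALIGN slack in
	 * rx_mbuf_sz presumably keeps the IP header of received frames
	 * 4 byte aligned.
	 */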
1680 	m->m_data += (m->m_ext.ext_size - sc->rx_mbuf_sz);
1681 	m->m_len = m->m_pkthdr.len = sc->rx_mbuf_sz;
1682 
1683 	error = bus_dmamap_load_mbuf(rxr->rxdma.dma_tag, rxbuf->map, m,
1684 	    BUS_DMA_NOWAIT);
1685 	if (error) {
1686 		m_freem(m);
1687 		return error;
1688 	}
1689 
1690 	bus_dmamap_sync(rxr->rxdma.dma_tag, rxbuf->map, 0,
1691 	    rxbuf->map->dm_mapsize, BUS_DMASYNC_PREREAD);
1692 	rxbuf->buf = m;
1693 
1694 	rxdesc->read.pkt_addr = htole64(rxbuf->map->dm_segs[0].ds_addr);
1695 
1696 	return 0;
1697 }
1698 
1699 void
1700 igc_configure_queues(struct igc_softc *sc)
1701 {
1702 	struct igc_hw *hw = &sc->hw;
1703 	struct igc_queue *iq = sc->queues;
1704 	uint32_t ivar, newitr = 0;
1705 	int i;
1706 
1707 	/* First turn on RSS capability */
1708 	IGC_WRITE_REG(hw, IGC_GPIE, IGC_GPIE_MSIX_MODE | IGC_GPIE_EIAME |
1709 	    IGC_GPIE_PBA | IGC_GPIE_NSICR);
1710 
1711 	/* Set the starting interrupt rate */
1712 	newitr = (4000000 / MAX_INTS_PER_SEC) & 0x7FFC;
1713 
1714 	newitr |= IGC_EITR_CNT_IGNR;
1715 
1716 	/* Turn on MSI-X */
1717 	for (i = 0; i < sc->sc_nqueues; i++, iq++) {
1718 		/* RX entries */
1719 		igc_set_queues(sc, i, iq->msix, 0);
1720 		/* TX entries */
1721 		igc_set_queues(sc, i, iq->msix, 1);
1722 		sc->msix_queuesmask |= iq->eims;
1723 		IGC_WRITE_REG(hw, IGC_EITR(iq->msix), newitr);
1724 	}
1725 
1726 	/* And for the link interrupt */
1727 	ivar = (sc->linkvec | IGC_IVAR_VALID) << 8;
1728 	sc->msix_linkmask = 1 << sc->linkvec;
1729 	IGC_WRITE_REG(hw, IGC_IVAR_MISC, ivar);
1730 }
1731 
1732 void
1733 igc_set_queues(struct igc_softc *sc, uint32_t entry, uint32_t vector, int type)
1734 {
1735 	struct igc_hw *hw = &sc->hw;
1736 	uint32_t ivar, index;
1737 
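	/*
	 * Each 32 bit IVAR register covers two queue pairs.  As the
	 * masks below show, the RX vector of an even/odd entry lives in
	 * bits 7:0/23:16 and the TX vector in bits 15:8/31:24, with
	 * IGC_IVAR_VALID flagging the field as in use.
	 */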
1738 	index = entry >> 1;
1739 	ivar = IGC_READ_REG_ARRAY(hw, IGC_IVAR0, index);
1740 	if (type) {
1741 		if (entry & 1) {
1742 			ivar &= 0x00FFFFFF;
1743 			ivar |= (vector | IGC_IVAR_VALID) << 24;
1744 		} else {
1745 			ivar &= 0xFFFF00FF;
1746 			ivar |= (vector | IGC_IVAR_VALID) << 8;
1747 		}
1748 	} else {
1749 		if (entry & 1) {
1750 			ivar &= 0xFF00FFFF;
1751 			ivar |= (vector | IGC_IVAR_VALID) << 16;
1752 		} else {
1753 			ivar &= 0xFFFFFF00;
1754 			ivar |= vector | IGC_IVAR_VALID;
1755 		}
1756 	}
1757 	IGC_WRITE_REG_ARRAY(hw, IGC_IVAR0, index, ivar);
1758 }
1759 
1760 void
1761 igc_enable_queue(struct igc_softc *sc, uint32_t eims)
1762 {
1763 	IGC_WRITE_REG(&sc->hw, IGC_EIMS, eims);
1764 }
1765 
1766 void
1767 igc_enable_intr(struct igc_softc *sc)
1768 {
1769 	struct igc_hw *hw = &sc->hw;
1770 	uint32_t mask;
1771 
1772 	mask = (sc->msix_queuesmask | sc->msix_linkmask);
1773 	IGC_WRITE_REG(hw, IGC_EIAC, mask);
1774 	IGC_WRITE_REG(hw, IGC_EIAM, mask);
1775 	IGC_WRITE_REG(hw, IGC_EIMS, mask);
1776 	IGC_WRITE_REG(hw, IGC_IMS, IGC_IMS_LSC);
1777 	IGC_WRITE_FLUSH(hw);
1778 }
1779 
1780 void
1781 igc_disable_intr(struct igc_softc *sc)
1782 {
1783 	struct igc_hw *hw = &sc->hw;
1784 
1785 	IGC_WRITE_REG(hw, IGC_EIMC, 0xffffffff);
1786 	IGC_WRITE_REG(hw, IGC_EIAC, 0);
1787 	IGC_WRITE_REG(hw, IGC_IMC, 0xffffffff);
1788 	IGC_WRITE_FLUSH(hw);
1789 }
1790 
1791 int
1792 igc_intr_link(void *arg)
1793 {
1794 	struct igc_softc *sc = (struct igc_softc *)arg;
1795 	uint32_t reg_icr = IGC_READ_REG(&sc->hw, IGC_ICR);
1796 
1797 	if (reg_icr & IGC_ICR_LSC) {
1798 		KERNEL_LOCK();
1799 		sc->hw.mac.get_link_status = true;
1800 		igc_update_link_status(sc);
1801 		KERNEL_UNLOCK();
1802 	}
1803 
1804 	IGC_WRITE_REG(&sc->hw, IGC_IMS, IGC_IMS_LSC);
1805 	IGC_WRITE_REG(&sc->hw, IGC_EIMS, sc->msix_linkmask);
1806 
1807 	return 1;
1808 }
1809 
1810 int
1811 igc_intr_queue(void *arg)
1812 {
1813 	struct igc_queue *iq = arg;
1814 	struct igc_softc *sc = iq->sc;
1815 	struct ifnet *ifp = &sc->sc_ac.ac_if;
1816 	struct igc_rxring *rxr = iq->rxr;
1817 	struct igc_txring *txr = iq->txr;
1818 
1819 	if (ifp->if_flags & IFF_RUNNING) {
1820 		igc_txeof(txr);
1821 		igc_rxeof(rxr);
1822 		igc_rxrefill(rxr);
1823 	}
1824 
1825 	igc_enable_queue(sc, iq->eims);
1826 
1827 	return 1;
1828 }
1829 
1830 /*********************************************************************
1831  *
1832  *  Allocate memory for tx_buffer structures. The tx_buffer stores all
1833  *  the information needed to transmit a packet on the wire.
1834  *
1835  **********************************************************************/
1836 int
1837 igc_allocate_transmit_buffers(struct igc_txring *txr)
1838 {
1839 	struct igc_softc *sc = txr->sc;
1840 	struct igc_tx_buf *txbuf;
1841 	int error, i;
1842 
1843 	txr->tx_buffers = mallocarray(sc->num_tx_desc,
1844 	    sizeof(struct igc_tx_buf), M_DEVBUF, M_NOWAIT | M_ZERO);
1845 	if (txr->tx_buffers == NULL) {
1846 		printf("%s: Unable to allocate tx_buffer memory\n",
1847 		    DEVNAME(sc));
1848 		error = ENOMEM;
1849 		goto fail;
1850 	}
1851 	txr->txtag = txr->txdma.dma_tag;
1852 
1853 	/* Create the descriptor buffer dma maps. */
1854 	for (i = 0; i < sc->num_tx_desc; i++) {
1855 		txbuf = &txr->tx_buffers[i];
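		/*
		 * Each map can hold a full TSO payload (IGC_TSO_SIZE) split
		 * across up to IGC_MAX_SCATTER segments of at most PAGE_SIZE
		 * bytes each.
		 */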
1856 		error = bus_dmamap_create(txr->txdma.dma_tag, IGC_TSO_SIZE,
1857 		    IGC_MAX_SCATTER, PAGE_SIZE, 0, BUS_DMA_NOWAIT, &txbuf->map);
1858 		if (error != 0) {
1859 			printf("%s: Unable to create TX DMA map\n",
1860 			    DEVNAME(sc));
1861 			goto fail;
1862 		}
1863 	}
1864 
1865 	return 0;
1866 fail:
1867 	return error;
1868 }
1869 
1870 
1871 /*********************************************************************
1872  *
1873  *  Allocate and initialize transmit structures.
1874  *
1875  **********************************************************************/
1876 int
1877 igc_setup_transmit_structures(struct igc_softc *sc)
1878 {
1879 	struct igc_txring *txr = sc->tx_rings;
1880 	int i;
1881 
1882 	for (i = 0; i < sc->sc_nqueues; i++, txr++) {
1883 		if (igc_setup_transmit_ring(txr))
1884 			goto fail;
1885 	}
1886 
1887 	return 0;
1888 fail:
1889 	igc_free_transmit_structures(sc);
1890 	return ENOBUFS;
1891 }
1892 
1893 /*********************************************************************
1894  *
1895  *  Initialize a transmit ring.
1896  *
1897  **********************************************************************/
1898 int
1899 igc_setup_transmit_ring(struct igc_txring *txr)
1900 {
1901 	struct igc_softc *sc = txr->sc;
1902 
1903 	/* Now allocate transmit buffers for the ring. */
1904 	if (igc_allocate_transmit_buffers(txr))
1905 		return ENOMEM;
1906 
1907 	/* Clear the old ring contents */
1908 	bzero((void *)txr->tx_base,
1909 	    (sizeof(union igc_adv_tx_desc)) * sc->num_tx_desc);
1910 
1911 	/* Reset indices. */
1912 	txr->next_avail_desc = 0;
1913 	txr->next_to_clean = 0;
1914 
1915 	bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, 0,
1916 	    txr->txdma.dma_map->dm_mapsize,
1917 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1918 
1919 	return 0;
1920 }
1921 
1922 /*********************************************************************
1923  *
1924  *  Enable transmit unit.
1925  *
1926  **********************************************************************/
1927 void
1928 igc_initialize_transmit_unit(struct igc_softc *sc)
1929 {
1930 	struct ifnet *ifp = &sc->sc_ac.ac_if;
1931 	struct igc_txring *txr;
1932 	struct igc_hw *hw = &sc->hw;
1933 	uint64_t bus_addr;
1934 	uint32_t tctl, txdctl = 0;
1935 	int i;
1936 
1937 	/* Setup the Base and Length of the TX descriptor ring. */
1938 	for (i = 0; i < sc->sc_nqueues; i++) {
1939 		txr = &sc->tx_rings[i];
1940 
1941 		bus_addr = txr->txdma.dma_map->dm_segs[0].ds_addr;
1942 
1943 		/* Base and len of TX ring */
1944 		IGC_WRITE_REG(hw, IGC_TDLEN(i),
1945 		    sc->num_tx_desc * sizeof(union igc_adv_tx_desc));
1946 		IGC_WRITE_REG(hw, IGC_TDBAH(i), (uint32_t)(bus_addr >> 32));
1947 		IGC_WRITE_REG(hw, IGC_TDBAL(i), (uint32_t)bus_addr);
1948 
1949 		/* Init the HEAD/TAIL indices */
1950 		IGC_WRITE_REG(hw, IGC_TDT(i), 0);
1951 		IGC_WRITE_REG(hw, IGC_TDH(i), 0);
1952 
1953 		txr->watchdog_timer = 0;
1954 
1955 		txdctl = 0;		/* Clear txdctl */
1956 		txdctl |= 0x1f;		/* PTHRESH */
1957 		txdctl |= 1 << 8;	/* HTHRESH */
1958 		txdctl |= 1 << 16;	/* WTHRESH */
1959 		txdctl |= 1 << 22;	/* Reserved bit 22 must always be 1 */
1960 		txdctl |= IGC_TXDCTL_GRAN;
1961 		txdctl |= 1 << 25;	/* LWTHRESH */
1962 
1963 		IGC_WRITE_REG(hw, IGC_TXDCTL(i), txdctl);
1964 	}
1965 	ifp->if_timer = 0;
1966 
1967 	/* Program the Transmit Control Register */
1968 	tctl = IGC_READ_REG(&sc->hw, IGC_TCTL);
1969 	tctl &= ~IGC_TCTL_CT;
1970 	tctl |= (IGC_TCTL_PSP | IGC_TCTL_RTLC | IGC_TCTL_EN |
1971 	    (IGC_COLLISION_THRESHOLD << IGC_CT_SHIFT));
1972 
1973 	/* This write will effectively turn on the transmit unit. */
1974 	IGC_WRITE_REG(&sc->hw, IGC_TCTL, tctl);
1975 }
1976 
1977 /*********************************************************************
1978  *
1979  *  Free all transmit rings.
1980  *
1981  **********************************************************************/
1982 void
1983 igc_free_transmit_structures(struct igc_softc *sc)
1984 {
1985 	struct igc_txring *txr = sc->tx_rings;
1986 	int i;
1987 
1988 	for (i = 0; i < sc->sc_nqueues; i++, txr++)
1989 		igc_free_transmit_buffers(txr);
1990 }
1991 
1992 /*********************************************************************
1993  *
1994  *  Free transmit ring related data structures.
1995  *
1996  **********************************************************************/
1997 void
1998 igc_free_transmit_buffers(struct igc_txring *txr)
1999 {
2000 	struct igc_softc *sc = txr->sc;
2001 	struct igc_tx_buf *txbuf;
2002 	int i;
2003 
2004 	if (txr->tx_buffers == NULL)
2005 		return;
2006 
2007 	txbuf = txr->tx_buffers;
2008 	for (i = 0; i < sc->num_tx_desc; i++, txbuf++) {
2009 		if (txbuf->map != NULL && txbuf->map->dm_nsegs > 0) {
2010 			bus_dmamap_sync(txr->txdma.dma_tag, txbuf->map,
2011 			    0, txbuf->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
2012 			bus_dmamap_unload(txr->txdma.dma_tag, txbuf->map);
2013 		}
2014 		if (txbuf->m_head != NULL) {
2015 			m_freem(txbuf->m_head);
2016 			txbuf->m_head = NULL;
2017 		}
2018 		if (txbuf->map != NULL) {
2019 			bus_dmamap_destroy(txr->txdma.dma_tag, txbuf->map);
2020 			txbuf->map = NULL;
2021 		}
2022 	}
2023 
2024 	if (txr->tx_buffers != NULL)
2025 		free(txr->tx_buffers, M_DEVBUF,
2026 		    sc->num_tx_desc * sizeof(struct igc_tx_buf));
2027 	txr->tx_buffers = NULL;
2028 	txr->txtag = NULL;
2029 }
2030 
2031 
2032 /*********************************************************************
2033  *
2034  *  Advanced Context Descriptor setup for VLAN, CSUM or TSO
2035  *
2036  **********************************************************************/
2037 
2038 int
2039 igc_tx_ctx_setup(struct igc_txring *txr, struct mbuf *mp, int prod,
2040     uint32_t *cmd_type_len, uint32_t *olinfo_status)
2041 {
2042 	struct ether_extracted ext;
2043 	struct igc_adv_tx_context_desc *txdesc;
2044 	uint32_t mss_l4len_idx = 0;
2045 	uint32_t type_tucmd_mlhl = 0;
2046 	uint32_t vlan_macip_lens = 0;
2047 	int off = 0;
2048 
2049 	/*
2050 	 * In advanced descriptors the vlan tag must
2051 	 * be placed into the context descriptor. Hence
2052 	 * we need to make one even if not doing offloads.
2053 	 */
2054 #if NVLAN > 0
2055 	if (ISSET(mp->m_flags, M_VLANTAG)) {
2056 		uint32_t vtag = mp->m_pkthdr.ether_vtag;
2057 		vlan_macip_lens |= (vtag << IGC_ADVTXD_VLAN_SHIFT);
2058 		*cmd_type_len |= IGC_ADVTXD_DCMD_VLE;
2059 		off = 1;
2060 	}
2061 #endif
2062 
2063 	ether_extract_headers(mp, &ext);
2064 
2065 	vlan_macip_lens |= (sizeof(*ext.eh) << IGC_ADVTXD_MACLEN_SHIFT);
2066 
2067 	if (ext.ip4) {
2068 		type_tucmd_mlhl |= IGC_ADVTXD_TUCMD_IPV4;
2069 		if (ISSET(mp->m_pkthdr.csum_flags, M_IPV4_CSUM_OUT)) {
2070 			*olinfo_status |= IGC_TXD_POPTS_IXSM << 8;
2071 			off = 1;
2072 		}
2073 #ifdef INET6
2074 	} else if (ext.ip6) {
2075 		type_tucmd_mlhl |= IGC_ADVTXD_TUCMD_IPV6;
2076 #endif
2077 	}
2078 
2079 	vlan_macip_lens |= ext.iphlen;
2080 	type_tucmd_mlhl |= IGC_ADVTXD_DCMD_DEXT | IGC_ADVTXD_DTYP_CTXT;
2081 
2082 	if (ext.tcp) {
2083 		type_tucmd_mlhl |= IGC_ADVTXD_TUCMD_L4T_TCP;
2084 		if (ISSET(mp->m_pkthdr.csum_flags, M_TCP_CSUM_OUT)) {
2085 			*olinfo_status |= IGC_TXD_POPTS_TXSM << 8;
2086 			off = 1;
2087 		}
2088 	} else if (ext.udp) {
2089 		type_tucmd_mlhl |= IGC_ADVTXD_TUCMD_L4T_UDP;
2090 		if (ISSET(mp->m_pkthdr.csum_flags, M_UDP_CSUM_OUT)) {
2091 			*olinfo_status |= IGC_TXD_POPTS_TXSM << 8;
2092 			off = 1;
2093 		}
2094 	}
2095 
2096 	if (ISSET(mp->m_pkthdr.csum_flags, M_TCP_TSO)) {
2097 		if (ext.tcp && mp->m_pkthdr.ph_mss > 0) {
2098 			uint32_t hdrlen, thlen, paylen, outlen;
2099 
2100 			thlen = ext.tcphlen;
2101 
2102 			outlen = mp->m_pkthdr.ph_mss;
2103 			mss_l4len_idx |= outlen << IGC_ADVTXD_MSS_SHIFT;
2104 			mss_l4len_idx |= thlen << IGC_ADVTXD_L4LEN_SHIFT;
2105 
2106 			hdrlen = sizeof(*ext.eh) + ext.iphlen + thlen;
2107 			paylen = mp->m_pkthdr.len - hdrlen;
2108 			CLR(*olinfo_status, IGC_ADVTXD_PAYLEN_MASK);
2109 			*olinfo_status |= paylen << IGC_ADVTXD_PAYLEN_SHIFT;
2110 
2111 			*cmd_type_len |= IGC_ADVTXD_DCMD_TSE;
2112 			off = 1;
2113 
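			/*
			 * Count one tcps_outpkttso event per TCP segment the
			 * hardware will emit, i.e. ceil(paylen / mss).
			 */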
2114 			tcpstat_add(tcps_outpkttso,
2115 			    (paylen + outlen - 1) / outlen);
2116 		} else
2117 			tcpstat_inc(tcps_outbadtso);
2118 	}
2119 
2120 	if (off == 0)
2121 		return 0;
2122 
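	/*
	 * The context descriptor written below occupies the TX ring slot at
	 * prod; returning 1 lets the caller account for it when advancing
	 * the producer index.
	 */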
2123 	/* Now ready a context descriptor */
2124 	txdesc = (struct igc_adv_tx_context_desc *)&txr->tx_base[prod];
2125 
2126 	/* Now copy bits into descriptor */
2127 	htolem32(&txdesc->vlan_macip_lens, vlan_macip_lens);
2128 	htolem32(&txdesc->type_tucmd_mlhl, type_tucmd_mlhl);
2129 	htolem32(&txdesc->seqnum_seed, 0);
2130 	htolem32(&txdesc->mss_l4len_idx, mss_l4len_idx);
2131 
2132 	return 1;
2133 }
2134 
2135 /*********************************************************************
2136  *
2137  *  Allocate memory for rx_buffer structures. Since we use one
2138  *  rx_buffer per received packet, the maximum number of rx_buffer's
2139  *  that we'll need is equal to the number of receive descriptors
2140  *  that we've allocated.
2141  *
2142  **********************************************************************/
2143 int
2144 igc_allocate_receive_buffers(struct igc_rxring *rxr)
2145 {
2146 	struct igc_softc *sc = rxr->sc;
2147 	struct igc_rx_buf *rxbuf;
2148 	int i, error;
2149 
2150 	rxr->rx_buffers = mallocarray(sc->num_rx_desc,
2151 	    sizeof(struct igc_rx_buf), M_DEVBUF, M_NOWAIT | M_ZERO);
2152 	if (rxr->rx_buffers == NULL) {
2153 		printf("%s: Unable to allocate rx_buffer memory\n",
2154 		    DEVNAME(sc));
2155 		error = ENOMEM;
2156 		goto fail;
2157 	}
2158 
2159 	rxbuf = rxr->rx_buffers;
2160 	for (i = 0; i < sc->num_rx_desc; i++, rxbuf++) {
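		/*
		 * One map per descriptor, each able to hold a single
		 * contiguous jumbo-sized cluster (nsegments is 1).
		 */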
2161 		error = bus_dmamap_create(rxr->rxdma.dma_tag,
2162 		    MAX_JUMBO_FRAME_SIZE, 1, MAX_JUMBO_FRAME_SIZE, 0,
2163 		    BUS_DMA_NOWAIT, &rxbuf->map);
2164 		if (error) {
2165 			printf("%s: Unable to create RX DMA map\n",
2166 			    DEVNAME(sc));
2167 			goto fail;
2168 		}
2169 	}
2170 	bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map, 0,
2171 	    rxr->rxdma.dma_map->dm_mapsize,
2172 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2173 
2174 	return 0;
2175 fail:
2176 	return error;
2177 }
2178 
2179 /*********************************************************************
2180  *
2181  *  Allocate and initialize receive structures.
2182  *
2183  **********************************************************************/
2184 int
2185 igc_setup_receive_structures(struct igc_softc *sc)
2186 {
2187 	struct igc_rxring *rxr = sc->rx_rings;
2188 	int i;
2189 
2190 	for (i = 0; i < sc->sc_nqueues; i++, rxr++) {
2191 		if (igc_setup_receive_ring(rxr))
2192 			goto fail;
2193 	}
2194 
2195 	return 0;
2196 fail:
2197 	igc_free_receive_structures(sc);
2198 	return ENOBUFS;
2199 }
2200 
2201 /*********************************************************************
2202  *
2203  *  Initialize a receive ring and its buffers.
2204  *
2205  **********************************************************************/
2206 int
2207 igc_setup_receive_ring(struct igc_rxring *rxr)
2208 {
2209 	struct igc_softc *sc = rxr->sc;
2210 	struct ifnet *ifp = &sc->sc_ac.ac_if;
2211 	int rsize;
2212 
2213 	rsize = roundup2(sc->num_rx_desc * sizeof(union igc_adv_rx_desc),
2214 	    IGC_DBA_ALIGN);
2215 
2216 	/* Clear the ring contents. */
2217 	bzero((void *)rxr->rx_base, rsize);
2218 
2219 	if (igc_allocate_receive_buffers(rxr))
2220 		return ENOMEM;
2221 
2222 	/* Setup our descriptor indices. */
2223 	rxr->next_to_check = 0;
2224 	rxr->last_desc_filled = sc->num_rx_desc - 1;
2225 
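	/*
	 * Low water mark: enough clusters for two maximum-sized frames.
	 * High water mark: one descriptor less than the ring size so the
	 * producer can never completely catch up with the consumer.
	 */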
2226 	if_rxr_init(&rxr->rx_ring, 2 * ((ifp->if_hardmtu / MCLBYTES) + 1),
2227 	    sc->num_rx_desc - 1);
2228 
2229 	return 0;
2230 }
2231 
2232 /*********************************************************************
2233  *
2234  *  Enable receive unit.
2235  *
2236  **********************************************************************/
2237 #define BSIZEPKT_ROUNDUP	((1 << IGC_SRRCTL_BSIZEPKT_SHIFT) - 1)
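/*
 * SRRCTL.BSIZEPKT is expressed in units of (1 << IGC_SRRCTL_BSIZEPKT_SHIFT)
 * bytes (1KB if the shift is 10); the rounding constant above bumps
 * rx_mbuf_sz up to the next whole unit before it is shifted into the field.
 */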
2238 
2239 void
2240 igc_initialize_receive_unit(struct igc_softc *sc)
2241 {
2242 	struct igc_rxring *rxr = sc->rx_rings;
2243 	struct igc_hw *hw = &sc->hw;
2244 	uint32_t rctl, rxcsum, srrctl = 0;
2245 	int i;
2246 
2247 	/*
2248 	 * Make sure receives are disabled while setting
2249 	 * up the descriptor ring.
2250 	 */
2251 	rctl = IGC_READ_REG(hw, IGC_RCTL);
2252 	IGC_WRITE_REG(hw, IGC_RCTL, rctl & ~IGC_RCTL_EN);
2253 
2254 	/* Setup the Receive Control Register */
2255 	rctl &= ~(3 << IGC_RCTL_MO_SHIFT);
2256 	rctl |= IGC_RCTL_EN | IGC_RCTL_BAM | IGC_RCTL_LBM_NO |
2257 	    IGC_RCTL_RDMTS_HALF | (hw->mac.mc_filter_type << IGC_RCTL_MO_SHIFT);
2258 
2259 	/* Do not store bad packets */
2260 	rctl &= ~IGC_RCTL_SBP;
2261 
2262 	/* Enable Long Packet receive */
2263 	if (sc->hw.mac.max_frame_size != ETHER_MAX_LEN)
2264 		rctl |= IGC_RCTL_LPE;
2265 
2266 	/* Strip the CRC */
2267 	rctl |= IGC_RCTL_SECRC;
2268 
2269 	/*
2270 	 * Set the interrupt throttling rate. Value is calculated
2271 	 * as DEFAULT_ITR = 1/(MAX_INTS_PER_SEC * 256ns)
2272 	 */
2273 	IGC_WRITE_REG(hw, IGC_ITR, DEFAULT_ITR);
2274 
2275 	rxcsum = IGC_READ_REG(hw, IGC_RXCSUM);
2276 	rxcsum &= ~IGC_RXCSUM_PCSD;
2277 
2278 	if (sc->sc_nqueues > 1)
2279 		rxcsum |= IGC_RXCSUM_PCSD;
2280 
2281 	IGC_WRITE_REG(hw, IGC_RXCSUM, rxcsum);
2282 
2283 	if (sc->sc_nqueues > 1)
2284 		igc_initialize_rss_mapping(sc);
2285 
2286 	/* Set maximum packet buffer len */
2287 	srrctl |= (sc->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
2288 	    IGC_SRRCTL_BSIZEPKT_SHIFT;
2289 	/* srrctl above overrides this, but set the register to a sane value anyway */
2290 	rctl |= IGC_RCTL_SZ_2048;
2291 
2292 	/*
2293 	 * If TX flow control is disabled and there's > 1 queue defined,
2294 	 * enable DROP.
2295 	 *
2296 	 * This drops frames rather than hanging the RX MAC for all queues.
2297 	 */
2298 	if ((sc->sc_nqueues > 1) && (sc->fc == igc_fc_none ||
2299 	    sc->fc == igc_fc_rx_pause)) {
2300 		srrctl |= IGC_SRRCTL_DROP_EN;
2301 	}
2302 
2303 	/* Setup the Base and Length of the RX descriptor rings. */
2304 	for (i = 0; i < sc->sc_nqueues; i++, rxr++) {
2305 		IGC_WRITE_REG(hw, IGC_RXDCTL(i), 0);
2306 		uint64_t bus_addr = rxr->rxdma.dma_map->dm_segs[0].ds_addr;
2307 		uint32_t rxdctl;
2308 
2309 		srrctl |= IGC_SRRCTL_DESCTYPE_ADV_ONEBUF;
2310 
2311 		IGC_WRITE_REG(hw, IGC_RDLEN(i),
2312 		    sc->num_rx_desc * sizeof(union igc_adv_rx_desc));
2313 		IGC_WRITE_REG(hw, IGC_RDBAH(i), (uint32_t)(bus_addr >> 32));
2314 		IGC_WRITE_REG(hw, IGC_RDBAL(i), (uint32_t)bus_addr);
2315 		IGC_WRITE_REG(hw, IGC_SRRCTL(i), srrctl);
2316 
2317 		/* Setup the Head and Tail Descriptor Pointers */
2318 		IGC_WRITE_REG(hw, IGC_RDH(i), 0);
2319 		IGC_WRITE_REG(hw, IGC_RDT(i), 0);
2320 
2321 		/* Enable this Queue */
2322 		rxdctl = IGC_READ_REG(hw, IGC_RXDCTL(i));
2323 		rxdctl |= IGC_RXDCTL_QUEUE_ENABLE;
2324 		rxdctl &= 0xFFF00000;
2325 		rxdctl |= IGC_RX_PTHRESH;
2326 		rxdctl |= IGC_RX_HTHRESH << 8;
2327 		rxdctl |= IGC_RX_WTHRESH << 16;
2328 		IGC_WRITE_REG(hw, IGC_RXDCTL(i), rxdctl);
2329 	}
2330 
2331 	/* Make sure VLAN Filters are off */
2332 	rctl &= ~IGC_RCTL_VFE;
2333 
2334 	/* Write out the settings */
2335 	IGC_WRITE_REG(hw, IGC_RCTL, rctl);
2336 }
2337 
2338 /*********************************************************************
2339  *
2340  *  Free all receive rings.
2341  *
2342  **********************************************************************/
2343 void
2344 igc_free_receive_structures(struct igc_softc *sc)
2345 {
2346 	struct igc_rxring *rxr;
2347 	int i;
2348 
2349 	for (i = 0, rxr = sc->rx_rings; i < sc->sc_nqueues; i++, rxr++)
2350 		if_rxr_init(&rxr->rx_ring, 0, 0);
2351 
2352 	for (i = 0, rxr = sc->rx_rings; i < sc->sc_nqueues; i++, rxr++)
2353 		igc_free_receive_buffers(rxr);
2354 }
2355 
2356 /*********************************************************************
2357  *
2358  *  Free receive ring data structures
2359  *
2360  **********************************************************************/
2361 void
2362 igc_free_receive_buffers(struct igc_rxring *rxr)
2363 {
2364 	struct igc_softc *sc = rxr->sc;
2365 	struct igc_rx_buf *rxbuf;
2366 	int i;
2367 
2368 	if (rxr->rx_buffers != NULL) {
2369 		for (i = 0; i < sc->num_rx_desc; i++) {
2370 			rxbuf = &rxr->rx_buffers[i];
2371 			if (rxbuf->buf != NULL) {
2372 				bus_dmamap_sync(rxr->rxdma.dma_tag, rxbuf->map,
2373 				    0, rxbuf->map->dm_mapsize,
2374 				    BUS_DMASYNC_POSTREAD);
2375 				bus_dmamap_unload(rxr->rxdma.dma_tag,
2376 				    rxbuf->map);
2377 				m_freem(rxbuf->buf);
2378 				rxbuf->buf = NULL;
2379 			}
2380 			bus_dmamap_destroy(rxr->rxdma.dma_tag, rxbuf->map);
2381 			rxbuf->map = NULL;
2382 		}
2383 		free(rxr->rx_buffers, M_DEVBUF,
2384 		    sc->num_rx_desc * sizeof(struct igc_rx_buf));
2385 		rxr->rx_buffers = NULL;
2386 	}
2387 }
2388 
2389 /*
2390  * Initialise the RSS mapping for NICs that support multiple transmit/
2391  * receive rings.
2392  */
2393 void
2394 igc_initialize_rss_mapping(struct igc_softc *sc)
2395 {
2396 	struct igc_hw *hw = &sc->hw;
2397 	uint32_t rss_key[10], mrqc, reta, shift = 0;
2398 	int i, queue_id;
2399 
2400 	/*
2401 	 * The redirection table controls which destination
2402 	 * queue each bucket redirects traffic to.
2403 	 * Each DWORD represents four queues, with the LSB
2404 	 * being the first queue in the DWORD.
2405 	 *
2406 	 * This just allocates buckets to queues using round-robin
2407 	 * allocation.
2408 	 *
2409 	 * NOTE: It Just Happens to line up with the default
2410 	 * RSS allocation method.
2411 	 */
2412 
2413 	/* Warning FM follows */
2414 	reta = 0;
2415 	for (i = 0; i < 128; i++) {
2416 		queue_id = (i % sc->sc_nqueues);
2417 		/* Adjust if required */
2418 		queue_id = queue_id << shift;
2419 
2420 		/*
2421 		 * The low 8 bits are for hash value (n+0);
2422 		 * The next 8 bits are for hash value (n+1), etc.
2423 		 */
2424 		reta = reta >> 8;
2425 		reta = reta | ( ((uint32_t) queue_id) << 24);
2426 		if ((i & 3) == 3) {
2427 			IGC_WRITE_REG(hw, IGC_RETA(i >> 2), reta);
2428 			reta = 0;
2429 		}
2430 	}
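	/*
	 * Worked example with 4 queues: buckets 0..3 map to queues 0,1,2,3
	 * and the pattern repeats, so every RETA register ends up holding
	 * 0x03020100 (one queue number per byte, 32 registers covering all
	 * 128 buckets).
	 */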
2431 
2432 	/*
2433 	 * MRQC: Multiple Receive Queues Command
2434 	 * Set queuing to RSS control, number depends on the device.
2435 	 */
2436 	mrqc = IGC_MRQC_ENABLE_RSS_4Q;
2437 
2438 	/* Set up random bits */
2439 	stoeplitz_to_key(&rss_key, sizeof(rss_key));
2440 
2441 	/* Now fill our hash function seeds */
2442 	for (i = 0; i < 10; i++)
2443 		IGC_WRITE_REG_ARRAY(hw, IGC_RSSRK(0), i, rss_key[i]);
2444 
2445 	/*
2446 	 * Configure the RSS fields to hash upon.
2447 	 */
2448 	mrqc |= (IGC_MRQC_RSS_FIELD_IPV4 | IGC_MRQC_RSS_FIELD_IPV4_TCP);
2449 	mrqc |= (IGC_MRQC_RSS_FIELD_IPV6 | IGC_MRQC_RSS_FIELD_IPV6_TCP);
2450 	mrqc |= IGC_MRQC_RSS_FIELD_IPV6_TCP_EX;
2451 
2452 	IGC_WRITE_REG(hw, IGC_MRQC, mrqc);
2453 }
2454 
2455 /*
2456  * igc_get_hw_control sets the {CTRL_EXT|FWSM}:DRV_LOAD bit.
2457  * For ASF and Pass Through versions of f/w this means
2458  * that the driver is loaded. For AMT version type f/w
2459  * this means that the network i/f is open.
2460  */
2461 void
2462 igc_get_hw_control(struct igc_softc *sc)
2463 {
2464 	uint32_t ctrl_ext;
2465 
2466 	ctrl_ext = IGC_READ_REG(&sc->hw, IGC_CTRL_EXT);
2467 	IGC_WRITE_REG(&sc->hw, IGC_CTRL_EXT, ctrl_ext | IGC_CTRL_EXT_DRV_LOAD);
2468 }
2469 
2470 /*
2471  * igc_release_hw_control resets {CTRL_EXT|FWSM}:DRV_LOAD bit.
2472  * For ASF and Pass Through versions of f/w this means that
2473  * the driver is no longer loaded. For AMT versions of the
2474  * f/w this means that the network i/f is closed.
2475  */
2476 void
2477 igc_release_hw_control(struct igc_softc *sc)
2478 {
2479 	uint32_t ctrl_ext;
2480 
2481 	ctrl_ext = IGC_READ_REG(&sc->hw, IGC_CTRL_EXT);
2482 	IGC_WRITE_REG(&sc->hw, IGC_CTRL_EXT, ctrl_ext & ~IGC_CTRL_EXT_DRV_LOAD);
2483 }
2484 
2485 int
2486 igc_is_valid_ether_addr(uint8_t *addr)
2487 {
2488 	char zero_addr[6] = { 0, 0, 0, 0, 0, 0 };
2489 
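	/*
	 * Reject group (multicast/broadcast) addresses, which have the low
	 * bit of the first octet set, and the all-zeroes address.
	 */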
2490 	if ((addr[0] & 1) || (!bcmp(addr, zero_addr, ETHER_ADDR_LEN))) {
2491 		return 0;
2492 	}
2493 
2494 	return 1;
2495 }
2496 
2497 #if NKSTAT > 0
2498 
2499 /*
2500  * The counters below are read-to-clear, so they have to be accumulated
2501  * for userland to see monotonic values. Fetch them periodically from a
2502  * timeout to avoid a 32-bit roll-over between kstat reads.
2503  */
2504 
2505 enum igc_stat {
2506 	igc_stat_crcerrs,
2507 	igc_stat_algnerrc,
2508 	igc_stat_rxerrc,
2509 	igc_stat_mpc,
2510 	igc_stat_scc,
2511 	igc_stat_ecol,
2512 	igc_stat_mcc,
2513 	igc_stat_latecol,
2514 	igc_stat_colc,
2515 	igc_stat_rerc,
2516 	igc_stat_dc,
2517 	igc_stat_tncrs,
2518 	igc_stat_htdpmc,
2519 	igc_stat_rlec,
2520 	igc_stat_xonrxc,
2521 	igc_stat_xontxc,
2522 	igc_stat_xoffrxc,
2523 	igc_stat_xofftxc,
2524 	igc_stat_fcruc,
2525 	igc_stat_prc64,
2526 	igc_stat_prc127,
2527 	igc_stat_prc255,
2528 	igc_stat_prc511,
2529 	igc_stat_prc1023,
2530 	igc_stat_prc1522,
2531 	igc_stat_gprc,
2532 	igc_stat_bprc,
2533 	igc_stat_mprc,
2534 	igc_stat_gptc,
2535 	igc_stat_gorc,
2536 	igc_stat_gotc,
2537 	igc_stat_rnbc,
2538 	igc_stat_ruc,
2539 	igc_stat_rfc,
2540 	igc_stat_roc,
2541 	igc_stat_rjc,
2542 	igc_stat_mgtprc,
2543 	igc_stat_mgtpdc,
2544 	igc_stat_mgtptc,
2545 	igc_stat_tor,
2546 	igc_stat_tot,
2547 	igc_stat_tpr,
2548 	igc_stat_tpt,
2549 	igc_stat_ptc64,
2550 	igc_stat_ptc127,
2551 	igc_stat_ptc255,
2552 	igc_stat_ptc511,
2553 	igc_stat_ptc1023,
2554 	igc_stat_ptc1522,
2555 	igc_stat_mptc,
2556 	igc_stat_bptc,
2557 	igc_stat_tsctc,
2558 
2559 	igc_stat_iac,
2560 	igc_stat_rpthc,
2561 	igc_stat_tlpic,
2562 	igc_stat_rlpic,
2563 	igc_stat_hgptc,
2564 	igc_stat_rxdmtc,
2565 	igc_stat_hgorc,
2566 	igc_stat_hgotc,
2567 	igc_stat_lenerrs,
2568 
2569 	igc_stat_count
2570 };
2571 
2572 struct igc_counter {
2573 	const char		*name;
2574 	enum kstat_kv_unit	 unit;
2575 	uint32_t		 reg;
2576 };
2577 
2578 static const struct igc_counter igc_counters[igc_stat_count] = {
2579 	[igc_stat_crcerrs] =
2580 	    { "crc errs",		KSTAT_KV_U_NONE,	IGC_CRCERRS },
2581 	[igc_stat_algnerrc] =
2582 	    { "alignment errs",		KSTAT_KV_U_NONE,	IGC_ALGNERRC },
2583 	[igc_stat_rxerrc] =
2584 	    { "rx errs",		KSTAT_KV_U_NONE,	IGC_RXERRC },
2585 	[igc_stat_mpc] =
2586 	    { "missed pkts",		KSTAT_KV_U_NONE,	IGC_MPC },
2587 	[igc_stat_scc] =
2588 	    { "single colls",		KSTAT_KV_U_NONE,	IGC_SCC },
2589 	[igc_stat_ecol] =
2590 	    { "excessive colls",	KSTAT_KV_U_NONE,	IGC_ECOL },
2591 	[igc_stat_mcc] =
2592 	    { "multiple colls",		KSTAT_KV_U_NONE,	IGC_MCC },
2593 	[igc_stat_latecol] =
2594 	    { "late colls",		KSTAT_KV_U_NONE,	IGC_LATECOL },
2595 	[igc_stat_colc] =
2596 	    { "collisions",		KSTAT_KV_U_NONE, 	IGC_COLC },
2597 	[igc_stat_rerc] =
2598 	    { "recv errs",		KSTAT_KV_U_NONE,	IGC_RERC },
2599 	[igc_stat_dc] =
2600 	    { "defers",			KSTAT_KV_U_NONE,	IGC_DC },
2601 	[igc_stat_tncrs] =
2602 	    { "tx no crs",		KSTAT_KV_U_NONE,	IGC_TNCRS},
2603 	[igc_stat_htdpmc] =
2604 	    { "host tx discards",	KSTAT_KV_U_NONE,	IGC_HTDPMC },
2605 	[igc_stat_rlec] =
2606 	    { "recv len errs",		KSTAT_KV_U_NONE,	IGC_RLEC },
2607 	[igc_stat_xonrxc] =
2608 	    { "xon rx",			KSTAT_KV_U_NONE,	IGC_XONRXC },
2609 	[igc_stat_xontxc] =
2610 	    { "xon tx",			KSTAT_KV_U_NONE,	IGC_XONTXC },
2611 	[igc_stat_xoffrxc] =
2612 	    { "xoff rx",		KSTAT_KV_U_NONE,	IGC_XOFFRXC },
2613 	[igc_stat_xofftxc] =
2614 	    { "xoff tx",		KSTAT_KV_U_NONE,	IGC_XOFFTXC },
2615 	[igc_stat_fcruc] =
2616 	    { "fc rx unsupp",		KSTAT_KV_U_NONE,	IGC_FCRUC },
2617 	[igc_stat_prc64] =
2618 	    { "rx 64B",			KSTAT_KV_U_PACKETS,	IGC_PRC64 },
2619 	[igc_stat_prc127] =
2620 	    { "rx 65-127B",		KSTAT_KV_U_PACKETS,	IGC_PRC127 },
2621 	[igc_stat_prc255] =
2622 	    { "rx 128-255B",		KSTAT_KV_U_PACKETS,	IGC_PRC255 },
2623 	[igc_stat_prc511] =
2624 	    { "rx 256-511B",		KSTAT_KV_U_PACKETS,	IGC_PRC511 },
2625 	[igc_stat_prc1023] =
2626 	    { "rx 512-1023B",		KSTAT_KV_U_PACKETS,	IGC_PRC1023 },
2627 	[igc_stat_prc1522] =
2628 	    { "rx 1024-maxB",		KSTAT_KV_U_PACKETS,	IGC_PRC1522 },
2629 	[igc_stat_gprc] =
2630 	    { "rx good",		KSTAT_KV_U_PACKETS,	IGC_GPRC },
2631 	[igc_stat_bprc] =
2632 	    { "rx bcast",		KSTAT_KV_U_PACKETS,	IGC_BPRC },
2633 	[igc_stat_mprc] =
2634 	    { "rx mcast",		KSTAT_KV_U_PACKETS,	IGC_MPRC },
2635 	[igc_stat_gptc] =
2636 	    { "tx good",		KSTAT_KV_U_PACKETS,	IGC_GPTC },
2637 	[igc_stat_gorc] =
2638 	    { "rx good bytes",		KSTAT_KV_U_BYTES,	0 },
2639 	[igc_stat_gotc] =
2640 	    { "tx good bytes",		KSTAT_KV_U_BYTES,	0 },
2641 	[igc_stat_rnbc] =
2642 	    { "rx no bufs",		KSTAT_KV_U_NONE,	IGC_RNBC },
2643 	[igc_stat_ruc] =
2644 	    { "rx undersize",		KSTAT_KV_U_NONE,	IGC_RUC },
2645 	[igc_stat_rfc] =
2646 	    { "rx frags",		KSTAT_KV_U_NONE,	IGC_RFC },
2647 	[igc_stat_roc] =
2648 	    { "rx oversize",		KSTAT_KV_U_NONE,	IGC_ROC },
2649 	[igc_stat_rjc] =
2650 	    { "rx jabbers",		KSTAT_KV_U_NONE,	IGC_RJC },
2651 	[igc_stat_mgtprc] =
2652 	    { "rx mgmt",		KSTAT_KV_U_PACKETS,	IGC_MGTPRC },
2653 	[igc_stat_mgtpdc] =
2654 	    { "rx mgmt drops",		KSTAT_KV_U_PACKETS,	IGC_MGTPDC },
2655 	[igc_stat_mgtptc] =
2656 	    { "tx mgmt",		KSTAT_KV_U_PACKETS,	IGC_MGTPTC },
2657 	[igc_stat_tor] =
2658 	    { "rx total bytes",		KSTAT_KV_U_BYTES,	0 },
2659 	[igc_stat_tot] =
2660 	    { "tx total bytes",		KSTAT_KV_U_BYTES,	0 },
2661 	[igc_stat_tpr] =
2662 	    { "rx total",		KSTAT_KV_U_PACKETS,	IGC_TPR },
2663 	[igc_stat_tpt] =
2664 	    { "tx total",		KSTAT_KV_U_PACKETS,	IGC_TPT },
2665 	[igc_stat_ptc64] =
2666 	    { "tx 64B",			KSTAT_KV_U_PACKETS,	IGC_PTC64 },
2667 	[igc_stat_ptc127] =
2668 	    { "tx 65-127B",		KSTAT_KV_U_PACKETS,	IGC_PTC127 },
2669 	[igc_stat_ptc255] =
2670 	    { "tx 128-255B",		KSTAT_KV_U_PACKETS,	IGC_PTC255 },
2671 	[igc_stat_ptc511] =
2672 	    { "tx 256-511B",		KSTAT_KV_U_PACKETS,	IGC_PTC511 },
2673 	[igc_stat_ptc1023] =
2674 	    { "tx 512-1023B",		KSTAT_KV_U_PACKETS,	IGC_PTC1023 },
2675 	[igc_stat_ptc1522] =
2676 	    { "tx 1024-maxB",		KSTAT_KV_U_PACKETS,	IGC_PTC1522 },
2677 	[igc_stat_mptc] =
2678 	    { "tx mcast",		KSTAT_KV_U_PACKETS,	IGC_MPTC },
2679 	[igc_stat_bptc] =
2680 	    { "tx bcast",		KSTAT_KV_U_PACKETS,	IGC_BPTC },
2681 	[igc_stat_tsctc] =
2682 	    { "tx tso ctx",		KSTAT_KV_U_NONE,	IGC_TSCTC },
2683 
2684 	[igc_stat_iac] =
2685 	    { "interrupts",		KSTAT_KV_U_NONE,	IGC_IAC },
2686 	[igc_stat_rpthc] =
2687 	    { "rx to host",		KSTAT_KV_U_PACKETS,	IGC_RPTHC },
2688 	[igc_stat_tlpic] =
2689 	    { "eee tx lpi",		KSTAT_KV_U_NONE,	IGC_TLPIC },
2690 	[igc_stat_rlpic] =
2691 	    { "eee rx lpi",		KSTAT_KV_U_NONE,	IGC_RLPIC },
2692 	[igc_stat_hgptc] =
2693 	    { "host rx",		KSTAT_KV_U_PACKETS,	IGC_HGPTC },
2694 	[igc_stat_rxdmtc] =
2695 	    { "rxd min thresh",		KSTAT_KV_U_NONE,	IGC_RXDMTC },
2696 	[igc_stat_hgorc] =
2697 	    { "host good rx",		KSTAT_KV_U_BYTES,	0 },
2698 	[igc_stat_hgotc] =
2699 	    { "host good tx",		KSTAT_KV_U_BYTES,	0 },
2700 	[igc_stat_lenerrs] =
2701 	    { "len errs",		KSTAT_KV_U_NONE,	IGC_LENERRS },
2702 };
2703 
2704 static void
2705 igc_stat_read(struct igc_softc *sc)
2706 {
2707 	struct igc_hw *hw = &sc->hw;
2708 	struct kstat *ks = sc->ks;
2709 	struct kstat_kv *kvs = ks->ks_data;
2710 	uint32_t hi, lo;
2711 	unsigned int i;
2712 
2713 	for (i = 0; i < nitems(igc_counters); i++) {
2714 		const struct igc_counter *c = &igc_counters[i];
2715 		if (c->reg == 0)
2716 			continue;
2717 
2718 		kstat_kv_u64(&kvs[i]) += IGC_READ_REG(hw, c->reg);
2719 	}
2720 
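	/*
	 * The byte counters have reg == 0 in the table above because they
	 * are 64 bits wide; accumulate those from their low/high register
	 * pairs instead.
	 */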
2721 	lo = IGC_READ_REG(hw, IGC_GORCL);
2722 	hi = IGC_READ_REG(hw, IGC_GORCH);
2723 	kstat_kv_u64(&kvs[igc_stat_gorc]) +=
2724 	    ((uint64_t)hi << 32) | ((uint64_t)lo << 0);
2725 
2726 	lo = IGC_READ_REG(hw, IGC_GOTCL);
2727 	hi = IGC_READ_REG(hw, IGC_GOTCH);
2728 	kstat_kv_u64(&kvs[igc_stat_gotc]) +=
2729 	    ((uint64_t)hi << 32) | ((uint64_t)lo << 0);
2730 
2731 	lo = IGC_READ_REG(hw, IGC_TORL);
2732 	hi = IGC_READ_REG(hw, IGC_TORH);
2733 	kstat_kv_u64(&kvs[igc_stat_tor]) +=
2734 	    ((uint64_t)hi << 32) | ((uint64_t)lo << 0);
2735 
2736 	lo = IGC_READ_REG(hw, IGC_TOTL);
2737 	hi = IGC_READ_REG(hw, IGC_TOTH);
2738 	kstat_kv_u64(&kvs[igc_stat_tot]) +=
2739 	    ((uint64_t)hi << 32) | ((uint64_t)lo << 0);
2740 
2741 	lo = IGC_READ_REG(hw, IGC_HGORCL);
2742 	hi = IGC_READ_REG(hw, IGC_HGORCH);
2743 	kstat_kv_u64(&kvs[igc_stat_hgorc]) +=
2744 	    ((uint64_t)hi << 32) | ((uint64_t)lo << 0);
2745 
2746 	lo = IGC_READ_REG(hw, IGC_HGOTCL);
2747 	hi = IGC_READ_REG(hw, IGC_HGOTCH);
2748 	kstat_kv_u64(&kvs[igc_stat_hgotc]) +=
2749 	    ((uint64_t)hi << 32) | ((uint64_t)lo << 0);
2750 }
2751 
2752 static void
2753 igc_kstat_tick(void *arg)
2754 {
2755 	struct igc_softc *sc = arg;
2756 
2757 	if (mtx_enter_try(&sc->ks_mtx)) {
2758 		igc_stat_read(sc);
2759 		mtx_leave(&sc->ks_mtx);
2760 	}
2761 
2762 	timeout_add_sec(&sc->ks_tmo, 4);
2763 }
2764 
2765 static int
2766 igc_kstat_read(struct kstat *ks)
2767 {
2768 	struct igc_softc *sc = ks->ks_softc;
2769 
2770 	igc_stat_read(sc);
2771 	nanouptime(&ks->ks_updated);
2772 
2773 	return (0);
2774 }
2775 
2776 void
2777 igc_kstat_attach(struct igc_softc *sc)
2778 {
2779 	struct kstat *ks;
2780 	struct kstat_kv *kvs;
2781 	size_t len;
2782 	unsigned int i;
2783 
2784 	mtx_init(&sc->ks_mtx, IPL_SOFTCLOCK);
2785 	timeout_set(&sc->ks_tmo, igc_kstat_tick, sc);
2786 
2787 	kvs = mallocarray(sizeof(*kvs), nitems(igc_counters), M_DEVBUF,
2788 	    M_WAITOK|M_ZERO|M_CANFAIL);
2789 	if (kvs == NULL) {
2790 		printf("%s: unable to allocate igc kstats\n", DEVNAME(sc));
2791 		return;
2792 	}
2793 	len = sizeof(*kvs) * nitems(igc_counters);
2794 
2795 	ks = kstat_create(DEVNAME(sc), 0, "igc-stats", 0, KSTAT_T_KV, 0);
2796 	if (ks == NULL) {
2797 		printf("%s: unable to create igc kstats\n", DEVNAME(sc));
2798 		free(kvs, M_DEVBUF, len);
2799 		return;
2800 	}
2801 
2802 	for (i = 0; i < nitems(igc_counters); i++) {
2803 		const struct igc_counter *c = &igc_counters[i];
2804 		kstat_kv_unit_init(&kvs[i], c->name,
2805 		    KSTAT_KV_T_COUNTER64, c->unit);
2806 	}
2807 
2808 	ks->ks_softc = sc;
2809 	ks->ks_data = kvs;
2810 	ks->ks_datalen = len;
2811 	ks->ks_read = igc_kstat_read;
2812 	kstat_set_mutex(ks, &sc->ks_mtx);
2813 
2814 	kstat_install(ks);
2815 
2816 	sc->ks = ks;
2817 
2818 	igc_kstat_tick(sc); /* let's gooo */
2819 }
2820 #endif /* NKSTAT > 0 */
2821