xref: /openbsd/sys/dev/pci/if_ixgb.c (revision 8529ddd3)
1 /**************************************************************************
2 
3 Copyright (c) 2001-2005, Intel Corporation
4 All rights reserved.
5 
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
8 
9  1. Redistributions of source code must retain the above copyright notice,
10     this list of conditions and the following disclaimer.
11 
12  2. Redistributions in binary form must reproduce the above copyright
13     notice, this list of conditions and the following disclaimer in the
14     documentation and/or other materials provided with the distribution.
15 
16  3. Neither the name of the Intel Corporation nor the names of its
17     contributors may be used to endorse or promote products derived from
18     this software without specific prior written permission.
19 
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
31 
32 ***************************************************************************/
33 
34 /* $OpenBSD: if_ixgb.c,v 1.64 2015/04/30 07:51:07 mpi Exp $ */
35 
36 #include <dev/pci/if_ixgb.h>
37 
38 #ifdef IXGB_DEBUG
39 /*********************************************************************
40  *  Set this to one to display debug statistics
41  *********************************************************************/
42 int             ixgb_display_debug_stats = 0;
43 #endif
44 
45 /*********************************************************************
46  *  Driver version
47  *********************************************************************/
48 
49 #define IXGB_DRIVER_VERSION	"6.1.0"
50 
51 /*********************************************************************
52  *  PCI Device ID Table
53  *********************************************************************/
54 
55 const struct pci_matchid ixgb_devices[] = {
56 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82597EX },
57 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82597EX_SR },
58 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82597EX_LR },
59 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82597EX_CX4 },
60 };
61 
62 /*********************************************************************
63  *  Function prototypes
64  *********************************************************************/
65 int  ixgb_probe(struct device *, void *, void *);
66 void ixgb_attach(struct device *, struct device *, void *);
67 int  ixgb_intr(void *);
68 void ixgb_start(struct ifnet *);
69 int  ixgb_ioctl(struct ifnet *, u_long, caddr_t);
70 void ixgb_watchdog(struct ifnet *);
71 void ixgb_init(void *);
72 void ixgb_stop(void *);
73 void ixgb_media_status(struct ifnet *, struct ifmediareq *);
74 int  ixgb_media_change(struct ifnet *);
75 void ixgb_identify_hardware(struct ixgb_softc *);
76 int  ixgb_allocate_pci_resources(struct ixgb_softc *);
77 void ixgb_free_pci_resources(struct ixgb_softc *);
78 void ixgb_local_timer(void *);
79 int  ixgb_hardware_init(struct ixgb_softc *);
80 void ixgb_setup_interface(struct ixgb_softc *);
81 int  ixgb_setup_transmit_structures(struct ixgb_softc *);
82 void ixgb_initialize_transmit_unit(struct ixgb_softc *);
83 int  ixgb_setup_receive_structures(struct ixgb_softc *);
84 void ixgb_initialize_receive_unit(struct ixgb_softc *);
85 void ixgb_enable_intr(struct ixgb_softc *);
86 void ixgb_disable_intr(struct ixgb_softc *);
87 void ixgb_free_transmit_structures(struct ixgb_softc *);
88 void ixgb_free_receive_structures(struct ixgb_softc *);
89 void ixgb_update_stats_counters(struct ixgb_softc *);
90 void ixgb_txeof(struct ixgb_softc *);
91 int  ixgb_allocate_receive_structures(struct ixgb_softc *);
92 int  ixgb_allocate_transmit_structures(struct ixgb_softc *);
93 void ixgb_rxeof(struct ixgb_softc *, int);
94 void
95 ixgb_receive_checksum(struct ixgb_softc *,
96 		      struct ixgb_rx_desc * rx_desc,
97 		      struct mbuf *);
98 void
99 ixgb_transmit_checksum_setup(struct ixgb_softc *,
100 			     struct mbuf *,
101 			     u_int8_t *);
102 void ixgb_set_promisc(struct ixgb_softc *);
103 void ixgb_set_multi(struct ixgb_softc *);
104 #ifdef IXGB_DEBUG
105 void ixgb_print_hw_stats(struct ixgb_softc *);
106 #endif
107 void ixgb_update_link_status(struct ixgb_softc *);
108 int
109 ixgb_get_buf(struct ixgb_softc *, int i,
110 	     struct mbuf *);
111 void ixgb_enable_hw_vlans(struct ixgb_softc *);
112 int  ixgb_encap(struct ixgb_softc *, struct mbuf *);
113 int
114 ixgb_dma_malloc(struct ixgb_softc *, bus_size_t,
115 		struct ixgb_dma_alloc *, int);
116 void ixgb_dma_free(struct ixgb_softc *, struct ixgb_dma_alloc *);
117 
118 /*********************************************************************
119  *  OpenBSD Device Interface Entry Points
120  *********************************************************************/
121 
122 struct cfattach ixgb_ca = {
123 	sizeof(struct ixgb_softc), ixgb_probe, ixgb_attach
124 };
125 
126 struct cfdriver ixgb_cd = {
127 	NULL, "ixgb", DV_IFNET
128 };
129 
130 /* some defines for controlling descriptor fetches in h/w */
131 #define RXDCTL_PTHRESH_DEFAULT 0	/* chip considers prefetch below this */
132 #define RXDCTL_HTHRESH_DEFAULT 0	/* chip will only prefetch if tail is
133 					 * pushed this many descriptors from
134 					 * head */
135 #define RXDCTL_WTHRESH_DEFAULT 0	/* chip writes back at this many or RXT0 */
136 
137 
138 /*********************************************************************
139  *  Device identification routine
140  *
141  *  ixgb_probe determines whether the driver should be loaded on an
142  *  adapter based on the PCI vendor/device ID of the adapter.
143  *
144  *  return 0 on no match, positive on match
145  *********************************************************************/
146 
147 int
148 ixgb_probe(struct device *parent, void *match, void *aux)
149 {
150 	INIT_DEBUGOUT("ixgb_probe: begin");
151 
152 	return (pci_matchbyid((struct pci_attach_args *)aux, ixgb_devices,
153 	    nitems(ixgb_devices)));
154 }
155 
156 /*********************************************************************
157  *  Device initialization routine
158  *
159  *  The attach entry point is called when the driver is being loaded.
160  *  This routine identifies the type of hardware, allocates all resources
161  *  and initializes the hardware.
162  *
163  *********************************************************************/
164 
165 void
166 ixgb_attach(struct device *parent, struct device *self, void *aux)
167 {
168 	struct pci_attach_args *pa = aux;
169 	struct ixgb_softc *sc;
170 	int             tsize, rsize;
171 
172 	INIT_DEBUGOUT("ixgb_attach: begin");
173 
174 	sc = (struct ixgb_softc *)self;
175 	sc->osdep.ixgb_pa = *pa;
176 
177 	timeout_set(&sc->timer_handle, ixgb_local_timer, sc);
178 
179 	/* Determine hardware revision */
180 	ixgb_identify_hardware(sc);
181 
182 	/* Parameters (to be read from user) */
183 	sc->num_tx_desc = IXGB_MAX_TXD;
184 	sc->num_rx_desc = IXGB_MAX_RXD;
185 	sc->tx_int_delay = TIDV;
186 	sc->rx_int_delay = RDTR;
187 	sc->rx_buffer_len = IXGB_RXBUFFER_2048;
188 
189 	/*
190 	 * These parameters control the automatic generation (Tx) of and
191 	 * response (Rx) to Ethernet PAUSE frames.
192 	 */
193 	sc->hw.fc.high_water = FCRTH;
194 	sc->hw.fc.low_water = FCRTL;
195 	sc->hw.fc.pause_time = FCPAUSE;
196 	sc->hw.fc.send_xon = TRUE;
197 	sc->hw.fc.type = FLOW_CONTROL;
198 
199 	/* Set the max frame size to the largest (jumbo) frame the hardware supports */
200 	sc->hw.max_frame_size = IXGB_MAX_JUMBO_FRAME_SIZE;
201 
202 	if (ixgb_allocate_pci_resources(sc))
203 		goto err_pci;
204 
205 	tsize = IXGB_ROUNDUP(sc->num_tx_desc * sizeof(struct ixgb_tx_desc),
206 	    IXGB_MAX_TXD * sizeof(struct ixgb_tx_desc));
207 	tsize = IXGB_ROUNDUP(tsize, PAGE_SIZE);
208 
209 	/* Allocate Transmit Descriptor ring */
210 	if (ixgb_dma_malloc(sc, tsize, &sc->txdma, BUS_DMA_NOWAIT)) {
211 		printf("%s: Unable to allocate TxDescriptor memory\n",
212 		       sc->sc_dv.dv_xname);
213 		goto err_tx_desc;
214 	}
215 	sc->tx_desc_base = (struct ixgb_tx_desc *) sc->txdma.dma_vaddr;
216 
217 	rsize = IXGB_ROUNDUP(sc->num_rx_desc * sizeof(struct ixgb_rx_desc),
218 	    IXGB_MAX_RXD * sizeof(struct ixgb_rx_desc));
219 	rsize = IXGB_ROUNDUP(rsize, PAGE_SIZE);
220 
221 	/* Allocate Receive Descriptor ring */
222 	if (ixgb_dma_malloc(sc, rsize, &sc->rxdma, BUS_DMA_NOWAIT)) {
223 		printf("%s: Unable to allocate rx_desc memory\n",
224 		       sc->sc_dv.dv_xname);
225 		goto err_rx_desc;
226 	}
227 	sc->rx_desc_base = (struct ixgb_rx_desc *) sc->rxdma.dma_vaddr;
228 
229 	/* Initialize the hardware */
230 	if (ixgb_hardware_init(sc)) {
231 		printf("%s: Unable to initialize the hardware\n",
232 		       sc->sc_dv.dv_xname);
233 		goto err_hw_init;
234 	}
235 
236 	/* Setup OS specific network interface */
237 	ixgb_setup_interface(sc);
238 
239 	/* Initialize statistics */
240 	ixgb_clear_hw_cntrs(&sc->hw);
241 	ixgb_update_stats_counters(sc);
242 	ixgb_update_link_status(sc);
243 
244 	printf(", address %s\n", ether_sprintf(sc->interface_data.ac_enaddr));
245 
246 	INIT_DEBUGOUT("ixgb_attach: end");
247 	return;
248 
249 err_hw_init:
250 	ixgb_dma_free(sc, &sc->rxdma);
251 err_rx_desc:
252 	ixgb_dma_free(sc, &sc->txdma);
253 err_tx_desc:
254 err_pci:
255 	ixgb_free_pci_resources(sc);
256 }
257 
258 /*********************************************************************
259  *  Transmit entry point
260  *
261  *  ixgb_start is called by the stack to initiate a transmit.
262  *  The driver will remain in this routine as long as there are
263  *  packets to transmit and transmit resources are available.
264  *  If resources are not available, the stack is notified and the
265  *  packet is left in the send queue.
266  **********************************************************************/
267 
268 void
269 ixgb_start(struct ifnet *ifp)
270 {
271 	struct mbuf    *m_head;
272 	struct ixgb_softc *sc = ifp->if_softc;
273 	int		post = 0;
274 
275 	if ((ifp->if_flags & (IFF_OACTIVE | IFF_RUNNING)) != IFF_RUNNING)
276 		return;
277 
278 	if (!sc->link_active)
279 		return;
280 
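	/*
	 * Bracket CPU access to the descriptor ring with bus_dmamap_sync(9):
	 * POSTREAD|POSTWRITE before the CPU touches the ring, and
	 * PREREAD|PREWRITE (after the loop below) before handing the ring
	 * back to the hardware.
	 */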
281 	bus_dmamap_sync(sc->txdma.dma_tag, sc->txdma.dma_map, 0,
282 	    sc->txdma.dma_map->dm_mapsize,
283 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
284 
285 	for (;;) {
286 		IFQ_POLL(&ifp->if_snd, m_head);
287 		if (m_head == NULL)
288 			break;
289 
290 		if (ixgb_encap(sc, m_head)) {
291 			ifp->if_flags |= IFF_OACTIVE;
292 			break;
293 		}
294 
295 		IFQ_DEQUEUE(&ifp->if_snd, m_head);
296 
297 #if NBPFILTER > 0
298 		/* Send a copy of the frame to the BPF listener */
299 		if (ifp->if_bpf)
300 			bpf_mtap_ether(ifp->if_bpf, m_head, BPF_DIRECTION_OUT);
301 #endif
302 
303 		/* Set timeout in case hardware has problems transmitting */
304 		ifp->if_timer = IXGB_TX_TIMEOUT;
305 
306 		post = 1;
307 	}
308 
309 	bus_dmamap_sync(sc->txdma.dma_tag, sc->txdma.dma_map, 0,
310 	    sc->txdma.dma_map->dm_mapsize,
311 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
312 	/*
313 	 * Advance the Transmit Descriptor Tail (TDT);
314 	 * this tells the hardware that the queued frames
315 	 * are available to transmit.
316 	 */
317 	if (post)
318 		IXGB_WRITE_REG(&sc->hw, TDT, sc->next_avail_tx_desc);
319 }
320 
321 /*********************************************************************
322  *  Ioctl entry point
323  *
324  *  ixgb_ioctl is called when the user wants to configure the
325  *  interface.
326  *
327  *  return 0 on success, positive on failure
328  **********************************************************************/
329 
330 int
331 ixgb_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
332 {
333 	struct ixgb_softc *sc = ifp->if_softc;
334 	struct ifaddr	*ifa = (struct ifaddr *) data;
335 	struct ifreq	*ifr = (struct ifreq *) data;
336 	int		s, error = 0;
337 
338 	s = splnet();
339 
340 	switch (command) {
341 	case SIOCSIFADDR:
342 		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFADDR (Set Interface "
343 			       "Addr)");
344 		ifp->if_flags |= IFF_UP;
345 		if (!(ifp->if_flags & IFF_RUNNING))
346 			ixgb_init(sc);
347 		if (ifa->ifa_addr->sa_family == AF_INET)
348 			arp_ifinit(&sc->interface_data, ifa);
349 		break;
350 
351 	case SIOCSIFFLAGS:
352 		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFFLAGS (Set Interface Flags)");
353 		if (ifp->if_flags & IFF_UP) {
354 			/*
355 			 * If only the PROMISC or ALLMULTI flag changes, then
356 			 * don't do a full re-init of the chip, just update
357 			 * the Rx filter.
358 			 */
359 			if ((ifp->if_flags & IFF_RUNNING) &&
360 			    ((ifp->if_flags ^ sc->if_flags) &
361 			     (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
362 				ixgb_set_promisc(sc);
363 			} else {
364 				if (!(ifp->if_flags & IFF_RUNNING))
365 					ixgb_init(sc);
366 			}
367 		} else {
368 			if (ifp->if_flags & IFF_RUNNING)
369 				ixgb_stop(sc);
370 		}
371 		sc->if_flags = ifp->if_flags;
372 		break;
373 
374 	case SIOCSIFMEDIA:
375 	case SIOCGIFMEDIA:
376 		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCxIFMEDIA (Get/Set Interface Media)");
377 		error = ifmedia_ioctl(ifp, ifr, &sc->media, command);
378 		break;
379 
380 	default:
381 		error = ether_ioctl(ifp, &sc->interface_data, command, data);
382 	}
383 
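	/*
	 * ENETRESET from ether_ioctl() means the multicast list changed;
	 * reprogram the hardware filter only if the interface is running.
	 */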
384 	if (error == ENETRESET) {
385 		if (ifp->if_flags & IFF_RUNNING) {
386 			ixgb_disable_intr(sc);
387 			ixgb_set_multi(sc);
388 			ixgb_enable_intr(sc);
389 		}
390 		error = 0;
391 	}
392 
393 	splx(s);
394 	return (error);
395 }
396 
397 /*********************************************************************
398  *  Watchdog entry point
399  *
400  *  This routine is called whenever hardware quits transmitting.
401  *
402  **********************************************************************/
403 
404 void
405 ixgb_watchdog(struct ifnet * ifp)
406 {
407 	struct ixgb_softc *sc = ifp->if_softc;
408 
409 	/*
410 	 * If we are in this routine because of pause frames, then don't
411 	 * reset the hardware.
412 	 */
413 	if (IXGB_READ_REG(&sc->hw, STATUS) & IXGB_STATUS_TXOFF) {
414 		ifp->if_timer = IXGB_TX_TIMEOUT;
415 		return;
416 	}
417 
418 	printf("%s: watchdog timeout -- resetting\n", sc->sc_dv.dv_xname);
419 
420 	ixgb_init(sc);
421 
422 	sc->watchdog_events++;
423 }
424 
425 /*********************************************************************
426  *  Init entry point
427  *
428  *  This routine is used in two ways. It is used by the stack as
429  *  the init entry point in the network interface structure. It is also used
430  *  by the driver as a hw/sw initialization routine to get to a
431  *  consistent state.
432  *
433  **********************************************************************/
434 
435 void
436 ixgb_init(void *arg)
437 {
438 	struct ixgb_softc *sc = arg;
439 	struct ifnet   *ifp = &sc->interface_data.ac_if;
440 	uint32_t temp_reg;
441 	int s;
442 
443 	INIT_DEBUGOUT("ixgb_init: begin");
444 
445 	s = splnet();
446 
447 	ixgb_stop(sc);
448 
449 	/* Get the latest MAC address; the user may have set a locally administered address (LAA) */
450 	bcopy(sc->interface_data.ac_enaddr, sc->hw.curr_mac_addr,
451 	      IXGB_ETH_LENGTH_OF_ADDRESS);
452 
453 	/* Initialize the hardware */
454 	if (ixgb_hardware_init(sc)) {
455 		printf("%s: Unable to initialize the hardware\n",
456 		       sc->sc_dv.dv_xname);
457 		splx(s);
458 		return;
459 	}
460 
461 	if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)
462 		ixgb_enable_hw_vlans(sc);
463 
464 	/* Prepare transmit descriptors and buffers */
465 	if (ixgb_setup_transmit_structures(sc)) {
466 		printf("%s: Could not setup transmit structures\n",
467 		       sc->sc_dv.dv_xname);
468 		ixgb_stop(sc);
469 		splx(s);
470 		return;
471 	}
472 	ixgb_initialize_transmit_unit(sc);
473 
474 	/* Setup Multicast table */
475 	ixgb_set_multi(sc);
476 
477 	/* Prepare receive descriptors and buffers */
478 	if (ixgb_setup_receive_structures(sc)) {
479 		printf("%s: Could not setup receive structures\n",
480 		       sc->sc_dv.dv_xname);
481 		ixgb_stop(sc);
482 		splx(s);
483 		return;
484 	}
485 	ixgb_initialize_receive_unit(sc);
486 
487 	/* Don't lose promiscuous settings */
488 	ixgb_set_promisc(sc);
489 
490 	ifp->if_flags |= IFF_RUNNING;
491 	ifp->if_flags &= ~IFF_OACTIVE;
492 
493 	/* Enable jumbo frames */
494 	IXGB_WRITE_REG(&sc->hw, MFRMS,
495 	    sc->hw.max_frame_size << IXGB_MFRMS_SHIFT);
496 	temp_reg = IXGB_READ_REG(&sc->hw, CTRL0);
497 	temp_reg |= IXGB_CTRL0_JFE;
498 	IXGB_WRITE_REG(&sc->hw, CTRL0, temp_reg);
499 
500 	timeout_add_sec(&sc->timer_handle, 1);
501 	ixgb_clear_hw_cntrs(&sc->hw);
502 	ixgb_enable_intr(sc);
503 
504 	splx(s);
505 }
506 
507 /*********************************************************************
508  *
509  *  Interrupt Service routine
510  *
511  **********************************************************************/
512 
513 int
514 ixgb_intr(void *arg)
515 {
516 	struct ixgb_softc *sc = arg;
517 	struct ifnet	*ifp;
518 	u_int32_t	reg_icr;
519 	boolean_t	rxdmt0 = FALSE;
520 	int claimed = 0;
521 
522 	ifp = &sc->interface_data.ac_if;
523 
524 	for (;;) {
525 		reg_icr = IXGB_READ_REG(&sc->hw, ICR);
526 		if (reg_icr == 0)
527 			break;
528 
529 		claimed = 1;
530 
531 		if (reg_icr & IXGB_INT_RXDMT0)
532 			rxdmt0 = TRUE;
533 
534 		if (ifp->if_flags & IFF_RUNNING) {
535 			ixgb_rxeof(sc, -1);
536 			ixgb_txeof(sc);
537 		}
538 
539 		/* Link status change */
540 		if (reg_icr & (IXGB_INT_RXSEQ | IXGB_INT_LSC)) {
541 			timeout_del(&sc->timer_handle);
542 			ixgb_check_for_link(&sc->hw);
543 			ixgb_update_link_status(sc);
544 			timeout_add_sec(&sc->timer_handle, 1);
545 		}
546 
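		/*
		 * RXDMT0 fired: the number of free receive descriptors
		 * dropped below the minimum threshold.  Writing IMC then IMS
		 * clears and re-arms that interrupt source (presumably so
		 * RAIDC-moderated receive interrupts keep flowing).
		 */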
547 		if (rxdmt0 && sc->raidc) {
548 			IXGB_WRITE_REG(&sc->hw, IMC, IXGB_INT_RXDMT0);
549 			IXGB_WRITE_REG(&sc->hw, IMS, IXGB_INT_RXDMT0);
550 		}
551 	}
552 
553 	if (ifp->if_flags & IFF_RUNNING && !IFQ_IS_EMPTY(&ifp->if_snd))
554 		ixgb_start(ifp);
555 
556 	return (claimed);
557 }
558 
559 
560 /*********************************************************************
561  *
562  *  Media Ioctl callback
563  *
564  *  This routine is called whenever the user queries the status of
565  *  the interface using ifconfig.
566  *
567  **********************************************************************/
568 void
569 ixgb_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
570 {
571 	struct ixgb_softc *sc = ifp->if_softc;
572 
573 	INIT_DEBUGOUT("ixgb_media_status: begin");
574 
575 	ixgb_check_for_link(&sc->hw);
576 	ixgb_update_link_status(sc);
577 
578 	ifmr->ifm_status = IFM_AVALID;
579 	ifmr->ifm_active = IFM_ETHER;
580 
581 	if (!sc->hw.link_up) {
582 		ifmr->ifm_active |= IFM_NONE;
583 		return;
584 	}
585 
586 	ifmr->ifm_status |= IFM_ACTIVE;
587 	if ((sc->hw.phy_type == ixgb_phy_type_g6104) ||
588 	    (sc->hw.phy_type == ixgb_phy_type_txn17401))
589 		ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
590 	else
591 		ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
592 
593 	return;
594 }
595 
596 /*********************************************************************
597  *
598  *  Media Ioctl callback
599  *
600  *  This routine is called when the user changes speed/duplex using
601  *  the media/mediaopt options with ifconfig.
602  *
603  **********************************************************************/
604 int
605 ixgb_media_change(struct ifnet * ifp)
606 {
607 	struct ixgb_softc *sc = ifp->if_softc;
608 	struct ifmedia *ifm = &sc->media;
609 
610 	INIT_DEBUGOUT("ixgb_media_change: begin");
611 
612 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
613 		return (EINVAL);
614 
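	/*
	 * The 82597EX runs fixed 10G full-duplex, so there is nothing to
	 * reprogram here beyond validating the request.
	 */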
615 	return (0);
616 }
617 
618 /*********************************************************************
619  *
620  *  This routine maps the mbufs to tx descriptors.
621  *
622  *  return 0 on success, positive on failure
623  **********************************************************************/
624 
625 int
626 ixgb_encap(struct ixgb_softc *sc, struct mbuf *m_head)
627 {
628 	u_int8_t        txd_popts;
629 	int             i, j, error = 0;
630 	bus_dmamap_t	map;
631 
632 	struct ixgb_buffer *tx_buffer;
633 	struct ixgb_tx_desc *current_tx_desc = NULL;
634 
635 	/*
636 	 * Force a cleanup if number of TX descriptors available hits the
637 	 * threshold
638 	 */
639 	if (sc->num_tx_desc_avail <= IXGB_TX_CLEANUP_THRESHOLD) {
640 		ixgb_txeof(sc);
641 		/* Do we now have at least the minimum number available? */
642 		if (sc->num_tx_desc_avail <= IXGB_TX_CLEANUP_THRESHOLD) {
643 			sc->no_tx_desc_avail1++;
644 			return (ENOBUFS);
645 		}
646 	}
647 
648 	/*
649 	 * Map the packet for DMA.
650 	 */
651 	tx_buffer = &sc->tx_buffer_area[sc->next_avail_tx_desc];
652 	map = tx_buffer->map;
653 
654 	error = bus_dmamap_load_mbuf(sc->txtag, map,
655 				     m_head, BUS_DMA_NOWAIT);
656 	if (error != 0) {
657 		sc->no_tx_dma_setup++;
658 		return (error);
659 	}
660 	IXGB_KASSERT(map->dm_nsegs != 0, ("ixgb_encap: empty packet"));
661 
662 	if (map->dm_nsegs > sc->num_tx_desc_avail)
663 		goto fail;
664 
665 #ifdef IXGB_CSUM_OFFLOAD
666 	ixgb_transmit_checksum_setup(sc, m_head, &txd_popts);
667 #else
668 	txd_popts = 0;
669 #endif
670 
671 	i = sc->next_avail_tx_desc;
672 	for (j = 0; j < map->dm_nsegs; j++) {
673 		tx_buffer = &sc->tx_buffer_area[i];
674 		current_tx_desc = &sc->tx_desc_base[i];
675 
676 		current_tx_desc->buff_addr = htole64(map->dm_segs[j].ds_addr);
677 		current_tx_desc->cmd_type_len = htole32((sc->txd_cmd | map->dm_segs[j].ds_len));
678 		current_tx_desc->popts = txd_popts;
679 		if (++i == sc->num_tx_desc)
680 			i = 0;
681 
682 		tx_buffer->m_head = NULL;
683 	}
684 
685 	sc->num_tx_desc_avail -= map->dm_nsegs;
686 	sc->next_avail_tx_desc = i;
687 
688 	/* If the frame carries a VLAN tag, have the hardware insert it */
689 	if (m_head->m_flags & M_VLANTAG) {
690 		/* Set the VLAN id */
691 		current_tx_desc->vlan = htole16(m_head->m_pkthdr.ether_vtag);
692 
693 		/* Tell hardware to add tag */
694 		current_tx_desc->cmd_type_len |= htole32(IXGB_TX_DESC_CMD_VLE);
695 	}
696 
697 	tx_buffer->m_head = m_head;
698 	bus_dmamap_sync(sc->txtag, map, 0, map->dm_mapsize,
699 	    BUS_DMASYNC_PREWRITE);
700 
701 	/*
702 	 * Last Descriptor of Packet needs End Of Packet (EOP)
703 	 */
704 	current_tx_desc->cmd_type_len |= htole32(IXGB_TX_DESC_CMD_EOP);
705 
706 	return (0);
707 
708 fail:
709 	sc->no_tx_desc_avail2++;
710 	bus_dmamap_unload(sc->txtag, map);
711 	return (ENOBUFS);
712 }
713 
714 void
715 ixgb_set_promisc(struct ixgb_softc *sc)
716 {
717 
718 	u_int32_t       reg_rctl;
719 	struct ifnet   *ifp = &sc->interface_data.ac_if;
720 
721 	reg_rctl = IXGB_READ_REG(&sc->hw, RCTL);
722 
723 	if (ifp->if_flags & IFF_PROMISC) {
724 		reg_rctl |= (IXGB_RCTL_UPE | IXGB_RCTL_MPE);
725 	} else if (ifp->if_flags & IFF_ALLMULTI) {
726 		reg_rctl |= IXGB_RCTL_MPE;
727 		reg_rctl &= ~IXGB_RCTL_UPE;
728 	} else {
729 		reg_rctl &= ~(IXGB_RCTL_UPE | IXGB_RCTL_MPE);
730 	}
731 	IXGB_WRITE_REG(&sc->hw, RCTL, reg_rctl);
732 }
733 
734 /*********************************************************************
735  *  Multicast Update
736  *
737  *  This routine is called whenever the multicast address list is updated.
738  *
739  **********************************************************************/
740 
741 void
742 ixgb_set_multi(struct ixgb_softc *sc)
743 {
744 	u_int32_t       reg_rctl = 0;
745 	u_int8_t        mta[MAX_NUM_MULTICAST_ADDRESSES * IXGB_ETH_LENGTH_OF_ADDRESS];
746 	int             mcnt = 0;
747 	struct ifnet   *ifp = &sc->interface_data.ac_if;
748 	struct arpcom *ac = &sc->interface_data;
749 	struct ether_multi *enm;
750 	struct ether_multistep step;
751 
752 	IOCTL_DEBUGOUT("ixgb_set_multi: begin");
753 
754 	if (ac->ac_multirangecnt > 0) {
755 		ifp->if_flags |= IFF_ALLMULTI;
756 		mcnt = MAX_NUM_MULTICAST_ADDRESSES;
757 		goto setit;
758 	}
759 
760 	ETHER_FIRST_MULTI(step, ac, enm);
761 	while (enm != NULL) {
762 		if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
763 			break;
764 		bcopy(enm->enm_addrlo, &mta[mcnt*IXGB_ETH_LENGTH_OF_ADDRESS],
765 		      IXGB_ETH_LENGTH_OF_ADDRESS);
766 		mcnt++;
767 		ETHER_NEXT_MULTI(step, enm);
768 	}
769 
770 setit:
771 	if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES) {
772 		reg_rctl = IXGB_READ_REG(&sc->hw, RCTL);
773 		reg_rctl |= IXGB_RCTL_MPE;
774 		IXGB_WRITE_REG(&sc->hw, RCTL, reg_rctl);
775 	} else
776 		ixgb_mc_addr_list_update(&sc->hw, mta, mcnt, 0);
777 }
778 
779 
780 /*********************************************************************
781  *  Timer routine
782  *
783  *  This routine checks for link status and updates statistics.
784  *
785  **********************************************************************/
786 
787 void
788 ixgb_local_timer(void *arg)
789 {
790 	struct ifnet   *ifp;
791 	struct ixgb_softc *sc = arg;
792 	int s;
793 
794 	ifp = &sc->interface_data.ac_if;
795 
796 	s = splnet();
797 
798 	ixgb_check_for_link(&sc->hw);
799 	ixgb_update_link_status(sc);
800 	ixgb_update_stats_counters(sc);
801 #ifdef IXGB_DEBUG
802 	if (ixgb_display_debug_stats && ifp->if_flags & IFF_RUNNING)
803 		ixgb_print_hw_stats(sc);
804 #endif
805 
806 	timeout_add_sec(&sc->timer_handle, 1);
807 
808 	splx(s);
809 }
810 
811 void
812 ixgb_update_link_status(struct ixgb_softc *sc)
813 {
814 	struct ifnet *ifp = &sc->interface_data.ac_if;
815 
816 	if (sc->hw.link_up) {
817 		if (!sc->link_active) {
818 			ifp->if_baudrate = IF_Gbps(10);
819 			sc->link_active = 1;
820 			ifp->if_link_state = LINK_STATE_FULL_DUPLEX;
821 			if_link_state_change(ifp);
822 		}
823 	} else {
824 		if (sc->link_active) {
825 			ifp->if_baudrate = 0;
826 			sc->link_active = 0;
827 			ifp->if_link_state = LINK_STATE_DOWN;
828 			if_link_state_change(ifp);
829 		}
830 	}
831 }
832 
833 /*********************************************************************
834  *
835  *  This routine disables all traffic on the adapter by issuing a
836  *  global reset on the MAC and deallocates TX/RX buffers.
837  *
838  **********************************************************************/
839 
840 void
841 ixgb_stop(void *arg)
842 {
843 	struct ifnet   *ifp;
844 	struct ixgb_softc *sc = arg;
845 	ifp = &sc->interface_data.ac_if;
846 
847 	INIT_DEBUGOUT("ixgb_stop: begin\n");
848 	ixgb_disable_intr(sc);
849 	sc->hw.adapter_stopped = FALSE;
850 	ixgb_adapter_stop(&sc->hw);
851 	timeout_del(&sc->timer_handle);
852 
853 	/* Tell the stack that the interface is no longer active */
854 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
855 
856 	ixgb_free_transmit_structures(sc);
857 	ixgb_free_receive_structures(sc);
858 }
859 
860 
861 /*********************************************************************
862  *
863  *  Determine hardware revision.
864  *
865  **********************************************************************/
866 void
867 ixgb_identify_hardware(struct ixgb_softc *sc)
868 {
869 	u_int32_t	reg;
870 	struct pci_attach_args *pa = &sc->osdep.ixgb_pa;
871 
872 	/* Make sure our PCI config space has the necessary stuff set */
873 	sc->hw.pci_cmd_word = pci_conf_read(pa->pa_pc, pa->pa_tag,
874 					    PCI_COMMAND_STATUS_REG);
875 
876 	/* Save off the information about this board */
877 	sc->hw.vendor_id = PCI_VENDOR(pa->pa_id);
878 	sc->hw.device_id = PCI_PRODUCT(pa->pa_id);
879 
880 	reg = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG);
881 	sc->hw.revision_id = PCI_REVISION(reg);
882 
883 	reg = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
884 	sc->hw.subsystem_vendor_id = PCI_VENDOR(reg);
885 	sc->hw.subsystem_id = PCI_PRODUCT(reg);
886 
887 	/* Set MacType, etc. based on this PCI info */
888 	switch (sc->hw.device_id) {
889 	case IXGB_DEVICE_ID_82597EX:
890 	case IXGB_DEVICE_ID_82597EX_SR:
891 	case IXGB_DEVICE_ID_82597EX_LR:
892 	case IXGB_DEVICE_ID_82597EX_CX4:
893 		sc->hw.mac_type = ixgb_82597;
894 		break;
895 	default:
896 		INIT_DEBUGOUT1("Unknown device id 0x%x", sc->hw.device_id);
897 		printf("%s: unsupported device id 0x%x\n",
898 		    sc->sc_dv.dv_xname, sc->hw.device_id);
899 	}
900 }
901 
902 int
903 ixgb_allocate_pci_resources(struct ixgb_softc *sc)
904 
905 {
906 	int val;
907 	pci_intr_handle_t	ih;
908 	const char		*intrstr = NULL;
909 	struct pci_attach_args *pa =  &sc->osdep.ixgb_pa;
910 	pci_chipset_tag_t	pc = pa->pa_pc;
911 
912 	val = pci_conf_read(pa->pa_pc, pa->pa_tag, IXGB_MMBA);
913 	if (PCI_MAPREG_TYPE(val) != PCI_MAPREG_TYPE_MEM) {
914 		printf(": mmba is not mem space\n");
915 		return (ENXIO);
916 	}
917 	if (pci_mapreg_map(pa, IXGB_MMBA, PCI_MAPREG_MEM_TYPE(val), 0,
918 	    &sc->osdep.mem_bus_space_tag, &sc->osdep.mem_bus_space_handle,
919 	    &sc->osdep.ixgb_membase, &sc->osdep.ixgb_memsize, 0)) {
920 		printf(": cannot find mem space\n");
921 		return (ENXIO);
922 	}
923 
924 	if (pci_intr_map(pa, &ih)) {
925 		printf(": couldn't map interrupt\n");
926 		return (ENXIO);
927 	}
928 
929 	sc->hw.back = &sc->osdep;
930 
931 	intrstr = pci_intr_string(pc, ih);
932 	sc->sc_intrhand = pci_intr_establish(pc, ih, IPL_NET, ixgb_intr, sc,
933 					    sc->sc_dv.dv_xname);
934 	if (sc->sc_intrhand == NULL) {
935 		printf(": couldn't establish interrupt");
936 		if (intrstr != NULL)
937 			printf(" at %s", intrstr);
938 		printf("\n");
939 		return (ENXIO);
940 	}
941 	printf(": %s", intrstr);
942 
943 	return (0);
944 }
945 
946 void
947 ixgb_free_pci_resources(struct ixgb_softc *sc)
948 {
949 	struct pci_attach_args *pa = &sc->osdep.ixgb_pa;
950 	pci_chipset_tag_t	pc = pa->pa_pc;
951 
952 	if (sc->sc_intrhand)
953 		pci_intr_disestablish(pc, sc->sc_intrhand);
954 	sc->sc_intrhand = 0;
955 
956 	if (sc->osdep.ixgb_membase)
957 		bus_space_unmap(sc->osdep.mem_bus_space_tag, sc->osdep.mem_bus_space_handle,
958 				sc->osdep.ixgb_memsize);
959 	sc->osdep.ixgb_membase = 0;
960 }
961 
962 /*********************************************************************
963  *
964  *  Initialize the hardware to a configuration as specified by the
965  *  adapter structure. The controller is reset, the EEPROM is
966  *  verified, the MAC address is set, then the shared initialization
967  *  routines are called.
968  *
969  **********************************************************************/
970 int
971 ixgb_hardware_init(struct ixgb_softc *sc)
972 {
973 	/* Issue a global reset */
974 	sc->hw.adapter_stopped = FALSE;
975 	ixgb_adapter_stop(&sc->hw);
976 
977 	/* Make sure we have a good EEPROM before we read from it */
978 	if (!ixgb_validate_eeprom_checksum(&sc->hw)) {
979 		printf("%s: The EEPROM Checksum Is Not Valid\n",
980 		       sc->sc_dv.dv_xname);
981 		return (EIO);
982 	}
983 	if (!ixgb_init_hw(&sc->hw)) {
984 		printf("%s: Hardware Initialization Failed",
985 		       sc->sc_dv.dv_xname);
986 		return (EIO);
987 	}
988 	bcopy(sc->hw.curr_mac_addr, sc->interface_data.ac_enaddr,
989 	      IXGB_ETH_LENGTH_OF_ADDRESS);
990 
991 	return (0);
992 }
993 
994 /*********************************************************************
995  *
996  *  Setup networking device structure and register an interface.
997  *
998  **********************************************************************/
999 void
1000 ixgb_setup_interface(struct ixgb_softc *sc)
1001 {
1002 	struct ifnet   *ifp;
1003 	INIT_DEBUGOUT("ixgb_setup_interface: begin");
1004 
1005 	ifp = &sc->interface_data.ac_if;
1006 	strlcpy(ifp->if_xname, sc->sc_dv.dv_xname, IFNAMSIZ);
1007 
1008 	ifp->if_softc = sc;
1009 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1010 	ifp->if_ioctl = ixgb_ioctl;
1011 	ifp->if_start = ixgb_start;
1012 	ifp->if_watchdog = ixgb_watchdog;
1013 	ifp->if_hardmtu =
1014 		IXGB_MAX_JUMBO_FRAME_SIZE - ETHER_HDR_LEN - ETHER_CRC_LEN;
1015 	IFQ_SET_MAXLEN(&ifp->if_snd, sc->num_tx_desc - 1);
1016 	IFQ_SET_READY(&ifp->if_snd);
1017 
1018 	ifp->if_capabilities = IFCAP_VLAN_MTU;
1019 
1020 #if NVLAN > 0
1021 	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
1022 #endif
1023 
1024 #ifdef IXGB_CSUM_OFFLOAD
1025 	ifp->if_capabilities |= IFCAP_CSUM_TCPv4|IFCAP_CSUM_UDPv4;
1026 #endif
1027 
1028 	/*
1029 	 * Specify the media types supported by this adapter and register
1030 	 * callbacks to update media and link information
1031 	 */
1032 	ifmedia_init(&sc->media, IFM_IMASK, ixgb_media_change,
1033 		     ixgb_media_status);
1034 	if ((sc->hw.phy_type == ixgb_phy_type_g6104) ||
1035 	    (sc->hw.phy_type == ixgb_phy_type_txn17401)) {
1036 		ifmedia_add(&sc->media, IFM_ETHER | IFM_10G_LR |
1037 		    IFM_FDX, 0, NULL);
1038 	} else {
1039 		ifmedia_add(&sc->media, IFM_ETHER | IFM_10G_SR |
1040 		    IFM_FDX, 0, NULL);
1041 	}
1042 	ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1043 	ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);
1044 
1045 	if_attach(ifp);
1046 	ether_ifattach(ifp);
1047 }
1048 
1049 /********************************************************************
1050  * Manage DMA'able memory.
1051  *******************************************************************/
1052 int
1053 ixgb_dma_malloc(struct ixgb_softc *sc, bus_size_t size,
1054 		struct ixgb_dma_alloc * dma, int mapflags)
1055 {
1056 	int r;
1057 
1058 	dma->dma_tag = sc->osdep.ixgb_pa.pa_dmat;
1059 	r = bus_dmamap_create(dma->dma_tag, size, 1,
1060 	    size, 0, BUS_DMA_NOWAIT, &dma->dma_map);
1061 	if (r != 0) {
1062 		printf("%s: ixgb_dma_malloc: bus_dmamap_create failed; "
1063 			"error %u\n", sc->sc_dv.dv_xname, r);
1064 		goto fail_0;
1065 	}
1066 
1067 	r = bus_dmamem_alloc(dma->dma_tag, size, PAGE_SIZE, 0, &dma->dma_seg,
1068 	    1, &dma->dma_nseg, BUS_DMA_NOWAIT);
1069 	if (r != 0) {
1070 		printf("%s: ixgb_dma_malloc: bus_dmamem_alloc failed; "
1071 			"size %lu, error %d\n", sc->sc_dv.dv_xname,
1072 			(unsigned long)size, r);
1073 		goto fail_1;
1074 	}
1075 
1076 	r = bus_dmamem_map(dma->dma_tag, &dma->dma_seg, dma->dma_nseg, size,
1077 	    &dma->dma_vaddr, BUS_DMA_NOWAIT);
1078 	if (r != 0) {
1079 		printf("%s: ixgb_dma_malloc: bus_dmamem_map failed; "
1080 			"size %lu, error %d\n", sc->sc_dv.dv_xname,
1081 			(unsigned long)size, r);
1082 		goto fail_2;
1083 	}
1084 
1085 	r = bus_dmamap_load(sc->osdep.ixgb_pa.pa_dmat, dma->dma_map,
1086 			    dma->dma_vaddr, size, NULL,
1087 			    mapflags | BUS_DMA_NOWAIT);
1088 	if (r != 0) {
1089 		printf("%s: ixgb_dma_malloc: bus_dmamap_load failed; "
1090 			"error %u\n", sc->sc_dv.dv_xname, r);
1091 		goto fail_3;
1092 	}
1093 
1094 	dma->dma_size = size;
1095 	return (0);
1096 
1097 fail_3:
1098 	bus_dmamem_unmap(dma->dma_tag, dma->dma_vaddr, size);
1099 fail_2:
1100 	bus_dmamem_free(dma->dma_tag, &dma->dma_seg, dma->dma_nseg);
1101 fail_1:
1102 	bus_dmamap_destroy(dma->dma_tag, dma->dma_map);
1103 fail_0:
1104 	dma->dma_map = NULL;
1105 	dma->dma_tag = NULL;
1106 
1107 	return (r);
1108 }
1109 
1110 void
1111 ixgb_dma_free(struct ixgb_softc *sc, struct ixgb_dma_alloc *dma)
1112 {
1113 	if (dma->dma_tag == NULL)
1114 		return;
1115 
1116 	if (dma->dma_map != NULL) {
1117 		bus_dmamap_sync(dma->dma_tag, dma->dma_map, 0,
1118 		    dma->dma_map->dm_mapsize,
1119 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1120 		bus_dmamap_unload(dma->dma_tag, dma->dma_map);
1121 		bus_dmamem_unmap(dma->dma_tag, dma->dma_vaddr, dma->dma_size);
1122 		bus_dmamem_free(dma->dma_tag, &dma->dma_seg, dma->dma_nseg);
1123 		bus_dmamap_destroy(dma->dma_tag, dma->dma_map);
1124 	}
1125 }
1126 
1127 /*********************************************************************
1128  *
1129  *  Allocate memory for tx_buffer structures. The tx_buffer stores all
1130  *  the information needed to transmit a packet on the wire.
1131  *
1132  **********************************************************************/
1133 int
1134 ixgb_allocate_transmit_structures(struct ixgb_softc *sc)
1135 {
1136 	if (!(sc->tx_buffer_area = mallocarray(sc->num_tx_desc,
1137 	    sizeof(struct ixgb_buffer), M_DEVBUF, M_NOWAIT | M_ZERO))) {
1138 		printf("%s: Unable to allocate tx_buffer memory\n",
1139 		       sc->sc_dv.dv_xname);
1140 		return (ENOMEM);
1141 	}
1142 
1143 	return (0);
1144 }
1145 
1146 /*********************************************************************
1147  *
1148  *  Allocate and initialize transmit structures.
1149  *
1150  **********************************************************************/
1151 int
1152 ixgb_setup_transmit_structures(struct ixgb_softc *sc)
1153 {
1154 	struct	ixgb_buffer *tx_buffer;
1155 	int error, i;
1156 
1157 	if ((error = ixgb_allocate_transmit_structures(sc)) != 0)
1158 		goto fail;
1159 
1160 	bzero((void *)sc->tx_desc_base,
1161 	      (sizeof(struct ixgb_tx_desc)) * sc->num_tx_desc);
1162 
1163 	sc->txtag = sc->osdep.ixgb_pa.pa_dmat;
1164 
1165 	tx_buffer = sc->tx_buffer_area;
1166 	for (i = 0; i < sc->num_tx_desc; i++) {
1167 		error = bus_dmamap_create(sc->txtag, IXGB_MAX_JUMBO_FRAME_SIZE,
1168 			    IXGB_MAX_SCATTER, IXGB_MAX_JUMBO_FRAME_SIZE, 0,
1169 			    BUS_DMA_NOWAIT, &tx_buffer->map);
1170 		if (error != 0) {
1171 			printf("%s: Unable to create TX DMA map\n",
1172 			    sc->sc_dv.dv_xname);
1173 			goto fail;
1174 		}
1175 		tx_buffer++;
1176 	}
1177 
1178 	sc->next_avail_tx_desc = 0;
1179 	sc->oldest_used_tx_desc = 0;
1180 
1181 	/* Set number of descriptors available */
1182 	sc->num_tx_desc_avail = sc->num_tx_desc;
1183 
1184 	/* Set checksum context */
1185 	sc->active_checksum_context = OFFLOAD_NONE;
1186 	bus_dmamap_sync(sc->txdma.dma_tag, sc->txdma.dma_map, 0,
1187 	   sc->txdma.dma_size, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1188 
1189 	return (0);
1190 
1191 fail:
1192 	ixgb_free_transmit_structures(sc);
1193 	return (error);
1194 }
1195 
1196 /*********************************************************************
1197  *
1198  *  Enable transmit unit.
1199  *
1200  **********************************************************************/
1201 void
1202 ixgb_initialize_transmit_unit(struct ixgb_softc *sc)
1203 {
1204 	u_int32_t       reg_tctl;
1205 	u_int64_t       bus_addr;
1206 
1207 	/* Setup the Base and Length of the Tx Descriptor Ring */
1208 	bus_addr = sc->txdma.dma_map->dm_segs[0].ds_addr;
1209 	IXGB_WRITE_REG(&sc->hw, TDBAL, (u_int32_t)bus_addr);
1210 	IXGB_WRITE_REG(&sc->hw, TDBAH, (u_int32_t)(bus_addr >> 32));
1211 	IXGB_WRITE_REG(&sc->hw, TDLEN,
1212 		       sc->num_tx_desc *
1213 		       sizeof(struct ixgb_tx_desc));
1214 
1215 	/* Setup the HW Tx Head and Tail descriptor pointers */
1216 	IXGB_WRITE_REG(&sc->hw, TDH, 0);
1217 	IXGB_WRITE_REG(&sc->hw, TDT, 0);
1218 
1219 	HW_DEBUGOUT2("Base = %x, Length = %x\n",
1220 		     IXGB_READ_REG(&sc->hw, TDBAL),
1221 		     IXGB_READ_REG(&sc->hw, TDLEN));
1222 
1223 	IXGB_WRITE_REG(&sc->hw, TIDV, sc->tx_int_delay);
1224 
1225 	/* Program the Transmit Control Register */
1226 	reg_tctl = IXGB_READ_REG(&sc->hw, TCTL);
1227 	reg_tctl = IXGB_TCTL_TCE | IXGB_TCTL_TXEN | IXGB_TCTL_TPDE;
1228 	IXGB_WRITE_REG(&sc->hw, TCTL, reg_tctl);
1229 
1230 	/* Setup Transmit Descriptor Settings for this adapter */
1231 	sc->txd_cmd = IXGB_TX_DESC_TYPE | IXGB_TX_DESC_CMD_RS;
1232 
1233 	if (sc->tx_int_delay > 0)
1234 		sc->txd_cmd |= IXGB_TX_DESC_CMD_IDE;
1235 }
1236 
1237 /*********************************************************************
1238  *
1239  *  Free all transmit related data structures.
1240  *
1241  **********************************************************************/
1242 void
1243 ixgb_free_transmit_structures(struct ixgb_softc *sc)
1244 {
1245 	struct ixgb_buffer *tx_buffer;
1246 	int             i;
1247 
1248 	INIT_DEBUGOUT("free_transmit_structures: begin");
1249 
1250 	if (sc->tx_buffer_area != NULL) {
1251 		tx_buffer = sc->tx_buffer_area;
1252 		for (i = 0; i < sc->num_tx_desc; i++, tx_buffer++) {
1253 			if (tx_buffer->map != NULL &&
1254 			    tx_buffer->map->dm_nsegs > 0) {
1255 				bus_dmamap_sync(sc->txtag, tx_buffer->map,
1256 				    0, tx_buffer->map->dm_mapsize,
1257 				    BUS_DMASYNC_POSTWRITE);
1258 				bus_dmamap_unload(sc->txtag,
1259 				    tx_buffer->map);
1260 			}
1261 
1262 			if (tx_buffer->m_head != NULL) {
1263 				m_freem(tx_buffer->m_head);
1264 				tx_buffer->m_head = NULL;
1265 			}
1266 			if (tx_buffer->map != NULL) {
1267 				bus_dmamap_destroy(sc->txtag,
1268 				    tx_buffer->map);
1269 				tx_buffer->map = NULL;
1270 			}
1271 		}
1272 	}
1273 	if (sc->tx_buffer_area != NULL) {
1274 		free(sc->tx_buffer_area, M_DEVBUF, 0);
1275 		sc->tx_buffer_area = NULL;
1276 	}
1277 	if (sc->txtag != NULL) {
1278 		sc->txtag = NULL;
1279 	}
1280 }
1281 
1282 /*********************************************************************
1283  *
1284  *  The offload context needs to be set when we transfer the first
1285  *  packet of a particular protocol (TCP/UDP). We change the
1286  *  context only if the protocol type changes.
1287  *
1288  **********************************************************************/
1289 void
1290 ixgb_transmit_checksum_setup(struct ixgb_softc *sc,
1291 			     struct mbuf *mp,
1292 			     u_int8_t *txd_popts)
1293 {
1294 	struct ixgb_context_desc *TXD;
1295 	struct ixgb_buffer *tx_buffer;
1296 	int             curr_txd;
1297 
1298 	if (mp->m_pkthdr.csum_flags) {
1299 
1300 		if (mp->m_pkthdr.csum_flags & M_TCP_CSUM_OUT) {
1301 			*txd_popts = IXGB_TX_DESC_POPTS_TXSM;
1302 			if (sc->active_checksum_context == OFFLOAD_TCP_IP)
1303 				return;
1304 			else
1305 				sc->active_checksum_context = OFFLOAD_TCP_IP;
1306 
1307 		} else if (mp->m_pkthdr.csum_flags & M_UDP_CSUM_OUT) {
1308 			*txd_popts = IXGB_TX_DESC_POPTS_TXSM;
1309 			if (sc->active_checksum_context == OFFLOAD_UDP_IP)
1310 				return;
1311 			else
1312 				sc->active_checksum_context = OFFLOAD_UDP_IP;
1313 		} else {
1314 			*txd_popts = 0;
1315 			return;
1316 		}
1317 	} else {
1318 		*txd_popts = 0;
1319 		return;
1320 	}
1321 
1322 	/*
1323 	 * If we reach this point, the checksum offload context needs to be
1324 	 * reset.
1325 	 */
1326 	curr_txd = sc->next_avail_tx_desc;
1327 	tx_buffer = &sc->tx_buffer_area[curr_txd];
1328 	TXD = (struct ixgb_context_desc *) & sc->tx_desc_base[curr_txd];
1329 
1330 	TXD->tucss = ENET_HEADER_SIZE + sizeof(struct ip);
1331 	TXD->tucse = 0;
1332 
1333 	TXD->mss = 0;
1334 
1335 	if (sc->active_checksum_context == OFFLOAD_TCP_IP) {
1336 		TXD->tucso =
1337 			ENET_HEADER_SIZE + sizeof(struct ip) +
1338 			offsetof(struct tcphdr, th_sum);
1339 	} else if (sc->active_checksum_context == OFFLOAD_UDP_IP) {
1340 		TXD->tucso =
1341 			ENET_HEADER_SIZE + sizeof(struct ip) +
1342 			offsetof(struct udphdr, uh_sum);
1343 	}
1344 	TXD->cmd_type_len = htole32(IXGB_CONTEXT_DESC_CMD_TCP |
1345 	    IXGB_TX_DESC_CMD_RS | IXGB_CONTEXT_DESC_CMD_IDE);
1346 
1347 	tx_buffer->m_head = NULL;
1348 
1349 	if (++curr_txd == sc->num_tx_desc)
1350 		curr_txd = 0;
1351 
1352 	sc->num_tx_desc_avail--;
1353 	sc->next_avail_tx_desc = curr_txd;
1354 }
1355 
1356 /**********************************************************************
1357  *
1358  *  Examine each tx_buffer in the used queue. If the hardware is done
1359  *  processing the packet, then free the associated resources. The
1360  *  tx_buffer is put back on the free queue.
1361  *
1362  **********************************************************************/
1363 void
1364 ixgb_txeof(struct ixgb_softc *sc)
1365 {
1366 	int             i, num_avail;
1367 	struct ixgb_buffer *tx_buffer;
1368 	struct ixgb_tx_desc *tx_desc;
1369 	struct ifnet	*ifp = &sc->interface_data.ac_if;
1370 
1371 	if (sc->num_tx_desc_avail == sc->num_tx_desc)
1372 		return;
1373 
1374 	num_avail = sc->num_tx_desc_avail;
1375 	i = sc->oldest_used_tx_desc;
1376 
1377 	tx_buffer = &sc->tx_buffer_area[i];
1378 	tx_desc = &sc->tx_desc_base[i];
1379 
1380 	bus_dmamap_sync(sc->txdma.dma_tag, sc->txdma.dma_map, 0,
1381 	    sc->txdma.dma_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
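	/*
	 * The hardware sets the DD (descriptor done) bit once it has
	 * finished with a descriptor; walk forward from the oldest used
	 * descriptor and reclaim everything that is done.
	 */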
1382 	while (tx_desc->status & IXGB_TX_DESC_STATUS_DD) {
1383 
1384 		tx_desc->status = 0;
1385 		num_avail++;
1386 
1387 		if (tx_buffer->m_head != NULL) {
1388 			ifp->if_opackets++;
1389 
1390 			if (tx_buffer->map->dm_nsegs > 0) {
1391 				bus_dmamap_sync(sc->txtag, tx_buffer->map,
1392 				    0, tx_buffer->map->dm_mapsize,
1393 				    BUS_DMASYNC_POSTWRITE);
1394 				bus_dmamap_unload(sc->txtag, tx_buffer->map);
1395 			}
1396 
1397 			m_freem(tx_buffer->m_head);
1398 			tx_buffer->m_head = NULL;
1399 		}
1400 		if (++i == sc->num_tx_desc)
1401 			i = 0;
1402 
1403 		tx_buffer = &sc->tx_buffer_area[i];
1404 		tx_desc = &sc->tx_desc_base[i];
1405 	}
1406 	bus_dmamap_sync(sc->txdma.dma_tag, sc->txdma.dma_map, 0,
1407 	    sc->txdma.dma_map->dm_mapsize,
1408 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1409 
1410 	sc->oldest_used_tx_desc = i;
1411 
1412 	/*
1413 	 * If we have enough room, clear IFF_OACTIVE to tell the stack that
1414 	 * it is OK to send packets. If there are no pending descriptors,
1415 	 * clear the timeout. Otherwise, if some descriptors have been freed,
1416 	 * restart the timeout.
1417 	 */
1418 	if (num_avail > IXGB_TX_CLEANUP_THRESHOLD)
1419 		ifp->if_flags &= ~IFF_OACTIVE;
1420 
1421 	/* All clean, turn off the timer */
1422 	if (num_avail == sc->num_tx_desc)
1423 		ifp->if_timer = 0;
1424 	/* Some cleaned, reset the timer */
1425 	else if (num_avail != sc->num_tx_desc_avail)
1426 		ifp->if_timer = IXGB_TX_TIMEOUT;
1427 
1428 	sc->num_tx_desc_avail = num_avail;
1429 }
1430 
1431 
1432 /*********************************************************************
1433  *
1434  *  Get a buffer from system mbuf buffer pool.
1435  *
1436  **********************************************************************/
1437 int
1438 ixgb_get_buf(struct ixgb_softc *sc, int i,
1439 	     struct mbuf *nmp)
1440 {
1441 	struct mbuf *mp = nmp;
1442 	struct ixgb_buffer *rx_buffer;
1443 	int             error;
1444 
1445 	if (mp == NULL) {
1446 		MGETHDR(mp, M_DONTWAIT, MT_DATA);
1447 		if (mp == NULL) {
1448 			sc->mbuf_alloc_failed++;
1449 			return (ENOBUFS);
1450 		}
1451 		MCLGET(mp, M_DONTWAIT);
1452 		if ((mp->m_flags & M_EXT) == 0) {
1453 			m_freem(mp);
1454 			sc->mbuf_cluster_failed++;
1455 			return (ENOBUFS);
1456 		}
1457 		mp->m_len = mp->m_pkthdr.len = MCLBYTES;
1458 	} else {
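		/*
		 * Recycle the caller-supplied cluster mbuf: restore the full
		 * cluster length and point m_data back at the start of the
		 * external buffer.
		 */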
1459 		mp->m_len = mp->m_pkthdr.len = MCLBYTES;
1460 		mp->m_data = mp->m_ext.ext_buf;
1461 		mp->m_next = NULL;
1462 	}
1463 
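	/*
	 * When the frame fits in the cluster with room to spare, shift the
	 * payload by ETHER_ALIGN (2 bytes) so the IP header following the
	 * 14-byte Ethernet header lands on a 32-bit boundary.
	 */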
1464 	if (sc->hw.max_frame_size <= (MCLBYTES - ETHER_ALIGN))
1465 		m_adj(mp, ETHER_ALIGN);
1466 
1467 	rx_buffer = &sc->rx_buffer_area[i];
1468 
1469 	/*
1470 	 * Using memory from the mbuf cluster pool, invoke the bus_dma
1471 	 * machinery to arrange the memory mapping.
1472 	 */
1473 	error = bus_dmamap_load_mbuf(sc->rxtag, rx_buffer->map,
1474 	    mp, BUS_DMA_NOWAIT);
1475 	if (error) {
1476 		m_freem(mp);
1477 		return (error);
1478 	}
1479 	rx_buffer->m_head = mp;
1480 	bzero(&sc->rx_desc_base[i], sizeof(sc->rx_desc_base[i]));
1481 	sc->rx_desc_base[i].buff_addr = htole64(rx_buffer->map->dm_segs[0].ds_addr);
1482 	bus_dmamap_sync(sc->rxtag, rx_buffer->map, 0,
1483 	    rx_buffer->map->dm_mapsize, BUS_DMASYNC_PREREAD);
1484 
1485 	return (0);
1486 }
1487 
1488 /*********************************************************************
1489  *
1490  *  Allocate memory for rx_buffer structures. Since we use one
1491  *  rx_buffer per received packet, the maximum number of rx_buffer's
1492  *  that we'll need is equal to the number of receive descriptors
1493  *  that we've allocated.
1494  *
1495  **********************************************************************/
1496 int
1497 ixgb_allocate_receive_structures(struct ixgb_softc *sc)
1498 {
1499 	int             i, error;
1500 	struct ixgb_buffer *rx_buffer;
1501 
1502 	if (!(sc->rx_buffer_area = mallocarray(sc->num_rx_desc,
1503 	    sizeof(struct ixgb_buffer), M_DEVBUF, M_NOWAIT | M_ZERO))) {
1504 		printf("%s: Unable to allocate rx_buffer memory\n",
1505 		       sc->sc_dv.dv_xname);
1506 		return (ENOMEM);
1507 	}
1508 
1509 	sc->rxtag = sc->osdep.ixgb_pa.pa_dmat;
1510 
1511 	rx_buffer = sc->rx_buffer_area;
1512 	for (i = 0; i < sc->num_rx_desc; i++, rx_buffer++) {
1513 		error = bus_dmamap_create(sc->rxtag, MCLBYTES, 1,
1514 					  MCLBYTES, 0, BUS_DMA_NOWAIT,
1515 					  &rx_buffer->map);
1516 		if (error != 0) {
1517 			printf("%s: ixgb_allocate_receive_structures: "
1518 			       "bus_dmamap_create failed; error %u\n",
1519 			       sc->sc_dv.dv_xname, error);
1520 			goto fail;
1521 		}
1522 	}
1523 
1524 	for (i = 0; i < sc->num_rx_desc; i++) {
1525 		error = ixgb_get_buf(sc, i, NULL);
1526 		if (error != 0)
1527 			goto fail;
1528 	}
1529 	bus_dmamap_sync(sc->rxdma.dma_tag, sc->rxdma.dma_map, 0,
1530 	    sc->rxdma.dma_map->dm_mapsize,
1531 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1532 
1533 	return (0);
1534 
1535 fail:
1536 	ixgb_free_receive_structures(sc);
1537 	return (error);
1538 }
1539 
1540 /*********************************************************************
1541  *
1542  *  Allocate and initialize receive structures.
1543  *
1544  **********************************************************************/
1545 int
1546 ixgb_setup_receive_structures(struct ixgb_softc *sc)
1547 {
1548 	bzero((void *)sc->rx_desc_base,
1549 	      (sizeof(struct ixgb_rx_desc)) * sc->num_rx_desc);
1550 
1551 	if (ixgb_allocate_receive_structures(sc))
1552 		return (ENOMEM);
1553 
1554 	/* Setup our descriptor pointers */
1555 	sc->next_rx_desc_to_check = 0;
1556 	sc->next_rx_desc_to_use = 0;
1557 	return (0);
1558 }
1559 
1560 /*********************************************************************
1561  *
1562  *  Enable receive unit.
1563  *
1564  **********************************************************************/
1565 void
1566 ixgb_initialize_receive_unit(struct ixgb_softc *sc)
1567 {
1568 	u_int32_t       reg_rctl;
1569 	u_int32_t       reg_rxcsum;
1570 	u_int32_t       reg_rxdctl;
1571 	u_int64_t       bus_addr;
1572 
1573 	/*
1574 	 * Make sure receives are disabled while setting up the descriptor
1575 	 * ring
1576 	 */
1577 	reg_rctl = IXGB_READ_REG(&sc->hw, RCTL);
1578 	IXGB_WRITE_REG(&sc->hw, RCTL, reg_rctl & ~IXGB_RCTL_RXEN);
1579 
1580 	/* Set the Receive Delay Timer Register */
1581 	IXGB_WRITE_REG(&sc->hw, RDTR,
1582 		       sc->rx_int_delay);
1583 
1584 	/* Setup the Base and Length of the Rx Descriptor Ring */
1585 	bus_addr = sc->rxdma.dma_map->dm_segs[0].ds_addr;
1586 	IXGB_WRITE_REG(&sc->hw, RDBAL, (u_int32_t)bus_addr);
1587 	IXGB_WRITE_REG(&sc->hw, RDBAH, (u_int32_t)(bus_addr >> 32));
1588 	IXGB_WRITE_REG(&sc->hw, RDLEN, sc->num_rx_desc *
1589 		       sizeof(struct ixgb_rx_desc));
1590 
1591 	/* Setup the HW Rx Head and Tail Descriptor Pointers */
1592 	IXGB_WRITE_REG(&sc->hw, RDH, 0);
1593 
1594 	IXGB_WRITE_REG(&sc->hw, RDT, sc->num_rx_desc - 1);
1595 
1596 	reg_rxdctl = RXDCTL_WTHRESH_DEFAULT << IXGB_RXDCTL_WTHRESH_SHIFT
1597 		| RXDCTL_HTHRESH_DEFAULT << IXGB_RXDCTL_HTHRESH_SHIFT
1598 		| RXDCTL_PTHRESH_DEFAULT << IXGB_RXDCTL_PTHRESH_SHIFT;
1599 	IXGB_WRITE_REG(&sc->hw, RXDCTL, reg_rxdctl);
1600 
1601 	sc->raidc = 1;
1602 	if (sc->raidc) {
1603 		uint32_t        raidc;
1604 		uint8_t         poll_threshold;
1605 #define IXGB_RAIDC_POLL_DEFAULT 120
1606 
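		/*
		 * Derive the RAIDC poll threshold from the ring size:
		 * roughly one sixteenth of the descriptors, clamped to the
		 * 6-bit field the register provides.
		 */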
1607 		poll_threshold = ((sc->num_rx_desc - 1) >> 3);
1608 		poll_threshold >>= 1;
1609 		poll_threshold &= 0x3F;
1610 		raidc = IXGB_RAIDC_EN | IXGB_RAIDC_RXT_GATE |
1611 			(IXGB_RAIDC_POLL_DEFAULT << IXGB_RAIDC_POLL_SHIFT) |
1612 			(sc->rx_int_delay << IXGB_RAIDC_DELAY_SHIFT) |
1613 			poll_threshold;
1614 		IXGB_WRITE_REG(&sc->hw, RAIDC, raidc);
1615 	}
1616 
1617 	/* Enable Receive Checksum Offload for TCP and UDP ? */
1618 	reg_rxcsum = IXGB_READ_REG(&sc->hw, RXCSUM);
1619 	reg_rxcsum |= IXGB_RXCSUM_TUOFL;
1620 	IXGB_WRITE_REG(&sc->hw, RXCSUM, reg_rxcsum);
1621 
1622 	/* Setup the Receive Control Register */
1623 	reg_rctl = IXGB_READ_REG(&sc->hw, RCTL);
1624 	reg_rctl &= ~(3 << IXGB_RCTL_MO_SHIFT);
1625 	reg_rctl |= IXGB_RCTL_BAM | IXGB_RCTL_RDMTS_1_2 | IXGB_RCTL_SECRC |
1626 		IXGB_RCTL_CFF |
1627 		(sc->hw.mc_filter_type << IXGB_RCTL_MO_SHIFT);
1628 
1629 	switch (sc->rx_buffer_len) {
1630 	default:
1631 	case IXGB_RXBUFFER_2048:
1632 		reg_rctl |= IXGB_RCTL_BSIZE_2048;
1633 		break;
1634 	case IXGB_RXBUFFER_4096:
1635 		reg_rctl |= IXGB_RCTL_BSIZE_4096;
1636 		break;
1637 	case IXGB_RXBUFFER_8192:
1638 		reg_rctl |= IXGB_RCTL_BSIZE_8192;
1639 		break;
1640 	case IXGB_RXBUFFER_16384:
1641 		reg_rctl |= IXGB_RCTL_BSIZE_16384;
1642 		break;
1643 	}
1644 
1645 	reg_rctl |= IXGB_RCTL_RXEN;
1646 
1647 	/* Enable Receives */
1648 	IXGB_WRITE_REG(&sc->hw, RCTL, reg_rctl);
1649 }
1650 
1651 /*********************************************************************
1652  *
1653  *  Free receive related data structures.
1654  *
1655  **********************************************************************/
1656 void
1657 ixgb_free_receive_structures(struct ixgb_softc *sc)
1658 {
1659 	struct ixgb_buffer *rx_buffer;
1660 	int             i;
1661 
1662 	INIT_DEBUGOUT("free_receive_structures: begin");
1663 
1664 	if (sc->rx_buffer_area != NULL) {
1665 		rx_buffer = sc->rx_buffer_area;
1666 		for (i = 0; i < sc->num_rx_desc; i++, rx_buffer++) {
1667 			if (rx_buffer->map != NULL &&
1668 			    rx_buffer->map->dm_nsegs > 0) {
1669 				bus_dmamap_sync(sc->rxtag, rx_buffer->map,
1670 				    0, rx_buffer->map->dm_mapsize,
1671 				    BUS_DMASYNC_POSTREAD);
1672 				bus_dmamap_unload(sc->rxtag,
1673 				    rx_buffer->map);
1674 			}
1675 			if (rx_buffer->m_head != NULL) {
1676 				m_freem(rx_buffer->m_head);
1677 				rx_buffer->m_head = NULL;
1678 			}
1679 			if (rx_buffer->map != NULL) {
1680 				bus_dmamap_destroy(sc->rxtag,
1681 				    rx_buffer->map);
1682 				rx_buffer->map = NULL;
1683 			}
1684 		}
1685 	}
1686 	if (sc->rx_buffer_area != NULL) {
1687 		free(sc->rx_buffer_area, M_DEVBUF, 0);
1688 		sc->rx_buffer_area = NULL;
1689 	}
1690 	if (sc->rxtag != NULL)
1691 		sc->rxtag = NULL;
1692 }
1693 
1694 /*********************************************************************
1695  *
1696  *  This routine executes in interrupt context. It replenishes
1697  *  the mbufs in the descriptor ring and passes data which has been
1698  *  DMA'ed into host memory up to the upper layer.
1699  *
1700  *  We loop at most count times if count is > 0, or until done if
1701  *  count < 0.
1702  *
1703  *********************************************************************/
1704 void
1705 ixgb_rxeof(struct ixgb_softc *sc, int count)
1706 {
1707 	struct ifnet   *ifp;
1708 	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
1709 	struct mbuf    *mp;
1710 	int             eop = 0;
1711 	int             len;
1712 	u_int8_t        accept_frame = 0;
1713 	int             i;
1714 	int             next_to_use = 0;
1715 	int             eop_desc;
1716 
1717 	/* Pointer to the receive descriptor being examined. */
1718 	struct ixgb_rx_desc *current_desc;
1719 
1720 	ifp = &sc->interface_data.ac_if;
1721 	i = sc->next_rx_desc_to_check;
1722 	next_to_use = sc->next_rx_desc_to_use;
1723 	eop_desc = sc->next_rx_desc_to_check;
1724 	current_desc = &sc->rx_desc_base[i];
1725 	bus_dmamap_sync(sc->rxdma.dma_tag, sc->rxdma.dma_map, 0,
1726 	    sc->rxdma.dma_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1727 
1728 	if (!((current_desc->status) & IXGB_RX_DESC_STATUS_DD))
1729 		return;
1730 
1731 	while ((current_desc->status & IXGB_RX_DESC_STATUS_DD) &&
1732 		    (count != 0) &&
1733 		    (ifp->if_flags & IFF_RUNNING)) {
1734 
1735 		mp = sc->rx_buffer_area[i].m_head;
1736 		bus_dmamap_sync(sc->rxtag, sc->rx_buffer_area[i].map,
1737 		    0, sc->rx_buffer_area[i].map->dm_mapsize,
1738 		    BUS_DMASYNC_POSTREAD);
1739 		bus_dmamap_unload(sc->rxtag, sc->rx_buffer_area[i].map);
1740 
1741 		accept_frame = 1;
1742 		if (current_desc->status & IXGB_RX_DESC_STATUS_EOP) {
1743 			count--;
1744 			eop = 1;
1745 		} else {
1746 			eop = 0;
1747 		}
1748 		len = letoh16(current_desc->length);
1749 
1750 		if (current_desc->errors & (IXGB_RX_DESC_ERRORS_CE |
1751 			    IXGB_RX_DESC_ERRORS_SE | IXGB_RX_DESC_ERRORS_P |
1752 					    IXGB_RX_DESC_ERRORS_RXE))
1753 			accept_frame = 0;
1754 		if (accept_frame) {
1755 
1756 			/* Assign correct length to the current fragment */
1757 			mp->m_len = len;
1758 
1759 			if (sc->fmp == NULL) {
1760 				mp->m_pkthdr.len = len;
1761 				sc->fmp = mp;	/* Store the first mbuf */
1762 				sc->lmp = mp;
1763 			} else {
1764 				/* Chain mbuf's together */
1765 				mp->m_flags &= ~M_PKTHDR;
1766 				sc->lmp->m_next = mp;
1767 				sc->lmp = sc->lmp->m_next;
1768 				sc->fmp->m_pkthdr.len += len;
1769 			}
1770 
1771 			if (eop) {
1772 				eop_desc = i;
1773 				ifp->if_ipackets++;
1774 				ixgb_receive_checksum(sc, current_desc, sc->fmp);
1775 
1776 #if NVLAN > 0
1777 				if (current_desc->status & IXGB_RX_DESC_STATUS_VP) {
1778 					sc->fmp->m_pkthdr.ether_vtag =
1779 					    letoh16(current_desc->special);
1780 					sc->fmp->m_flags |= M_VLANTAG;
1781 				}
1782 #endif
1783 
1784 
1785 				ml_enqueue(&ml, sc->fmp);
1786 				sc->fmp = NULL;
1787 				sc->lmp = NULL;
1788 			}
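			/*
			 * The mbuf now belongs to the fmp chain (and, on EOP,
			 * to the mbuf list handed to the stack), so clear the
			 * slot to prevent it from being freed or reused here.
			 */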
1789 			sc->rx_buffer_area[i].m_head = NULL;
1790 		} else {
1791 			sc->dropped_pkts++;
1792 			if (sc->fmp != NULL)
1793 				m_freem(sc->fmp);
1794 			sc->fmp = NULL;
1795 			sc->lmp = NULL;
1796 		}
1797 
1798 		/* Zero out the receive descriptor's status */
1799 		current_desc->status = 0;
1800 		bus_dmamap_sync(sc->rxdma.dma_tag, sc->rxdma.dma_map, 0,
1801 		    sc->rxdma.dma_map->dm_mapsize,
1802 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1803 
1804 		/* Advance our pointers to the next descriptor */
1805 		if (++i == sc->num_rx_desc) {
1806 			i = 0;
1807 			current_desc = sc->rx_desc_base;
1808 		} else
1809 			current_desc++;
1810 	}
1811 	sc->next_rx_desc_to_check = i;
1812 
1813 	if (--i < 0)
1814 		i = (sc->num_rx_desc - 1);
1815 
1816 	/*
1817 	 * 82597EX: Workaround for a redundant write back in the receive
1818 	 * descriptor ring (causes memory corruption). Avoid re-submitting
1819 	 * the most recently received RX descriptor back to hardware.
1820 	 *
1821 	 * if (last written back descriptor == EOP bit set descriptor)
1822 	 *	then avoid re-submitting the most recently received RX
1823 	 *	descriptor back to hardware.
1824 	 * if (last written back descriptor != EOP bit set descriptor)
1825 	 *	then avoid re-submitting the most recently received RX
1826 	 *	descriptors until the last EOP bit set descriptor.
1827 	 */
1828 	if (eop_desc != i) {
1829 		if (++eop_desc == sc->num_rx_desc)
1830 			eop_desc = 0;
1831 		i = eop_desc;
1832 	}
1833 	/* Replenish the descriptors with new mbufs till last EOP bit set descriptor */
1834 	while (next_to_use != i) {
1835 		current_desc = &sc->rx_desc_base[next_to_use];
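		/*
		 * Descriptors whose frames were dropped due to errors still
		 * hold their original mbuf, so reuse it; otherwise a fresh
		 * mbuf has to be allocated for the slot.
		 */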
1836 		if ((current_desc->errors & (IXGB_RX_DESC_ERRORS_CE |
1837 			    IXGB_RX_DESC_ERRORS_SE | IXGB_RX_DESC_ERRORS_P |
1838 					     IXGB_RX_DESC_ERRORS_RXE))) {
1839 			mp = sc->rx_buffer_area[next_to_use].m_head;
1840 			ixgb_get_buf(sc, next_to_use, mp);
1841 		} else {
1842 			if (ixgb_get_buf(sc, next_to_use, NULL) == ENOBUFS)
1843 				break;
1844 		}
1845 		/* Advance our pointers to the next descriptor */
1846 		if (++next_to_use == sc->num_rx_desc)
1847 			next_to_use = 0;
1848 	}
1849 	sc->next_rx_desc_to_use = next_to_use;
1850 	if (--next_to_use < 0)
1851 		next_to_use = (sc->num_rx_desc - 1);
1852 	/* Advance the IXGB's Receive Queue #0 "Tail Pointer" */
1853 	IXGB_WRITE_REG(&sc->hw, RDT, next_to_use);
1854 
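	/* Hand the list of completed packets to the network stack. */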
1855 	if_input(ifp, &ml);
1856 }
1857 
1858 /*********************************************************************
1859  *
1860  *  Verify that the hardware indicated that the checksum is valid.
1861  *  Inform the stack about the status of checksum so that stack
1862  *  Inform the stack of the checksum status so that the stack
1863  *  doesn't spend time verifying the checksum itself.
1864  *********************************************************************/
1865 void
1866 ixgb_receive_checksum(struct ixgb_softc *sc,
1867 		      struct ixgb_rx_desc *rx_desc,
1868 		      struct mbuf *mp)
1869 {
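	/*
	 * IXSM means the hardware did not compute checksums for this
	 * frame, so report nothing to the stack.
	 */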
1870 	if (rx_desc->status & IXGB_RX_DESC_STATUS_IXSM) {
1871 		mp->m_pkthdr.csum_flags = 0;
1872 		return;
1873 	}
1874 
1875 	if (rx_desc->status & IXGB_RX_DESC_STATUS_IPCS) {
1876 		/* Did it pass? */
1877 		if (!(rx_desc->errors & IXGB_RX_DESC_ERRORS_IPE)) {
1878 			/* IP Checksum Good */
1879 			mp->m_pkthdr.csum_flags = M_IPV4_CSUM_IN_OK;
1880 
1881 		} else {
1882 			mp->m_pkthdr.csum_flags = 0;
1883 		}
1884 	}
1885 	if (rx_desc->status & IXGB_RX_DESC_STATUS_TCPCS) {
1886 		/* Did it pass? */
1887 		if (!(rx_desc->errors & IXGB_RX_DESC_ERRORS_TCPE)) {
1888 			mp->m_pkthdr.csum_flags |=
1889 				M_TCP_CSUM_IN_OK | M_UDP_CSUM_IN_OK;
1890 		}
1891 	}
1892 }
1893 
1894 /*
1895  * This enables the hardware offload of VLAN
1896  * tag insertion and stripping.
1897  */
1898 void
1899 ixgb_enable_hw_vlans(struct ixgb_softc *sc)
1900 {
1901 	uint32_t ctrl;
1902 
1903 	ctrl = IXGB_READ_REG(&sc->hw, CTRL0);
1904 	ctrl |= IXGB_CTRL0_VME;
1905 	IXGB_WRITE_REG(&sc->hw, CTRL0, ctrl);
1906 }
1907 
1908 void
1909 ixgb_enable_intr(struct ixgb_softc *sc)
1910 {
1911 	uint32_t val;
1912 
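	/*
	 * Unmask receiver timer, transmit descriptor written back, receive
	 * descriptor minimum threshold, link status change and receiver
	 * overrun interrupts; SUN-branded boards additionally unmask
	 * general purpose interrupt 0.
	 */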
1913 	val = IXGB_INT_RXT0 | IXGB_INT_TXDW | IXGB_INT_RXDMT0 |
1914 	      IXGB_INT_LSC | IXGB_INT_RXO;
1915 	if (sc->hw.subsystem_vendor_id == SUN_SUBVENDOR_ID)
1916 		val |= IXGB_INT_GPI0;
1917 	IXGB_WRITE_REG(&sc->hw, IMS, val);
1918 }
1919 
1920 void
1921 ixgb_disable_intr(struct ixgb_softc *sc)
1922 {
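	/* Writing all ones to IMC (Interrupt Mask Clear) masks every cause. */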
1923 	IXGB_WRITE_REG(&sc->hw, IMC, ~0);
1924 }
1925 
1926 void
1927 ixgb_write_pci_cfg(struct ixgb_hw *hw,
1928 		   uint32_t reg,
1929 		   uint16_t *value)
1930 {
1931 	struct pci_attach_args *pa = &((struct ixgb_osdep *)hw->back)->ixgb_pa;
1932 	pci_chipset_tag_t pc = pa->pa_pc;
1933 	/* Should we do read/mask/write?  pci_conf_write() operates on 32-bit dwords, not 16 bits. */
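	/*
	 * A possible read/modify/write sketch (untested; assumes "reg" is
	 * the byte offset of the 16-bit field and that pci_conf_read()/
	 * pci_conf_write() operate on naturally aligned dwords):
	 *
	 *	uint32_t dword, shift;
	 *
	 *	shift = (reg & 0x2) ? 16 : 0;
	 *	dword = pci_conf_read(pc, pa->pa_tag, reg & ~0x3);
	 *	dword &= ~(0xffffU << shift);
	 *	dword |= (uint32_t)*value << shift;
	 *	pci_conf_write(pc, pa->pa_tag, reg & ~0x3, dword);
	 */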
1934 	pci_conf_write(pc, pa->pa_tag, reg, *value);
1935 }
1936 
1937 /**********************************************************************
1938  *
1939  *  Update the board statistics counters.
1940  *
1941  **********************************************************************/
1942 void
1943 ixgb_update_stats_counters(struct ixgb_softc *sc)
1944 {
1945 	struct ifnet   *ifp;
1946 
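	/*
	 * Accumulate the hardware statistics registers into the softc;
	 * the counters are clear-on-read (hence the "+=" accumulation),
	 * so each read returns the delta since the previous update.
	 */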
1947 	sc->stats.crcerrs += IXGB_READ_REG(&sc->hw, CRCERRS);
1948 	sc->stats.gprcl += IXGB_READ_REG(&sc->hw, GPRCL);
1949 	sc->stats.gprch += IXGB_READ_REG(&sc->hw, GPRCH);
1950 	sc->stats.gorcl += IXGB_READ_REG(&sc->hw, GORCL);
1951 	sc->stats.gorch += IXGB_READ_REG(&sc->hw, GORCH);
1952 	sc->stats.bprcl += IXGB_READ_REG(&sc->hw, BPRCL);
1953 	sc->stats.bprch += IXGB_READ_REG(&sc->hw, BPRCH);
1954 	sc->stats.mprcl += IXGB_READ_REG(&sc->hw, MPRCL);
1955 	sc->stats.mprch += IXGB_READ_REG(&sc->hw, MPRCH);
1956 	sc->stats.roc += IXGB_READ_REG(&sc->hw, ROC);
1957 
1958 	sc->stats.mpc += IXGB_READ_REG(&sc->hw, MPC);
1959 	sc->stats.dc += IXGB_READ_REG(&sc->hw, DC);
1960 	sc->stats.rlec += IXGB_READ_REG(&sc->hw, RLEC);
1961 	sc->stats.xonrxc += IXGB_READ_REG(&sc->hw, XONRXC);
1962 	sc->stats.xontxc += IXGB_READ_REG(&sc->hw, XONTXC);
1963 	sc->stats.xoffrxc += IXGB_READ_REG(&sc->hw, XOFFRXC);
1964 	sc->stats.xofftxc += IXGB_READ_REG(&sc->hw, XOFFTXC);
1965 	sc->stats.gptcl += IXGB_READ_REG(&sc->hw, GPTCL);
1966 	sc->stats.gptch += IXGB_READ_REG(&sc->hw, GPTCH);
1967 	sc->stats.gotcl += IXGB_READ_REG(&sc->hw, GOTCL);
1968 	sc->stats.gotch += IXGB_READ_REG(&sc->hw, GOTCH);
1969 	sc->stats.ruc += IXGB_READ_REG(&sc->hw, RUC);
1970 	sc->stats.rfc += IXGB_READ_REG(&sc->hw, RFC);
1971 	sc->stats.rjc += IXGB_READ_REG(&sc->hw, RJC);
1972 	sc->stats.torl += IXGB_READ_REG(&sc->hw, TORL);
1973 	sc->stats.torh += IXGB_READ_REG(&sc->hw, TORH);
1974 	sc->stats.totl += IXGB_READ_REG(&sc->hw, TOTL);
1975 	sc->stats.toth += IXGB_READ_REG(&sc->hw, TOTH);
1976 	sc->stats.tprl += IXGB_READ_REG(&sc->hw, TPRL);
1977 	sc->stats.tprh += IXGB_READ_REG(&sc->hw, TPRH);
1978 	sc->stats.tptl += IXGB_READ_REG(&sc->hw, TPTL);
1979 	sc->stats.tpth += IXGB_READ_REG(&sc->hw, TPTH);
1980 	sc->stats.plt64c += IXGB_READ_REG(&sc->hw, PLT64C);
1981 	sc->stats.mptcl += IXGB_READ_REG(&sc->hw, MPTCL);
1982 	sc->stats.mptch += IXGB_READ_REG(&sc->hw, MPTCH);
1983 	sc->stats.bptcl += IXGB_READ_REG(&sc->hw, BPTCL);
1984 	sc->stats.bptch += IXGB_READ_REG(&sc->hw, BPTCH);
1985 
1986 	sc->stats.uprcl += IXGB_READ_REG(&sc->hw, UPRCL);
1987 	sc->stats.uprch += IXGB_READ_REG(&sc->hw, UPRCH);
1988 	sc->stats.vprcl += IXGB_READ_REG(&sc->hw, VPRCL);
1989 	sc->stats.vprch += IXGB_READ_REG(&sc->hw, VPRCH);
1990 	sc->stats.jprcl += IXGB_READ_REG(&sc->hw, JPRCL);
1991 	sc->stats.jprch += IXGB_READ_REG(&sc->hw, JPRCH);
1992 	sc->stats.rnbc += IXGB_READ_REG(&sc->hw, RNBC);
1993 	sc->stats.icbc += IXGB_READ_REG(&sc->hw, ICBC);
1994 	sc->stats.ecbc += IXGB_READ_REG(&sc->hw, ECBC);
1995 	sc->stats.uptcl += IXGB_READ_REG(&sc->hw, UPTCL);
1996 	sc->stats.uptch += IXGB_READ_REG(&sc->hw, UPTCH);
1997 	sc->stats.vptcl += IXGB_READ_REG(&sc->hw, VPTCL);
1998 	sc->stats.vptch += IXGB_READ_REG(&sc->hw, VPTCH);
1999 	sc->stats.jptcl += IXGB_READ_REG(&sc->hw, JPTCL);
2000 	sc->stats.jptch += IXGB_READ_REG(&sc->hw, JPTCH);
2001 	sc->stats.tsctc += IXGB_READ_REG(&sc->hw, TSCTC);
2002 	sc->stats.tsctfc += IXGB_READ_REG(&sc->hw, TSCTFC);
2003 	sc->stats.ibic += IXGB_READ_REG(&sc->hw, IBIC);
2004 	sc->stats.lfc += IXGB_READ_REG(&sc->hw, LFC);
2005 	sc->stats.pfrc += IXGB_READ_REG(&sc->hw, PFRC);
2006 	sc->stats.pftc += IXGB_READ_REG(&sc->hw, PFTC);
2007 	sc->stats.mcfrc += IXGB_READ_REG(&sc->hw, MCFRC);
2008 
2009 	ifp = &sc->interface_data.ac_if;
2010 
2011 	/* Fill out the OS statistics structure */
2012 	ifp->if_collisions = 0;
2013 
2014 	/* Rx Errors */
2015 	ifp->if_ierrors =
2016 		sc->dropped_pkts +
2017 		sc->stats.crcerrs +
2018 		sc->stats.rnbc +
2019 		sc->stats.mpc +
2020 		sc->stats.rlec;
2021 
2022 	/* Tx Errors */
2023 	ifp->if_oerrors =
2024 		sc->watchdog_events;
2025 }
2026 
2027 #ifdef IXGB_DEBUG
2028 /**********************************************************************
2029  *
2030  *  This routine is called only when ixgb_display_debug_stats is enabled.
2031  *  It provides a way to examine important statistics
2032  *  maintained by the driver and hardware.
2033  *
2034  **********************************************************************/
2035 void
2036 ixgb_print_hw_stats(struct ixgb_softc *sc)
2037 {
2038 	char            buf_speed[100], buf_type[100];
2039 	ixgb_bus_speed  bus_speed;
2040 	ixgb_bus_type   bus_type;
2041 	const char * const unit = sc->sc_dv.dv_xname;
2042 
2043 	bus_speed = sc->hw.bus.speed;
2044 	bus_type = sc->hw.bus.type;
2045 	snprintf(buf_speed, sizeof(buf_speed),
2046 		bus_speed == ixgb_bus_speed_33 ? "33MHz" :
2047 		bus_speed == ixgb_bus_speed_66 ? "66MHz" :
2048 		bus_speed == ixgb_bus_speed_100 ? "100MHz" :
2049 		bus_speed == ixgb_bus_speed_133 ? "133MHz" :
2050 		"UNKNOWN");
2051 	printf("%s: PCI_Bus_Speed = %s\n", unit,
2052 		buf_speed);
2053 
2054 	snprintf(buf_type, sizeof(buf_type),
2055 		bus_type == ixgb_bus_type_pci ? "PCI" :
2056 		bus_type == ixgb_bus_type_pcix ? "PCI-X" :
2057 		"UNKNOWN");
2058 	printf("%s: PCI_Bus_Type = %s\n", unit,
2059 		buf_type);
2060 
2061 	printf("%s: Tx Descriptors not Avail1 = %ld\n", unit,
2062 		sc->no_tx_desc_avail1);
2063 	printf("%s: Tx Descriptors not Avail2 = %ld\n", unit,
2064 		sc->no_tx_desc_avail2);
2065 	printf("%s: Std Mbuf Failed = %ld\n", unit,
2066 		sc->mbuf_alloc_failed);
2067 	printf("%s: Std Cluster Failed = %ld\n", unit,
2068 		sc->mbuf_cluster_failed);
2069 
2070 	printf("%s: Defer count = %lld\n", unit,
2071 		(long long)sc->stats.dc);
2072 	printf("%s: Missed Packets = %lld\n", unit,
2073 		(long long)sc->stats.mpc);
2074 	printf("%s: Receive No Buffers = %lld\n", unit,
2075 		(long long)sc->stats.rnbc);
2076 	printf("%s: Receive length errors = %lld\n", unit,
2077 		(long long)sc->stats.rlec);
2078 	printf("%s: Crc errors = %lld\n", unit,
2079 		(long long)sc->stats.crcerrs);
2080 	printf("%s: Driver dropped packets = %ld\n", unit,
2081 		sc->dropped_pkts);
2082 
2083 	printf("%s: XON Rcvd = %lld\n", unit,
2084 		(long long)sc->stats.xonrxc);
2085 	printf("%s: XON Xmtd = %lld\n", unit,
2086 		(long long)sc->stats.xontxc);
2087 	printf("%s: XOFF Rcvd = %lld\n", unit,
2088 		(long long)sc->stats.xoffrxc);
2089 	printf("%s: XOFF Xmtd = %lld\n", unit,
2090 		(long long)sc->stats.xofftxc);
2091 
2092 	printf("%s: Good Packets Rcvd = %lld\n", unit,
2093 		(long long)sc->stats.gprcl);
2094 	printf("%s: Good Packets Xmtd = %lld\n", unit,
2095 		(long long)sc->stats.gptcl);
2096 
2097 	printf("%s: Jumbo frames recvd = %lld\n", unit,
2098 		(long long)sc->stats.jprcl);
2099 	printf("%s: Jumbo frames Xmtd = %lld\n", unit,
2100 		(long long)sc->stats.jptcl);
2101 }
2102 #endif
2103