xref: /openbsd/sys/dev/pci/if_ixgb.c (revision cf96265b)
1 /**************************************************************************
2 
3 Copyright (c) 2001-2005, Intel Corporation
4 All rights reserved.
5 
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
8 
9  1. Redistributions of source code must retain the above copyright notice,
10     this list of conditions and the following disclaimer.
11 
12  2. Redistributions in binary form must reproduce the above copyright
13     notice, this list of conditions and the following disclaimer in the
14     documentation and/or other materials provided with the distribution.
15 
16  3. Neither the name of the Intel Corporation nor the names of its
17     contributors may be used to endorse or promote products derived from
18     this software without specific prior written permission.
19 
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
31 
32 ***************************************************************************/
33 
34 /* $OpenBSD: if_ixgb.c,v 1.76 2023/11/10 15:51:20 bluhm Exp $ */
35 
36 #include <dev/pci/if_ixgb.h>
37 
38 #ifdef IXGB_DEBUG
39 /*********************************************************************
40  *  Set this to one to display debug statistics
41  *********************************************************************/
42 int             ixgb_display_debug_stats = 0;
43 #endif
44 
45 /*********************************************************************
46  *  Driver version
47  *********************************************************************/
48 
49 #define IXGB_DRIVER_VERSION	"6.1.0"
50 
51 /*********************************************************************
52  *  PCI Device ID Table
53  *********************************************************************/
54 
55 const struct pci_matchid ixgb_devices[] = {
56 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82597EX },
57 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82597EX_SR },
58 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82597EX_LR },
59 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82597EX_CX4 },
60 };
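/*
 * These IDs are matched against the PCI attach arguments in ixgb_probe()
 * via pci_matchbyid().
 */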
61 
62 /*********************************************************************
63  *  Function prototypes
64  *********************************************************************/
65 int  ixgb_probe(struct device *, void *, void *);
66 void ixgb_attach(struct device *, struct device *, void *);
67 int  ixgb_intr(void *);
68 void ixgb_start(struct ifnet *);
69 int  ixgb_ioctl(struct ifnet *, u_long, caddr_t);
70 void ixgb_watchdog(struct ifnet *);
71 void ixgb_init(void *);
72 void ixgb_stop(void *);
73 void ixgb_media_status(struct ifnet *, struct ifmediareq *);
74 int  ixgb_media_change(struct ifnet *);
75 void ixgb_identify_hardware(struct ixgb_softc *);
76 int  ixgb_allocate_pci_resources(struct ixgb_softc *);
77 void ixgb_free_pci_resources(struct ixgb_softc *);
78 void ixgb_local_timer(void *);
79 int  ixgb_hardware_init(struct ixgb_softc *);
80 void ixgb_setup_interface(struct ixgb_softc *);
81 int  ixgb_setup_transmit_structures(struct ixgb_softc *);
82 void ixgb_initialize_transmit_unit(struct ixgb_softc *);
83 int  ixgb_setup_receive_structures(struct ixgb_softc *);
84 void ixgb_initialize_receive_unit(struct ixgb_softc *);
85 void ixgb_enable_intr(struct ixgb_softc *);
86 void ixgb_disable_intr(struct ixgb_softc *);
87 void ixgb_free_transmit_structures(struct ixgb_softc *);
88 void ixgb_free_receive_structures(struct ixgb_softc *);
89 void ixgb_update_stats_counters(struct ixgb_softc *);
90 void ixgb_txeof(struct ixgb_softc *);
91 int  ixgb_allocate_receive_structures(struct ixgb_softc *);
92 int  ixgb_allocate_transmit_structures(struct ixgb_softc *);
93 void ixgb_rxeof(struct ixgb_softc *, int);
94 void
95 ixgb_receive_checksum(struct ixgb_softc *,
96 		      struct ixgb_rx_desc * rx_desc,
97 		      struct mbuf *);
98 void
99 ixgb_transmit_checksum_setup(struct ixgb_softc *,
100 			     struct mbuf *,
101 			     u_int8_t *);
102 void ixgb_set_promisc(struct ixgb_softc *);
103 void ixgb_set_multi(struct ixgb_softc *);
104 #ifdef IXGB_DEBUG
105 void ixgb_print_hw_stats(struct ixgb_softc *);
106 #endif
107 void ixgb_update_link_status(struct ixgb_softc *);
108 int
109 ixgb_get_buf(struct ixgb_softc *, int i,
110 	     struct mbuf *);
111 void ixgb_enable_hw_vlans(struct ixgb_softc *);
112 int  ixgb_encap(struct ixgb_softc *, struct mbuf *);
113 int
114 ixgb_dma_malloc(struct ixgb_softc *, bus_size_t,
115 		struct ixgb_dma_alloc *, int);
116 void ixgb_dma_free(struct ixgb_softc *, struct ixgb_dma_alloc *);
117 
118 /*********************************************************************
119  *  OpenBSD Device Interface Entry Points
120  *********************************************************************/
121 
122 const struct cfattach ixgb_ca = {
123 	sizeof(struct ixgb_softc), ixgb_probe, ixgb_attach
124 };
125 
126 struct cfdriver ixgb_cd = {
127 	NULL, "ixgb", DV_IFNET
128 };
129 
130 /* some defines for controlling descriptor fetches in h/w */
131 #define RXDCTL_PTHRESH_DEFAULT 0	/* chip considers prefetch below this */
132 #define RXDCTL_HTHRESH_DEFAULT 0	/* chip will only prefetch if tail is
133 					 * pushed this many descriptors from
134 					 * head */
135 #define RXDCTL_WTHRESH_DEFAULT 0	/* chip writes back at this many or RXT0 */
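/*
 * These three thresholds are packed into the RXDCTL register in
 * ixgb_initialize_receive_unit() using the IXGB_RXDCTL_*_SHIFT macros.
 */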
136 
137 
138 /*********************************************************************
139  *  Device identification routine
140  *
141  *  ixgb_probe determines whether the driver should be loaded for an
142  *  adapter, based on the PCI vendor/device id of the adapter.
143  *
144  *  return 0 on no match, positive on match
145  *********************************************************************/
146 
147 int
148 ixgb_probe(struct device *parent, void *match, void *aux)
149 {
150 	INIT_DEBUGOUT("ixgb_probe: begin");
151 
152 	return (pci_matchbyid((struct pci_attach_args *)aux, ixgb_devices,
153 	    nitems(ixgb_devices)));
154 }
155 
156 /*********************************************************************
157  *  Device initialization routine
158  *
159  *  The attach entry point is called when the driver is being loaded.
160  *  This routine identifies the type of hardware, allocates all resources
161  *  and initializes the hardware.
162  *
163  *********************************************************************/
164 
165 void
166 ixgb_attach(struct device *parent, struct device *self, void *aux)
167 {
168 	struct pci_attach_args *pa = aux;
169 	struct ixgb_softc *sc;
170 	int             tsize, rsize;
171 
172 	INIT_DEBUGOUT("ixgb_attach: begin");
173 
174 	sc = (struct ixgb_softc *)self;
175 	sc->osdep.ixgb_pa = *pa;
176 
177 	timeout_set(&sc->timer_handle, ixgb_local_timer, sc);
178 
179 	/* Determine hardware revision */
180 	ixgb_identify_hardware(sc);
181 
182 	/* Parameters (to be read from user) */
183 	sc->num_tx_desc = IXGB_MAX_TXD;
184 	sc->num_rx_desc = IXGB_MAX_RXD;
185 	sc->tx_int_delay = TIDV;
186 	sc->rx_int_delay = RDTR;
187 	sc->rx_buffer_len = IXGB_RXBUFFER_2048;
188 
189 	/*
190 	 * These parameters control the automatic generation (Tx) of, and
191 	 * response (Rx) to, Ethernet PAUSE frames.
192 	 */
193 	sc->hw.fc.high_water = FCRTH;
194 	sc->hw.fc.low_water = FCRTL;
195 	sc->hw.fc.pause_time = FCPAUSE;
196 	sc->hw.fc.send_xon = TRUE;
197 	sc->hw.fc.type = FLOW_CONTROL;
198 
199 	/* Set the max frame size assuming standard ethernet sized frames */
200 	sc->hw.max_frame_size = IXGB_MAX_JUMBO_FRAME_SIZE;
201 
202 	if (ixgb_allocate_pci_resources(sc))
203 		goto err_pci;
204 
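	/*
	 * Size the descriptor rings.  IXGB_ROUNDUP() comes from if_ixgb.h
	 * and is assumed to round its first argument up to a multiple of
	 * the second (i.e. ((size + unit - 1) / unit) * unit), so tsize and
	 * rsize below end up as full-sized rings rounded to a whole page
	 * for the DMA allocations.
	 */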
205 	tsize = IXGB_ROUNDUP(sc->num_tx_desc * sizeof(struct ixgb_tx_desc),
206 	    IXGB_MAX_TXD * sizeof(struct ixgb_tx_desc));
207 	tsize = IXGB_ROUNDUP(tsize, PAGE_SIZE);
208 
209 	/* Allocate Transmit Descriptor ring */
210 	if (ixgb_dma_malloc(sc, tsize, &sc->txdma, BUS_DMA_NOWAIT)) {
211 		printf("%s: Unable to allocate TxDescriptor memory\n",
212 		       sc->sc_dv.dv_xname);
213 		goto err_tx_desc;
214 	}
215 	sc->tx_desc_base = (struct ixgb_tx_desc *) sc->txdma.dma_vaddr;
216 
217 	rsize = IXGB_ROUNDUP(sc->num_rx_desc * sizeof(struct ixgb_rx_desc),
218 	    IXGB_MAX_RXD * sizeof(struct ixgb_rx_desc));
219 	rsize = IXGB_ROUNDUP(rsize, PAGE_SIZE);
220 
221 	/* Allocate Receive Descriptor ring */
222 	if (ixgb_dma_malloc(sc, rsize, &sc->rxdma, BUS_DMA_NOWAIT)) {
223 		printf("%s: Unable to allocate rx_desc memory\n",
224 		       sc->sc_dv.dv_xname);
225 		goto err_rx_desc;
226 	}
227 	sc->rx_desc_base = (struct ixgb_rx_desc *) sc->rxdma.dma_vaddr;
228 
229 	/* Initialize the hardware */
230 	if (ixgb_hardware_init(sc)) {
231 		printf("%s: Unable to initialize the hardware\n",
232 		       sc->sc_dv.dv_xname);
233 		goto err_hw_init;
234 	}
235 
236 	/* Setup OS specific network interface */
237 	ixgb_setup_interface(sc);
238 
239 	/* Initialize statistics */
240 	ixgb_clear_hw_cntrs(&sc->hw);
241 	ixgb_update_stats_counters(sc);
242 	ixgb_update_link_status(sc);
243 
244 	printf(", address %s\n", ether_sprintf(sc->interface_data.ac_enaddr));
245 
246 	INIT_DEBUGOUT("ixgb_attach: end");
247 	return;
248 
249 err_hw_init:
250 	ixgb_dma_free(sc, &sc->rxdma);
251 err_rx_desc:
252 	ixgb_dma_free(sc, &sc->txdma);
253 err_tx_desc:
254 err_pci:
255 	ixgb_free_pci_resources(sc);
256 }
257 
258 /*********************************************************************
259  *  Transmit entry point
260  *
261  *  ixgb_start is called by the stack to initiate a transmit.
262  *  The driver will remain in this routine as long as there are
263  *  packets to transmit and transmit resources are available.
264  *  If transmit resources are not available, the stack is notified
265  *  and the packet is requeued.
266  **********************************************************************/
267 
268 void
269 ixgb_start(struct ifnet *ifp)
270 {
271 	struct mbuf    *m_head;
272 	struct ixgb_softc *sc = ifp->if_softc;
273 	int		post = 0;
274 
275 	if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd))
276 		return;
277 
278 	if (!sc->link_active)
279 		return;
280 
281 	bus_dmamap_sync(sc->txdma.dma_tag, sc->txdma.dma_map, 0,
282 	    sc->txdma.dma_map->dm_mapsize,
283 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
284 
285 	for (;;) {
286 		m_head = ifq_deq_begin(&ifp->if_snd);
287 		if (m_head == NULL)
288 			break;
289 
290 		if (ixgb_encap(sc, m_head)) {
291 			ifq_deq_rollback(&ifp->if_snd, m_head);
292 			ifq_set_oactive(&ifp->if_snd);
293 			break;
294 		}
295 
296 		ifq_deq_commit(&ifp->if_snd, m_head);
297 
298 #if NBPFILTER > 0
299 		/* Send a copy of the frame to the BPF listener */
300 		if (ifp->if_bpf)
301 			bpf_mtap_ether(ifp->if_bpf, m_head, BPF_DIRECTION_OUT);
302 #endif
303 
304 		/* Set timeout in case hardware has problems transmitting */
305 		ifp->if_timer = IXGB_TX_TIMEOUT;
306 
307 		post = 1;
308 	}
309 
310 	bus_dmamap_sync(sc->txdma.dma_tag, sc->txdma.dma_map, 0,
311 	    sc->txdma.dma_map->dm_mapsize,
312 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
313 	/*
314 	 * Advance the Transmit Descriptor Tail (TDT); this tells the
315 	 * hardware that the frames queued above are available to
316 	 * transmit.
317 	 */
318 	if (post)
319 		IXGB_WRITE_REG(&sc->hw, TDT, sc->next_avail_tx_desc);
320 }
321 
322 /*********************************************************************
323  *  Ioctl entry point
324  *
325  *  ixgb_ioctl is called when the user wants to configure the
326  *  interface.
327  *
328  *  return 0 on success, positive on failure
329  **********************************************************************/
330 
331 int
332 ixgb_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
333 {
334 	struct ixgb_softc *sc = ifp->if_softc;
335 	struct ifreq	*ifr = (struct ifreq *) data;
336 	int		s, error = 0;
337 
338 	s = splnet();
339 
340 	switch (command) {
341 	case SIOCSIFADDR:
342 		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFADDR (Set Interface "
343 			       "Addr)");
344 		ifp->if_flags |= IFF_UP;
345 		if (!(ifp->if_flags & IFF_RUNNING))
346 			ixgb_init(sc);
347 		break;
348 
349 	case SIOCSIFFLAGS:
350 		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFFLAGS (Set Interface Flags)");
351 		if (ifp->if_flags & IFF_UP) {
352 			/*
353 			 * If only the PROMISC or ALLMULTI flag changes, then
354 			 * don't do a full re-init of the chip, just update
355 			 * the Rx filter.
356 			 */
357 			if ((ifp->if_flags & IFF_RUNNING) &&
358 			    ((ifp->if_flags ^ sc->if_flags) &
359 			     (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
360 				ixgb_set_promisc(sc);
361 			} else {
362 				if (!(ifp->if_flags & IFF_RUNNING))
363 					ixgb_init(sc);
364 			}
365 		} else {
366 			if (ifp->if_flags & IFF_RUNNING)
367 				ixgb_stop(sc);
368 		}
369 		sc->if_flags = ifp->if_flags;
370 		break;
371 
372 	case SIOCSIFMEDIA:
373 	case SIOCGIFMEDIA:
374 		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCxIFMEDIA (Get/Set Interface Media)");
375 		error = ifmedia_ioctl(ifp, ifr, &sc->media, command);
376 		break;
377 
378 	default:
379 		error = ether_ioctl(ifp, &sc->interface_data, command, data);
380 	}
381 
382 	if (error == ENETRESET) {
383 		if (ifp->if_flags & IFF_RUNNING) {
384 			ixgb_disable_intr(sc);
385 			ixgb_set_multi(sc);
386 			ixgb_enable_intr(sc);
387 		}
388 		error = 0;
389 	}
390 
391 	splx(s);
392 	return (error);
393 }
394 
395 /*********************************************************************
396  *  Watchdog entry point
397  *
398  *  This routine is called whenever the hardware quits transmitting.
399  *
400  **********************************************************************/
401 
402 void
403 ixgb_watchdog(struct ifnet * ifp)
404 {
405 	struct ixgb_softc *sc = ifp->if_softc;
406 
407 	/*
408 	 * If we are in this routine because of pause frames, then don't
409 	 * reset the hardware.
410 	 */
411 	if (IXGB_READ_REG(&sc->hw, STATUS) & IXGB_STATUS_TXOFF) {
412 		ifp->if_timer = IXGB_TX_TIMEOUT;
413 		return;
414 	}
415 
416 	printf("%s: watchdog timeout -- resetting\n", sc->sc_dv.dv_xname);
417 
418 	ixgb_init(sc);
419 
420 	sc->watchdog_events++;
421 }
422 
423 /*********************************************************************
424  *  Init entry point
425  *
426  *  This routine is used in two ways. It is used by the stack as
427  *  the init entry point in the network interface structure. It is also used
428  *  by the driver as a hw/sw initialization routine to get to a
429  *  consistent state.
430  *
431  **********************************************************************/
432 
433 void
434 ixgb_init(void *arg)
435 {
436 	struct ixgb_softc *sc = arg;
437 	struct ifnet   *ifp = &sc->interface_data.ac_if;
438 	uint32_t temp_reg;
439 	int s;
440 
441 	INIT_DEBUGOUT("ixgb_init: begin");
442 
443 	s = splnet();
444 
445 	ixgb_stop(sc);
446 
447 	/* Get the latest mac address, User can use a LAA */
448 	bcopy(sc->interface_data.ac_enaddr, sc->hw.curr_mac_addr,
449 	      IXGB_ETH_LENGTH_OF_ADDRESS);
450 
451 	/* Initialize the hardware */
452 	if (ixgb_hardware_init(sc)) {
453 		printf("%s: Unable to initialize the hardware\n",
454 		       sc->sc_dv.dv_xname);
455 		splx(s);
456 		return;
457 	}
458 
459 	if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)
460 		ixgb_enable_hw_vlans(sc);
461 
462 	/* Prepare transmit descriptors and buffers */
463 	if (ixgb_setup_transmit_structures(sc)) {
464 		printf("%s: Could not setup transmit structures\n",
465 		       sc->sc_dv.dv_xname);
466 		ixgb_stop(sc);
467 		splx(s);
468 		return;
469 	}
470 	ixgb_initialize_transmit_unit(sc);
471 
472 	/* Setup Multicast table */
473 	ixgb_set_multi(sc);
474 
475 	/* Prepare receive descriptors and buffers */
476 	if (ixgb_setup_receive_structures(sc)) {
477 		printf("%s: Could not setup receive structures\n",
478 		       sc->sc_dv.dv_xname);
479 		ixgb_stop(sc);
480 		splx(s);
481 		return;
482 	}
483 	ixgb_initialize_receive_unit(sc);
484 
485 	/* Don't lose promiscuous settings */
486 	ixgb_set_promisc(sc);
487 
488 	ifp->if_flags |= IFF_RUNNING;
489 	ifq_clr_oactive(&ifp->if_snd);
490 
491 	/* Enable jumbo frames */
492 	IXGB_WRITE_REG(&sc->hw, MFRMS,
493 	    sc->hw.max_frame_size << IXGB_MFRMS_SHIFT);
494 	temp_reg = IXGB_READ_REG(&sc->hw, CTRL0);
495 	temp_reg |= IXGB_CTRL0_JFE;
496 	IXGB_WRITE_REG(&sc->hw, CTRL0, temp_reg);
497 
498 	timeout_add_sec(&sc->timer_handle, 1);
499 	ixgb_clear_hw_cntrs(&sc->hw);
500 	ixgb_enable_intr(sc);
501 
502 	splx(s);
503 }
504 
505 /*********************************************************************
506  *
507  *  Interrupt Service routine
508  *
509  **********************************************************************/
510 
511 int
512 ixgb_intr(void *arg)
513 {
514 	struct ixgb_softc *sc = arg;
515 	struct ifnet	*ifp;
516 	u_int32_t	reg_icr;
517 	boolean_t	rxdmt0 = FALSE;
518 	int claimed = 0;
519 
520 	ifp = &sc->interface_data.ac_if;
521 
522 	for (;;) {
523 		reg_icr = IXGB_READ_REG(&sc->hw, ICR);
524 		if (reg_icr == 0)
525 			break;
526 
527 		claimed = 1;
528 
529 		if (reg_icr & IXGB_INT_RXDMT0)
530 			rxdmt0 = TRUE;
531 
532 		if (ifp->if_flags & IFF_RUNNING) {
533 			ixgb_rxeof(sc, -1);
534 			ixgb_txeof(sc);
535 		}
536 
537 		/* Link status change */
538 		if (reg_icr & (IXGB_INT_RXSEQ | IXGB_INT_LSC)) {
539 			timeout_del(&sc->timer_handle);
540 			ixgb_check_for_link(&sc->hw);
541 			ixgb_update_link_status(sc);
542 			timeout_add_sec(&sc->timer_handle, 1);
543 		}
544 
545 		if (rxdmt0 && sc->raidc) {
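			/*
			 * Presumably this masks RXDMT0 via IMC and then
			 * immediately unmasks it via IMS to re-arm the
			 * receive-descriptor-minimum interrupt while the
			 * RAIDC moderation timer is in use.
			 */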
546 			IXGB_WRITE_REG(&sc->hw, IMC, IXGB_INT_RXDMT0);
547 			IXGB_WRITE_REG(&sc->hw, IMS, IXGB_INT_RXDMT0);
548 		}
549 	}
550 
551 	if (ifp->if_flags & IFF_RUNNING && !ifq_empty(&ifp->if_snd))
552 		ixgb_start(ifp);
553 
554 	return (claimed);
555 }
556 
557 
558 /*********************************************************************
559  *
560  *  Media Ioctl callback
561  *
562  *  This routine is called whenever the user queries the status of
563  *  the interface using ifconfig.
564  *
565  **********************************************************************/
566 void
567 ixgb_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
568 {
569 	struct ixgb_softc *sc = ifp->if_softc;
570 
571 	INIT_DEBUGOUT("ixgb_media_status: begin");
572 
573 	ixgb_check_for_link(&sc->hw);
574 	ixgb_update_link_status(sc);
575 
576 	ifmr->ifm_status = IFM_AVALID;
577 	ifmr->ifm_active = IFM_ETHER;
578 
579 	if (!sc->hw.link_up) {
580 		ifmr->ifm_active |= IFM_NONE;
581 		return;
582 	}
583 
584 	ifmr->ifm_status |= IFM_ACTIVE;
585 	if ((sc->hw.phy_type == ixgb_phy_type_g6104) ||
586 	    (sc->hw.phy_type == ixgb_phy_type_txn17401))
587 		ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
588 	else
589 		ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
590 
591 	return;
592 }
593 
594 /*********************************************************************
595  *
596  *  Media Ioctl callback
597  *
598  *  This routine is called when the user changes speed/duplex using
599  *  the media/mediaopt options with ifconfig.
600  *
601  **********************************************************************/
602 int
603 ixgb_media_change(struct ifnet * ifp)
604 {
605 	struct ixgb_softc *sc = ifp->if_softc;
606 	struct ifmedia *ifm = &sc->media;
607 
608 	INIT_DEBUGOUT("ixgb_media_change: begin");
609 
610 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
611 		return (EINVAL);
612 
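	/*
	 * The 82597EX only runs at 10Gb full-duplex, so there is nothing
	 * to reconfigure here beyond validating the media type.
	 */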
613 	return (0);
614 }
615 
616 /*********************************************************************
617  *
618  *  This routine maps the mbufs to tx descriptors.
619  *
620  *  return 0 on success, positive on failure
621  **********************************************************************/
622 
623 int
624 ixgb_encap(struct ixgb_softc *sc, struct mbuf *m_head)
625 {
626 	u_int8_t        txd_popts;
627 	int             i, j, error = 0;
628 	bus_dmamap_t	map;
629 
630 	struct ixgb_buffer *tx_buffer;
631 	struct ixgb_tx_desc *current_tx_desc = NULL;
632 
633 	/*
634 	 * Force a cleanup if number of TX descriptors available hits the
635 	 * threshold
636 	 */
637 	if (sc->num_tx_desc_avail <= IXGB_TX_CLEANUP_THRESHOLD) {
638 		ixgb_txeof(sc);
639 		/* Do we now have at least the minimum number available? */
640 		if (sc->num_tx_desc_avail <= IXGB_TX_CLEANUP_THRESHOLD) {
641 			sc->no_tx_desc_avail1++;
642 			return (ENOBUFS);
643 		}
644 	}
645 
646 	/*
647 	 * Map the packet for DMA.
648 	 */
649 	tx_buffer = &sc->tx_buffer_area[sc->next_avail_tx_desc];
650 	map = tx_buffer->map;
651 
652 	error = bus_dmamap_load_mbuf(sc->txtag, map,
653 				     m_head, BUS_DMA_NOWAIT);
654 	if (error != 0) {
655 		sc->no_tx_dma_setup++;
656 		return (error);
657 	}
658 	IXGB_KASSERT(map->dm_nsegs != 0, ("ixgb_encap: empty packet"));
659 
660 	if (map->dm_nsegs > sc->num_tx_desc_avail)
661 		goto fail;
662 
663 #ifdef IXGB_CSUM_OFFLOAD
664 	ixgb_transmit_checksum_setup(sc, m_head, &txd_popts);
665 #else
666 	txd_popts = 0;
667 #endif
668 
669 	i = sc->next_avail_tx_desc;
670 	for (j = 0; j < map->dm_nsegs; j++) {
671 		tx_buffer = &sc->tx_buffer_area[i];
672 		current_tx_desc = &sc->tx_desc_base[i];
673 
674 		current_tx_desc->buff_addr = htole64(map->dm_segs[j].ds_addr);
675 		current_tx_desc->cmd_type_len = htole32((sc->txd_cmd | map->dm_segs[j].ds_len));
676 		current_tx_desc->popts = txd_popts;
677 		if (++i == sc->num_tx_desc)
678 			i = 0;
679 
680 		tx_buffer->m_head = NULL;
681 	}
682 
683 	sc->num_tx_desc_avail -= map->dm_nsegs;
684 	sc->next_avail_tx_desc = i;
685 
686 	/* Find out if we are in VLAN mode */
687 	if (m_head->m_flags & M_VLANTAG) {
688 		/* Set the VLAN id */
689 		current_tx_desc->vlan = htole16(m_head->m_pkthdr.ether_vtag);
690 
691 		/* Tell hardware to add tag */
692 		current_tx_desc->cmd_type_len |= htole32(IXGB_TX_DESC_CMD_VLE);
693 	}
694 
695 	tx_buffer->m_head = m_head;
696 	bus_dmamap_sync(sc->txtag, map, 0, map->dm_mapsize,
697 	    BUS_DMASYNC_PREWRITE);
698 
699 	/*
700 	 * Last Descriptor of Packet needs End Of Packet (EOP)
701 	 */
702 	current_tx_desc->cmd_type_len |= htole32(IXGB_TX_DESC_CMD_EOP);
703 
704 	return (0);
705 
706 fail:
707 	sc->no_tx_desc_avail2++;
708 	bus_dmamap_unload(sc->txtag, map);
709 	return (ENOBUFS);
710 }
711 
712 void
713 ixgb_set_promisc(struct ixgb_softc *sc)
714 {
715 
716 	u_int32_t       reg_rctl;
717 	struct ifnet   *ifp = &sc->interface_data.ac_if;
718 
719 	reg_rctl = IXGB_READ_REG(&sc->hw, RCTL);
720 
721 	if (ifp->if_flags & IFF_PROMISC) {
722 		reg_rctl |= (IXGB_RCTL_UPE | IXGB_RCTL_MPE);
723 	} else if (ifp->if_flags & IFF_ALLMULTI) {
724 		reg_rctl |= IXGB_RCTL_MPE;
725 		reg_rctl &= ~IXGB_RCTL_UPE;
726 	} else {
727 		reg_rctl &= ~(IXGB_RCTL_UPE | IXGB_RCTL_MPE);
728 	}
729 	IXGB_WRITE_REG(&sc->hw, RCTL, reg_rctl);
730 }
731 
732 /*********************************************************************
733  *  Multicast Update
734  *
735  *  This routine is called whenever the multicast address list is updated.
736  *
737  **********************************************************************/
738 
739 void
740 ixgb_set_multi(struct ixgb_softc *sc)
741 {
742 	u_int32_t       reg_rctl = 0;
743 	u_int8_t        mta[MAX_NUM_MULTICAST_ADDRESSES * IXGB_ETH_LENGTH_OF_ADDRESS];
744 	int             mcnt = 0;
745 	struct ifnet   *ifp = &sc->interface_data.ac_if;
746 	struct arpcom *ac = &sc->interface_data;
747 	struct ether_multi *enm;
748 	struct ether_multistep step;
749 
750 	IOCTL_DEBUGOUT("ixgb_set_multi: begin");
751 
752 	if (ac->ac_multirangecnt > 0) {
753 		ifp->if_flags |= IFF_ALLMULTI;
754 		mcnt = MAX_NUM_MULTICAST_ADDRESSES;
755 		goto setit;
756 	}
757 
758 	ETHER_FIRST_MULTI(step, ac, enm);
759 	while (enm != NULL) {
760 		if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
761 			break;
762 		bcopy(enm->enm_addrlo, &mta[mcnt*IXGB_ETH_LENGTH_OF_ADDRESS],
763 		      IXGB_ETH_LENGTH_OF_ADDRESS);
764 		mcnt++;
765 		ETHER_NEXT_MULTI(step, enm);
766 	}
767 
768 setit:
769 	if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES) {
770 		reg_rctl = IXGB_READ_REG(&sc->hw, RCTL);
771 		reg_rctl |= IXGB_RCTL_MPE;
772 		IXGB_WRITE_REG(&sc->hw, RCTL, reg_rctl);
773 	} else
774 		ixgb_mc_addr_list_update(&sc->hw, mta, mcnt, 0);
775 }
776 
777 
778 /*********************************************************************
779  *  Timer routine
780  *
781  *  This routine checks for link status and updates statistics.
782  *
783  **********************************************************************/
784 
785 void
786 ixgb_local_timer(void *arg)
787 {
788 	struct ifnet   *ifp;
789 	struct ixgb_softc *sc = arg;
790 	int s;
791 
792 	ifp = &sc->interface_data.ac_if;
793 
794 	s = splnet();
795 
796 	ixgb_check_for_link(&sc->hw);
797 	ixgb_update_link_status(sc);
798 	ixgb_update_stats_counters(sc);
799 #ifdef IXGB_DEBUG
800 	if (ixgb_display_debug_stats && ifp->if_flags & IFF_RUNNING)
801 		ixgb_print_hw_stats(sc);
802 #endif
803 
804 	timeout_add_sec(&sc->timer_handle, 1);
805 
806 	splx(s);
807 }
808 
809 void
810 ixgb_update_link_status(struct ixgb_softc *sc)
811 {
812 	struct ifnet *ifp = &sc->interface_data.ac_if;
813 
814 	if (sc->hw.link_up) {
815 		if (!sc->link_active) {
816 			ifp->if_baudrate = IF_Gbps(10);
817 			sc->link_active = 1;
818 			ifp->if_link_state = LINK_STATE_FULL_DUPLEX;
819 			if_link_state_change(ifp);
820 		}
821 	} else {
822 		if (sc->link_active) {
823 			ifp->if_baudrate = 0;
824 			sc->link_active = 0;
825 			ifp->if_link_state = LINK_STATE_DOWN;
826 			if_link_state_change(ifp);
827 		}
828 	}
829 }
830 
831 /*********************************************************************
832  *
833  *  This routine disables all traffic on the adapter by issuing a
834  *  global reset on the MAC and deallocates TX/RX buffers.
835  *
836  **********************************************************************/
837 
838 void
839 ixgb_stop(void *arg)
840 {
841 	struct ifnet   *ifp;
842 	struct ixgb_softc *sc = arg;
843 	ifp = &sc->interface_data.ac_if;
844 
845 	INIT_DEBUGOUT("ixgb_stop: begin\n");
846 	ixgb_disable_intr(sc);
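	/*
	 * adapter_stopped is cleared first, presumably so that
	 * ixgb_adapter_stop() performs the full reset even if the adapter
	 * was already marked stopped.
	 */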
847 	sc->hw.adapter_stopped = FALSE;
848 	ixgb_adapter_stop(&sc->hw);
849 	timeout_del(&sc->timer_handle);
850 
851 	/* Tell the stack that the interface is no longer active */
852 	ifp->if_flags &= ~IFF_RUNNING;
853 	ifq_clr_oactive(&ifp->if_snd);
854 
855 	ixgb_free_transmit_structures(sc);
856 	ixgb_free_receive_structures(sc);
857 }
858 
859 
860 /*********************************************************************
861  *
862  *  Determine hardware revision.
863  *
864  **********************************************************************/
865 void
866 ixgb_identify_hardware(struct ixgb_softc *sc)
867 {
868 	u_int32_t	reg;
869 	struct pci_attach_args *pa = &sc->osdep.ixgb_pa;
870 
871 	/* Make sure our PCI config space has the necessary stuff set */
872 	sc->hw.pci_cmd_word = pci_conf_read(pa->pa_pc, pa->pa_tag,
873 					    PCI_COMMAND_STATUS_REG);
874 
875 	/* Save off the information about this board */
876 	sc->hw.vendor_id = PCI_VENDOR(pa->pa_id);
877 	sc->hw.device_id = PCI_PRODUCT(pa->pa_id);
878 
879 	reg = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG);
880 	sc->hw.revision_id = PCI_REVISION(reg);
881 
882 	reg = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
883 	sc->hw.subsystem_vendor_id = PCI_VENDOR(reg);
884 	sc->hw.subsystem_id = PCI_PRODUCT(reg);
885 
886 	/* Set MacType, etc. based on this PCI info */
887 	switch (sc->hw.device_id) {
888 	case IXGB_DEVICE_ID_82597EX:
889 	case IXGB_DEVICE_ID_82597EX_SR:
890 	case IXGB_DEVICE_ID_82597EX_LR:
891 	case IXGB_DEVICE_ID_82597EX_CX4:
892 		sc->hw.mac_type = ixgb_82597;
893 		break;
894 	default:
895 		INIT_DEBUGOUT1("Unknown device id 0x%x", sc->hw.device_id);
896 		printf("%s: unsupported device id 0x%x\n",
897 		    sc->sc_dv.dv_xname, sc->hw.device_id);
898 	}
899 }
900 
901 int
902 ixgb_allocate_pci_resources(struct ixgb_softc *sc)
903 
904 {
905 	int val;
906 	pci_intr_handle_t	ih;
907 	const char		*intrstr = NULL;
908 	struct pci_attach_args *pa =  &sc->osdep.ixgb_pa;
909 	pci_chipset_tag_t	pc = pa->pa_pc;
910 
911 	val = pci_conf_read(pa->pa_pc, pa->pa_tag, IXGB_MMBA);
912 	if (PCI_MAPREG_TYPE(val) != PCI_MAPREG_TYPE_MEM) {
913 		printf(": mmba is not mem space\n");
914 		return (ENXIO);
915 	}
916 	if (pci_mapreg_map(pa, IXGB_MMBA, PCI_MAPREG_MEM_TYPE(val), 0,
917 	    &sc->osdep.mem_bus_space_tag, &sc->osdep.mem_bus_space_handle,
918 	    &sc->osdep.ixgb_membase, &sc->osdep.ixgb_memsize, 0)) {
919 		printf(": cannot find mem space\n");
920 		return (ENXIO);
921 	}
922 
923 	if (pci_intr_map(pa, &ih)) {
924 		printf(": couldn't map interrupt\n");
925 		return (ENXIO);
926 	}
927 
928 	sc->hw.back = &sc->osdep;
929 
930 	intrstr = pci_intr_string(pc, ih);
931 	sc->sc_intrhand = pci_intr_establish(pc, ih, IPL_NET, ixgb_intr, sc,
932 					    sc->sc_dv.dv_xname);
933 	if (sc->sc_intrhand == NULL) {
934 		printf(": couldn't establish interrupt");
935 		if (intrstr != NULL)
936 			printf(" at %s", intrstr);
937 		printf("\n");
938 		return (ENXIO);
939 	}
940 	printf(": %s", intrstr);
941 
942 	return (0);
943 }
944 
945 void
946 ixgb_free_pci_resources(struct ixgb_softc *sc)
947 {
948 	struct pci_attach_args *pa = &sc->osdep.ixgb_pa;
949 	pci_chipset_tag_t	pc = pa->pa_pc;
950 
951 	if (sc->sc_intrhand)
952 		pci_intr_disestablish(pc, sc->sc_intrhand);
953 	sc->sc_intrhand = 0;
954 
955 	if (sc->osdep.ixgb_membase)
956 		bus_space_unmap(sc->osdep.mem_bus_space_tag, sc->osdep.mem_bus_space_handle,
957 				sc->osdep.ixgb_memsize);
958 	sc->osdep.ixgb_membase = 0;
959 }
960 
961 /*********************************************************************
962  *
963  *  Initialize the hardware to a configuration as specified by the
964  *  adapter structure. The controller is reset, the EEPROM is
965  *  verified, the MAC address is set, then the shared initialization
966  *  routines are called.
967  *
968  **********************************************************************/
969 int
970 ixgb_hardware_init(struct ixgb_softc *sc)
971 {
972 	/* Issue a global reset */
973 	sc->hw.adapter_stopped = FALSE;
974 	ixgb_adapter_stop(&sc->hw);
975 
976 	/* Make sure we have a good EEPROM before we read from it */
977 	if (!ixgb_validate_eeprom_checksum(&sc->hw)) {
978 		printf("%s: The EEPROM Checksum Is Not Valid\n",
979 		       sc->sc_dv.dv_xname);
980 		return (EIO);
981 	}
982 	if (!ixgb_init_hw(&sc->hw)) {
983 		printf("%s: Hardware Initialization Failed\n",
984 		       sc->sc_dv.dv_xname);
985 		return (EIO);
986 	}
987 	bcopy(sc->hw.curr_mac_addr, sc->interface_data.ac_enaddr,
988 	      IXGB_ETH_LENGTH_OF_ADDRESS);
989 
990 	return (0);
991 }
992 
993 /*********************************************************************
994  *
995  *  Setup networking device structure and register an interface.
996  *
997  **********************************************************************/
998 void
999 ixgb_setup_interface(struct ixgb_softc *sc)
1000 {
1001 	struct ifnet   *ifp;
1002 	INIT_DEBUGOUT("ixgb_setup_interface: begin");
1003 
1004 	ifp = &sc->interface_data.ac_if;
1005 	strlcpy(ifp->if_xname, sc->sc_dv.dv_xname, IFNAMSIZ);
1006 
1007 	ifp->if_softc = sc;
1008 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1009 	ifp->if_ioctl = ixgb_ioctl;
1010 	ifp->if_start = ixgb_start;
1011 	ifp->if_watchdog = ixgb_watchdog;
1012 	ifp->if_hardmtu =
1013 		IXGB_MAX_JUMBO_FRAME_SIZE - ETHER_HDR_LEN - ETHER_CRC_LEN;
1014 	ifq_init_maxlen(&ifp->if_snd, sc->num_tx_desc - 1);
1015 
1016 	ifp->if_capabilities = IFCAP_VLAN_MTU;
1017 
1018 #if NVLAN > 0
1019 	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
1020 #endif
1021 
1022 #ifdef IXGB_CSUM_OFFLOAD
1023 	ifp->if_capabilities |= IFCAP_CSUM_TCPv4|IFCAP_CSUM_UDPv4;
1024 #endif
1025 
1026 	/*
1027 	 * Specify the media types supported by this adapter and register
1028 	 * callbacks to update media and link information
1029 	 */
1030 	ifmedia_init(&sc->media, IFM_IMASK, ixgb_media_change,
1031 		     ixgb_media_status);
1032 	if ((sc->hw.phy_type == ixgb_phy_type_g6104) ||
1033 	    (sc->hw.phy_type == ixgb_phy_type_txn17401)) {
1034 		ifmedia_add(&sc->media, IFM_ETHER | IFM_10G_LR |
1035 		    IFM_FDX, 0, NULL);
1036 	} else {
1037 		ifmedia_add(&sc->media, IFM_ETHER | IFM_10G_SR |
1038 		    IFM_FDX, 0, NULL);
1039 	}
1040 	ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1041 	ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);
1042 
1043 	if_attach(ifp);
1044 	ether_ifattach(ifp);
1045 }
1046 
1047 /********************************************************************
1048  * Manage DMA'able memory.
1049  *******************************************************************/
1050 int
1051 ixgb_dma_malloc(struct ixgb_softc *sc, bus_size_t size,
1052 		struct ixgb_dma_alloc * dma, int mapflags)
1053 {
1054 	int r;
1055 
1056 	dma->dma_tag = sc->osdep.ixgb_pa.pa_dmat;
1057 	r = bus_dmamap_create(dma->dma_tag, size, 1,
1058 	    size, 0, BUS_DMA_NOWAIT, &dma->dma_map);
1059 	if (r != 0) {
1060 		printf("%s: ixgb_dma_malloc: bus_dmamap_create failed; "
1061 			"error %u\n", sc->sc_dv.dv_xname, r);
1062 		goto fail_0;
1063 	}
1064 
1065 	r = bus_dmamem_alloc(dma->dma_tag, size, PAGE_SIZE, 0, &dma->dma_seg,
1066 	    1, &dma->dma_nseg, BUS_DMA_NOWAIT);
1067 	if (r != 0) {
1068 		printf("%s: ixgb_dma_malloc: bus_dmamem_alloc failed; "
1069 			"size %lu, error %d\n", sc->sc_dv.dv_xname,
1070 			(unsigned long)size, r);
1071 		goto fail_1;
1072 	}
1073 
1074 	r = bus_dmamem_map(dma->dma_tag, &dma->dma_seg, dma->dma_nseg, size,
1075 	    &dma->dma_vaddr, BUS_DMA_NOWAIT);
1076 	if (r != 0) {
1077 		printf("%s: ixgb_dma_malloc: bus_dmamem_map failed; "
1078 			"size %lu, error %d\n", sc->sc_dv.dv_xname,
1079 			(unsigned long)size, r);
1080 		goto fail_2;
1081 	}
1082 
1083 	r = bus_dmamap_load(sc->osdep.ixgb_pa.pa_dmat, dma->dma_map,
1084 			    dma->dma_vaddr, size, NULL,
1085 			    mapflags | BUS_DMA_NOWAIT);
1086 	if (r != 0) {
1087 		printf("%s: ixgb_dma_malloc: bus_dmamap_load failed; "
1088 			"error %u\n", sc->sc_dv.dv_xname, r);
1089 		goto fail_3;
1090 	}
1091 
1092 	dma->dma_size = size;
1093 	return (0);
1094 
1095 fail_3:
1096 	bus_dmamem_unmap(dma->dma_tag, dma->dma_vaddr, size);
1097 fail_2:
1098 	bus_dmamem_free(dma->dma_tag, &dma->dma_seg, dma->dma_nseg);
1099 fail_1:
1100 	bus_dmamap_destroy(dma->dma_tag, dma->dma_map);
1101 fail_0:
1102 	dma->dma_map = NULL;
1103 	dma->dma_tag = NULL;
1104 
1105 	return (r);
1106 }
1107 
1108 void
1109 ixgb_dma_free(struct ixgb_softc *sc, struct ixgb_dma_alloc *dma)
1110 {
1111 	if (dma->dma_tag == NULL)
1112 		return;
1113 
1114 	if (dma->dma_map != NULL) {
1115 		bus_dmamap_sync(dma->dma_tag, dma->dma_map, 0,
1116 		    dma->dma_map->dm_mapsize,
1117 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1118 		bus_dmamap_unload(dma->dma_tag, dma->dma_map);
1119 		bus_dmamem_unmap(dma->dma_tag, dma->dma_vaddr, dma->dma_size);
1120 		bus_dmamem_free(dma->dma_tag, &dma->dma_seg, dma->dma_nseg);
1121 		bus_dmamap_destroy(dma->dma_tag, dma->dma_map);
1122 	}
1123 }
1124 
1125 /*********************************************************************
1126  *
1127  *  Allocate memory for tx_buffer structures. The tx_buffer stores all
1128  *  the information needed to transmit a packet on the wire.
1129  *
1130  **********************************************************************/
1131 int
1132 ixgb_allocate_transmit_structures(struct ixgb_softc *sc)
1133 {
1134 	if (!(sc->tx_buffer_area = mallocarray(sc->num_tx_desc,
1135 	    sizeof(struct ixgb_buffer), M_DEVBUF, M_NOWAIT | M_ZERO))) {
1136 		printf("%s: Unable to allocate tx_buffer memory\n",
1137 		       sc->sc_dv.dv_xname);
1138 		return (ENOMEM);
1139 	}
1140 
1141 	return (0);
1142 }
1143 
1144 /*********************************************************************
1145  *
1146  *  Allocate and initialize transmit structures.
1147  *
1148  **********************************************************************/
1149 int
1150 ixgb_setup_transmit_structures(struct ixgb_softc *sc)
1151 {
1152 	struct	ixgb_buffer *tx_buffer;
1153 	int error, i;
1154 
1155 	if ((error = ixgb_allocate_transmit_structures(sc)) != 0)
1156 		goto fail;
1157 
1158 	bzero((void *)sc->tx_desc_base,
1159 	      (sizeof(struct ixgb_tx_desc)) * sc->num_tx_desc);
1160 
1161 	sc->txtag = sc->osdep.ixgb_pa.pa_dmat;
1162 
1163 	tx_buffer = sc->tx_buffer_area;
1164 	for (i = 0; i < sc->num_tx_desc; i++) {
1165 		error = bus_dmamap_create(sc->txtag, IXGB_MAX_JUMBO_FRAME_SIZE,
1166 			    IXGB_MAX_SCATTER, IXGB_MAX_JUMBO_FRAME_SIZE, 0,
1167 			    BUS_DMA_NOWAIT, &tx_buffer->map);
1168 		if (error != 0) {
1169 			printf("%s: Unable to create TX DMA map\n",
1170 			    sc->sc_dv.dv_xname);
1171 			goto fail;
1172 		}
1173 		tx_buffer++;
1174 	}
1175 
1176 	sc->next_avail_tx_desc = 0;
1177 	sc->oldest_used_tx_desc = 0;
1178 
1179 	/* Set number of descriptors available */
1180 	sc->num_tx_desc_avail = sc->num_tx_desc;
1181 
1182 	/* Set checksum context */
1183 	sc->active_checksum_context = OFFLOAD_NONE;
1184 	bus_dmamap_sync(sc->txdma.dma_tag, sc->txdma.dma_map, 0,
1185 	   sc->txdma.dma_size, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1186 
1187 	return (0);
1188 
1189 fail:
1190 	ixgb_free_transmit_structures(sc);
1191 	return (error);
1192 }
1193 
1194 /*********************************************************************
1195  *
1196  *  Enable transmit unit.
1197  *
1198  **********************************************************************/
1199 void
1200 ixgb_initialize_transmit_unit(struct ixgb_softc *sc)
1201 {
1202 	u_int32_t       reg_tctl;
1203 	u_int64_t       bus_addr;
1204 
1205 	/* Setup the Base and Length of the Tx Descriptor Ring */
1206 	bus_addr = sc->txdma.dma_map->dm_segs[0].ds_addr;
1207 	IXGB_WRITE_REG(&sc->hw, TDBAL, (u_int32_t)bus_addr);
1208 	IXGB_WRITE_REG(&sc->hw, TDBAH, (u_int32_t)(bus_addr >> 32));
1209 	IXGB_WRITE_REG(&sc->hw, TDLEN,
1210 		       sc->num_tx_desc *
1211 		       sizeof(struct ixgb_tx_desc));
1212 
1213 	/* Setup the HW Tx Head and Tail descriptor pointers */
1214 	IXGB_WRITE_REG(&sc->hw, TDH, 0);
1215 	IXGB_WRITE_REG(&sc->hw, TDT, 0);
1216 
1217 	HW_DEBUGOUT2("Base = %x, Length = %x\n",
1218 		     IXGB_READ_REG(&sc->hw, TDBAL),
1219 		     IXGB_READ_REG(&sc->hw, TDLEN));
1220 
1221 	IXGB_WRITE_REG(&sc->hw, TIDV, sc->tx_int_delay);
1222 
1223 	/* Program the Transmit Control Register */
1224 	reg_tctl = IXGB_READ_REG(&sc->hw, TCTL);
1225 	reg_tctl = IXGB_TCTL_TCE | IXGB_TCTL_TXEN | IXGB_TCTL_TPDE;
1226 	IXGB_WRITE_REG(&sc->hw, TCTL, reg_tctl);
1227 
1228 	/* Setup Transmit Descriptor Settings for this adapter */
1229 	sc->txd_cmd = IXGB_TX_DESC_TYPE | IXGB_TX_DESC_CMD_RS;
1230 
1231 	if (sc->tx_int_delay > 0)
1232 		sc->txd_cmd |= IXGB_TX_DESC_CMD_IDE;
1233 }
1234 
1235 /*********************************************************************
1236  *
1237  *  Free all transmit related data structures.
1238  *
1239  **********************************************************************/
1240 void
1241 ixgb_free_transmit_structures(struct ixgb_softc *sc)
1242 {
1243 	struct ixgb_buffer *tx_buffer;
1244 	int             i;
1245 
1246 	INIT_DEBUGOUT("free_transmit_structures: begin");
1247 
1248 	if (sc->tx_buffer_area != NULL) {
1249 		tx_buffer = sc->tx_buffer_area;
1250 		for (i = 0; i < sc->num_tx_desc; i++, tx_buffer++) {
1251 			if (tx_buffer->map != NULL &&
1252 			    tx_buffer->map->dm_nsegs > 0) {
1253 				bus_dmamap_sync(sc->txtag, tx_buffer->map,
1254 				    0, tx_buffer->map->dm_mapsize,
1255 				    BUS_DMASYNC_POSTWRITE);
1256 				bus_dmamap_unload(sc->txtag,
1257 				    tx_buffer->map);
1258 			}
1259 
1260 			if (tx_buffer->m_head != NULL) {
1261 				m_freem(tx_buffer->m_head);
1262 				tx_buffer->m_head = NULL;
1263 			}
1264 			if (tx_buffer->map != NULL) {
1265 				bus_dmamap_destroy(sc->txtag,
1266 				    tx_buffer->map);
1267 				tx_buffer->map = NULL;
1268 			}
1269 		}
1270 	}
1271 	if (sc->tx_buffer_area != NULL) {
1272 		free(sc->tx_buffer_area, M_DEVBUF, 0);
1273 		sc->tx_buffer_area = NULL;
1274 	}
1275 	if (sc->txtag != NULL) {
1276 		sc->txtag = NULL;
1277 	}
1278 }
1279 
1280 /*********************************************************************
1281  *
1282  *  The offload context needs to be set when we transfer the first
1283  *  packet of a particular protocol (TCP/UDP). We change the
1284  *  context only if the protocol type changes.
1285  *
1286  **********************************************************************/
1287 void
1288 ixgb_transmit_checksum_setup(struct ixgb_softc *sc,
1289 			     struct mbuf *mp,
1290 			     u_int8_t *txd_popts)
1291 {
1292 	struct ixgb_context_desc *TXD;
1293 	struct ixgb_buffer *tx_buffer;
1294 	int             curr_txd;
1295 
1296 	if (mp->m_pkthdr.csum_flags) {
1297 
1298 		if (mp->m_pkthdr.csum_flags & M_TCP_CSUM_OUT) {
1299 			*txd_popts = IXGB_TX_DESC_POPTS_TXSM;
1300 			if (sc->active_checksum_context == OFFLOAD_TCP_IP)
1301 				return;
1302 			else
1303 				sc->active_checksum_context = OFFLOAD_TCP_IP;
1304 
1305 		} else if (mp->m_pkthdr.csum_flags & M_UDP_CSUM_OUT) {
1306 			*txd_popts = IXGB_TX_DESC_POPTS_TXSM;
1307 			if (sc->active_checksum_context == OFFLOAD_UDP_IP)
1308 				return;
1309 			else
1310 				sc->active_checksum_context = OFFLOAD_UDP_IP;
1311 		} else {
1312 			*txd_popts = 0;
1313 			return;
1314 		}
1315 	} else {
1316 		*txd_popts = 0;
1317 		return;
1318 	}
1319 
1320 	/*
1321 	 * If we reach this point, the checksum offload context needs to be
1322 	 * reset.
1323 	 */
1324 	curr_txd = sc->next_avail_tx_desc;
1325 	tx_buffer = &sc->tx_buffer_area[curr_txd];
1326 	TXD = (struct ixgb_context_desc *) & sc->tx_desc_base[curr_txd];
1327 
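	/*
	 * Checksum region for the context descriptor: tucss is the byte
	 * offset where checksumming starts (just past the Ethernet and IP
	 * headers), tucso is the offset of the checksum field itself
	 * (filled in per-protocol below), and a tucse of 0 means checksum
	 * to the end of the packet.
	 */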
1328 	TXD->tucss = ENET_HEADER_SIZE + sizeof(struct ip);
1329 	TXD->tucse = 0;
1330 
1331 	TXD->mss = 0;
1332 
1333 	if (sc->active_checksum_context == OFFLOAD_TCP_IP) {
1334 		TXD->tucso =
1335 			ENET_HEADER_SIZE + sizeof(struct ip) +
1336 			offsetof(struct tcphdr, th_sum);
1337 	} else if (sc->active_checksum_context == OFFLOAD_UDP_IP) {
1338 		TXD->tucso =
1339 			ENET_HEADER_SIZE + sizeof(struct ip) +
1340 			offsetof(struct udphdr, uh_sum);
1341 	}
1342 	TXD->cmd_type_len = htole32(IXGB_CONTEXT_DESC_CMD_TCP |
1343 	    IXGB_TX_DESC_CMD_RS | IXGB_CONTEXT_DESC_CMD_IDE);
1344 
1345 	tx_buffer->m_head = NULL;
1346 
1347 	if (++curr_txd == sc->num_tx_desc)
1348 		curr_txd = 0;
1349 
1350 	sc->num_tx_desc_avail--;
1351 	sc->next_avail_tx_desc = curr_txd;
1352 }
1353 
1354 /**********************************************************************
1355  *
1356  *  Examine each tx_buffer in the used queue. If the hardware is done
1357  *  processing the packet then free associated resources. The
1358  *  tx_buffer is put back on the free queue.
1359  *
1360  **********************************************************************/
1361 void
1362 ixgb_txeof(struct ixgb_softc *sc)
1363 {
1364 	int             i, num_avail;
1365 	struct ixgb_buffer *tx_buffer;
1366 	struct ixgb_tx_desc *tx_desc;
1367 	struct ifnet	*ifp = &sc->interface_data.ac_if;
1368 
1369 	if (sc->num_tx_desc_avail == sc->num_tx_desc)
1370 		return;
1371 
1372 	num_avail = sc->num_tx_desc_avail;
1373 	i = sc->oldest_used_tx_desc;
1374 
1375 	tx_buffer = &sc->tx_buffer_area[i];
1376 	tx_desc = &sc->tx_desc_base[i];
1377 
1378 	bus_dmamap_sync(sc->txdma.dma_tag, sc->txdma.dma_map, 0,
1379 	    sc->txdma.dma_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1380 	while (tx_desc->status & IXGB_TX_DESC_STATUS_DD) {
1381 
1382 		tx_desc->status = 0;
1383 		num_avail++;
1384 
1385 		if (tx_buffer->m_head != NULL) {
1386 			if (tx_buffer->map->dm_nsegs > 0) {
1387 				bus_dmamap_sync(sc->txtag, tx_buffer->map,
1388 				    0, tx_buffer->map->dm_mapsize,
1389 				    BUS_DMASYNC_POSTWRITE);
1390 				bus_dmamap_unload(sc->txtag, tx_buffer->map);
1391 			}
1392 
1393 			m_freem(tx_buffer->m_head);
1394 			tx_buffer->m_head = NULL;
1395 		}
1396 		if (++i == sc->num_tx_desc)
1397 			i = 0;
1398 
1399 		tx_buffer = &sc->tx_buffer_area[i];
1400 		tx_desc = &sc->tx_desc_base[i];
1401 	}
1402 	bus_dmamap_sync(sc->txdma.dma_tag, sc->txdma.dma_map, 0,
1403 	    sc->txdma.dma_map->dm_mapsize,
1404 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1405 
1406 	sc->oldest_used_tx_desc = i;
1407 
1408 	/*
1409 	 * If we have enough room, clear the oactive flag to tell the stack that
1410 	 * it is OK to send packets. If there are no pending descriptors,
1411 	 * clear the timeout. Otherwise, if some descriptors have been freed,
1412 	 * restart the timeout.
1413 	 */
1414 	if (num_avail > IXGB_TX_CLEANUP_THRESHOLD)
1415 		ifq_clr_oactive(&ifp->if_snd);
1416 
1417 	/* All clean, turn off the timer */
1418 	if (num_avail == sc->num_tx_desc)
1419 		ifp->if_timer = 0;
1420 	/* Some cleaned, reset the timer */
1421 	else if (num_avail != sc->num_tx_desc_avail)
1422 		ifp->if_timer = IXGB_TX_TIMEOUT;
1423 
1424 	sc->num_tx_desc_avail = num_avail;
1425 }
1426 
1427 
1428 /*********************************************************************
1429  *
1430  *  Get a buffer from system mbuf buffer pool.
1431  *
1432  **********************************************************************/
1433 int
1434 ixgb_get_buf(struct ixgb_softc *sc, int i,
1435 	     struct mbuf *nmp)
1436 {
1437 	struct mbuf *mp = nmp;
1438 	struct ixgb_buffer *rx_buffer;
1439 	int             error;
1440 
1441 	if (mp == NULL) {
1442 		MGETHDR(mp, M_DONTWAIT, MT_DATA);
1443 		if (mp == NULL) {
1444 			sc->mbuf_alloc_failed++;
1445 			return (ENOBUFS);
1446 		}
1447 		MCLGET(mp, M_DONTWAIT);
1448 		if ((mp->m_flags & M_EXT) == 0) {
1449 			m_freem(mp);
1450 			sc->mbuf_cluster_failed++;
1451 			return (ENOBUFS);
1452 		}
1453 		mp->m_len = mp->m_pkthdr.len = MCLBYTES;
1454 	} else {
1455 		mp->m_len = mp->m_pkthdr.len = MCLBYTES;
1456 		mp->m_data = mp->m_ext.ext_buf;
1457 		mp->m_next = NULL;
1458 	}
1459 
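	/*
	 * When the frame fits in a single cluster, shift the payload by
	 * ETHER_ALIGN (2 bytes) so the IP header ends up 32-bit aligned.
	 */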
1460 	if (sc->hw.max_frame_size <= (MCLBYTES - ETHER_ALIGN))
1461 		m_adj(mp, ETHER_ALIGN);
1462 
1463 	rx_buffer = &sc->rx_buffer_area[i];
1464 
1465 	/*
1466 	 * Using memory from the mbuf cluster pool, invoke the bus_dma
1467 	 * machinery to arrange the memory mapping.
1468 	 */
1469 	error = bus_dmamap_load_mbuf(sc->rxtag, rx_buffer->map,
1470 	    mp, BUS_DMA_NOWAIT);
1471 	if (error) {
1472 		m_freem(mp);
1473 		return (error);
1474 	}
1475 	rx_buffer->m_head = mp;
1476 	bzero(&sc->rx_desc_base[i], sizeof(sc->rx_desc_base[i]));
1477 	sc->rx_desc_base[i].buff_addr = htole64(rx_buffer->map->dm_segs[0].ds_addr);
1478 	bus_dmamap_sync(sc->rxtag, rx_buffer->map, 0,
1479 	    rx_buffer->map->dm_mapsize, BUS_DMASYNC_PREREAD);
1480 
1481 	return (0);
1482 }
1483 
1484 /*********************************************************************
1485  *
1486  *  Allocate memory for rx_buffer structures. Since we use one
1487  *  rx_buffer per received packet, the maximum number of rx_buffers
1488  *  that we'll need is equal to the number of receive descriptors
1489  *  that we've allocated.
1490  *
1491  **********************************************************************/
1492 int
1493 ixgb_allocate_receive_structures(struct ixgb_softc *sc)
1494 {
1495 	int             i, error;
1496 	struct ixgb_buffer *rx_buffer;
1497 
1498 	if (!(sc->rx_buffer_area = mallocarray(sc->num_rx_desc,
1499 	    sizeof(struct ixgb_buffer), M_DEVBUF, M_NOWAIT | M_ZERO))) {
1500 		printf("%s: Unable to allocate rx_buffer memory\n",
1501 		       sc->sc_dv.dv_xname);
1502 		return (ENOMEM);
1503 	}
1504 
1505 	sc->rxtag = sc->osdep.ixgb_pa.pa_dmat;
1506 
1507 	rx_buffer = sc->rx_buffer_area;
1508 	for (i = 0; i < sc->num_rx_desc; i++, rx_buffer++) {
1509 		error = bus_dmamap_create(sc->rxtag, MCLBYTES, 1,
1510 					  MCLBYTES, 0, BUS_DMA_NOWAIT,
1511 					  &rx_buffer->map);
1512 		if (error != 0) {
1513 			printf("%s: ixgb_allocate_receive_structures: "
1514 			       "bus_dmamap_create failed; error %u\n",
1515 			       sc->sc_dv.dv_xname, error);
1516 			goto fail;
1517 		}
1518 	}
1519 
1520 	for (i = 0; i < sc->num_rx_desc; i++) {
1521 		error = ixgb_get_buf(sc, i, NULL);
1522 		if (error != 0)
1523 			goto fail;
1524 	}
1525 	bus_dmamap_sync(sc->rxdma.dma_tag, sc->rxdma.dma_map, 0,
1526 	    sc->rxdma.dma_map->dm_mapsize,
1527 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1528 
1529 	return (0);
1530 
1531 fail:
1532 	ixgb_free_receive_structures(sc);
1533 	return (error);
1534 }
1535 
1536 /*********************************************************************
1537  *
1538  *  Allocate and initialize receive structures.
1539  *
1540  **********************************************************************/
1541 int
1542 ixgb_setup_receive_structures(struct ixgb_softc *sc)
1543 {
1544 	bzero((void *)sc->rx_desc_base,
1545 	      (sizeof(struct ixgb_rx_desc)) * sc->num_rx_desc);
1546 
1547 	if (ixgb_allocate_receive_structures(sc))
1548 		return (ENOMEM);
1549 
1550 	/* Setup our descriptor pointers */
1551 	sc->next_rx_desc_to_check = 0;
1552 	sc->next_rx_desc_to_use = 0;
1553 	return (0);
1554 }
1555 
1556 /*********************************************************************
1557  *
1558  *  Enable receive unit.
1559  *
1560  **********************************************************************/
1561 void
1562 ixgb_initialize_receive_unit(struct ixgb_softc *sc)
1563 {
1564 	u_int32_t       reg_rctl;
1565 	u_int32_t       reg_rxcsum;
1566 	u_int32_t       reg_rxdctl;
1567 	u_int64_t       bus_addr;
1568 
1569 	/*
1570 	 * Make sure receives are disabled while setting up the descriptor
1571 	 * ring
1572 	 */
1573 	reg_rctl = IXGB_READ_REG(&sc->hw, RCTL);
1574 	IXGB_WRITE_REG(&sc->hw, RCTL, reg_rctl & ~IXGB_RCTL_RXEN);
1575 
1576 	/* Set the Receive Delay Timer Register */
1577 	IXGB_WRITE_REG(&sc->hw, RDTR,
1578 		       sc->rx_int_delay);
1579 
1580 	/* Setup the Base and Length of the Rx Descriptor Ring */
1581 	bus_addr = sc->rxdma.dma_map->dm_segs[0].ds_addr;
1582 	IXGB_WRITE_REG(&sc->hw, RDBAL, (u_int32_t)bus_addr);
1583 	IXGB_WRITE_REG(&sc->hw, RDBAH, (u_int32_t)(bus_addr >> 32));
1584 	IXGB_WRITE_REG(&sc->hw, RDLEN, sc->num_rx_desc *
1585 		       sizeof(struct ixgb_rx_desc));
1586 
1587 	/* Setup the HW Rx Head and Tail Descriptor Pointers */
1588 	IXGB_WRITE_REG(&sc->hw, RDH, 0);
1589 
1590 	IXGB_WRITE_REG(&sc->hw, RDT, sc->num_rx_desc - 1);
1591 
1592 	reg_rxdctl = RXDCTL_WTHRESH_DEFAULT << IXGB_RXDCTL_WTHRESH_SHIFT
1593 		| RXDCTL_HTHRESH_DEFAULT << IXGB_RXDCTL_HTHRESH_SHIFT
1594 		| RXDCTL_PTHRESH_DEFAULT << IXGB_RXDCTL_PTHRESH_SHIFT;
1595 	IXGB_WRITE_REG(&sc->hw, RXDCTL, reg_rxdctl);
1596 
1597 	sc->raidc = 1;
1598 	if (sc->raidc) {
1599 		uint32_t        raidc;
1600 		uint8_t         poll_threshold;
1601 #define IXGB_RAIDC_POLL_DEFAULT 120
1602 
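		/*
		 * The poll threshold computed below works out to roughly
		 * 1/16 of the ring size ((num_rx_desc - 1) >> 4), clamped
		 * to the 6-bit field in RAIDC.
		 */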
1603 		poll_threshold = ((sc->num_rx_desc - 1) >> 3);
1604 		poll_threshold >>= 1;
1605 		poll_threshold &= 0x3F;
1606 		raidc = IXGB_RAIDC_EN | IXGB_RAIDC_RXT_GATE |
1607 			(IXGB_RAIDC_POLL_DEFAULT << IXGB_RAIDC_POLL_SHIFT) |
1608 			(sc->rx_int_delay << IXGB_RAIDC_DELAY_SHIFT) |
1609 			poll_threshold;
1610 		IXGB_WRITE_REG(&sc->hw, RAIDC, raidc);
1611 	}
1612 
1613 	/* Enable Receive Checksum Offload for TCP and UDP ? */
1614 	reg_rxcsum = IXGB_READ_REG(&sc->hw, RXCSUM);
1615 	reg_rxcsum |= IXGB_RXCSUM_TUOFL;
1616 	IXGB_WRITE_REG(&sc->hw, RXCSUM, reg_rxcsum);
1617 
1618 	/* Setup the Receive Control Register */
1619 	reg_rctl = IXGB_READ_REG(&sc->hw, RCTL);
1620 	reg_rctl &= ~(3 << IXGB_RCTL_MO_SHIFT);
1621 	reg_rctl |= IXGB_RCTL_BAM | IXGB_RCTL_RDMTS_1_2 | IXGB_RCTL_SECRC |
1622 		IXGB_RCTL_CFF |
1623 		(sc->hw.mc_filter_type << IXGB_RCTL_MO_SHIFT);
1624 
1625 	switch (sc->rx_buffer_len) {
1626 	default:
1627 	case IXGB_RXBUFFER_2048:
1628 		reg_rctl |= IXGB_RCTL_BSIZE_2048;
1629 		break;
1630 	case IXGB_RXBUFFER_4096:
1631 		reg_rctl |= IXGB_RCTL_BSIZE_4096;
1632 		break;
1633 	case IXGB_RXBUFFER_8192:
1634 		reg_rctl |= IXGB_RCTL_BSIZE_8192;
1635 		break;
1636 	case IXGB_RXBUFFER_16384:
1637 		reg_rctl |= IXGB_RCTL_BSIZE_16384;
1638 		break;
1639 	}
1640 
1641 	reg_rctl |= IXGB_RCTL_RXEN;
1642 
1643 	/* Enable Receives */
1644 	IXGB_WRITE_REG(&sc->hw, RCTL, reg_rctl);
1645 }
1646 
1647 /*********************************************************************
1648  *
1649  *  Free receive related data structures.
1650  *
1651  **********************************************************************/
1652 void
1653 ixgb_free_receive_structures(struct ixgb_softc *sc)
1654 {
1655 	struct ixgb_buffer *rx_buffer;
1656 	int             i;
1657 
1658 	INIT_DEBUGOUT("free_receive_structures: begin");
1659 
1660 	if (sc->rx_buffer_area != NULL) {
1661 		rx_buffer = sc->rx_buffer_area;
1662 		for (i = 0; i < sc->num_rx_desc; i++, rx_buffer++) {
1663 			if (rx_buffer->map != NULL &&
1664 			    rx_buffer->map->dm_nsegs > 0) {
1665 				bus_dmamap_sync(sc->rxtag, rx_buffer->map,
1666 				    0, rx_buffer->map->dm_mapsize,
1667 				    BUS_DMASYNC_POSTREAD);
1668 				bus_dmamap_unload(sc->rxtag,
1669 				    rx_buffer->map);
1670 			}
1671 			if (rx_buffer->m_head != NULL) {
1672 				m_freem(rx_buffer->m_head);
1673 				rx_buffer->m_head = NULL;
1674 			}
1675 			if (rx_buffer->map != NULL) {
1676 				bus_dmamap_destroy(sc->rxtag,
1677 				    rx_buffer->map);
1678 				rx_buffer->map = NULL;
1679 			}
1680 		}
1681 	}
1682 	if (sc->rx_buffer_area != NULL) {
1683 		free(sc->rx_buffer_area, M_DEVBUF, 0);
1684 		sc->rx_buffer_area = NULL;
1685 	}
1686 	if (sc->rxtag != NULL)
1687 		sc->rxtag = NULL;
1688 }
1689 
1690 /*********************************************************************
1691  *
1692  *  This routine executes in interrupt context. It replenishes
1693  *  the mbufs in the descriptor ring and passes data which has been
1694  *  DMA'ed into host memory up to the upper layer.
1695  *
1696  *  We loop at most count times if count is > 0, or until done if
1697  *  count < 0.
1698  *
1699  *********************************************************************/
1700 void
1701 ixgb_rxeof(struct ixgb_softc *sc, int count)
1702 {
1703 	struct ifnet   *ifp;
1704 	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
1705 	struct mbuf    *mp;
1706 	int             eop = 0;
1707 	int             len;
1708 	u_int8_t        accept_frame = 0;
1709 	int             i;
1710 	int             next_to_use = 0;
1711 	int             eop_desc;
1712 
1713 	/* Pointer to the receive descriptor being examined. */
1714 	struct ixgb_rx_desc *current_desc;
1715 
1716 	ifp = &sc->interface_data.ac_if;
1717 	i = sc->next_rx_desc_to_check;
1718 	next_to_use = sc->next_rx_desc_to_use;
1719 	eop_desc = sc->next_rx_desc_to_check;
1720 	current_desc = &sc->rx_desc_base[i];
1721 	bus_dmamap_sync(sc->rxdma.dma_tag, sc->rxdma.dma_map, 0,
1722 	    sc->rxdma.dma_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1723 
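	/* Nothing to do if the hardware has not written back the next descriptor. */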
1724 	if (!((current_desc->status) & IXGB_RX_DESC_STATUS_DD))
1725 		return;
1726 
1727 	while ((current_desc->status & IXGB_RX_DESC_STATUS_DD) &&
1728 		    (count != 0) &&
1729 		    (ifp->if_flags & IFF_RUNNING)) {
1730 
1731 		mp = sc->rx_buffer_area[i].m_head;
1732 		bus_dmamap_sync(sc->rxtag, sc->rx_buffer_area[i].map,
1733 		    0, sc->rx_buffer_area[i].map->dm_mapsize,
1734 		    BUS_DMASYNC_POSTREAD);
1735 		bus_dmamap_unload(sc->rxtag, sc->rx_buffer_area[i].map);
1736 
1737 		accept_frame = 1;
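		/*
		 * count limits complete packets, so only decrement it on a
		 * descriptor that ends a frame (EOP bit set).
		 */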
1738 		if (current_desc->status & IXGB_RX_DESC_STATUS_EOP) {
1739 			count--;
1740 			eop = 1;
1741 		} else {
1742 			eop = 0;
1743 		}
1744 		len = letoh16(current_desc->length);
1745 
1746 		if (current_desc->errors & (IXGB_RX_DESC_ERRORS_CE |
1747 			    IXGB_RX_DESC_ERRORS_SE | IXGB_RX_DESC_ERRORS_P |
1748 					    IXGB_RX_DESC_ERRORS_RXE))
1749 			accept_frame = 0;
1750 		if (accept_frame) {
1751 
1752 			/* Assign correct length to the current fragment */
1753 			mp->m_len = len;
1754 
1755 			if (sc->fmp == NULL) {
1756 				mp->m_pkthdr.len = len;
1757 				sc->fmp = mp;	/* Store the first mbuf */
1758 				sc->lmp = mp;
1759 			} else {
1760 				/* Chain mbufs together */
1761 				mp->m_flags &= ~M_PKTHDR;
1762 				sc->lmp->m_next = mp;
1763 				sc->lmp = sc->lmp->m_next;
1764 				sc->fmp->m_pkthdr.len += len;
1765 			}
1766 
1767 			if (eop) {
1768 				eop_desc = i;
1769 				ixgb_receive_checksum(sc, current_desc, sc->fmp);
1770 
1771 #if NVLAN > 0
1772 				if (current_desc->status & IXGB_RX_DESC_STATUS_VP) {
1773 					sc->fmp->m_pkthdr.ether_vtag =
1774 					    letoh16(current_desc->special);
1775 					sc->fmp->m_flags |= M_VLANTAG;
1776 				}
1777 #endif
1778 
1779 
1780 				ml_enqueue(&ml, sc->fmp);
1781 				sc->fmp = NULL;
1782 				sc->lmp = NULL;
1783 			}
1784 			sc->rx_buffer_area[i].m_head = NULL;
1785 		} else {
1786 			sc->dropped_pkts++;
1787 			m_freem(sc->fmp);
1788 			sc->fmp = NULL;
1789 			sc->lmp = NULL;
1790 		}
1791 
1792 		/* Zero out the receive descriptor's status */
1793 		current_desc->status = 0;
1794 		bus_dmamap_sync(sc->rxdma.dma_tag, sc->rxdma.dma_map, 0,
1795 		    sc->rxdma.dma_map->dm_mapsize,
1796 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1797 
1798 		/* Advance our pointers to the next descriptor */
1799 		if (++i == sc->num_rx_desc) {
1800 			i = 0;
1801 			current_desc = sc->rx_desc_base;
1802 		} else
1803 			current_desc++;
1804 	}
1805 	sc->next_rx_desc_to_check = i;
1806 
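	/*
	 * Point i at the descriptor just before the next one to check;
	 * the workaround below decides how far the ring may be replenished.
	 */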
1807 	if (--i < 0)
1808 		i = (sc->num_rx_desc - 1);
1809 
1810 	/*
1811 	 * 82597EX: Work around a redundant write back in the receive
1812 	 * descriptor ring, which can cause memory corruption. Never
1813 	 * re-submit the most recently written back RX descriptor(s)
1814 	 * to the hardware.
1815 	 *
1816 	 * If the last written back descriptor is the descriptor with
1817 	 * the EOP bit set, hold back only that descriptor.
1818 	 * If the last written back descriptor is not the EOP
1819 	 * descriptor, hold back every descriptor after the last
1820 	 * descriptor with the EOP bit set.
1821 	 */
1822 	if (eop_desc != i) {
1823 		if (++eop_desc == sc->num_rx_desc)
1824 			eop_desc = 0;
1825 		i = eop_desc;
1826 	}
1827 	/* Replenish the descriptors with new mbufs, up to the last descriptor with the EOP bit set */
1828 	while (next_to_use != i) {
1829 		current_desc = &sc->rx_desc_base[next_to_use];
1830 		if ((current_desc->errors & (IXGB_RX_DESC_ERRORS_CE |
1831 			    IXGB_RX_DESC_ERRORS_SE | IXGB_RX_DESC_ERRORS_P |
1832 					     IXGB_RX_DESC_ERRORS_RXE))) {
1833 			mp = sc->rx_buffer_area[next_to_use].m_head;
1834 			ixgb_get_buf(sc, next_to_use, mp);
1835 		} else {
1836 			if (ixgb_get_buf(sc, next_to_use, NULL) == ENOBUFS)
1837 				break;
1838 		}
1839 		/* Advance our pointers to the next descriptor */
1840 		if (++next_to_use == sc->num_rx_desc)
1841 			next_to_use = 0;
1842 	}
1843 	sc->next_rx_desc_to_use = next_to_use;
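	/*
	 * The tail register is written with the descriptor just before the
	 * next one software will use.
	 */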
1844 	if (--next_to_use < 0)
1845 		next_to_use = (sc->num_rx_desc - 1);
1846 	/* Advance the IXGB's Receive Queue #0 "Tail Pointer" */
1847 	IXGB_WRITE_REG(&sc->hw, RDT, next_to_use);
1848 
1849 	if_input(ifp, &ml);
1850 }
1851 
1852 /*********************************************************************
1853  *
1854  *  Verify that the hardware indicated that the checksum is valid.
1855  *  Inform the stack about the status of the checksum so that the
1856  *  stack doesn't spend time verifying it again.
1857  *
1858  *********************************************************************/
1859 void
1860 ixgb_receive_checksum(struct ixgb_softc *sc,
1861 		      struct ixgb_rx_desc *rx_desc,
1862 		      struct mbuf *mp)
1863 {
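	/*
	 * If the hardware did not check the checksums (IXSM set),
	 * report nothing to the stack.
	 */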
1864 	if (rx_desc->status & IXGB_RX_DESC_STATUS_IXSM) {
1865 		mp->m_pkthdr.csum_flags = 0;
1866 		return;
1867 	}
1868 
1869 	if (rx_desc->status & IXGB_RX_DESC_STATUS_IPCS) {
1870 		/* Did it pass? */
1871 		if (!(rx_desc->errors & IXGB_RX_DESC_ERRORS_IPE)) {
1872 			/* IP Checksum Good */
1873 			mp->m_pkthdr.csum_flags = M_IPV4_CSUM_IN_OK;
1874 
1875 		} else {
1876 			mp->m_pkthdr.csum_flags = 0;
1877 		}
1878 	}
1879 	if (rx_desc->status & IXGB_RX_DESC_STATUS_TCPCS) {
1880 		/* Did it pass? */
1881 		if (!(rx_desc->errors & IXGB_RX_DESC_ERRORS_TCPE)) {
1882 			mp->m_pkthdr.csum_flags |=
1883 				M_TCP_CSUM_IN_OK | M_UDP_CSUM_IN_OK;
1884 		}
1885 	}
1886 }
1887 
1888 /*
1889  * This turns on hardware offloading of VLAN
1890  * tag insertion and stripping.
1891  */
1892 void
1893 ixgb_enable_hw_vlans(struct ixgb_softc *sc)
1894 {
1895 	uint32_t ctrl;
1896 
1897 	ctrl = IXGB_READ_REG(&sc->hw, CTRL0);
1898 	ctrl |= IXGB_CTRL0_VME;
1899 	IXGB_WRITE_REG(&sc->hw, CTRL0, ctrl);
1900 }
1901 
1902 void
1903 ixgb_enable_intr(struct ixgb_softc *sc)
1904 {
1905 	uint32_t val;
1906 
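	/*
	 * Unmask the receive, transmit-done, descriptor-threshold,
	 * link-change and receiver-overrun interrupts; Sun-branded
	 * boards also use GPI0.
	 */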
1907 	val = IXGB_INT_RXT0 | IXGB_INT_TXDW | IXGB_INT_RXDMT0 |
1908 	      IXGB_INT_LSC | IXGB_INT_RXO;
1909 	if (sc->hw.subsystem_vendor_id == SUN_SUBVENDOR_ID)
1910 		val |= IXGB_INT_GPI0;
1911 	IXGB_WRITE_REG(&sc->hw, IMS, val);
1912 }
1913 
1914 void
1915 ixgb_disable_intr(struct ixgb_softc *sc)
1916 {
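	/*
	 * Writing all ones to the Interrupt Mask Clear register masks
	 * every interrupt source.
	 */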
1917 	IXGB_WRITE_REG(&sc->hw, IMC, ~0);
1918 }
1919 
1920 void
1921 ixgb_write_pci_cfg(struct ixgb_hw *hw,
1922 		   uint32_t reg,
1923 		   uint16_t *value)
1924 {
1925 	struct pci_attach_args *pa = &((struct ixgb_osdep *)hw->back)->ixgb_pa;
1926 	pci_chipset_tag_t pc = pa->pa_pc;
1927 	/* Should we do read/mask/write...?  16 vs 32 bit!!! */
1928 	pci_conf_write(pc, pa->pa_tag, reg, *value);
1929 }
1930 
1931 /**********************************************************************
1932  *
1933  *  Update the board statistics counters.
1934  *
1935  **********************************************************************/
1936 void
1937 ixgb_update_stats_counters(struct ixgb_softc *sc)
1938 {
1939 	struct ifnet   *ifp;
1940 
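	/*
	 * The hardware counters are read and accumulated into the softc;
	 * 64-bit counters are split across low (L) and high (H) register
	 * pairs.
	 */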
1941 	sc->stats.crcerrs += IXGB_READ_REG(&sc->hw, CRCERRS);
1942 	sc->stats.gprcl += IXGB_READ_REG(&sc->hw, GPRCL);
1943 	sc->stats.gprch += IXGB_READ_REG(&sc->hw, GPRCH);
1944 	sc->stats.gorcl += IXGB_READ_REG(&sc->hw, GORCL);
1945 	sc->stats.gorch += IXGB_READ_REG(&sc->hw, GORCH);
1946 	sc->stats.bprcl += IXGB_READ_REG(&sc->hw, BPRCL);
1947 	sc->stats.bprch += IXGB_READ_REG(&sc->hw, BPRCH);
1948 	sc->stats.mprcl += IXGB_READ_REG(&sc->hw, MPRCL);
1949 	sc->stats.mprch += IXGB_READ_REG(&sc->hw, MPRCH);
1950 	sc->stats.roc += IXGB_READ_REG(&sc->hw, ROC);
1951 
1952 	sc->stats.mpc += IXGB_READ_REG(&sc->hw, MPC);
1953 	sc->stats.dc += IXGB_READ_REG(&sc->hw, DC);
1954 	sc->stats.rlec += IXGB_READ_REG(&sc->hw, RLEC);
1955 	sc->stats.xonrxc += IXGB_READ_REG(&sc->hw, XONRXC);
1956 	sc->stats.xontxc += IXGB_READ_REG(&sc->hw, XONTXC);
1957 	sc->stats.xoffrxc += IXGB_READ_REG(&sc->hw, XOFFRXC);
1958 	sc->stats.xofftxc += IXGB_READ_REG(&sc->hw, XOFFTXC);
1959 	sc->stats.gptcl += IXGB_READ_REG(&sc->hw, GPTCL);
1960 	sc->stats.gptch += IXGB_READ_REG(&sc->hw, GPTCH);
1961 	sc->stats.gotcl += IXGB_READ_REG(&sc->hw, GOTCL);
1962 	sc->stats.gotch += IXGB_READ_REG(&sc->hw, GOTCH);
1963 	sc->stats.ruc += IXGB_READ_REG(&sc->hw, RUC);
1964 	sc->stats.rfc += IXGB_READ_REG(&sc->hw, RFC);
1965 	sc->stats.rjc += IXGB_READ_REG(&sc->hw, RJC);
1966 	sc->stats.torl += IXGB_READ_REG(&sc->hw, TORL);
1967 	sc->stats.torh += IXGB_READ_REG(&sc->hw, TORH);
1968 	sc->stats.totl += IXGB_READ_REG(&sc->hw, TOTL);
1969 	sc->stats.toth += IXGB_READ_REG(&sc->hw, TOTH);
1970 	sc->stats.tprl += IXGB_READ_REG(&sc->hw, TPRL);
1971 	sc->stats.tprh += IXGB_READ_REG(&sc->hw, TPRH);
1972 	sc->stats.tptl += IXGB_READ_REG(&sc->hw, TPTL);
1973 	sc->stats.tpth += IXGB_READ_REG(&sc->hw, TPTH);
1974 	sc->stats.plt64c += IXGB_READ_REG(&sc->hw, PLT64C);
1975 	sc->stats.mptcl += IXGB_READ_REG(&sc->hw, MPTCL);
1976 	sc->stats.mptch += IXGB_READ_REG(&sc->hw, MPTCH);
1977 	sc->stats.bptcl += IXGB_READ_REG(&sc->hw, BPTCL);
1978 	sc->stats.bptch += IXGB_READ_REG(&sc->hw, BPTCH);
1979 
1980 	sc->stats.uprcl += IXGB_READ_REG(&sc->hw, UPRCL);
1981 	sc->stats.uprch += IXGB_READ_REG(&sc->hw, UPRCH);
1982 	sc->stats.vprcl += IXGB_READ_REG(&sc->hw, VPRCL);
1983 	sc->stats.vprch += IXGB_READ_REG(&sc->hw, VPRCH);
1984 	sc->stats.jprcl += IXGB_READ_REG(&sc->hw, JPRCL);
1985 	sc->stats.jprch += IXGB_READ_REG(&sc->hw, JPRCH);
1986 	sc->stats.rnbc += IXGB_READ_REG(&sc->hw, RNBC);
1987 	sc->stats.icbc += IXGB_READ_REG(&sc->hw, ICBC);
1988 	sc->stats.ecbc += IXGB_READ_REG(&sc->hw, ECBC);
1989 	sc->stats.uptcl += IXGB_READ_REG(&sc->hw, UPTCL);
1990 	sc->stats.uptch += IXGB_READ_REG(&sc->hw, UPTCH);
1991 	sc->stats.vptcl += IXGB_READ_REG(&sc->hw, VPTCL);
1992 	sc->stats.vptch += IXGB_READ_REG(&sc->hw, VPTCH);
1993 	sc->stats.jptcl += IXGB_READ_REG(&sc->hw, JPTCL);
1994 	sc->stats.jptch += IXGB_READ_REG(&sc->hw, JPTCH);
1995 	sc->stats.tsctc += IXGB_READ_REG(&sc->hw, TSCTC);
1996 	sc->stats.tsctfc += IXGB_READ_REG(&sc->hw, TSCTFC);
1997 	sc->stats.ibic += IXGB_READ_REG(&sc->hw, IBIC);
1998 	sc->stats.lfc += IXGB_READ_REG(&sc->hw, LFC);
1999 	sc->stats.pfrc += IXGB_READ_REG(&sc->hw, PFRC);
2000 	sc->stats.pftc += IXGB_READ_REG(&sc->hw, PFTC);
2001 	sc->stats.mcfrc += IXGB_READ_REG(&sc->hw, MCFRC);
2002 
2003 	ifp = &sc->interface_data.ac_if;
2004 
2005 	/* Fill out the OS statistics structure */
2006 	ifp->if_collisions = 0;
2007 
2008 	/* Rx Errors */
2009 	ifp->if_ierrors =
2010 		sc->dropped_pkts +
2011 		sc->stats.crcerrs +
2012 		sc->stats.rnbc +
2013 		sc->stats.mpc +
2014 		sc->stats.rlec;
2015 
2016 	/* Tx Errors */
2017 	ifp->if_oerrors =
2018 		sc->watchdog_events;
2019 }
2020 
2021 #ifdef IXGB_DEBUG
2022 /**********************************************************************
2023  *
2024  *  This routine is called only when ixgb_display_debug_stats is enabled.
2025  *  This routine provides a way to take a look at important statistics
2026  *  maintained by the driver and hardware.
2027  *
2028  **********************************************************************/
2029 void
2030 ixgb_print_hw_stats(struct ixgb_softc *sc)
2031 {
2032 	char            buf_speed[100], buf_type[100];
2033 	ixgb_bus_speed  bus_speed;
2034 	ixgb_bus_type   bus_type;
2035 	const char * const unit = sc->sc_dv.dv_xname;
2036 
2037 	bus_speed = sc->hw.bus.speed;
2038 	bus_type = sc->hw.bus.type;
2039 	snprintf(buf_speed, sizeof(buf_speed),
2040 		bus_speed == ixgb_bus_speed_33 ? "33MHz" :
2041 		bus_speed == ixgb_bus_speed_66 ? "66MHz" :
2042 		bus_speed == ixgb_bus_speed_100 ? "100MHz" :
2043 		bus_speed == ixgb_bus_speed_133 ? "133MHz" :
2044 		"UNKNOWN");
2045 	printf("%s: PCI_Bus_Speed = %s\n", unit,
2046 		buf_speed);
2047 
2048 	snprintf(buf_type, sizeof(buf_type),
2049 		bus_type == ixgb_bus_type_pci ? "PCI" :
2050 		bus_type == ixgb_bus_type_pcix ? "PCI-X" :
2051 		"UNKNOWN");
2052 	printf("%s: PCI_Bus_Type = %s\n", unit,
2053 		buf_type);
2054 
2055 	printf("%s: Tx Descriptors not Avail1 = %ld\n", unit,
2056 		sc->no_tx_desc_avail1);
2057 	printf("%s: Tx Descriptors not Avail2 = %ld\n", unit,
2058 		sc->no_tx_desc_avail2);
2059 	printf("%s: Std Mbuf Failed = %ld\n", unit,
2060 		sc->mbuf_alloc_failed);
2061 	printf("%s: Std Cluster Failed = %ld\n", unit,
2062 		sc->mbuf_cluster_failed);
2063 
2064 	printf("%s: Defer count = %lld\n", unit,
2065 		(long long)sc->stats.dc);
2066 	printf("%s: Missed Packets = %lld\n", unit,
2067 		(long long)sc->stats.mpc);
2068 	printf("%s: Receive No Buffers = %lld\n", unit,
2069 		(long long)sc->stats.rnbc);
2070 	printf("%s: Receive length errors = %lld\n", unit,
2071 		(long long)sc->stats.rlec);
2072 	printf("%s: Crc errors = %lld\n", unit,
2073 		(long long)sc->stats.crcerrs);
2074 	printf("%s: Driver dropped packets = %ld\n", unit,
2075 		sc->dropped_pkts);
2076 
2077 	printf("%s: XON Rcvd = %lld\n", unit,
2078 		(long long)sc->stats.xonrxc);
2079 	printf("%s: XON Xmtd = %lld\n", unit,
2080 		(long long)sc->stats.xontxc);
2081 	printf("%s: XOFF Rcvd = %lld\n", unit,
2082 		(long long)sc->stats.xoffrxc);
2083 	printf("%s: XOFF Xmtd = %lld\n", unit,
2084 		(long long)sc->stats.xofftxc);
2085 
2086 	printf("%s: Good Packets Rcvd = %lld\n", unit,
2087 		(long long)sc->stats.gprcl);
2088 	printf("%s: Good Packets Xmtd = %lld\n", unit,
2089 		(long long)sc->stats.gptcl);
2090 
2091 	printf("%s: Jumbo frames recvd = %lld\n", unit,
2092 		(long long)sc->stats.jprcl);
2093 	printf("%s: Jumbo frames Xmtd = %lld\n", unit,
2094 		(long long)sc->stats.jptcl);
2095 }
2096 #endif
2097