xref: /freebsd/sys/dev/ixgbe/if_ixv.c (revision 5b9c547c)
1 /******************************************************************************
2 
3   Copyright (c) 2001-2015, Intel Corporation
4   All rights reserved.
5 
6   Redistribution and use in source and binary forms, with or without
7   modification, are permitted provided that the following conditions are met:
8 
9    1. Redistributions of source code must retain the above copyright notice,
10       this list of conditions and the following disclaimer.
11 
12    2. Redistributions in binary form must reproduce the above copyright
13       notice, this list of conditions and the following disclaimer in the
14       documentation and/or other materials provided with the distribution.
15 
16    3. Neither the name of the Intel Corporation nor the names of its
17       contributors may be used to endorse or promote products derived from
18       this software without specific prior written permission.
19 
20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30   POSSIBILITY OF SUCH DAMAGE.
31 
32 ******************************************************************************/
33 /*$FreeBSD$*/
34 
35 
36 #ifndef IXGBE_STANDALONE_BUILD
37 #include "opt_inet.h"
38 #include "opt_inet6.h"
39 #endif
40 
41 #include "ixgbe.h"
42 
43 /*********************************************************************
44  *  Driver version
45  *********************************************************************/
46 char ixv_driver_version[] = "1.2.5";
47 
48 /*********************************************************************
49  *  PCI Device ID Table
50  *
51  *  Used by probe to select devices to load on
52  *  Last field stores an index into ixv_strings
53  *  Last entry must be all 0s
54  *
55  *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
56  *********************************************************************/
57 
58 static ixgbe_vendor_info_t ixv_vendor_info_array[] =
59 {
60 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, 0, 0, 0},
61 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF, 0, 0, 0},
62 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF, 0, 0, 0},
63 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF, 0, 0, 0},
64 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF, 0, 0, 0},
65 	/* required last entry */
66 	{0, 0, 0, 0, 0}
67 };
68 
69 /*********************************************************************
70  *  Table of branding strings
71  *********************************************************************/
72 
73 static char    *ixv_strings[] = {
74 	"Intel(R) PRO/10GbE Virtual Function Network Driver"
75 };
76 
77 /*********************************************************************
78  *  Function prototypes
79  *********************************************************************/
80 static int      ixv_probe(device_t);
81 static int      ixv_attach(device_t);
82 static int      ixv_detach(device_t);
83 static int      ixv_shutdown(device_t);
84 static int      ixv_ioctl(struct ifnet *, u_long, caddr_t);
85 static void	ixv_init(void *);
86 static void	ixv_init_locked(struct adapter *);
87 static void     ixv_stop(void *);
88 static void     ixv_media_status(struct ifnet *, struct ifmediareq *);
89 static int      ixv_media_change(struct ifnet *);
90 static void     ixv_identify_hardware(struct adapter *);
91 static int      ixv_allocate_pci_resources(struct adapter *);
92 static int      ixv_allocate_msix(struct adapter *);
93 static int	ixv_setup_msix(struct adapter *);
94 static void	ixv_free_pci_resources(struct adapter *);
95 static void     ixv_local_timer(void *);
96 static void     ixv_setup_interface(device_t, struct adapter *);
97 static void     ixv_config_link(struct adapter *);
98 
99 static void     ixv_initialize_transmit_units(struct adapter *);
100 static void     ixv_initialize_receive_units(struct adapter *);
101 
102 static void     ixv_enable_intr(struct adapter *);
103 static void     ixv_disable_intr(struct adapter *);
104 static void     ixv_set_multi(struct adapter *);
105 static void     ixv_update_link_status(struct adapter *);
106 static int	ixv_sysctl_debug(SYSCTL_HANDLER_ARGS);
107 static void	ixv_set_ivar(struct adapter *, u8, u8, s8);
108 static void	ixv_configure_ivars(struct adapter *);
109 static u8 *	ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
110 
111 static void	ixv_setup_vlan_support(struct adapter *);
112 static void	ixv_register_vlan(void *, struct ifnet *, u16);
113 static void	ixv_unregister_vlan(void *, struct ifnet *, u16);
114 
115 static void	ixv_save_stats(struct adapter *);
116 static void	ixv_init_stats(struct adapter *);
117 static void	ixv_update_stats(struct adapter *);
118 static void	ixv_add_stats_sysctls(struct adapter *);
119 
120 /* The MSI/X Interrupt handlers */
121 static void	ixv_msix_que(void *);
122 static void	ixv_msix_mbx(void *);
123 
124 /* Deferred interrupt tasklets */
125 static void	ixv_handle_que(void *, int);
126 static void	ixv_handle_mbx(void *, int);
127 
128 /*********************************************************************
129  *  FreeBSD Device Interface Entry Points
130  *********************************************************************/
131 
132 static device_method_t ixv_methods[] = {
133 	/* Device interface */
134 	DEVMETHOD(device_probe, ixv_probe),
135 	DEVMETHOD(device_attach, ixv_attach),
136 	DEVMETHOD(device_detach, ixv_detach),
137 	DEVMETHOD(device_shutdown, ixv_shutdown),
138 	DEVMETHOD_END
139 };
140 
141 static driver_t ixv_driver = {
142 	"ixv", ixv_methods, sizeof(struct adapter),
143 };
144 
145 devclass_t ixv_devclass;
146 DRIVER_MODULE(ixv, pci, ixv_driver, ixv_devclass, 0, 0);
147 MODULE_DEPEND(ixv, pci, 1, 1, 1);
148 MODULE_DEPEND(ixv, ether, 1, 1, 1);
149 /* XXX depend on 'ix' ? */
150 
151 /*
152 ** TUNEABLE PARAMETERS:
153 */
154 
155 /*
156 ** AIM: Adaptive Interrupt Moderation
157 ** which means that the interrupt rate
158 ** is varied over time based on the
159 ** traffic for that interrupt vector
160 */
161 static int ixv_enable_aim = FALSE;
162 TUNABLE_INT("hw.ixv.enable_aim", &ixv_enable_aim);
163 
164 /* How many packets rxeof tries to clean at a time */
165 static int ixv_rx_process_limit = 256;
166 TUNABLE_INT("hw.ixv.rx_process_limit", &ixv_rx_process_limit);
167 
168 /* How many packets txeof tries to clean at a time */
169 static int ixv_tx_process_limit = 256;
170 TUNABLE_INT("hw.ixv.tx_process_limit", &ixv_tx_process_limit);
171 
172 /* Flow control setting, default to full */
173 static int ixv_flow_control = ixgbe_fc_full;
174 TUNABLE_INT("hw.ixv.flow_control", &ixv_flow_control);
175 
176 /*
177  * Header split: this causes the hardware to DMA
 178  * the header into a separate mbuf from the payload,
 179  * it can be a performance win in some workloads, but
 180  * in others it actually hurts; it's off by default.
181  */
182 static int ixv_header_split = FALSE;
183 TUNABLE_INT("hw.ixv.hdr_split", &ixv_header_split);
184 
185 /*
186 ** Number of TX descriptors per ring,
187 ** setting higher than RX as this seems
188 ** the better performing choice.
189 */
190 static int ixv_txd = DEFAULT_TXD;
191 TUNABLE_INT("hw.ixv.txd", &ixv_txd);
192 
193 /* Number of RX descriptors per ring */
194 static int ixv_rxd = DEFAULT_RXD;
195 TUNABLE_INT("hw.ixv.rxd", &ixv_rxd);
196 
197 /*
198 ** Shadow VFTA table, this is needed because
199 ** the real filter table gets cleared during
200 ** a soft reset and we need to repopulate it.
201 */
202 static u32 ixv_shadow_vfta[IXGBE_VFTA_SIZE];
203 
204 /*********************************************************************
205  *  Device identification routine
206  *
207  *  ixv_probe determines if the driver should be loaded on
208  *  adapter based on PCI vendor/device id of the adapter.
209  *
210  *  return BUS_PROBE_DEFAULT on success, positive on failure
211  *********************************************************************/
212 
213 static int
214 ixv_probe(device_t dev)
215 {
216 	ixgbe_vendor_info_t *ent;
217 
218 	u16	pci_vendor_id = 0;
219 	u16	pci_device_id = 0;
220 	u16	pci_subvendor_id = 0;
221 	u16	pci_subdevice_id = 0;
222 	char	adapter_name[256];
223 
224 
225 	pci_vendor_id = pci_get_vendor(dev);
226 	if (pci_vendor_id != IXGBE_INTEL_VENDOR_ID)
227 		return (ENXIO);
228 
229 	pci_device_id = pci_get_device(dev);
230 	pci_subvendor_id = pci_get_subvendor(dev);
231 	pci_subdevice_id = pci_get_subdevice(dev);
232 
233 	ent = ixv_vendor_info_array;
234 	while (ent->vendor_id != 0) {
235 		if ((pci_vendor_id == ent->vendor_id) &&
236 		    (pci_device_id == ent->device_id) &&
237 
238 		    ((pci_subvendor_id == ent->subvendor_id) ||
239 		     (ent->subvendor_id == 0)) &&
240 
241 		    ((pci_subdevice_id == ent->subdevice_id) ||
242 		     (ent->subdevice_id == 0))) {
243 			sprintf(adapter_name, "%s, Version - %s",
244 				ixv_strings[ent->index],
245 				ixv_driver_version);
246 			device_set_desc_copy(dev, adapter_name);
247 			return (BUS_PROBE_DEFAULT);
248 		}
249 		ent++;
250 	}
251 	return (ENXIO);
252 }
253 
254 /*********************************************************************
255  *  Device initialization routine
256  *
257  *  The attach entry point is called when the driver is being loaded.
258  *  This routine identifies the type of hardware, allocates all resources
259  *  and initializes the hardware.
260  *
261  *  return 0 on success, positive on failure
262  *********************************************************************/
263 
/*
 * Identify the adapter, allocate all driver resources (PCI, queues,
 * MSI-X) and bring the hardware/shared code to an initialized state.
 * Returns 0 on success, a positive errno on failure; on failure all
 * resources acquired so far are released via the err_* labels.
 */
static int
ixv_attach(device_t dev)
{
	struct adapter *adapter;
	struct ixgbe_hw *hw;
	int             error = 0;

	INIT_DEBUGOUT("ixv_attach: begin");

	/* Allocate, clear, and link in our adapter structure */
	adapter = device_get_softc(dev);
	adapter->dev = adapter->osdep.dev = dev;
	hw = &adapter->hw;

	/* Core Lock Init*/
	IXGBE_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));

	/* SYSCTL APIs */
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
			SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
			OID_AUTO, "debug", CTLTYPE_INT | CTLFLAG_RW,
			adapter, 0, ixv_sysctl_debug, "I", "Debug Info");

	/*
	 * NOTE(review): this exposes the file-scope tunable, so the knob
	 * is shared by every ixv instance rather than per-device.
	 */
	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
			SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
			OID_AUTO, "enable_aim", CTLFLAG_RW,
			&ixv_enable_aim, 1, "Interrupt Moderation");

	/* Set up the timer callout; runs under the core mutex */
	callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);

	/* Determine hardware revision */
	ixv_identify_hardware(adapter);

	/* Do base PCI setup - map BAR0 */
	if (ixv_allocate_pci_resources(adapter)) {
		device_printf(dev, "Allocation of PCI resources failed\n");
		error = ENXIO;
		goto err_out;
	}

	/*
	 * Descriptor-count sanity checks: ring byte size must be a
	 * multiple of DBA_ALIGN and the count within MIN/MAX bounds;
	 * otherwise fall back to the compiled-in default.
	 */
	if (((ixv_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
	    ixv_txd < MIN_TXD || ixv_txd > MAX_TXD) {
		device_printf(dev, "TXD config issue, using default!\n");
		adapter->num_tx_desc = DEFAULT_TXD;
	} else
		adapter->num_tx_desc = ixv_txd;

	if (((ixv_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
	    ixv_rxd < MIN_RXD || ixv_rxd > MAX_RXD) {
		device_printf(dev, "RXD config issue, using default!\n");
		adapter->num_rx_desc = DEFAULT_RXD;
	} else
		adapter->num_rx_desc = ixv_rxd;

	/* Allocate our TX/RX Queues */
	if (ixgbe_allocate_queues(adapter)) {
		error = ENOMEM;
		goto err_out;
	}

	/*
	** Initialize the shared code: it's
	** at this point the mac type is set.
	*/
	error = ixgbe_init_shared_code(hw);
	if (error) {
		device_printf(dev,"Shared Code Initialization Failure\n");
		error = EIO;
		goto err_late;
	}

	/* Setup the mailbox (PF <-> VF communication channel) */
	ixgbe_init_mbx_params_vf(hw);

	/* Reset before init; return value intentionally ignored here */
	ixgbe_reset_hw(hw);

	error = ixgbe_init_hw(hw);
	if (error) {
		device_printf(dev,"Hardware Initialization Failure\n");
		error = EIO;
		goto err_late;
	}

	error = ixv_allocate_msix(adapter);
	if (error)
		goto err_late;

	/*
	 * If no mac address was assigned by the PF, make a random
	 * locally-administered unicast one (clear the group bit,
	 * set the locally-administered bit).
	 */
	if (!ixv_check_ether_addr(hw->mac.addr)) {
		u8 addr[ETHER_ADDR_LEN];
		arc4rand(&addr, sizeof(addr), 0);
		addr[0] &= 0xFE;
		addr[0] |= 0x02;
		bcopy(addr, hw->mac.addr, sizeof(addr));
	}

	/* Setup OS specific network interface */
	ixv_setup_interface(dev, adapter);

	/* Do the stats setup */
	ixv_save_stats(adapter);
	ixv_init_stats(adapter);
	ixv_add_stats_sysctls(adapter);

	/* Register for VLAN events */
	adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
	    ixv_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
	adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
	    ixv_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);

	INIT_DEBUGOUT("ixv_attach: end");
	return (0);

err_late:
	ixgbe_free_transmit_structures(adapter);
	ixgbe_free_receive_structures(adapter);
err_out:
	ixv_free_pci_resources(adapter);
	return (error);

}
387 
388 /*********************************************************************
389  *  Device removal routine
390  *
391  *  The detach entry point is called when the driver is being removed.
392  *  This routine stops the adapter and deallocates all the resources
393  *  that were allocated for driver operation.
394  *
395  *  return 0 on success, positive on failure
396  *********************************************************************/
397 
/*
 * Device removal: stop the adapter and release every resource acquired
 * in ixv_attach(), in roughly reverse order. Returns EBUSY if VLANs are
 * still configured on the interface, 0 otherwise.
 */
static int
ixv_detach(device_t dev)
{
	struct adapter *adapter = device_get_softc(dev);
	struct ix_queue *que = adapter->queues;

	INIT_DEBUGOUT("ixv_detach: begin");

	/* Make sure VLANS are not using driver */
	if (adapter->ifp->if_vlantrunk != NULL) {
		device_printf(dev,"Vlan in use, detach first\n");
		return (EBUSY);
	}

	/* Quiesce the hardware under the core lock */
	IXGBE_CORE_LOCK(adapter);
	ixv_stop(adapter);
	IXGBE_CORE_UNLOCK(adapter);

	/* Drain and free the per-queue taskqueues */
	for (int i = 0; i < adapter->num_queues; i++, que++) {
		if (que->tq) {
			struct tx_ring  *txr = que->txr;
			taskqueue_drain(que->tq, &txr->txq_task);
			taskqueue_drain(que->tq, &que->que_task);
			taskqueue_free(que->tq);
		}
	}

	/* Drain the Mailbox(link) queue */
	if (adapter->tq) {
		taskqueue_drain(adapter->tq, &adapter->link_task);
		taskqueue_free(adapter->tq);
	}

	/* Unregister VLAN events */
	if (adapter->vlan_attach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
	if (adapter->vlan_detach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);

	/* Detach from the network stack, then free OS/PCI resources */
	ether_ifdetach(adapter->ifp);
	callout_drain(&adapter->timer);
	ixv_free_pci_resources(adapter);
	bus_generic_detach(dev);
	if_free(adapter->ifp);

	ixgbe_free_transmit_structures(adapter);
	ixgbe_free_receive_structures(adapter);

	IXGBE_CORE_LOCK_DESTROY(adapter);
	return (0);
}
449 
450 /*********************************************************************
451  *
452  *  Shutdown entry point
453  *
454  **********************************************************************/
455 static int
456 ixv_shutdown(device_t dev)
457 {
458 	struct adapter *adapter = device_get_softc(dev);
459 	IXGBE_CORE_LOCK(adapter);
460 	ixv_stop(adapter);
461 	IXGBE_CORE_UNLOCK(adapter);
462 	return (0);
463 }
464 
465 
466 /*********************************************************************
467  *  Ioctl entry point
468  *
469  *  ixv_ioctl is called when the user wants to configure the
470  *  interface.
471  *
472  *  return 0 on success, positive on failure
473  **********************************************************************/
474 
/*
 * Interface ioctl handler: called when the user configures the
 * interface (address, MTU, flags, multicast, media, capabilities).
 * Unhandled requests are passed to ether_ioctl().
 * Returns 0 on success, positive errno on failure.
 */
static int
ixv_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
{
	struct adapter	*adapter = ifp->if_softc;
	struct ifreq	*ifr = (struct ifreq *) data;
#if defined(INET) || defined(INET6)
	struct ifaddr	*ifa = (struct ifaddr *) data;
	bool		avoid_reset = FALSE;
#endif
	int             error = 0;

	switch (command) {

	/*
	 * NOTE(review): the "break" for this case sits inside the
	 * #if defined(INET) || defined(INET6) block, so with neither
	 * option defined SIOCSIFADDR falls through into SIOCSIFMTU --
	 * confirm this is intended for INET-less kernels.
	 */
	case SIOCSIFADDR:
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			avoid_reset = TRUE;
#endif
#ifdef INET6
		if (ifa->ifa_addr->sa_family == AF_INET6)
			avoid_reset = TRUE;
#endif
#if defined(INET) || defined(INET6)
		/*
		** Calling init results in link renegotiation,
		** so we avoid doing it when possible.
		*/
		if (avoid_reset) {
			ifp->if_flags |= IFF_UP;
			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
				ixv_init(adapter);
			if (!(ifp->if_flags & IFF_NOARP))
				arp_ifinit(ifp, ifa);
		} else
			error = ether_ioctl(ifp, command, data);
		break;
#endif
	case SIOCSIFMTU:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
		if (ifr->ifr_mtu > IXGBE_MAX_FRAME_SIZE - ETHER_HDR_LEN) {
			error = EINVAL;
		} else {
			/* New MTU takes effect via a full re-init */
			IXGBE_CORE_LOCK(adapter);
			ifp->if_mtu = ifr->ifr_mtu;
			adapter->max_frame_size =
				ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
			ixv_init_locked(adapter);
			IXGBE_CORE_UNLOCK(adapter);
		}
		break;
	case SIOCSIFFLAGS:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
		IXGBE_CORE_LOCK(adapter);
		/* Start or stop the interface to match IFF_UP */
		if (ifp->if_flags & IFF_UP) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
				ixv_init_locked(adapter);
		} else
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				ixv_stop(adapter);
		adapter->if_flags = ifp->if_flags;
		IXGBE_CORE_UNLOCK(adapter);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			/* Reprogram the filter with interrupts masked */
			IXGBE_CORE_LOCK(adapter);
			ixv_disable_intr(adapter);
			ixv_set_multi(adapter);
			ixv_enable_intr(adapter);
			IXGBE_CORE_UNLOCK(adapter);
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
		error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
		break;
	case SIOCSIFCAP:
	{
		/* Toggle each capability bit the caller changed */
		int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
		if (mask & IFCAP_HWCSUM)
			ifp->if_capenable ^= IFCAP_HWCSUM;
		if (mask & IFCAP_TSO4)
			ifp->if_capenable ^= IFCAP_TSO4;
		if (mask & IFCAP_LRO)
			ifp->if_capenable ^= IFCAP_LRO;
		if (mask & IFCAP_VLAN_HWTAGGING)
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
		/* Re-init so the hardware picks up the new offload set */
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			IXGBE_CORE_LOCK(adapter);
			ixv_init_locked(adapter);
			IXGBE_CORE_UNLOCK(adapter);
		}
		VLAN_CAPABILITIES(ifp);
		break;
	}

	default:
		IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)\n", (int)command);
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}
582 
583 /*********************************************************************
584  *  Init entry point
585  *
586  *  This routine is used in two ways. It is used by the stack as
587  *  init entry point in network interface structure. It is also used
588  *  by the driver as a hw/sw initialization routine to get to a
589  *  consistent state.
590  *
591  *  return 0 on success, positive on failure
592  **********************************************************************/
593 #define IXGBE_MHADD_MFS_SHIFT 16
594 
/*
 * Bring hardware and software to a consistent running state: reset the
 * adapter, program the MAC address, set up TX/RX rings, offloads, VLANs,
 * MSI-X routing and interrupts. Caller must hold the core mutex.
 * On ring-setup failure the adapter is stopped and the function returns.
 */
static void
ixv_init_locked(struct adapter *adapter)
{
	struct ifnet	*ifp = adapter->ifp;
	device_t 	dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;
	u32		mhadd, gpie;

	INIT_DEBUGOUT("ixv_init: begin");
	mtx_assert(&adapter->core_mtx, MA_OWNED);
	hw->adapter_stopped = FALSE;
	ixgbe_stop_adapter(hw);
        callout_stop(&adapter->timer);

        /* reprogram the RAR[0] in case user changed it. */
        ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);

	/* Get the latest mac address, User can use a LAA */
	bcopy(IF_LLADDR(adapter->ifp), hw->mac.addr,
	     IXGBE_ETH_LENGTH_OF_ADDRESS);
        ixgbe_set_rar(hw, 0, hw->mac.addr, 0, 1);
	hw->addr_ctrl.rar_used_count = 1;

	/* Prepare transmit descriptors and buffers */
	if (ixgbe_setup_transmit_structures(adapter)) {
		device_printf(dev,"Could not setup transmit structures\n");
		ixv_stop(adapter);
		return;
	}

	/* Reset again now that rings exist, then program TX units */
	ixgbe_reset_hw(hw);
	ixv_initialize_transmit_units(adapter);

	/* Setup Multicast table */
	ixv_set_multi(adapter);

	/*
	** Determine the correct mbuf pool
	** for doing jumbo/headersplit
	*/
	if (ifp->if_mtu > ETHERMTU)
		adapter->rx_mbuf_sz = MJUMPAGESIZE;
	else
		adapter->rx_mbuf_sz = MCLBYTES;

	/* Prepare receive descriptors and buffers */
	if (ixgbe_setup_receive_structures(adapter)) {
		device_printf(dev,"Could not setup receive structures\n");
		ixv_stop(adapter);
		return;
	}

	/* Configure RX settings */
	ixv_initialize_receive_units(adapter);

	/* Enable Enhanced MSIX mode */
	gpie = IXGBE_READ_REG(&adapter->hw, IXGBE_GPIE);
	gpie |= IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_EIAME;
	gpie |= IXGBE_GPIE_PBA_SUPPORT | IXGBE_GPIE_OCD;
        IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);

	/* Set the various hardware offload abilities */
	ifp->if_hwassist = 0;
	if (ifp->if_capenable & IFCAP_TSO4)
		ifp->if_hwassist |= CSUM_TSO;
	if (ifp->if_capenable & IFCAP_TXCSUM) {
		ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
#if __FreeBSD_version >= 800000
		ifp->if_hwassist |= CSUM_SCTP;
#endif
	}

	/* Set MTU size (MHADD only needs updating for jumbo frames) */
	if (ifp->if_mtu > ETHERMTU) {
		mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
		mhadd &= ~IXGBE_MHADD_MFS_MASK;
		mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
	}

	/* Set up VLAN offload and filter */
	ixv_setup_vlan_support(adapter);

	/* Start the one-second watchdog/stats timer */
	callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);

	/* Set up MSI/X routing */
	ixv_configure_ivars(adapter);

	/* Set up auto-mask */
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_EICS_RTX_QUEUE);

        /* Set moderation on the Link interrupt */
        IXGBE_WRITE_REG(hw, IXGBE_VTEITR(adapter->vector), IXGBE_LINK_ITR);

	/* Stats init */
	ixv_init_stats(adapter);

	/* Config/Enable Link */
	ixv_config_link(adapter);

	/* And now turn on interrupts */
	ixv_enable_intr(adapter);

	/* Now inform the stack we're ready */
	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	return;
}
704 
/*
 * if_init entry point: acquire the core lock and run the locked
 * initialization routine.
 */
static void
ixv_init(void *arg)
{
	struct adapter *adapter = arg;

	IXGBE_CORE_LOCK(adapter);
	ixv_init_locked(adapter);
	IXGBE_CORE_UNLOCK(adapter);
}
715 
716 
717 /*
718 **
719 ** MSIX Interrupt Handlers and Tasklets
720 **
721 */
722 
723 static inline void
724 ixv_enable_queue(struct adapter *adapter, u32 vector)
725 {
726 	struct ixgbe_hw *hw = &adapter->hw;
727 	u32	queue = 1 << vector;
728 	u32	mask;
729 
730 	mask = (IXGBE_EIMS_RTX_QUEUE & queue);
731 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
732 }
733 
734 static inline void
735 ixv_disable_queue(struct adapter *adapter, u32 vector)
736 {
737 	struct ixgbe_hw *hw = &adapter->hw;
738 	u64	queue = (u64)(1 << vector);
739 	u32	mask;
740 
741 	mask = (IXGBE_EIMS_RTX_QUEUE & queue);
742 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask);
743 }
744 
/*
 * Software-trigger interrupts for the queues in the given bitmap by
 * writing to VTEICS. The u64 bitmap is intentionally truncated by the
 * 32-bit RTX queue mask.
 */
static inline void
ixv_rearm_queues(struct adapter *adapter, u64 queues)
{
	u32 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEICS, mask);
}
751 
752 
/*
 * Deferred (taskqueue) per-queue handler: clean RX/TX, restart the
 * transmit path if the stack has queued packets, and either reschedule
 * itself while RX work remains or re-enable the queue interrupt.
 */
static void
ixv_handle_que(void *context, int pending)
{
	struct ix_queue *que = context;
	struct adapter  *adapter = que->adapter;
	struct tx_ring	*txr = que->txr;
	struct ifnet    *ifp = adapter->ifp;
	bool		more;

	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		more = ixgbe_rxeof(que);
		IXGBE_TX_LOCK(txr);
		ixgbe_txeof(txr);
#if __FreeBSD_version >= 800000
		if (!drbr_empty(ifp, txr->br))
			ixgbe_mq_start_locked(ifp, txr);
#else
		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
			ixgbe_start_locked(txr, ifp);
#endif
		IXGBE_TX_UNLOCK(txr);
		/* More RX work pending: run again, leave intr masked */
		if (more) {
			taskqueue_enqueue(que->tq, &que->que_task);
			return;
		}
	}

	/* Reenable this interrupt */
	ixv_enable_queue(adapter, que->msix);
	return;
}
784 
785 /*********************************************************************
786  *
787  *  MSI Queue Interrupt Service routine
788  *
789  **********************************************************************/
790 void
791 ixv_msix_que(void *arg)
792 {
793 	struct ix_queue	*que = arg;
794 	struct adapter  *adapter = que->adapter;
795 	struct ifnet    *ifp = adapter->ifp;
796 	struct tx_ring	*txr = que->txr;
797 	struct rx_ring	*rxr = que->rxr;
798 	bool		more;
799 	u32		newitr = 0;
800 
801 	ixv_disable_queue(adapter, que->msix);
802 	++que->irqs;
803 
804 	more = ixgbe_rxeof(que);
805 
806 	IXGBE_TX_LOCK(txr);
807 	ixgbe_txeof(txr);
808 	/*
809 	** Make certain that if the stack
810 	** has anything queued the task gets
811 	** scheduled to handle it.
812 	*/
813 #ifdef IXGBE_LEGACY_TX
814 	if (!IFQ_DRV_IS_EMPTY(&adapter->ifp->if_snd))
815 		ixgbe_start_locked(txr, ifp);
816 #else
817 	if (!drbr_empty(adapter->ifp, txr->br))
818 		ixgbe_mq_start_locked(ifp, txr);
819 #endif
820 	IXGBE_TX_UNLOCK(txr);
821 
822 	/* Do AIM now? */
823 
824 	if (ixv_enable_aim == FALSE)
825 		goto no_calc;
826 	/*
827 	** Do Adaptive Interrupt Moderation:
828         **  - Write out last calculated setting
829 	**  - Calculate based on average size over
830 	**    the last interval.
831 	*/
832         if (que->eitr_setting)
833                 IXGBE_WRITE_REG(&adapter->hw,
834                     IXGBE_VTEITR(que->msix),
835 		    que->eitr_setting);
836 
837         que->eitr_setting = 0;
838 
839         /* Idle, do nothing */
840         if ((txr->bytes == 0) && (rxr->bytes == 0))
841                 goto no_calc;
842 
843 	if ((txr->bytes) && (txr->packets))
844                	newitr = txr->bytes/txr->packets;
845 	if ((rxr->bytes) && (rxr->packets))
846 		newitr = max(newitr,
847 		    (rxr->bytes / rxr->packets));
848 	newitr += 24; /* account for hardware frame, crc */
849 
850 	/* set an upper boundary */
851 	newitr = min(newitr, 3000);
852 
853 	/* Be nice to the mid range */
854 	if ((newitr > 300) && (newitr < 1200))
855 		newitr = (newitr / 3);
856 	else
857 		newitr = (newitr / 2);
858 
859 	newitr |= newitr << 16;
860 
861         /* save for next interrupt */
862         que->eitr_setting = newitr;
863 
864         /* Reset state */
865         txr->bytes = 0;
866         txr->packets = 0;
867         rxr->bytes = 0;
868         rxr->packets = 0;
869 
870 no_calc:
871 	if (more)
872 		taskqueue_enqueue(que->tq, &que->que_task);
873 	else /* Reenable this interrupt */
874 		ixv_enable_queue(adapter, que->msix);
875 	return;
876 }
877 
/*
 * MSI-X mailbox/link interrupt handler: read and clear the interrupt
 * cause, defer link-status-change processing to the link taskqueue,
 * and re-enable the "other" interrupt source.
 */
static void
ixv_msix_mbx(void *arg)
{
	struct adapter	*adapter = arg;
	struct ixgbe_hw *hw = &adapter->hw;
	u32		reg;

	++adapter->vector_irq;

	/* First get the cause */
	reg = IXGBE_READ_REG(hw, IXGBE_VTEICS);
	/* Clear interrupt with write */
	IXGBE_WRITE_REG(hw, IXGBE_VTEICR, reg);

	/* Link status change */
	if (reg & IXGBE_EICR_LSC)
		taskqueue_enqueue(adapter->tq, &adapter->link_task);

	/* Re-arm the non-queue ("other") interrupt */
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_EIMS_OTHER);
	return;
}
899 
900 /*********************************************************************
901  *
902  *  Media Ioctl callback
903  *
904  *  This routine is called whenever the user queries the status of
905  *  the interface using ifconfig.
906  *
907  **********************************************************************/
/*
 * Media-status callback (ifconfig query): refresh the link state and
 * report validity, active flag and speed/duplex to the caller.
 */
static void
ixv_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
{
	struct adapter *adapter = ifp->if_softc;

	INIT_DEBUGOUT("ixv_media_status: begin");
	IXGBE_CORE_LOCK(adapter);
	ixv_update_link_status(adapter);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	/* No link: report valid-but-inactive and bail */
	if (!adapter->link_active) {
		IXGBE_CORE_UNLOCK(adapter);
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;

	/*
	 * NOTE(review): the 10Gb case reports only IFM_FDX with no
	 * specific media subtype -- confirm whether a 10G subtype
	 * should be set here.
	 */
	switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_FDX;
			break;
	}

	IXGBE_CORE_UNLOCK(adapter);

	return;
}
940 
941 /*********************************************************************
942  *
943  *  Media Ioctl callback
944  *
945  *  This routine is called when the user changes speed/duplex using
 946  *  media/mediaopt option with ifconfig.
947  *
948  **********************************************************************/
949 static int
950 ixv_media_change(struct ifnet * ifp)
951 {
952 	struct adapter *adapter = ifp->if_softc;
953 	struct ifmedia *ifm = &adapter->media;
954 
955 	INIT_DEBUGOUT("ixv_media_change: begin");
956 
957 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
958 		return (EINVAL);
959 
960         switch (IFM_SUBTYPE(ifm->ifm_media)) {
961         case IFM_AUTO:
962                 break;
963         default:
964                 device_printf(adapter->dev, "Only auto media type\n");
965 		return (EINVAL);
966         }
967 
968 	return (0);
969 }
970 
971 
972 /*********************************************************************
973  *  Multicast Update
974  *
975  *  This routine is called whenever multicast address list is updated.
976  *
977  **********************************************************************/
978 #define IXGBE_RAR_ENTRIES 16
979 
980 static void
981 ixv_set_multi(struct adapter *adapter)
982 {
983 	u8	mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS];
984 	u8	*update_ptr;
985 	struct	ifmultiaddr *ifma;
986 	int	mcnt = 0;
987 	struct ifnet   *ifp = adapter->ifp;
988 
989 	IOCTL_DEBUGOUT("ixv_set_multi: begin");
990 
991 #if __FreeBSD_version < 800000
992 	IF_ADDR_LOCK(ifp);
993 #else
994 	if_maddr_rlock(ifp);
995 #endif
996 	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
997 		if (ifma->ifma_addr->sa_family != AF_LINK)
998 			continue;
999 		bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
1000 		    &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
1001 		    IXGBE_ETH_LENGTH_OF_ADDRESS);
1002 		mcnt++;
1003 	}
1004 #if __FreeBSD_version < 800000
1005 	IF_ADDR_UNLOCK(ifp);
1006 #else
1007 	if_maddr_runlock(ifp);
1008 #endif
1009 
1010 	update_ptr = mta;
1011 
1012 	ixgbe_update_mc_addr_list(&adapter->hw,
1013 	    update_ptr, mcnt, ixv_mc_array_itr, TRUE);
1014 
1015 	return;
1016 }
1017 
1018 /*
1019  * This is an iterator function now needed by the multicast
1020  * shared code. It simply feeds the shared code routine the
1021  * addresses in the array of ixv_set_multi() one by one.
1022  */
1023 static u8 *
1024 ixv_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
1025 {
1026 	u8 *addr = *update_ptr;
1027 	u8 *newptr;
1028 	*vmdq = 0;
1029 
1030 	newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
1031 	*update_ptr = newptr;
1032 	return addr;
1033 }
1034 
1035 /*********************************************************************
1036  *  Timer routine
1037  *
1038  *  This routine checks for link status,updates statistics,
1039  *  and runs the watchdog check.
1040  *
1041  **********************************************************************/
1042 
/*
 * Periodic (1 Hz) housekeeping: refresh link state and statistics,
 * then scan the TX queues for progress.  If every queue looks hung,
 * the watchdog fires and the interface is reinitialized.
 * Must be called with the core mutex held (asserted below).
 */
static void
ixv_local_timer(void *arg)
{
	struct adapter	*adapter = arg;
	device_t	dev = adapter->dev;
	struct ix_queue	*que = adapter->queues;
	u64		queues = 0;	/* bitmask of queues with pending work */
	int		hung = 0;	/* count of queues declared hung */

	mtx_assert(&adapter->core_mtx, MA_OWNED);

	ixv_update_link_status(adapter);

	/* Stats Update */
	ixv_update_stats(adapter);

	/*
	** Check the TX queues status
	**      - mark hung queues so we don't schedule on them
	**      - watchdog only if all queues show hung
	*/
	for (int i = 0; i < adapter->num_queues; i++, que++) {
		/* Keep track of queues with work for soft irq */
		if (que->txr->busy)
			queues |= ((u64)1 << que->me);
		/*
		** Each time txeof runs without cleaning, but there
		** are uncleaned descriptors it increments busy. If
		** we get to the MAX we declare it hung.
		** NOTE(review): que->busy is advanced outside this
		** function (presumably by the TX cleanup/interrupt
		** path) — confirm against ixv_msix_que/txeof.
		*/
		if (que->busy == IXGBE_QUEUE_HUNG) {
			++hung;
			/* Mark the queue as inactive */
			adapter->active_queues &= ~((u64)1 << que->me);
			continue;
		} else {
			/* Check if we've come back from hung */
			if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
                                adapter->active_queues |= ((u64)1 << que->me);
		}
		if (que->busy >= IXGBE_MAX_TX_BUSY) {
			device_printf(dev,"Warning queue %d "
			    "appears to be hung!\n", i);
			que->txr->busy = IXGBE_QUEUE_HUNG;
			++hung;
		}

	}

	/* Only truly watchdog if all queues show hung */
	if (hung == adapter->num_queues)
		goto watchdog;
	else if (queues != 0) { /* Force an IRQ on queues with work */
		ixv_rearm_queues(adapter, queues);
	}

	/* Re-arm ourselves for one second from now */
	callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
	return;

watchdog:
	/* All queues hung: mark down, count the event, and re-init */
	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
	adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	adapter->watchdog_events++;
	ixv_init_locked(adapter);
}
1108 
1109 /*
1110 ** Note: this routine updates the OS on the link state
1111 **	the real check of the hardware only happens with
1112 **	a link interrupt.
1113 */
1114 static void
1115 ixv_update_link_status(struct adapter *adapter)
1116 {
1117 	struct ifnet	*ifp = adapter->ifp;
1118 	device_t dev = adapter->dev;
1119 
1120 	if (adapter->link_up){
1121 		if (adapter->link_active == FALSE) {
1122 			if (bootverbose)
1123 				device_printf(dev,"Link is up %d Gbps %s \n",
1124 				    ((adapter->link_speed == 128)? 10:1),
1125 				    "Full Duplex");
1126 			adapter->link_active = TRUE;
1127 			if_link_state_change(ifp, LINK_STATE_UP);
1128 		}
1129 	} else { /* Link down */
1130 		if (adapter->link_active == TRUE) {
1131 			if (bootverbose)
1132 				device_printf(dev,"Link is Down\n");
1133 			if_link_state_change(ifp, LINK_STATE_DOWN);
1134 			adapter->link_active = FALSE;
1135 		}
1136 	}
1137 
1138 	return;
1139 }
1140 
1141 
1142 /*********************************************************************
1143  *
1144  *  This routine disables all traffic on the adapter by issuing a
1145  *  global reset on the MAC and deallocates TX/RX buffers.
1146  *
1147  **********************************************************************/
1148 
/*
 * Quiesce the interface: mask interrupts, mark the ifnet down,
 * reset and stop the MAC, and cancel the periodic timer.
 * Must be called with the core mutex held (asserted below).
 */
static void
ixv_stop(void *arg)
{
	struct ifnet   *ifp;
	struct adapter *adapter = arg;
	struct ixgbe_hw *hw = &adapter->hw;
	ifp = adapter->ifp;

	mtx_assert(&adapter->core_mtx, MA_OWNED);

	INIT_DEBUGOUT("ixv_stop: begin\n");
	ixv_disable_intr(adapter);

	/* Tell the stack that the interface is no longer active */
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	ixgbe_reset_hw(hw);
	/*
	** NOTE(review): adapter_stopped is cleared here so that the
	** following ixgbe_stop_adapter() call runs its full sequence
	** (it presumably short-circuits when the flag is set) —
	** confirm against the shared-code implementation.
	*/
	adapter->hw.adapter_stopped = FALSE;
	ixgbe_stop_adapter(hw);
	callout_stop(&adapter->timer);

	/* reprogram the RAR[0] in case user changed it. */
	ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);

	return;
}
1175 
1176 
1177 /*********************************************************************
1178  *
1179  *  Determine hardware revision.
1180  *
1181  **********************************************************************/
1182 static void
1183 ixv_identify_hardware(struct adapter *adapter)
1184 {
1185 	device_t        dev = adapter->dev;
1186 	struct ixgbe_hw *hw = &adapter->hw;
1187 
1188 	/*
1189 	** Make sure BUSMASTER is set, on a VM under
1190 	** KVM it may not be and will break things.
1191 	*/
1192 	pci_enable_busmaster(dev);
1193 
1194 	/* Save off the information about this board */
1195 	hw->vendor_id = pci_get_vendor(dev);
1196 	hw->device_id = pci_get_device(dev);
1197 	hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
1198 	hw->subsystem_vendor_id =
1199 	    pci_read_config(dev, PCIR_SUBVEND_0, 2);
1200 	hw->subsystem_device_id =
1201 	    pci_read_config(dev, PCIR_SUBDEV_0, 2);
1202 
1203 	/* We need this to determine device-specific things */
1204 	ixgbe_set_mac_type(hw);
1205 
1206 	/* Set the right number of segments */
1207 	adapter->num_segs = IXGBE_82599_SCATTER;
1208 
1209 	return;
1210 }
1211 
1212 /*********************************************************************
1213  *
1214  *  Setup MSIX Interrupt resources and handlers
1215  *
1216  **********************************************************************/
1217 static int
1218 ixv_allocate_msix(struct adapter *adapter)
1219 {
1220 	device_t	dev = adapter->dev;
1221 	struct 		ix_queue *que = adapter->queues;
1222 	struct		tx_ring *txr = adapter->tx_rings;
1223 	int 		error, rid, vector = 0;
1224 
1225 	for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
1226 		rid = vector + 1;
1227 		que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
1228 		    RF_SHAREABLE | RF_ACTIVE);
1229 		if (que->res == NULL) {
1230 			device_printf(dev,"Unable to allocate"
1231 		    	    " bus resource: que interrupt [%d]\n", vector);
1232 			return (ENXIO);
1233 		}
1234 		/* Set the handler function */
1235 		error = bus_setup_intr(dev, que->res,
1236 		    INTR_TYPE_NET | INTR_MPSAFE, NULL,
1237 		    ixv_msix_que, que, &que->tag);
1238 		if (error) {
1239 			que->res = NULL;
1240 			device_printf(dev, "Failed to register QUE handler");
1241 			return (error);
1242 		}
1243 #if __FreeBSD_version >= 800504
1244 		bus_describe_intr(dev, que->res, que->tag, "que %d", i);
1245 #endif
1246 		que->msix = vector;
1247         	adapter->active_queues |= (u64)(1 << que->msix);
1248 		/*
1249 		** Bind the msix vector, and thus the
1250 		** ring to the corresponding cpu.
1251 		*/
1252 		if (adapter->num_queues > 1)
1253 			bus_bind_intr(dev, que->res, i);
1254 		TASK_INIT(&txr->txq_task, 0, ixgbe_deferred_mq_start, txr);
1255 		TASK_INIT(&que->que_task, 0, ixv_handle_que, que);
1256 		que->tq = taskqueue_create_fast("ixv_que", M_NOWAIT,
1257 		    taskqueue_thread_enqueue, &que->tq);
1258 		taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que",
1259 		    device_get_nameunit(adapter->dev));
1260 	}
1261 
1262 	/* and Mailbox */
1263 	rid = vector + 1;
1264 	adapter->res = bus_alloc_resource_any(dev,
1265     	    SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
1266 	if (!adapter->res) {
1267 		device_printf(dev,"Unable to allocate"
1268     	    " bus resource: MBX interrupt [%d]\n", rid);
1269 		return (ENXIO);
1270 	}
1271 	/* Set the mbx handler function */
1272 	error = bus_setup_intr(dev, adapter->res,
1273 	    INTR_TYPE_NET | INTR_MPSAFE, NULL,
1274 	    ixv_msix_mbx, adapter, &adapter->tag);
1275 	if (error) {
1276 		adapter->res = NULL;
1277 		device_printf(dev, "Failed to register LINK handler");
1278 		return (error);
1279 	}
1280 #if __FreeBSD_version >= 800504
1281 	bus_describe_intr(dev, adapter->res, adapter->tag, "mbx");
1282 #endif
1283 	adapter->vector = vector;
1284 	/* Tasklets for Mailbox */
1285 	TASK_INIT(&adapter->link_task, 0, ixv_handle_mbx, adapter);
1286 	adapter->tq = taskqueue_create_fast("ixv_mbx", M_NOWAIT,
1287 	    taskqueue_thread_enqueue, &adapter->tq);
1288 	taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s mbxq",
1289 	    device_get_nameunit(adapter->dev));
1290 	/*
1291 	** Due to a broken design QEMU will fail to properly
1292 	** enable the guest for MSIX unless the vectors in
1293 	** the table are all set up, so we must rewrite the
1294 	** ENABLE in the MSIX control register again at this
1295 	** point to cause it to successfully initialize us.
1296 	*/
1297 	if (adapter->hw.mac.type == ixgbe_mac_82599_vf) {
1298 		int msix_ctrl;
1299 		pci_find_cap(dev, PCIY_MSIX, &rid);
1300 		rid += PCIR_MSIX_CTRL;
1301 		msix_ctrl = pci_read_config(dev, rid, 2);
1302 		msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
1303 		pci_write_config(dev, rid, msix_ctrl, 2);
1304 	}
1305 
1306 	return (0);
1307 }
1308 
1309 /*
1310  * Setup MSIX resources, note that the VF
1311  * device MUST use MSIX, there is no fallback.
1312  */
1313 static int
1314 ixv_setup_msix(struct adapter *adapter)
1315 {
1316 	device_t dev = adapter->dev;
1317 	int rid, want;
1318 
1319 
1320 	/* First try MSI/X */
1321 	rid = PCIR_BAR(3);
1322 	adapter->msix_mem = bus_alloc_resource_any(dev,
1323 	    SYS_RES_MEMORY, &rid, RF_ACTIVE);
1324        	if (adapter->msix_mem == NULL) {
1325 		device_printf(adapter->dev,
1326 		    "Unable to map MSIX table \n");
1327 		goto out;
1328 	}
1329 
1330 	/*
1331 	** Want two vectors: one for a queue,
1332 	** plus an additional for mailbox.
1333 	*/
1334 	want = 2;
1335 	if ((pci_alloc_msix(dev, &want) == 0) && (want == 2)) {
1336                	device_printf(adapter->dev,
1337 		    "Using MSIX interrupts with %d vectors\n", want);
1338 		return (want);
1339 	}
1340 	/* Release in case alloc was insufficient */
1341 	pci_release_msi(dev);
1342 out:
1343        	if (adapter->msix_mem != NULL) {
1344 		bus_release_resource(dev, SYS_RES_MEMORY,
1345 		    rid, adapter->msix_mem);
1346 		adapter->msix_mem = NULL;
1347 	}
1348 	device_printf(adapter->dev,"MSIX config error\n");
1349 	return (ENXIO);
1350 }
1351 
1352 
1353 static int
1354 ixv_allocate_pci_resources(struct adapter *adapter)
1355 {
1356 	int             rid;
1357 	device_t        dev = adapter->dev;
1358 
1359 	rid = PCIR_BAR(0);
1360 	adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
1361 	    &rid, RF_ACTIVE);
1362 
1363 	if (!(adapter->pci_mem)) {
1364 		device_printf(dev,"Unable to allocate bus resource: memory\n");
1365 		return (ENXIO);
1366 	}
1367 
1368 	adapter->osdep.mem_bus_space_tag =
1369 		rman_get_bustag(adapter->pci_mem);
1370 	adapter->osdep.mem_bus_space_handle =
1371 		rman_get_bushandle(adapter->pci_mem);
1372 	adapter->hw.hw_addr = (u8 *) &adapter->osdep.mem_bus_space_handle;
1373 
1374 	adapter->num_queues = 1;
1375 	adapter->hw.back = &adapter->osdep;
1376 
1377 	/*
1378 	** Now setup MSI/X, should
1379 	** return us the number of
1380 	** configured vectors.
1381 	*/
1382 	adapter->msix = ixv_setup_msix(adapter);
1383 	if (adapter->msix == ENXIO)
1384 		return (ENXIO);
1385 	else
1386 		return (0);
1387 }
1388 
/*
 * Release everything ixv_allocate_pci_resources()/ixv_allocate_msix()
 * set up: per-queue IRQs and handlers, the mailbox IRQ, the MSIX
 * vectors, the MSIX table BAR, and the register BAR.  Safe to call
 * from a partially-failed attach.
 */
static void
ixv_free_pci_resources(struct adapter * adapter)
{
	struct 		ix_queue *que = adapter->queues;
	device_t	dev = adapter->dev;
	int		rid, memrid;

	/*
	** NOTE(review): the MSIX table was allocated with PCIR_BAR(3)
	** in ixv_setup_msix(); this assumes MSIX_82598_BAR == 3 —
	** confirm the define so the release rid matches.
	*/
	memrid = PCIR_BAR(MSIX_82598_BAR);

	/*
	** There is a slight possibility of a failure mode
	** in attach that will result in entering this function
	** before interrupt resources have been initialized, and
	** in that case we do not want to execute the loops below
	** We can detect this reliably by the state of the adapter
	** res pointer.
	*/
	if (adapter->res == NULL)
		goto mem;

	/*
	**  Release all msix queue resources:
	*/
	for (int i = 0; i < adapter->num_queues; i++, que++) {
		rid = que->msix + 1;	/* same rid used at allocation */
		if (que->tag != NULL) {
			bus_teardown_intr(dev, que->res, que->tag);
			que->tag = NULL;
		}
		if (que->res != NULL)
			bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
	}


	/* Clean the Legacy or Link interrupt last */
	if (adapter->vector) /* we are doing MSIX */
		rid = adapter->vector + 1;
	else
		(adapter->msix != 0) ? (rid = 1):(rid = 0);

	if (adapter->tag != NULL) {
		bus_teardown_intr(dev, adapter->res, adapter->tag);
		adapter->tag = NULL;
	}
	if (adapter->res != NULL)
		bus_release_resource(dev, SYS_RES_IRQ, rid, adapter->res);

mem:
	/* Give back the MSIX vectors, then the memory BARs */
	if (adapter->msix)
		pci_release_msi(dev);

	if (adapter->msix_mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    memrid, adapter->msix_mem);

	if (adapter->pci_mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    PCIR_BAR(0), adapter->pci_mem);

	return;
}
1450 
1451 /*********************************************************************
1452  *
1453  *  Setup networking device structure and register an interface.
1454  *
1455  **********************************************************************/
/*
 * Allocate and populate the ifnet: entry points, capabilities, and
 * the (autoselect-only) media list, then attach it to the stack.
 */
static void
ixv_setup_interface(device_t dev, struct adapter *adapter)
{
	struct ifnet   *ifp;

	INIT_DEBUGOUT("ixv_setup_interface: begin");

	ifp = adapter->ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL)
		panic("%s: can not if_alloc()\n", device_get_nameunit(dev));
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_baudrate = 1000000000;
	ifp->if_init = ixv_init;
	ifp->if_softc = adapter;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = ixv_ioctl;
	/* Multiqueue transmit on 8.0+, legacy if_start before that */
#if __FreeBSD_version >= 800000
	ifp->if_transmit = ixgbe_mq_start;
	ifp->if_qflush = ixgbe_qflush;
#else
	ifp->if_start = ixgbe_start;
#endif
	ifp->if_snd.ifq_maxlen = adapter->num_tx_desc - 2;

	ether_ifattach(ifp, adapter->hw.mac.addr);

	adapter->max_frame_size =
	    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_hdrlen = sizeof(struct ether_vlan_header);

	/* Offload capabilities; everything advertised starts enabled */
	ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSO4 | IFCAP_VLAN_HWCSUM;
	ifp->if_capabilities |= IFCAP_JUMBO_MTU;
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
			     |  IFCAP_VLAN_HWTSO
			     |  IFCAP_VLAN_MTU;
	ifp->if_capabilities |= IFCAP_LRO;
	ifp->if_capenable = ifp->if_capabilities;

	/*
	 * Specify the media types supported by this adapter and register
	 * callbacks to update media and link information
	 */
	ifmedia_init(&adapter->media, IFM_IMASK, ixv_media_change,
		     ixv_media_status);
	ifmedia_add(&adapter->media, IFM_ETHER | IFM_FDX, 0, NULL);
	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);

	return;
}
1510 
1511 static void
1512 ixv_config_link(struct adapter *adapter)
1513 {
1514 	struct ixgbe_hw *hw = &adapter->hw;
1515 	u32	autoneg, err = 0;
1516 
1517 	if (hw->mac.ops.check_link)
1518 		err = hw->mac.ops.check_link(hw, &autoneg,
1519 		    &adapter->link_up, FALSE);
1520 	if (err)
1521 		goto out;
1522 
1523 	if (hw->mac.ops.setup_link)
1524                	err = hw->mac.ops.setup_link(hw,
1525 		    autoneg, adapter->link_up);
1526 out:
1527 	return;
1528 }
1529 
1530 
1531 /*********************************************************************
1532  *
1533  *  Enable transmit unit.
1534  *
1535  **********************************************************************/
/*
 * Program the VF TX registers for every queue: writeback threshold,
 * head/tail, descriptor ring base/length, relaxed-ordering control,
 * and finally the queue enable bit.
 */
static void
ixv_initialize_transmit_units(struct adapter *adapter)
{
	struct tx_ring	*txr = adapter->tx_rings;
	struct ixgbe_hw	*hw = &adapter->hw;


	for (int i = 0; i < adapter->num_queues; i++, txr++) {
		u64	tdba = txr->txdma.dma_paddr;
		u32	txctrl, txdctl;

		/* Set WTHRESH to 8, burst writeback */
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
		txdctl |= (8 << 16);
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);

		/* Set the HW Tx Head and Tail indices */
	    	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDH(i), 0);
	    	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(i), 0);

		/* Set Tx Tail register */
		txr->tail = IXGBE_VFTDT(i);

		/* Set the processing limit */
		txr->process_limit = ixv_tx_process_limit;

		/* Set Ring parameters: DMA base (split low/high) and length */
		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(i),
		       (tdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(i), (tdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(i),
		    adapter->num_tx_desc *
		    sizeof(struct ixgbe_legacy_tx_desc));
		/* Disable descriptor write relaxed ordering */
		txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(i));
		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
		IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i), txctrl);

		/* Now enable */
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
		txdctl |= IXGBE_TXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
	}

	return;
}
1581 
1582 
1583 /*********************************************************************
1584  *
1585  *  Setup receive registers and features.
1586  *
1587  **********************************************************************/
1588 #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
1589 
/*
 * Program the RX side: broadcast/frame-size control, then per queue
 * the descriptor ring base/length, SRRCTL buffer sizing, head/tail,
 * and the enable bit (polled until it sticks), followed by checksum
 * offload configuration.
 */
static void
ixv_initialize_receive_units(struct adapter *adapter)
{
	struct	rx_ring	*rxr = adapter->rx_rings;
	struct ixgbe_hw	*hw = &adapter->hw;
	struct ifnet   *ifp = adapter->ifp;
	u32		bufsz, fctrl, rxcsum, hlreg;


	/*
	** Enable broadcasts
	** NOTE(review): FCTRL/HLREG0/RXCSUM are not VF-prefixed
	** registers; on real hardware these may be PF-controlled and
	** the VF writes no-ops — confirm against the VF register map.
	*/
	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	fctrl |= IXGBE_FCTRL_BAM;
	fctrl |= IXGBE_FCTRL_DPF;
	fctrl |= IXGBE_FCTRL_PMCF;
	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);

	/* Set for Jumbo Frames? Pick the RX buffer size to match */
	hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	if (ifp->if_mtu > ETHERMTU) {
		hlreg |= IXGBE_HLREG0_JUMBOEN;
		bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
	} else {
		hlreg &= ~IXGBE_HLREG0_JUMBOEN;
		bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
	}
	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);

	for (int i = 0; i < adapter->num_queues; i++, rxr++) {
		u64 rdba = rxr->rxdma.dma_paddr;
		u32 reg, rxdctl;

		/* Setup the Base and Length of the Rx Descriptor Ring */
		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(i),
		    (rdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(i),
		    (rdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(i),
		    adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));

		/* Set up the SRRCTL register: buffer size, one-buf mode */
		reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(i));
		reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
		reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
		reg |= bufsz;
		reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
		IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), reg);

		/* Setup the HW Rx Head and Tail Descriptor Pointers */
		IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0);
		IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me),
		    adapter->num_rx_desc - 1);
		/* Set the processing limit */
		rxr->process_limit = ixv_rx_process_limit;

		/* Set Rx Tail register */
		rxr->tail = IXGBE_VFRDT(rxr->me);

		/* Do the queue enabling last; poll up to 10ms for it */
		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
		rxdctl |= IXGBE_RXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
		for (int k = 0; k < 10; k++) {
			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
			    IXGBE_RXDCTL_ENABLE)
				break;
			else
				msec_delay(1);
		}
		wmb();
	}

	/* Configure RX checksum offload per the ifnet capabilities */
	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);

	if (ifp->if_capenable & IFCAP_RXCSUM)
		rxcsum |= IXGBE_RXCSUM_PCSD;

	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
		rxcsum |= IXGBE_RXCSUM_IPPCSE;

	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);

	return;
}
1673 
/*
 * Re-establish VLAN state after a (soft) reset: enable VLAN
 * stripping on each RX queue and replay the shadow VFTA into the
 * hardware filter table through the PF mailbox.
 */
static void
ixv_setup_vlan_support(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32		ctrl, vid, vfta, retry;


	/*
	** We get here thru init_locked, meaning
	** a soft reset, this has already cleared
	** the VFTA and other state, so if there
	** have been no vlan's registered do nothing.
	*/
	if (adapter->num_vlans == 0)
		return;

	/* Enable VLAN tag stripping (VME) on every RX queue */
	for (int i = 0; i < adapter->num_queues; i++) {
		ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
		ctrl |= IXGBE_RXDCTL_VME;
		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), ctrl);
	}

	/*
	** A soft reset zero's out the VFTA, so
	** we need to repopulate it now.
	*/
	for (int i = 0; i < IXGBE_VFTA_SIZE; i++) {
		if (ixv_shadow_vfta[i] == 0)
			continue;
		vfta = ixv_shadow_vfta[i];
		/*
		** Reconstruct the vlan id's
		** based on the bits set in each
		** of the array ints.
		*/
		for ( int j = 0; j < 32; j++) {
			retry = 0;
			if ((vfta & (1 << j)) == 0)
				continue;
			vid = (i * 32) + j;
			/*
			** Call the shared code mailbox routine;
			** retry up to 5 times since the mailbox
			** exchange with the PF can fail transiently.
			*/
			while (ixgbe_set_vfta(hw, vid, 0, TRUE)) {
				if (++retry > 5)
					break;
			}
		}
	}
}
1723 
1724 /*
1725 ** This routine is run via an vlan config EVENT,
1726 ** it enables us to use the HW Filter table since
1727 ** we can get the vlan id. This just creates the
1728 ** entry in the soft version of the VFTA, init will
1729 ** repopulate the real table.
1730 */
1731 static void
1732 ixv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
1733 {
1734 	struct adapter	*adapter = ifp->if_softc;
1735 	u16		index, bit;
1736 
1737 	if (ifp->if_softc !=  arg)   /* Not our event */
1738 		return;
1739 
1740 	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
1741 		return;
1742 
1743 	IXGBE_CORE_LOCK(adapter);
1744 	index = (vtag >> 5) & 0x7F;
1745 	bit = vtag & 0x1F;
1746 	ixv_shadow_vfta[index] |= (1 << bit);
1747 	++adapter->num_vlans;
1748 	/* Re-init to load the changes */
1749 	ixv_init_locked(adapter);
1750 	IXGBE_CORE_UNLOCK(adapter);
1751 }
1752 
1753 /*
1754 ** This routine is run via an vlan
1755 ** unconfig EVENT, remove our entry
1756 ** in the soft vfta.
1757 */
1758 static void
1759 ixv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
1760 {
1761 	struct adapter	*adapter = ifp->if_softc;
1762 	u16		index, bit;
1763 
1764 	if (ifp->if_softc !=  arg)
1765 		return;
1766 
1767 	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
1768 		return;
1769 
1770 	IXGBE_CORE_LOCK(adapter);
1771 	index = (vtag >> 5) & 0x7F;
1772 	bit = vtag & 0x1F;
1773 	ixv_shadow_vfta[index] &= ~(1 << bit);
1774 	--adapter->num_vlans;
1775 	/* Re-init to load the changes */
1776 	ixv_init_locked(adapter);
1777 	IXGBE_CORE_UNLOCK(adapter);
1778 }
1779 
/*
 * Unmask interrupts: program VTEIMS/VTEIAC, then enable each
 * queue's vector individually and flush the posted writes.
 */
static void
ixv_enable_intr(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ix_queue *que = adapter->queues;
	u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);


	/* Unmask everything except the RTX queue bits */
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);

	/* Auto-clear all causes except OTHER and LSC */
	mask = IXGBE_EIMS_ENABLE_MASK;
	mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);

        for (int i = 0; i < adapter->num_queues; i++, que++)
		ixv_enable_queue(adapter, que->msix);

	/* Flush posted register writes */
	IXGBE_WRITE_FLUSH(hw);

	return;
}
1801 
1802 static void
1803 ixv_disable_intr(struct adapter *adapter)
1804 {
1805 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIAC, 0);
1806 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIMC, ~0);
1807 	IXGBE_WRITE_FLUSH(&adapter->hw);
1808 	return;
1809 }
1810 
1811 /*
1812 ** Setup the correct IVAR register for a particular MSIX interrupt
1813 **  - entry is the register array entry
1814 **  - vector is the MSIX vector for this queue
1815 **  - type is RX/TX/MISC
1816 */
/*
** Setup the correct IVAR register for a particular MSIX interrupt
**  - entry is the register array entry
**  - vector is the MSIX vector for this queue
**  - type is RX(0)/TX(1), or -1 for the MISC (mailbox) cause
**
** Each VTIVAR register holds four 8-bit allocation fields; the
** field offset is 16*(entry&1) + 8*type, and each field carries
** the vector number with IXGBE_IVAR_ALLOC_VAL set as the valid bit.
*/
static void
ixv_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 ivar, index;

	vector |= IXGBE_IVAR_ALLOC_VAL;

	if (type == -1) { /* MISC IVAR */
		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
		ivar &= ~0xFF;	/* clear just the low 8-bit field */
		ivar |= vector;
		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
	} else {	/* RX/TX IVARS */
		index = (16 * (entry & 1)) + (8 * type);
		/* Read-modify-write only the 8-bit field for this cause */
		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1));
		ivar &= ~(0xFF << index);
		ivar |= (vector << index);
		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar);
	}
}
1838 
1839 static void
1840 ixv_configure_ivars(struct adapter *adapter)
1841 {
1842 	struct  ix_queue *que = adapter->queues;
1843 
1844         for (int i = 0; i < adapter->num_queues; i++, que++) {
1845 		/* First the RX queue entry */
1846                 ixv_set_ivar(adapter, i, que->msix, 0);
1847 		/* ... and the TX */
1848 		ixv_set_ivar(adapter, i, que->msix, 1);
1849 		/* Set an initial value in EITR */
1850                 IXGBE_WRITE_REG(&adapter->hw,
1851                     IXGBE_VTEITR(que->msix), IXV_EITR_DEFAULT);
1852 	}
1853 
1854 	/* For the mailbox interrupt */
1855         ixv_set_ivar(adapter, 1, adapter->vector, -1);
1856 }
1857 
1858 
1859 /*
1860 ** Tasklet handler for MSIX MBX interrupts
1861 **  - do outside interrupt since it might sleep
1862 */
/*
** Tasklet handler for MSIX MBX interrupts
**  - do outside interrupt since it might sleep
**
** Refreshes link_speed/link_up via the shared code, then pushes
** the result to the stack through ixv_update_link_status().
*/
static void
ixv_handle_mbx(void *context, int pending)
{
	struct adapter  *adapter = context;

	ixgbe_check_link(&adapter->hw,
	    &adapter->link_speed, &adapter->link_up, 0);
	ixv_update_link_status(adapter);
}
1872 
1873 /*
** The VF stats registers never have a truly virgin
1875 ** starting point, so this routine tries to make an
1876 ** artificial one, marking ground zero on attach as
1877 ** it were.
1878 */
/*
 * Fold the deltas accumulated since the last base snapshot into
 * the saved_reset_* counters, so the running totals survive a
 * reset of the (never-zeroed) VF statistics registers.  Skipped
 * entirely when no packets have been counted yet.
 */
static void
ixv_save_stats(struct adapter *adapter)
{
	if (adapter->stats.vf.vfgprc || adapter->stats.vf.vfgptc) {
		adapter->stats.vf.saved_reset_vfgprc +=
		    adapter->stats.vf.vfgprc - adapter->stats.vf.base_vfgprc;
		adapter->stats.vf.saved_reset_vfgptc +=
		    adapter->stats.vf.vfgptc - adapter->stats.vf.base_vfgptc;
		adapter->stats.vf.saved_reset_vfgorc +=
		    adapter->stats.vf.vfgorc - adapter->stats.vf.base_vfgorc;
		adapter->stats.vf.saved_reset_vfgotc +=
		    adapter->stats.vf.vfgotc - adapter->stats.vf.base_vfgotc;
		adapter->stats.vf.saved_reset_vfmprc +=
		    adapter->stats.vf.vfmprc - adapter->stats.vf.base_vfmprc;
	}
}
1895 
/*
 * Snapshot the current hardware counter values as both the "last"
 * values (for rollover detection in UPDATE_STAT_*) and the "base"
 * values (ground zero for the deltas used by ixv_save_stats()).
 * The octet counters are 36-bit, split across LSB/MSB registers.
 */
static void
ixv_init_stats(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	adapter->stats.vf.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
	adapter->stats.vf.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
	adapter->stats.vf.last_vfgorc |=
	    (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);

	adapter->stats.vf.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
	adapter->stats.vf.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
	adapter->stats.vf.last_vfgotc |=
	    (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);

	adapter->stats.vf.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);

	adapter->stats.vf.base_vfgprc = adapter->stats.vf.last_vfgprc;
	adapter->stats.vf.base_vfgorc = adapter->stats.vf.last_vfgorc;
	adapter->stats.vf.base_vfgptc = adapter->stats.vf.last_vfgptc;
	adapter->stats.vf.base_vfgotc = adapter->stats.vf.last_vfgotc;
	adapter->stats.vf.base_vfmprc = adapter->stats.vf.last_vfmprc;
}
1919 
/*
** Maintain a 64-bit running count from a 32-bit rollover register:
** detect wrap (new reading below the last one), then splice the new
** low 32 bits under the preserved high 32 bits.  Expects `hw` in
** scope.  Wrapped in do { } while (0) so the macro expands to a
** single statement and is safe in unbraced if/else bodies.
*/
#define UPDATE_STAT_32(reg, last, count)		\
do {							\
	u32 current = IXGBE_READ_REG(hw, reg);		\
	if (current < last)				\
		count += 0x100000000LL;			\
	last = current;					\
	count &= 0xFFFFFFFF00000000LL;			\
	count |= current;				\
} while (0)
1929 
/*
** Same idea as UPDATE_STAT_32 but for the 36-bit octet counters
** split across an LSB/MSB register pair.  Expects `hw` in scope.
** Wrapped in do { } while (0) so the macro expands to a single
** statement and is safe in unbraced if/else bodies.
*/
#define UPDATE_STAT_36(lsb, msb, last, count) 		\
do {							\
	u64 cur_lsb = IXGBE_READ_REG(hw, lsb);		\
	u64 cur_msb = IXGBE_READ_REG(hw, msb);		\
	u64 current = ((cur_msb << 32) | cur_lsb);	\
	if (current < last)				\
		count += 0x1000000000LL;		\
	last = current;					\
	count &= 0xFFFFFFF000000000LL;			\
	count |= current;				\
} while (0)
1941 
1942 /*
1943 ** ixv_update_stats - Update the board statistics counters.
1944 */
/*
** ixv_update_stats - Update the board statistics counters.
**
** Folds the current hardware readings into the 64-bit software
** counters using the rollover-aware UPDATE_STAT_32/36 macros
** (which reference the local `hw` by name).
*/
void
ixv_update_stats(struct adapter *adapter)
{
        struct ixgbe_hw *hw = &adapter->hw;

        UPDATE_STAT_32(IXGBE_VFGPRC, adapter->stats.vf.last_vfgprc,
	    adapter->stats.vf.vfgprc);
        UPDATE_STAT_32(IXGBE_VFGPTC, adapter->stats.vf.last_vfgptc,
	    adapter->stats.vf.vfgptc);
        UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
	    adapter->stats.vf.last_vfgorc, adapter->stats.vf.vfgorc);
        UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
	    adapter->stats.vf.last_vfgotc, adapter->stats.vf.vfgotc);
        UPDATE_STAT_32(IXGBE_VFMPRC, adapter->stats.vf.last_vfmprc,
	    adapter->stats.vf.vfmprc);
}
1961 
1962 /*
1963  * Add statistic sysctls for the VF.
1964  */
/*
 * Add statistic sysctls for the VF: driver-level counters, the
 * "mac" subtree mirroring the hardware registers, and a "que"
 * subtree of software-collected per-queue counters.  Note only
 * queue 0 is exported (the VF is configured single-queue in
 * ixv_allocate_pci_resources()).
 */
static void
ixv_add_stats_sysctls(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	struct ix_queue *que = &adapter->queues[0];
	struct tx_ring *txr = que->txr;
	struct rx_ring *rxr = que->rxr;

	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
	struct sysctl_oid *tree = device_get_sysctl_tree(dev);
	struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;

	struct sysctl_oid *stat_node, *queue_node;
	struct sysctl_oid_list *stat_list, *queue_list;

	/* Driver Statistics */
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
			CTLFLAG_RD, &adapter->dropped_pkts,
			"Driver dropped packets");
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "mbuf_defrag_failed",
			CTLFLAG_RD, &adapter->mbuf_defrag_failed,
			"m_defrag() failed");
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
			CTLFLAG_RD, &adapter->watchdog_events,
			"Watchdog timeouts");

	/* Hardware-register-backed counters under "mac" */
	stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac",
				    CTLFLAG_RD, NULL,
				    "VF Statistics (read from HW registers)");
	stat_list = SYSCTL_CHILDREN(stat_node);

	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd",
			CTLFLAG_RD, &stats->vfgprc,
			"Good Packets Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd",
			CTLFLAG_RD, &stats->vfgorc,
			"Good Octets Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd",
			CTLFLAG_RD, &stats->vfmprc,
			"Multicast Packets Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
			CTLFLAG_RD, &stats->vfgptc,
			"Good Packets Transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
			CTLFLAG_RD, &stats->vfgotc,
			"Good Octets Transmitted");

	/* Software-collected queue counters under "que" */
	queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "que",
				    CTLFLAG_RD, NULL,
				    "Queue Statistics (collected by SW)");
	queue_list = SYSCTL_CHILDREN(queue_node);

	SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
			CTLFLAG_RD, &(que->irqs),
			"IRQs on queue");
	SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_irqs",
			CTLFLAG_RD, &(rxr->rx_irq),
			"RX irqs on queue");
	SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
			CTLFLAG_RD, &(rxr->rx_packets),
			"RX packets");
	SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
			CTLFLAG_RD, &(rxr->rx_bytes),
			"RX bytes");
	SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_discarded",
			CTLFLAG_RD, &(rxr->rx_discarded),
			"Discarded RX packets");

	SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
			CTLFLAG_RD, &(txr->total_packets),
			"TX Packets");
	SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_bytes",
			CTLFLAG_RD, &(txr->tx_bytes),
			"TX Bytes");
	SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_no_desc",
			CTLFLAG_RD, &(txr->no_desc_avail),
			"# of times not enough descriptors were available during TX");
}
2044 
2045 /**********************************************************************
2046  *
2047  *  This routine is called only when em_display_debug_stats is enabled.
2048  *  This routine provides a way to take a look at important statistics
2049  *  maintained by the driver and hardware.
2050  *
2051  **********************************************************************/
2052 static void
2053 ixv_print_debug_info(struct adapter *adapter)
2054 {
2055         device_t dev = adapter->dev;
2056         struct ixgbe_hw         *hw = &adapter->hw;
2057         struct ix_queue         *que = adapter->queues;
2058         struct rx_ring          *rxr;
2059         struct tx_ring          *txr;
2060         struct lro_ctrl         *lro;
2061 
2062         device_printf(dev,"Error Byte Count = %u \n",
2063             IXGBE_READ_REG(hw, IXGBE_ERRBC));
2064 
2065         for (int i = 0; i < adapter->num_queues; i++, que++) {
2066                 txr = que->txr;
2067                 rxr = que->rxr;
2068                 lro = &rxr->lro;
2069                 device_printf(dev,"QUE(%d) IRQs Handled: %lu\n",
2070                     que->msix, (long)que->irqs);
2071                 device_printf(dev,"RX(%d) Packets Received: %lld\n",
2072                     rxr->me, (long long)rxr->rx_packets);
2073                 device_printf(dev,"RX(%d) Bytes Received: %lu\n",
2074                     rxr->me, (long)rxr->rx_bytes);
2075                 device_printf(dev,"RX(%d) LRO Queued= %d\n",
2076                     rxr->me, lro->lro_queued);
2077                 device_printf(dev,"RX(%d) LRO Flushed= %d\n",
2078                     rxr->me, lro->lro_flushed);
2079                 device_printf(dev,"TX(%d) Packets Sent: %lu\n",
2080                     txr->me, (long)txr->total_packets);
2081                 device_printf(dev,"TX(%d) NO Desc Avail: %lu\n",
2082                     txr->me, (long)txr->no_desc_avail);
2083         }
2084 
2085         device_printf(dev,"MBX IRQ Handled: %lu\n",
2086             (long)adapter->vector_irq);
2087         return;
2088 }
2089 
2090 static int
2091 ixv_sysctl_debug(SYSCTL_HANDLER_ARGS)
2092 {
2093 	int error, result;
2094 	struct adapter *adapter;
2095 
2096 	result = -1;
2097 	error = sysctl_handle_int(oidp, &result, 0, req);
2098 
2099 	if (error || !req->newptr)
2100 		return (error);
2101 
2102 	if (result == 1) {
2103 		adapter = (struct adapter *) arg1;
2104 		ixv_print_debug_info(adapter);
2105 	}
2106 	return error;
2107 }
2108 
2109