1 /******************************************************************************
2 
3   Copyright (c) 2001-2013, Intel Corporation
4   All rights reserved.
5 
6   Redistribution and use in source and binary forms, with or without
7   modification, are permitted provided that the following conditions are met:
8 
9    1. Redistributions of source code must retain the above copyright notice,
10       this list of conditions and the following disclaimer.
11 
12    2. Redistributions in binary form must reproduce the above copyright
13       notice, this list of conditions and the following disclaimer in the
14       documentation and/or other materials provided with the distribution.
15 
16    3. Neither the name of the Intel Corporation nor the names of its
17       contributors may be used to endorse or promote products derived from
18       this software without specific prior written permission.
19 
20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30   POSSIBILITY OF SUCH DAMAGE.
31 
32 ******************************************************************************/
33 /*$FreeBSD: head/sys/dev/ixgbe/ixv.c 275358 2014-12-01 11:45:24Z hselasky $*/
34 /*$NetBSD: ixv.c,v 1.17 2016/06/10 13:27:15 ozaki-r Exp $*/
35 
36 #include "opt_inet.h"
37 #include "opt_inet6.h"
38 
39 #include "ixv.h"
40 #include "vlan.h"
41 
42 /*********************************************************************
43  *  Driver version
44  *********************************************************************/
45 char ixv_driver_version[] = "1.1.4";
46 
47 /*********************************************************************
48  *  PCI Device ID Table
49  *
50  *  Used by probe to select which devices to attach to
51  *  Last field stores an index into ixv_strings
52  *  Last entry must be all 0s
53  *
54  *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
55  *********************************************************************/
56 
57 static ixv_vendor_info_t ixv_vendor_info_array[] =
58 {
59 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, 0, 0, 0},
60 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF, 0, 0, 0},
61 	/* required last entry */
62 	{0, 0, 0, 0, 0}
63 };
64 
65 /*********************************************************************
66  *  Table of branding strings
67  *********************************************************************/
68 
69 static const char    *ixv_strings[] = {
70 	"Intel(R) PRO/10GbE Virtual Function Network Driver"
71 };
72 
73 /*********************************************************************
74  *  Function prototypes
75  *********************************************************************/
76 static int      ixv_probe(device_t, cfdata_t, void *);
77 static void      ixv_attach(device_t, device_t, void *);
78 static int      ixv_detach(device_t, int);
79 #if 0
80 static int      ixv_shutdown(device_t);
81 #endif
82 #if __FreeBSD_version < 800000
83 static void     ixv_start(struct ifnet *);
84 static void     ixv_start_locked(struct tx_ring *, struct ifnet *);
85 #else
86 static int	ixv_mq_start(struct ifnet *, struct mbuf *);
87 static int	ixv_mq_start_locked(struct ifnet *,
88 		    struct tx_ring *, struct mbuf *);
89 static void	ixv_qflush(struct ifnet *);
90 #endif
91 static int      ixv_ioctl(struct ifnet *, u_long, void *);
92 static int	ixv_init(struct ifnet *);
93 static void	ixv_init_locked(struct adapter *);
94 static void     ixv_stop(void *);
95 static void     ixv_media_status(struct ifnet *, struct ifmediareq *);
96 static int      ixv_media_change(struct ifnet *);
97 static void     ixv_identify_hardware(struct adapter *);
98 static int      ixv_allocate_pci_resources(struct adapter *,
99 		    const struct pci_attach_args *);
100 static int      ixv_allocate_msix(struct adapter *,
101 		    const struct pci_attach_args *);
102 static int	ixv_allocate_queues(struct adapter *);
103 static int	ixv_setup_msix(struct adapter *);
104 static void	ixv_free_pci_resources(struct adapter *);
105 static void     ixv_local_timer(void *);
106 static void     ixv_setup_interface(device_t, struct adapter *);
107 static void     ixv_config_link(struct adapter *);
108 
109 static int      ixv_allocate_transmit_buffers(struct tx_ring *);
110 static int	ixv_setup_transmit_structures(struct adapter *);
111 static void	ixv_setup_transmit_ring(struct tx_ring *);
112 static void     ixv_initialize_transmit_units(struct adapter *);
113 static void     ixv_free_transmit_structures(struct adapter *);
114 static void     ixv_free_transmit_buffers(struct tx_ring *);
115 
116 static int      ixv_allocate_receive_buffers(struct rx_ring *);
117 static int      ixv_setup_receive_structures(struct adapter *);
118 static int	ixv_setup_receive_ring(struct rx_ring *);
119 static void     ixv_initialize_receive_units(struct adapter *);
120 static void     ixv_free_receive_structures(struct adapter *);
121 static void     ixv_free_receive_buffers(struct rx_ring *);
122 
123 static void     ixv_enable_intr(struct adapter *);
124 static void     ixv_disable_intr(struct adapter *);
125 static bool	ixv_txeof(struct tx_ring *);
126 static bool	ixv_rxeof(struct ix_queue *, int);
127 static void	ixv_rx_checksum(u32, struct mbuf *, u32,
128 		    struct ixgbevf_hw_stats *);
129 static void     ixv_set_multi(struct adapter *);
130 static void     ixv_update_link_status(struct adapter *);
131 static void	ixv_refresh_mbufs(struct rx_ring *, int);
132 static int      ixv_xmit(struct tx_ring *, struct mbuf *);
133 static int	ixv_sysctl_stats(SYSCTLFN_PROTO);
134 static int	ixv_sysctl_debug(SYSCTLFN_PROTO);
135 static int	ixv_set_flowcntl(SYSCTLFN_PROTO);
136 static int	ixv_dma_malloc(struct adapter *, bus_size_t,
137 		    struct ixv_dma_alloc *, int);
138 static void     ixv_dma_free(struct adapter *, struct ixv_dma_alloc *);
139 static void	ixv_add_rx_process_limit(struct adapter *, const char *,
140 		    const char *, int *, int);
141 static u32	ixv_tx_ctx_setup(struct tx_ring *, struct mbuf *);
142 static bool	ixv_tso_setup(struct tx_ring *, struct mbuf *, u32 *);
143 static void	ixv_set_ivar(struct adapter *, u8, u8, s8);
144 static void	ixv_configure_ivars(struct adapter *);
145 static u8 *	ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
146 
147 static void	ixv_setup_vlan_support(struct adapter *);
148 #if 0
149 static void	ixv_register_vlan(void *, struct ifnet *, u16);
150 static void	ixv_unregister_vlan(void *, struct ifnet *, u16);
151 #endif
152 
153 static void	ixv_save_stats(struct adapter *);
154 static void	ixv_init_stats(struct adapter *);
155 static void	ixv_update_stats(struct adapter *);
156 
157 static __inline void ixv_rx_discard(struct rx_ring *, int);
158 static __inline void ixv_rx_input(struct rx_ring *, struct ifnet *,
159 		    struct mbuf *, u32);
160 
161 /* The MSI/X Interrupt handlers */
162 static int	ixv_msix_que(void *);
163 static int	ixv_msix_mbx(void *);
164 
165 /* Deferred interrupt tasklets */
166 static void	ixv_handle_que(void *);
167 static void	ixv_handle_mbx(void *);
168 
169 const struct sysctlnode *ixv_sysctl_instance(struct adapter *);
170 static ixv_vendor_info_t *ixv_lookup(const struct pci_attach_args *);
171 
172 /*********************************************************************
173  *  FreeBSD Device Interface Entry Points
174  *********************************************************************/
175 
176 CFATTACH_DECL3_NEW(ixv, sizeof(struct adapter),
177     ixv_probe, ixv_attach, ixv_detach, NULL, NULL, NULL,
178     DVF_DETACH_SHUTDOWN);
179 
180 # if 0
181 static device_method_t ixv_methods[] = {
182 	/* Device interface */
183 	DEVMETHOD(device_probe, ixv_probe),
184 	DEVMETHOD(device_attach, ixv_attach),
185 	DEVMETHOD(device_detach, ixv_detach),
186 	DEVMETHOD(device_shutdown, ixv_shutdown),
187 	DEVMETHOD_END
188 };
189 #endif
190 
191 #if 0
192 static driver_t ixv_driver = {
193 	"ix", ixv_methods, sizeof(struct adapter),
194 };
195 
196 extern devclass_t ixgbe_devclass;
197 DRIVER_MODULE(ixv, pci, ixv_driver, ixgbe_devclass, 0, 0);
198 MODULE_DEPEND(ixv, pci, 1, 1, 1);
199 MODULE_DEPEND(ixv, ether, 1, 1, 1);
200 #endif
201 
202 /*
203 ** TUNEABLE PARAMETERS:
204 */
205 
206 /*
207 ** AIM: Adaptive Interrupt Moderation
208 ** which means that the interrupt rate
209 ** is varied over time based on the
210 ** traffic for that interrupt vector
211 */
212 static int ixv_enable_aim = FALSE;
213 #define	TUNABLE_INT(__x, __y)
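/*
 * NetBSD has no FreeBSD-style loader tunables, so the empty macro above
 * turns the TUNABLE_INT() calls below into no-ops; these defaults can only
 * be changed at compile time or, where one exists, through a sysctl.
 */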
214 TUNABLE_INT("hw.ixv.enable_aim", &ixv_enable_aim);
215 
216 /* How many packets rxeof tries to clean at a time */
217 static int ixv_rx_process_limit = 128;
218 TUNABLE_INT("hw.ixv.rx_process_limit", &ixv_rx_process_limit);
219 
220 /* Flow control setting, default to full */
221 static int ixv_flow_control = ixgbe_fc_full;
222 TUNABLE_INT("hw.ixv.flow_control", &ixv_flow_control);
223 
224 /*
225  * Header split: this causes the hardware to DMA
226  * the header into a separate mbuf from the payload.
227  * It can be a performance win in some workloads, but
228  * in others it actually hurts; it is off by default.
229  */
230 static int ixv_header_split = FALSE;
231 TUNABLE_INT("hw.ixv.hdr_split", &ixv_header_split);
232 
233 /*
234 ** Number of TX descriptors per ring,
235 ** set higher than RX as this seems to be
236 ** the better performing choice.
237 */
238 static int ixv_txd = DEFAULT_TXD;
239 TUNABLE_INT("hw.ixv.txd", &ixv_txd);
240 
241 /* Number of RX descriptors per ring */
242 static int ixv_rxd = DEFAULT_RXD;
243 TUNABLE_INT("hw.ixv.rxd", &ixv_rxd);
244 
245 /*
246 ** Shadow VFTA table: this is needed because
247 ** the real filter table gets cleared during
248 ** a soft reset and we need to repopulate it.
249 */
250 static u32 ixv_shadow_vfta[VFTA_SIZE];
251 
252 /* Keep a running tab on attached ports as a sanity check */
253 static int ixv_total_ports;
254 
255 /*********************************************************************
256  *  Device identification routine
257  *
258  *  ixv_probe determines if the driver should be loaded on the
259  *  adapter based on the PCI vendor/device ID of the adapter.
260  *
261  *  return 1 on a match, 0 otherwise
262  *********************************************************************/
263 
264 static int
265 ixv_probe(device_t dev, cfdata_t cf, void *aux)
266 {
267 	const struct pci_attach_args *pa = aux;
268 
269 	return (ixv_lookup(pa) != NULL) ? 1 : 0;
270 }
271 
272 static ixv_vendor_info_t *
273 ixv_lookup(const struct pci_attach_args *pa)
274 {
275 	pcireg_t subid;
276 	ixv_vendor_info_t *ent;
277 
278 	INIT_DEBUGOUT("ixv_probe: begin");
279 
280 	if (PCI_VENDOR(pa->pa_id) != IXGBE_INTEL_VENDOR_ID)
281 		return NULL;
282 
283 	subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
284 
285 	for (ent = ixv_vendor_info_array; ent->vendor_id != 0; ent++) {
286 		if ((PCI_VENDOR(pa->pa_id) == ent->vendor_id) &&
287 		    (PCI_PRODUCT(pa->pa_id) == ent->device_id) &&
288 
289 		    ((PCI_SUBSYS_VENDOR(subid) == ent->subvendor_id) ||
290 		     (ent->subvendor_id == 0)) &&
291 
292 		    ((PCI_SUBSYS_ID(subid) == ent->subdevice_id) ||
293 		     (ent->subdevice_id == 0))) {
294 			++ixv_total_ports;
295 			return ent;
296 		}
297 	}
298 	return NULL;
299 }
300 
301 
302 static void
303 ixv_sysctl_attach(struct adapter *adapter)
304 {
305 	struct sysctllog **log;
306 	const struct sysctlnode *rnode, *cnode;
307 	device_t dev;
308 
309 	dev = adapter->dev;
310 	log = &adapter->sysctllog;
311 
312 	if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
313 		aprint_error_dev(dev, "could not create sysctl root\n");
314 		return;
315 	}
316 
317 	if (sysctl_createv(log, 0, &rnode, &cnode,
318 	    CTLFLAG_READWRITE, CTLTYPE_INT,
319 	    "stats", SYSCTL_DESCR("Statistics"),
320 	    ixv_sysctl_stats, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0)
321 		aprint_error_dev(dev, "could not create sysctl\n");
322 
323 	if (sysctl_createv(log, 0, &rnode, &cnode,
324 	    CTLFLAG_READWRITE, CTLTYPE_INT,
325 	    "debug", SYSCTL_DESCR("Debug Info"),
326 	    ixv_sysctl_debug, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0)
327 		aprint_error_dev(dev, "could not create sysctl\n");
328 
329 	if (sysctl_createv(log, 0, &rnode, &cnode,
330 	    CTLFLAG_READWRITE, CTLTYPE_INT,
331 	    "flow_control", SYSCTL_DESCR("Flow Control"),
332 	    ixv_set_flowcntl, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0)
333 		aprint_error_dev(dev, "could not create sysctl\n");
334 
335 	/* XXX This is an *instance* sysctl controlling a *global* variable.
336 	 * XXX It's that way in the FreeBSD driver that this derives from.
337 	 */
338 	if (sysctl_createv(log, 0, &rnode, &cnode,
339 	    CTLFLAG_READWRITE, CTLTYPE_INT,
340 	    "enable_aim", SYSCTL_DESCR("Interrupt Moderation"),
341 	    NULL, 0, &ixv_enable_aim, 0, CTL_CREATE, CTL_EOL) != 0)
342 		aprint_error_dev(dev, "could not create sysctl\n");
343 }
344 
345 /*********************************************************************
346  *  Device initialization routine
347  *
348  *  The attach entry point is called when the driver is being loaded.
349  *  This routine identifies the type of hardware, allocates all resources
350  *  and initializes the hardware.
351  *
352  *  The NetBSD attach function returns void; failures are reported via aprint_error_dev().
353  *********************************************************************/
354 
355 static void
356 ixv_attach(device_t parent, device_t dev, void *aux)
357 {
358 	struct adapter *adapter;
359 	struct ixgbe_hw *hw;
360 	int             error = 0;
361 	ixv_vendor_info_t *ent;
362 	const struct pci_attach_args *pa = aux;
363 
364 	INIT_DEBUGOUT("ixv_attach: begin");
365 
366 	/* Allocate, clear, and link in our adapter structure */
367 	adapter = device_private(dev);
368 	adapter->dev = adapter->osdep.dev = dev;
369 	hw = &adapter->hw;
370 	adapter->osdep.pc = pa->pa_pc;
371 	adapter->osdep.tag = pa->pa_tag;
372 	adapter->osdep.dmat = pa->pa_dmat;
373 	adapter->osdep.attached = false;
374 
375 	ent = ixv_lookup(pa);
376 
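	/*
	 * ixv_probe() already matched this device against the vendor table,
	 * so the lookup cannot fail here.
	 */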
377 	KASSERT(ent != NULL);
378 
379 	aprint_normal(": %s, Version - %s\n",
380 	    ixv_strings[ent->index], ixv_driver_version);
381 
382 	/* Core Lock Init*/
383 	IXV_CORE_LOCK_INIT(adapter, device_xname(dev));
384 
385 	/* SYSCTL APIs */
386 	ixv_sysctl_attach(adapter);
387 
388 	/* Set up the timer callout */
389 	callout_init(&adapter->timer, 0);
390 
391 	/* Determine hardware revision */
392 	ixv_identify_hardware(adapter);
393 
394 	/* Do base PCI setup - map BAR0 */
395 	if (ixv_allocate_pci_resources(adapter, pa)) {
396 		aprint_error_dev(dev, "Allocation of PCI resources failed\n");
397 		error = ENXIO;
398 		goto err_out;
399 	}
400 
401 	/* Do descriptor calc and sanity checks */
402 	if (((ixv_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
403 	    ixv_txd < MIN_TXD || ixv_txd > MAX_TXD) {
404 		aprint_error_dev(dev, "TXD config issue, using default!\n");
405 		adapter->num_tx_desc = DEFAULT_TXD;
406 	} else
407 		adapter->num_tx_desc = ixv_txd;
408 
409 	if (((ixv_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
410 	    ixv_rxd < MIN_RXD || ixv_rxd > MAX_RXD) {
411 		aprint_error_dev(dev, "RXD config issue, using default!\n");
412 		adapter->num_rx_desc = DEFAULT_RXD;
413 	} else
414 		adapter->num_rx_desc = ixv_rxd;
415 
416 	/* Allocate our TX/RX Queues */
417 	if (ixv_allocate_queues(adapter)) {
418 		error = ENOMEM;
419 		goto err_out;
420 	}
421 
422 	/*
423 	** Initialize the shared code: it is
424 	** at this point that the mac type is set.
425 	*/
426 	error = ixgbe_init_shared_code(hw);
427 	if (error) {
428 		aprint_error_dev(dev,"Shared Code Initialization Failure\n");
429 		error = EIO;
430 		goto err_late;
431 	}
432 
433 	/* Setup the mailbox */
434 	ixgbe_init_mbx_params_vf(hw);
435 
436 	ixgbe_reset_hw(hw);
437 
438 	/* Set the default Hardware Flow Control settings */
439 	hw->fc.requested_mode = ixgbe_fc_full;
440 	hw->fc.pause_time = IXV_FC_PAUSE;
441 	hw->fc.low_water[0] = IXV_FC_LO;
442 	hw->fc.high_water[0] = IXV_FC_HI;
443 	hw->fc.send_xon = TRUE;
444 
445 	error = ixgbe_init_hw(hw);
446 	if (error) {
447 		aprint_error_dev(dev,"Hardware Initialization Failure\n");
448 		error = EIO;
449 		goto err_late;
450 	}
451 
452 	error = ixv_allocate_msix(adapter, pa);
453 	if (error)
454 		goto err_late;
455 
456 	/* Setup OS specific network interface */
457 	ixv_setup_interface(dev, adapter);
458 
459 	/* Sysctl for limiting the amount of work done in the taskqueue */
460 	ixv_add_rx_process_limit(adapter, "rx_processing_limit",
461 	    "max number of rx packets to process", &adapter->rx_process_limit,
462 	    ixv_rx_process_limit);
463 
464 	/* Do the stats setup */
465 	ixv_save_stats(adapter);
466 	ixv_init_stats(adapter);
467 
468 	/* Register for VLAN events */
469 #if 0 /* XXX delete after write? */
470 	adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
471 	    ixv_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
472 	adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
473 	    ixv_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
474 #endif
475 
476 	INIT_DEBUGOUT("ixv_attach: end");
477 	adapter->osdep.attached = true;
478 	return;
479 
480 err_late:
481 	ixv_free_transmit_structures(adapter);
482 	ixv_free_receive_structures(adapter);
483 err_out:
484 	ixv_free_pci_resources(adapter);
485 	return;
486 
487 }
488 
489 /*********************************************************************
490  *  Device removal routine
491  *
492  *  The detach entry point is called when the driver is being removed.
493  *  This routine stops the adapter and deallocates all the resources
494  *  that were allocated for driver operation.
495  *
496  *  return 0 on success, positive on failure
497  *********************************************************************/
498 
499 static int
500 ixv_detach(device_t dev, int flags)
501 {
502 	struct adapter *adapter = device_private(dev);
503 	struct ix_queue *que = adapter->queues;
504 
505 	INIT_DEBUGOUT("ixv_detach: begin");
506 	if (adapter->osdep.attached == false)
507 		return 0;
508 
509 #if NVLAN > 0
510 	/* Make sure VLANS are not using driver */
511 	if (!VLAN_ATTACHED(&adapter->osdep.ec))
512 		;	/* nothing to do: no VLANs */
513 	else if ((flags & (DETACH_SHUTDOWN|DETACH_FORCE)) != 0)
514 		vlan_ifdetach(adapter->ifp);
515 	else {
516 		aprint_error_dev(dev, "VLANs in use\n");
517 		return EBUSY;
518 	}
519 #endif
520 
521 	IXV_CORE_LOCK(adapter);
522 	ixv_stop(adapter);
523 	IXV_CORE_UNLOCK(adapter);
524 
525 	for (int i = 0; i < adapter->num_queues; i++, que++) {
526 		softint_disestablish(que->que_si);
527 	}
528 
529 	/* Drain the Link queue */
530 	softint_disestablish(adapter->mbx_si);
531 
532 	/* Unregister VLAN events */
533 #if 0 /* XXX msaitoh delete after write? */
534 	if (adapter->vlan_attach != NULL)
535 		EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
536 	if (adapter->vlan_detach != NULL)
537 		EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
538 #endif
539 
540 	ether_ifdetach(adapter->ifp);
541 	callout_halt(&adapter->timer, NULL);
542 	ixv_free_pci_resources(adapter);
543 #if 0 /* XXX the NetBSD port is probably missing something here */
544 	bus_generic_detach(dev);
545 #endif
546 	if_detach(adapter->ifp);
547 
548 	ixv_free_transmit_structures(adapter);
549 	ixv_free_receive_structures(adapter);
550 
551 	IXV_CORE_LOCK_DESTROY(adapter);
552 	return (0);
553 }
554 
555 /*********************************************************************
556  *
557  *  Shutdown entry point
558  *
559  **********************************************************************/
560 #if 0 /* XXX NetBSD ought to register something like this through pmf(9) */
561 static int
562 ixv_shutdown(device_t dev)
563 {
564 	struct adapter *adapter = device_private(dev);
565 	IXV_CORE_LOCK(adapter);
566 	ixv_stop(adapter);
567 	IXV_CORE_UNLOCK(adapter);
568 	return (0);
569 }
570 #endif
571 
572 #if __FreeBSD_version < 800000
573 /*********************************************************************
574  *  Transmit entry point
575  *
576  *  ixv_start is called by the stack to initiate a transmit.
577  *  The driver will remain in this routine as long as there are
578  *  packets to transmit and transmit resources are available.
579  *  If resources are not available, the stack is notified and
580  *  the packet is requeued.
581  **********************************************************************/
582 static void
583 ixv_start_locked(struct tx_ring *txr, struct ifnet *ifp)
584 {
585 	int rc;
586 	struct mbuf    *m_head;
587 	struct adapter *adapter = txr->adapter;
588 
589 	IXV_TX_LOCK_ASSERT(txr);
590 
591 	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) !=
592 	    IFF_RUNNING)
593 		return;
594 	if (!adapter->link_active)
595 		return;
596 
597 	while (!IFQ_IS_EMPTY(&ifp->if_snd)) {
598 
599 		IFQ_POLL(&ifp->if_snd, m_head);
600 		if (m_head == NULL)
601 			break;
602 
603 		if ((rc = ixv_xmit(txr, m_head)) == EAGAIN) {
604 			ifp->if_flags |= IFF_OACTIVE;
605 			break;
606 		}
607 		IFQ_DEQUEUE(&ifp->if_snd, m_head);
608 		if (rc == EFBIG) {
609 			struct mbuf *mtmp;
610 
611 			if ((mtmp = m_defrag(m_head, M_NOWAIT)) != NULL) {
612 				m_head = mtmp;
613 				rc = ixv_xmit(txr, m_head);
614 				if (rc != 0)
615 					adapter->efbig2_tx_dma_setup.ev_count++;
616 			} else
617 				adapter->m_defrag_failed.ev_count++;
618 		}
619 		if (rc != 0) {
620 			m_freem(m_head);
621 			continue;
622 		}
623 		/* Send a copy of the frame to the BPF listener */
624 		bpf_mtap(ifp, m_head);
625 
626 		/* Set watchdog on */
627 		txr->watchdog_check = TRUE;
628 		getmicrotime(&txr->watchdog_time);
629 	}
630 	return;
631 }
632 
633 /*
634  * Legacy TX start - called by the stack, this
635  * always uses the first tx ring, and should
636  * not be used with multiqueue tx enabled.
637  */
638 static void
639 ixv_start(struct ifnet *ifp)
640 {
641 	struct adapter *adapter = ifp->if_softc;
642 	struct tx_ring	*txr = adapter->tx_rings;
643 
644 	if (ifp->if_flags & IFF_RUNNING) {
645 		IXV_TX_LOCK(txr);
646 		ixv_start_locked(txr, ifp);
647 		IXV_TX_UNLOCK(txr);
648 	}
649 	return;
650 }
651 
652 #else
653 
654 /*
655 ** Multiqueue Transmit driver
656 **
657 */
658 static int
659 ixv_mq_start(struct ifnet *ifp, struct mbuf *m)
660 {
661 	struct adapter	*adapter = ifp->if_softc;
662 	struct ix_queue	*que;
663 	struct tx_ring	*txr;
664 	int 		i = 0, err = 0;
665 
666 	/* Which queue to use */
667 	if ((m->m_flags & M_FLOWID) != 0)
668 		i = m->m_pkthdr.flowid % adapter->num_queues;
669 
670 	txr = &adapter->tx_rings[i];
671 	que = &adapter->queues[i];
672 
673 	if (IXV_TX_TRYLOCK(txr)) {
674 		err = ixv_mq_start_locked(ifp, txr, m);
675 		IXV_TX_UNLOCK(txr);
676 	} else {
677 		err = drbr_enqueue(ifp, txr->br, m);
678 		softint_schedule(que->que_si);
679 	}
680 
681 	return (err);
682 }
683 
684 static int
685 ixv_mq_start_locked(struct ifnet *ifp, struct tx_ring *txr, struct mbuf *m)
686 {
687 	struct adapter  *adapter = txr->adapter;
688         struct mbuf     *next;
689         int             enqueued, err = 0;
690 
691 	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) !=
692 	    IFF_RUNNING || adapter->link_active == 0) {
693 		if (m != NULL)
694 			err = drbr_enqueue(ifp, txr->br, m);
695 		return (err);
696 	}
697 
698 	/* Do a clean if descriptors are low */
699 	if (txr->tx_avail <= IXV_TX_CLEANUP_THRESHOLD)
700 		ixv_txeof(txr);
701 
702 	enqueued = 0;
703 	if (m != NULL) {
704 		err = drbr_enqueue(ifp, txr->br, m);
705 		if (err) {
706 			return (err);
707 		}
708 	}
709 	/* Process the queue */
710 	while ((next = drbr_peek(ifp, txr->br)) != NULL) {
711 		if ((err = ixv_xmit(txr, next)) != 0) {
712 			if (next != NULL) {
713 				drbr_advance(ifp, txr->br);
714 			} else {
715 				drbr_putback(ifp, txr->br, next);
716 			}
717 			break;
718 		}
719 		drbr_advance(ifp, txr->br);
720 		enqueued++;
721 		ifp->if_obytes += next->m_pkthdr.len;
722 		if (next->m_flags & M_MCAST)
723 			ifp->if_omcasts++;
724 		/* Send a copy of the frame to the BPF listener */
725 		ETHER_BPF_MTAP(ifp, next);
726 		if ((ifp->if_flags & IFF_RUNNING) == 0)
727 			break;
728 		if (txr->tx_avail <= IXV_TX_OP_THRESHOLD) {
729 			ifp->if_flags |= IFF_OACTIVE;
730 			break;
731 		}
732 	}
733 
734 	if (enqueued > 0) {
735 		/* Set watchdog on */
736 		txr->watchdog_check = TRUE;
737 		getmicrotime(&txr->watchdog_time);
738 	}
739 
740 	return (err);
741 }
742 
743 /*
744 ** Flush all ring buffers
745 */
746 static void
747 ixv_qflush(struct ifnet *ifp)
748 {
749 	struct adapter  *adapter = ifp->if_softc;
750 	struct tx_ring  *txr = adapter->tx_rings;
751 	struct mbuf     *m;
752 
753 	for (int i = 0; i < adapter->num_queues; i++, txr++) {
754 		IXV_TX_LOCK(txr);
755 		while ((m = buf_ring_dequeue_sc(txr->br)) != NULL)
756 			m_freem(m);
757 		IXV_TX_UNLOCK(txr);
758 	}
759 	if_qflush(ifp);
760 }
761 
762 #endif
763 
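/*
 * Interface-flags change callback, presumably hooked up via
 * ether_set_ifflags_cb() when the interface is set up; returning
 * ENETRESET asks the caller to reprogram the hardware for the new flags.
 */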
764 static int
765 ixv_ifflags_cb(struct ethercom *ec)
766 {
767 	struct ifnet *ifp = &ec->ec_if;
768 	struct adapter *adapter = ifp->if_softc;
769 	int change = ifp->if_flags ^ adapter->if_flags, rc = 0;
770 
771 	IXV_CORE_LOCK(adapter);
772 
773 	if (change != 0)
774 		adapter->if_flags = ifp->if_flags;
775 
776 	if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0)
777 		rc = ENETRESET;
778 
779 	IXV_CORE_UNLOCK(adapter);
780 
781 	return rc;
782 }
783 
784 /*********************************************************************
785  *  Ioctl entry point
786  *
787  *  ixv_ioctl is called when the user wants to configure the
788  *  interface.
789  *
790  *  return 0 on success, positive on failure
791  **********************************************************************/
792 
793 static int
794 ixv_ioctl(struct ifnet *ifp, u_long command, void *data)
795 {
796 	struct adapter	*adapter = ifp->if_softc;
797 	struct ifcapreq *ifcr = data;
798 	struct ifreq	*ifr = (struct ifreq *) data;
799 	int             error = 0;
800 	int l4csum_en;
801 	const int l4csum = IFCAP_CSUM_TCPv4_Rx|IFCAP_CSUM_UDPv4_Rx|
802 	     IFCAP_CSUM_TCPv6_Rx|IFCAP_CSUM_UDPv6_Rx;
803 
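	/*
	 * The first switch below only logs the request for debugging;
	 * the actual handling happens in the second switch.
	 */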
804 	switch (command) {
805 	case SIOCSIFFLAGS:
806 		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
807 		break;
808 	case SIOCADDMULTI:
809 	case SIOCDELMULTI:
810 		IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
811 		break;
812 	case SIOCSIFMEDIA:
813 	case SIOCGIFMEDIA:
814 		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
815 		break;
816 	case SIOCSIFCAP:
817 		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
818 		break;
819 	case SIOCSIFMTU:
820 		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
821 		break;
822 	default:
823 		IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)\n", (int)command);
824 		break;
825 	}
826 
827 	switch (command) {
828 	case SIOCSIFMEDIA:
829 	case SIOCGIFMEDIA:
830 		return ifmedia_ioctl(ifp, ifr, &adapter->media, command);
831 	case SIOCSIFCAP:
832 		/* Layer-4 Rx checksum offload has to be turned on and
833 		 * off as a unit.
834 		 */
835 		l4csum_en = ifcr->ifcr_capenable & l4csum;
836 		if (l4csum_en != l4csum && l4csum_en != 0)
837 			return EINVAL;
838 		/*FALLTHROUGH*/
839 	case SIOCADDMULTI:
840 	case SIOCDELMULTI:
841 	case SIOCSIFFLAGS:
842 	case SIOCSIFMTU:
843 	default:
844 		if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
845 			return error;
846 		if ((ifp->if_flags & IFF_RUNNING) == 0)
847 			;
848 		else if (command == SIOCSIFCAP || command == SIOCSIFMTU) {
849 			IXV_CORE_LOCK(adapter);
850 			ixv_init_locked(adapter);
851 			IXV_CORE_UNLOCK(adapter);
852 		} else if (command == SIOCADDMULTI || command == SIOCDELMULTI) {
853 			/*
854 			 * Multicast list has changed; set the hardware filter
855 			 * accordingly.
856 			 */
857 			IXV_CORE_LOCK(adapter);
858 			ixv_disable_intr(adapter);
859 			ixv_set_multi(adapter);
860 			ixv_enable_intr(adapter);
861 			IXV_CORE_UNLOCK(adapter);
862 		}
863 		return 0;
864 	}
865 }
866 
867 /*********************************************************************
868  *  Init entry point
869  *
870  *  This routine is used in two ways. It is used by the stack as
871  *  the init entry point in the network interface structure. It is also used
872  *  by the driver as a hw/sw initialization routine to get to a
873  *  consistent state.
874  *
875  *  return 0 on success, positive on failure
876  **********************************************************************/
877 #define IXGBE_MHADD_MFS_SHIFT 16
878 
879 static void
880 ixv_init_locked(struct adapter *adapter)
881 {
882 	struct ifnet	*ifp = adapter->ifp;
883 	device_t 	dev = adapter->dev;
884 	struct ixgbe_hw *hw = &adapter->hw;
885 	u32		mhadd, gpie;
886 
887 	INIT_DEBUGOUT("ixv_init: begin");
888 	KASSERT(mutex_owned(&adapter->core_mtx));
889 	hw->adapter_stopped = FALSE;
890 	ixgbe_stop_adapter(hw);
891         callout_stop(&adapter->timer);
892 
893         /* reprogram the RAR[0] in case user changed it. */
894         ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
895 
896 	/* Get the latest mac address, User can use a LAA */
897 	memcpy(hw->mac.addr, CLLADDR(adapter->ifp->if_sadl),
898 	     IXGBE_ETH_LENGTH_OF_ADDRESS);
899         ixgbe_set_rar(hw, 0, hw->mac.addr, 0, 1);
900 	hw->addr_ctrl.rar_used_count = 1;
901 
902 	/* Prepare transmit descriptors and buffers */
903 	if (ixv_setup_transmit_structures(adapter)) {
904 		aprint_error_dev(dev,"Could not setup transmit structures\n");
905 		ixv_stop(adapter);
906 		return;
907 	}
908 
909 	ixgbe_reset_hw(hw);
910 	ixv_initialize_transmit_units(adapter);
911 
912 	/* Setup Multicast table */
913 	ixv_set_multi(adapter);
914 
915 	/*
916 	** Determine the correct mbuf pool
917 	** for doing jumbo/headersplit
918 	*/
919 	if (ifp->if_mtu > ETHERMTU)
920 		adapter->rx_mbuf_sz = MJUMPAGESIZE;
921 	else
922 		adapter->rx_mbuf_sz = MCLBYTES;
923 
924 	/* Prepare receive descriptors and buffers */
925 	if (ixv_setup_receive_structures(adapter)) {
926 		device_printf(dev,"Could not setup receive structures\n");
927 		ixv_stop(adapter);
928 		return;
929 	}
930 
931 	/* Configure RX settings */
932 	ixv_initialize_receive_units(adapter);
933 
934 	/* Enable Enhanced MSIX mode */
935 	gpie = IXGBE_READ_REG(&adapter->hw, IXGBE_GPIE);
936 	gpie |= IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_EIAME;
937 	gpie |= IXGBE_GPIE_PBA_SUPPORT | IXGBE_GPIE_OCD;
938         IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
939 
940 #if 0 /* XXX isn't it required? -- msaitoh  */
941 	/* Set the various hardware offload abilities */
942 	ifp->if_hwassist = 0;
943 	if (ifp->if_capenable & IFCAP_TSO4)
944 		ifp->if_hwassist |= CSUM_TSO;
945 	if (ifp->if_capenable & IFCAP_TXCSUM) {
946 		ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
947 #if __FreeBSD_version >= 800000
948 		ifp->if_hwassist |= CSUM_SCTP;
949 #endif
950 	}
951 #endif
952 
953 	/* Set MTU size */
954 	if (ifp->if_mtu > ETHERMTU) {
955 		mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
956 		mhadd &= ~IXGBE_MHADD_MFS_MASK;
957 		mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
958 		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
959 	}
960 
961 	/* Set up VLAN offload and filter */
962 	ixv_setup_vlan_support(adapter);
963 
964 	callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
965 
966 	/* Set up MSI/X routing */
967 	ixv_configure_ivars(adapter);
968 
969 	/* Set up auto-mask */
970 	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_EICS_RTX_QUEUE);
971 
972         /* Set moderation on the Link interrupt */
973         IXGBE_WRITE_REG(hw, IXGBE_VTEITR(adapter->mbxvec), IXV_LINK_ITR);
974 
975 	/* Stats init */
976 	ixv_init_stats(adapter);
977 
978 	/* Config/Enable Link */
979 	ixv_config_link(adapter);
980 
981 	/* And now turn on interrupts */
982 	ixv_enable_intr(adapter);
983 
984 	/* Now inform the stack we're ready */
985 	ifp->if_flags |= IFF_RUNNING;
986 	ifp->if_flags &= ~IFF_OACTIVE;
987 
988 	return;
989 }
990 
991 static int
992 ixv_init(struct ifnet *ifp)
993 {
994 	struct adapter *adapter = ifp->if_softc;
995 
996 	IXV_CORE_LOCK(adapter);
997 	ixv_init_locked(adapter);
998 	IXV_CORE_UNLOCK(adapter);
999 	return 0;
1000 }
1001 
1002 
1003 /*
1004 **
1005 ** MSIX Interrupt Handlers and Tasklets
1006 **
1007 */
1008 
1009 static inline void
1010 ixv_enable_queue(struct adapter *adapter, u32 vector)
1011 {
1012 	struct ixgbe_hw *hw = &adapter->hw;
1013 	u32	queue = 1 << vector;
1014 	u32	mask;
1015 
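	/*
	 * Each queue vector owns one bit in the VF interrupt mask registers;
	 * writing that bit to VTEIMS unmasks (enables) the vector.
	 */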
1016 	mask = (IXGBE_EIMS_RTX_QUEUE & queue);
1017 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
1018 }
1019 
1020 static inline void
1021 ixv_disable_queue(struct adapter *adapter, u32 vector)
1022 {
1023 	struct ixgbe_hw *hw = &adapter->hw;
1024 	u64	queue = (u64)(1 << vector);
1025 	u32	mask;
1026 
1027 	mask = (IXGBE_EIMS_RTX_QUEUE & queue);
1028 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask);
1029 }
1030 
1031 static inline void
1032 ixv_rearm_queues(struct adapter *adapter, u64 queues)
1033 {
1034 	u32 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
1035 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEICS, mask);
1036 }
1037 
1038 
1039 static void
1040 ixv_handle_que(void *context)
1041 {
1042 	struct ix_queue *que = context;
1043 	struct adapter  *adapter = que->adapter;
1044 	struct tx_ring  *txr = que->txr;
1045 	struct ifnet    *ifp = adapter->ifp;
1046 	bool		more;
1047 
1048 	if (ifp->if_flags & IFF_RUNNING) {
1049 		more = ixv_rxeof(que, adapter->rx_process_limit);
1050 		IXV_TX_LOCK(txr);
1051 		ixv_txeof(txr);
1052 #if __FreeBSD_version >= 800000
1053 		if (!drbr_empty(ifp, txr->br))
1054 			ixv_mq_start_locked(ifp, txr, NULL);
1055 #else
1056 		if (!IFQ_IS_EMPTY(&ifp->if_snd))
1057 			ixv_start_locked(txr, ifp);
1058 #endif
1059 		IXV_TX_UNLOCK(txr);
1060 		if (more) {
1061 			adapter->req.ev_count++;
1062 			softint_schedule(que->que_si);
1063 			return;
1064 		}
1065 	}
1066 
1067 	/* Reenable this interrupt */
1068 	ixv_enable_queue(adapter, que->msix);
1069 	return;
1070 }
1071 
1072 /*********************************************************************
1073  *
1074  *  MSI Queue Interrupt Service routine
1075  *
1076  **********************************************************************/
1077 int
1078 ixv_msix_que(void *arg)
1079 {
1080 	struct ix_queue	*que = arg;
1081 	struct adapter  *adapter = que->adapter;
1082 	struct tx_ring	*txr = que->txr;
1083 	struct rx_ring	*rxr = que->rxr;
1084 	bool		more_tx, more_rx;
1085 	u32		newitr = 0;
1086 
1087 	ixv_disable_queue(adapter, que->msix);
1088 	++que->irqs;
1089 
1090 	more_rx = ixv_rxeof(que, adapter->rx_process_limit);
1091 
1092 	IXV_TX_LOCK(txr);
1093 	more_tx = ixv_txeof(txr);
1094 	/*
1095 	** Make certain that if the stack
1096 	** has anything queued the task gets
1097 	** scheduled to handle it.
1098 	*/
1099 #if __FreeBSD_version < 800000
1100 	if (!IFQ_IS_EMPTY(&adapter->ifp->if_snd))
1101 #else
1102 	if (!drbr_empty(adapter->ifp, txr->br))
1103 #endif
1104                 more_tx = 1;
1105 	IXV_TX_UNLOCK(txr);
1106 
1107 	more_rx = ixv_rxeof(que, adapter->rx_process_limit);
1108 
1109 	/* Do AIM now? */
1110 
1111 	if (ixv_enable_aim == FALSE)
1112 		goto no_calc;
1113 	/*
1114 	** Do Adaptive Interrupt Moderation:
1115         **  - Write out last calculated setting
1116 	**  - Calculate based on average size over
1117 	**    the last interval.
1118 	*/
1119         if (que->eitr_setting)
1120                 IXGBE_WRITE_REG(&adapter->hw,
1121                     IXGBE_VTEITR(que->msix),
1122 		    que->eitr_setting);
1123 
1124         que->eitr_setting = 0;
1125 
1126         /* Idle, do nothing */
1127         if ((txr->bytes == 0) && (rxr->bytes == 0))
1128                 goto no_calc;
1129 
1130 	if ((txr->bytes) && (txr->packets))
1131                	newitr = txr->bytes/txr->packets;
1132 	if ((rxr->bytes) && (rxr->packets))
1133 		newitr = max(newitr,
1134 		    (rxr->bytes / rxr->packets));
1135 	newitr += 24; /* account for hardware frame, crc */
1136 
1137 	/* set an upper boundary */
1138 	newitr = min(newitr, 3000);
1139 
1140 	/* Be nice to the mid range */
1141 	if ((newitr > 300) && (newitr < 1200))
1142 		newitr = (newitr / 3);
1143 	else
1144 		newitr = (newitr / 2);
1145 
1146 	newitr |= newitr << 16;
1147 
1148         /* save for next interrupt */
1149         que->eitr_setting = newitr;
1150 
1151         /* Reset state */
1152         txr->bytes = 0;
1153         txr->packets = 0;
1154         rxr->bytes = 0;
1155         rxr->packets = 0;
1156 
1157 no_calc:
1158 	if (more_tx || more_rx)
1159 		softint_schedule(que->que_si);
1160 	else /* Reenable this interrupt */
1161 		ixv_enable_queue(adapter, que->msix);
1162 	return 1;
1163 }
1164 
1165 static int
1166 ixv_msix_mbx(void *arg)
1167 {
1168 	struct adapter	*adapter = arg;
1169 	struct ixgbe_hw *hw = &adapter->hw;
1170 	u32		reg;
1171 
1172 	++adapter->mbx_irq.ev_count;
1173 
1174 	/* First get the cause */
1175 	reg = IXGBE_READ_REG(hw, IXGBE_VTEICS);
1176 	/* Clear interrupt with write */
1177 	IXGBE_WRITE_REG(hw, IXGBE_VTEICR, reg);
1178 
1179 	/* Link status change */
1180 	if (reg & IXGBE_EICR_LSC)
1181 		softint_schedule(adapter->mbx_si);
1182 
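	/* Re-enable the mailbox/link interrupt before returning. */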
1183 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_EIMS_OTHER);
1184 	return 1;
1185 }
1186 
1187 /*********************************************************************
1188  *
1189  *  Media Ioctl callback
1190  *
1191  *  This routine is called whenever the user queries the status of
1192  *  the interface using ifconfig.
1193  *
1194  **********************************************************************/
1195 static void
1196 ixv_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1197 {
1198 	struct adapter *adapter = ifp->if_softc;
1199 
1200 	INIT_DEBUGOUT("ixv_media_status: begin");
1201 	IXV_CORE_LOCK(adapter);
1202 	ixv_update_link_status(adapter);
1203 
1204 	ifmr->ifm_status = IFM_AVALID;
1205 	ifmr->ifm_active = IFM_ETHER;
1206 
1207 	if (!adapter->link_active) {
1208 		IXV_CORE_UNLOCK(adapter);
1209 		return;
1210 	}
1211 
1212 	ifmr->ifm_status |= IFM_ACTIVE;
1213 
1214 	switch (adapter->link_speed) {
1215 		case IXGBE_LINK_SPEED_1GB_FULL:
1216 			ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
1217 			break;
1218 		case IXGBE_LINK_SPEED_10GB_FULL:
1219 			ifmr->ifm_active |= IFM_FDX;
1220 			break;
1221 	}
1222 
1223 	IXV_CORE_UNLOCK(adapter);
1224 
1225 	return;
1226 }
1227 
1228 /*********************************************************************
1229  *
1230  *  Media Ioctl callback
1231  *
1232  *  This routine is called when the user changes speed/duplex using
1233  *  media/mediaopt option with ifconfig.
1234  *
1235  **********************************************************************/
1236 static int
1237 ixv_media_change(struct ifnet *ifp)
1238 {
1239 	struct adapter *adapter = ifp->if_softc;
1240 	struct ifmedia *ifm = &adapter->media;
1241 
1242 	INIT_DEBUGOUT("ixv_media_change: begin");
1243 
1244 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1245 		return (EINVAL);
1246 
1247         switch (IFM_SUBTYPE(ifm->ifm_media)) {
1248         case IFM_AUTO:
1249                 break;
1250         default:
1251                 device_printf(adapter->dev, "Only auto media type\n");
1252 		return (EINVAL);
1253         }
1254 
1255 	return (0);
1256 }
1257 
1258 /*********************************************************************
1259  *
1260  *  This routine maps the mbufs to tx descriptors, allowing the
1261  *  TX engine to transmit the packets.
1262  *  	- return 0 on success, positive on failure
1263  *
1264  **********************************************************************/
1265 
1266 static int
1267 ixv_xmit(struct tx_ring *txr, struct mbuf *m_head)
1268 {
1269 	struct m_tag *mtag;
1270 	struct adapter  *adapter = txr->adapter;
1271 	struct ethercom *ec = &adapter->osdep.ec;
1272 	u32		olinfo_status = 0, cmd_type_len;
1273 	u32		paylen = 0;
1274 	int             i, j, error;
1275 	int		first, last = 0;
1276 	bus_dmamap_t	map;
1277 	struct ixv_tx_buf *txbuf;
1278 	union ixgbe_adv_tx_desc *txd = NULL;
1279 
1280 	/* Basic descriptor defines */
1281         cmd_type_len = (IXGBE_ADVTXD_DTYP_DATA |
1282 	    IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT);
1283 
1284 	if ((mtag = VLAN_OUTPUT_TAG(ec, m_head)) != NULL)
1285         	cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;
1286 
1287         /*
1288          * Important to capture the first descriptor
1289          * used because it will contain the index of
1290          * the one we tell the hardware to report back
1291          */
1292         first = txr->next_avail_desc;
1293 	txbuf = &txr->tx_buffers[first];
1294 	map = txbuf->map;
1295 
1296 	/*
1297 	 * Map the packet for DMA.
1298 	 */
1299 	error = bus_dmamap_load_mbuf(txr->txtag->dt_dmat, map,
1300 	    m_head, BUS_DMA_NOWAIT);
1301 
1302 	switch (error) {
1303 	case EAGAIN:
1304 		adapter->eagain_tx_dma_setup.ev_count++;
1305 		return EAGAIN;
1306 	case ENOMEM:
1307 		adapter->enomem_tx_dma_setup.ev_count++;
1308 		return EAGAIN;
1309 	case EFBIG:
1310 		adapter->efbig_tx_dma_setup.ev_count++;
1311 		return error;
1312 	case EINVAL:
1313 		adapter->einval_tx_dma_setup.ev_count++;
1314 		return error;
1315 	default:
1316 		adapter->other_tx_dma_setup.ev_count++;
1317 		return error;
1318 	case 0:
1319 		break;
1320 	}
1321 
1322 	/* Make certain there are enough descriptors */
1323 	if (map->dm_nsegs > txr->tx_avail - 2) {
1324 		txr->no_desc_avail.ev_count++;
1325 		/* XXX s/ixgbe/ixv/ */
1326 		ixgbe_dmamap_unload(txr->txtag, txbuf->map);
1327 		return EAGAIN;
1328 	}
1329 
1330 	/*
1331 	** Set up the appropriate offload context;
1332 	** this becomes the first descriptor of
1333 	** a packet.
1334 	*/
1335 	if (m_head->m_pkthdr.csum_flags & (M_CSUM_TSOv4|M_CSUM_TSOv6)) {
1336 		if (ixv_tso_setup(txr, m_head, &paylen)) {
1337 			cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
1338 			olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8;
1339 			olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
1340 			olinfo_status |= paylen << IXGBE_ADVTXD_PAYLEN_SHIFT;
1341 			++adapter->tso_tx.ev_count;
1342 		} else {
1343 			++adapter->tso_err.ev_count;
1344 			/* XXX unload DMA map! --dyoung -> easy? --msaitoh */
1345 			return (ENXIO);
1346 		}
1347 	} else
1348 		olinfo_status |= ixv_tx_ctx_setup(txr, m_head);
1349 
1350         /* Record payload length */
1351 	if (paylen == 0)
1352         	olinfo_status |= m_head->m_pkthdr.len <<
1353 		    IXGBE_ADVTXD_PAYLEN_SHIFT;
1354 
1355 	i = txr->next_avail_desc;
1356 	for (j = 0; j < map->dm_nsegs; j++) {
1357 		bus_size_t seglen;
1358 		bus_addr_t segaddr;
1359 
1360 		txbuf = &txr->tx_buffers[i];
1361 		txd = &txr->tx_base[i];
1362 		seglen = map->dm_segs[j].ds_len;
1363 		segaddr = htole64(map->dm_segs[j].ds_addr);
1364 
1365 		txd->read.buffer_addr = segaddr;
1366 		txd->read.cmd_type_len = htole32(txr->txd_cmd |
1367 		    cmd_type_len |seglen);
1368 		txd->read.olinfo_status = htole32(olinfo_status);
1369 		last = i; /* descriptor that will get completion IRQ */
1370 
1371 		if (++i == adapter->num_tx_desc)
1372 			i = 0;
1373 
1374 		txbuf->m_head = NULL;
1375 		txbuf->eop_index = -1;
1376 	}
1377 
1378 	txd->read.cmd_type_len |=
1379 	    htole32(IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS);
1380 	txr->tx_avail -= map->dm_nsegs;
1381 	txr->next_avail_desc = i;
1382 
1383 	txbuf->m_head = m_head;
1384 	/* Swap the dma map between the first and last descriptor */
1385 	txr->tx_buffers[first].map = txbuf->map;
1386 	txbuf->map = map;
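	/*
	 * The loaded map now travels with the buffer that holds the mbuf
	 * (the last descriptor of the frame), so it is the one synced here
	 * and torn down at completion time; the first buffer keeps the spare
	 * map for a later frame.
	 */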
1387 	bus_dmamap_sync(txr->txtag->dt_dmat, map, 0, m_head->m_pkthdr.len,
1388 	    BUS_DMASYNC_PREWRITE);
1389 
1390         /* Set the index of the descriptor that will be marked done */
1391         txbuf = &txr->tx_buffers[first];
1392 	txbuf->eop_index = last;
1393 
1394 	/* XXX s/ixgbe/ixg/ */
1395         ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
1396             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1397 	/*
1398 	 * Advance the Transmit Descriptor Tail (Tdt), this tells the
1399 	 * hardware that this frame is available to transmit.
1400 	 */
1401 	++txr->total_packets.ev_count;
1402 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(txr->me), i);
1403 
1404 	return 0;
1405 }
1406 
1407 
1408 /*********************************************************************
1409  *  Multicast Update
1410  *
1411  *  This routine is called whenever the multicast address list is updated.
1412  *
1413  **********************************************************************/
1414 #define IXGBE_RAR_ENTRIES 16
1415 
1416 static void
1417 ixv_set_multi(struct adapter *adapter)
1418 {
1419 	struct ether_multi *enm;
1420 	struct ether_multistep step;
1421 	u8	mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS];
1422 	u8	*update_ptr;
1423 	int	mcnt = 0;
1424 	struct ethercom *ec = &adapter->osdep.ec;
1425 
1426 	IOCTL_DEBUGOUT("ixv_set_multi: begin");
1427 
1428 	ETHER_FIRST_MULTI(step, ec, enm);
1429 	while (enm != NULL) {
1430 		bcopy(enm->enm_addrlo,
1431 		    &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
1432 		    IXGBE_ETH_LENGTH_OF_ADDRESS);
1433 		mcnt++;
1434 		/* XXX This might be required --msaitoh */
1435 		if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES)
1436 			break;
1437 		ETHER_NEXT_MULTI(step, enm);
1438 	}
1439 
1440 	update_ptr = mta;
1441 
1442 	ixgbe_update_mc_addr_list(&adapter->hw,
1443 	    update_ptr, mcnt, ixv_mc_array_itr, TRUE);
1444 
1445 	return;
1446 }
1447 
1448 /*
1449  * This is an iterator function needed by the shared multicast
1450  * code. It simply feeds the shared code routine the addresses
1451  * collected in ixv_set_multi()'s array one by one.
1452  */
1453 static u8 *
1454 ixv_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
1455 {
1456 	u8 *addr = *update_ptr;
1457 	u8 *newptr;
1458 	*vmdq = 0;
1459 
1460 	newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
1461 	*update_ptr = newptr;
1462 	return addr;
1463 }
1464 
1465 /*********************************************************************
1466  *  Timer routine
1467  *
1468  *  This routine checks for link status, updates statistics,
1469  *  and runs the watchdog check.
1470  *
1471  **********************************************************************/
1472 
1473 static void
1474 ixv_local_timer1(void *arg)
1475 {
1476 	struct adapter	*adapter = arg;
1477 	device_t	dev = adapter->dev;
1478 	struct tx_ring	*txr = adapter->tx_rings;
1479 	int		i;
1480 	struct timeval now, elapsed;
1481 
1482 	KASSERT(mutex_owned(&adapter->core_mtx));
1483 
1484 	ixv_update_link_status(adapter);
1485 
1486 	/* Stats Update */
1487 	ixv_update_stats(adapter);
1488 
1489 	/*
1490 	 * If the interface has been paused
1491 	 * then don't do the watchdog check
1492 	 */
1493 	if (IXGBE_READ_REG(&adapter->hw, IXGBE_TFCS) & IXGBE_TFCS_TXOFF)
1494 		goto out;
1495 	/*
1496 	** Check for time since any descriptor was cleaned
1497 	*/
1498         for (i = 0; i < adapter->num_queues; i++, txr++) {
1499 		IXV_TX_LOCK(txr);
1500 		if (txr->watchdog_check == FALSE) {
1501 			IXV_TX_UNLOCK(txr);
1502 			continue;
1503 		}
1504 		getmicrotime(&now);
1505 		timersub(&now, &txr->watchdog_time, &elapsed);
1506 		if (tvtohz(&elapsed) > IXV_WATCHDOG)
1507 			goto hung;
1508 		IXV_TX_UNLOCK(txr);
1509 	}
1510 out:
1511        	ixv_rearm_queues(adapter, adapter->que_mask);
1512 	callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
1513 	return;
1514 
1515 hung:
1516 	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
1517 	device_printf(dev,"Queue(%d) tdh = %d, hw tdt = %d\n", txr->me,
1518 	    IXGBE_READ_REG(&adapter->hw, IXGBE_VFTDH(i)),
1519 	    IXGBE_READ_REG(&adapter->hw, IXGBE_VFTDT(i)));
1520 	device_printf(dev,"TX(%d) desc avail = %d, "
1521 	    "Next TX to Clean = %d\n",
1522 	    txr->me, txr->tx_avail, txr->next_to_clean);
1523 	adapter->ifp->if_flags &= ~IFF_RUNNING;
1524 	adapter->watchdog_events.ev_count++;
1525 	IXV_TX_UNLOCK(txr);
1526 	ixv_init_locked(adapter);
1527 }
1528 
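/* Callout wrapper: take the core lock around the timer body above. */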
1529 static void
1530 ixv_local_timer(void *arg)
1531 {
1532 	struct adapter *adapter = arg;
1533 
1534 	IXV_CORE_LOCK(adapter);
1535 	ixv_local_timer1(adapter);
1536 	IXV_CORE_UNLOCK(adapter);
1537 }
1538 
1539 /*
1540 ** Note: this routine updates the OS on the link state;
1541 **	the real check of the hardware only happens with
1542 **	a link interrupt.
1543 */
1544 static void
1545 ixv_update_link_status(struct adapter *adapter)
1546 {
1547 	struct ifnet	*ifp = adapter->ifp;
1548 	struct tx_ring *txr = adapter->tx_rings;
1549 	device_t dev = adapter->dev;
1550 
1551 
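	/*
	 * adapter->link_speed holds an IXGBE_LINK_SPEED_* bit mask; 128 is
	 * IXGBE_LINK_SPEED_10GB_FULL, hence the 10 vs. 1 Gbps choice below.
	 */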
1552 	if (adapter->link_up){
1553 		if (adapter->link_active == FALSE) {
1554 			if (bootverbose)
1555 				device_printf(dev,"Link is up %d Gbps %s \n",
1556 				    ((adapter->link_speed == 128)? 10:1),
1557 				    "Full Duplex");
1558 			adapter->link_active = TRUE;
1559 			if_link_state_change(ifp, LINK_STATE_UP);
1560 		}
1561 	} else { /* Link down */
1562 		if (adapter->link_active == TRUE) {
1563 			if (bootverbose)
1564 				device_printf(dev,"Link is Down\n");
1565 			if_link_state_change(ifp, LINK_STATE_DOWN);
1566 			adapter->link_active = FALSE;
1567 			for (int i = 0; i < adapter->num_queues;
1568 			    i++, txr++)
1569 				txr->watchdog_check = FALSE;
1570 		}
1571 	}
1572 
1573 	return;
1574 }
1575 
1576 
1577 static void
1578 ixv_ifstop(struct ifnet *ifp, int disable)
1579 {
1580 	struct adapter *adapter = ifp->if_softc;
1581 
1582 	IXV_CORE_LOCK(adapter);
1583 	ixv_stop(adapter);
1584 	IXV_CORE_UNLOCK(adapter);
1585 }
1586 
1587 /*********************************************************************
1588  *
1589  *  This routine disables all traffic on the adapter by issuing a
1590  *  global reset on the MAC and deallocates TX/RX buffers.
1591  *
1592  **********************************************************************/
1593 
1594 static void
1595 ixv_stop(void *arg)
1596 {
1597 	struct ifnet   *ifp;
1598 	struct adapter *adapter = arg;
1599 	struct ixgbe_hw *hw = &adapter->hw;
1600 	ifp = adapter->ifp;
1601 
1602 	KASSERT(mutex_owned(&adapter->core_mtx));
1603 
1604 	INIT_DEBUGOUT("ixv_stop: begin\n");
1605 	ixv_disable_intr(adapter);
1606 
1607 	/* Tell the stack that the interface is no longer active */
1608 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1609 
1610 	ixgbe_reset_hw(hw);
1611 	adapter->hw.adapter_stopped = FALSE;
1612 	ixgbe_stop_adapter(hw);
1613 	callout_stop(&adapter->timer);
1614 
1615 	/* reprogram the RAR[0] in case user changed it. */
1616 	ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
1617 
1618 	return;
1619 }
1620 
1621 
1622 /*********************************************************************
1623  *
1624  *  Determine hardware revision.
1625  *
1626  **********************************************************************/
1627 static void
1628 ixv_identify_hardware(struct adapter *adapter)
1629 {
1630 	u16		pci_cmd_word;
1631 	pcitag_t tag;
1632 	pci_chipset_tag_t pc;
1633 	pcireg_t subid, id;
1634 	struct ixgbe_hw *hw = &adapter->hw;
1635 
1636 	pc = adapter->osdep.pc;
1637 	tag = adapter->osdep.tag;
1638 
1639 	/*
1640 	** Make sure BUSMASTER is set; on a VM under
1641 	** KVM it may not be, and that will break things.
1642 	*/
1643 	pci_cmd_word = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
1644 	if (!(pci_cmd_word & PCI_COMMAND_MASTER_ENABLE)) {
1645 		INIT_DEBUGOUT("Bus Master bit was not set!\n");
1646 		pci_cmd_word |= PCI_COMMAND_MASTER_ENABLE;
1647 		pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, pci_cmd_word);
1648 	}
1649 
1650 	id = pci_conf_read(pc, tag, PCI_ID_REG);
1651 	subid = pci_conf_read(pc, tag, PCI_SUBSYS_ID_REG);
1652 
1653 	/* Save off the information about this board */
1654 	hw->vendor_id = PCI_VENDOR(id);
1655 	hw->device_id = PCI_PRODUCT(id);
1656 	hw->revision_id = PCI_REVISION(pci_conf_read(pc, tag, PCI_CLASS_REG));
1657 	hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid);
1658 	hw->subsystem_device_id = PCI_SUBSYS_ID(subid);
1659 
1660 	return;
1661 }
1662 
1663 /*********************************************************************
1664  *
1665  *  Setup MSIX Interrupt resources and handlers
1666  *
1667  **********************************************************************/
1668 static int
1669 ixv_allocate_msix(struct adapter *adapter, const struct pci_attach_args *pa)
1670 {
1671 #if !defined(NETBSD_MSI_OR_MSIX)
1672 	return 0;
1673 #else
1674 	device_t        dev = adapter->dev;
1675 	struct ix_queue *que = adapter->queues;
1676 	int 		error, rid, vector = 0;
1677 	pci_chipset_tag_t pc;
1678 	pcitag_t	tag;
1679 	char intrbuf[PCI_INTRSTR_LEN];
1680 	const char	*intrstr = NULL;
1681 	kcpuset_t	*affinity;
1682 	int		cpu_id = 0;
1683 
1684 	pc = adapter->osdep.pc;
1685 	tag = adapter->osdep.tag;
1686 
1687 	if (pci_msix_alloc_exact(pa,
1688 		&adapter->osdep.intrs, IXG_MSIX_NINTR) != 0)
1689 		return (ENXIO);
1690 
1691 	kcpuset_create(&affinity, false);
1692 	for (int i = 0; i < adapter->num_queues; i++, vector++, que++) {
1693 		intrstr = pci_intr_string(pc, adapter->osdep.intrs[i], intrbuf,
1694 		    sizeof(intrbuf));
1695 #ifdef IXV_MPSAFE
1696 		pci_intr_setattr(pc, adapter->osdep.intrs[i], PCI_INTR_MPSAFE,
1697 		    true);
1698 #endif
1699 		/* Set the handler function */
1700 		adapter->osdep.ihs[i] = pci_intr_establish(pc,
1701 		    adapter->osdep.intrs[i], IPL_NET, ixv_msix_que, que);
1702 		if (adapter->osdep.ihs[i] == NULL) {
1703 			que->res = NULL;
1704 			aprint_error_dev(dev,
1705 			    "Failed to register QUE handler");
1706 			kcpuset_destroy(affinity);
1707 			return (ENXIO);
1708 		}
1709 		que->msix = vector;
1710         	adapter->que_mask |= (u64)(1 << que->msix);
1711 
1712 		cpu_id = i;
1713 		/* Round-robin affinity */
1714 		kcpuset_zero(affinity);
1715 		kcpuset_set(affinity, cpu_id % ncpu);
1716 		error = interrupt_distribute(adapter->osdep.ihs[i], affinity,
1717 		    NULL);
1718 		aprint_normal_dev(dev, "for TX/RX, interrupting at %s",
1719 		    intrstr);
1720 		if (error == 0)
1721 			aprint_normal(", bound queue %d to cpu %d\n",
1722 			    i, cpu_id);
1723 		else
1724 			aprint_normal("\n");
1725 
1726 		que->que_si = softint_establish(SOFTINT_NET, ixv_handle_que,
1727 		    que);
1728 		if (que->que_si == NULL) {
1729 			aprint_error_dev(dev,
1730 			    "could not establish software interrupt\n");
1731 		}
1732 	}
1733 
1734 	/* and Mailbox */
1735 	cpu_id++;
1736 	intrstr = pci_intr_string(pc, adapter->osdep.intrs[vector], intrbuf,
1737 	    sizeof(intrbuf));
1738 #ifdef IXG_MPSAFE
1739 	pci_intr_setattr(pc, &adapter->osdep.intrs[vector], PCI_INTR_MPSAFE, true);
1740 #endif
1741 	/* Set the mbx handler function */
1742 	adapter->osdep.ihs[vector] = pci_intr_establish(pc,
1743 	    adapter->osdep.intrs[vector], IPL_NET, ixv_msix_mbx, adapter);
1744 	if (adapter->osdep.ihs[vector] == NULL) {
1745 		adapter->res = NULL;
1746 		aprint_error_dev(dev, "Failed to register LINK handler\n");
1747 		kcpuset_destroy(affinity);
1748 		return (ENXIO);
1749 	}
1750 	/* Round-robin affinity */
1751 	kcpuset_zero(affinity);
1752 	kcpuset_set(affinity, cpu_id % ncpu);
1753 	error = interrupt_distribute(adapter->osdep.ihs[vector], affinity, NULL);
1754 
1755 	aprint_normal_dev(dev,
1756 	    "for link, interrupting at %s, ", intrstr);
1757 	if (error == 0) {
1758 		aprint_normal("affinity to cpu %d\n", cpu_id);
1759 	} else
		aprint_normal("\n");
1760 	adapter->mbxvec = vector;
1761 	/* Tasklets for Mailbox */
1762 	adapter->mbx_si = softint_establish(SOFTINT_NET, ixv_handle_mbx,
1763 	    adapter);
1764 	/*
1765 	** Due to a broken design QEMU will fail to properly
1766 	** enable the guest for MSIX unless the vectors in
1767 	** the table are all set up, so we must rewrite the
1768 	** ENABLE in the MSIX control register again at this
1769 	** point to cause it to successfully initialize us.
1770 	*/
1771 	if (adapter->hw.mac.type == ixgbe_mac_82599_vf) {
1772 		int msix_ctrl;
1773 		pci_get_capability(pc, tag, PCI_CAP_MSIX, &rid, NULL);
1774 		rid += PCI_MSIX_CTL;
1775 		msix_ctrl = pci_conf_read(pc, tag, rid);
1776 		msix_ctrl |= PCI_MSIX_CTL_ENABLE;
1777 		pci_conf_write(pc, tag, rid, msix_ctrl);
1778 	}
1779 
1780 	return (0);
1781 #endif
1782 }
1783 
1784 /*
1785  * Setup MSIX resources, note that the VF
1786  * device MUST use MSIX, there is no fallback.
1787  */
1788 static int
1789 ixv_setup_msix(struct adapter *adapter)
1790 {
1791 #if !defined(NETBSD_MSI_OR_MSIX)
1792 	return 0;
1793 #else
1794 	device_t dev = adapter->dev;
1795 	int want, msgs;
1796 
1797 	/*
1798 	** Want two vectors: one for a queue,
1799 	** plus an additional for mailbox.
1800 	*/
1801 	msgs = pci_msix_count(adapter->osdep.pc, adapter->osdep.tag);
1802 	if (msgs < IXG_MSIX_NINTR) {
1803 		aprint_error_dev(dev, "MSIX config error\n");
1804 		return (ENXIO);
1805 	}
1806 	want = MIN(msgs, IXG_MSIX_NINTR);
1807 
1808 	adapter->msix_mem = (void *)1; /* XXX */
1809 	aprint_normal_dev(dev,
1810 	    "Using MSIX interrupts with %d vectors\n", msgs);
1811 	return (want);
1812 #endif
1813 }
1814 
1815 
1816 static int
1817 ixv_allocate_pci_resources(struct adapter *adapter,
1818     const struct pci_attach_args *pa)
1819 {
1820 	pcireg_t	memtype;
1821 	device_t        dev = adapter->dev;
1822 	bus_addr_t addr;
1823 	int flags;
1824 
1825 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(0));
1826 
1827 	switch (memtype) {
1828 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
1829 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
1830 		adapter->osdep.mem_bus_space_tag = pa->pa_memt;
1831 		if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, PCI_BAR(0),
1832 	              memtype, &addr, &adapter->osdep.mem_size, &flags) != 0)
1833 			goto map_err;
1834 		if ((flags & BUS_SPACE_MAP_PREFETCHABLE) != 0) {
1835 			aprint_normal_dev(dev, "clearing prefetchable bit\n");
1836 			flags &= ~BUS_SPACE_MAP_PREFETCHABLE;
1837 		}
1838 		if (bus_space_map(adapter->osdep.mem_bus_space_tag, addr,
1839 		     adapter->osdep.mem_size, flags,
1840 		     &adapter->osdep.mem_bus_space_handle) != 0) {
1841 map_err:
1842 			adapter->osdep.mem_size = 0;
1843 			aprint_error_dev(dev, "unable to map BAR0\n");
1844 			return ENXIO;
1845 		}
1846 		break;
1847 	default:
1848 		aprint_error_dev(dev, "unexpected type on BAR0\n");
1849 		return ENXIO;
1850 	}
1851 
1852 	adapter->num_queues = 1;
1853 	adapter->hw.back = &adapter->osdep;
1854 
1855 	/*
1856 	** Now setup MSI/X, should
1857 	** return us the number of
1858 	** configured vectors.
1859 	*/
1860 	adapter->msix = ixv_setup_msix(adapter);
1861 	if (adapter->msix == ENXIO)
1862 		return (ENXIO);
1863 	else
1864 		return (0);
1865 }
1866 
1867 static void
1868 ixv_free_pci_resources(struct adapter * adapter)
1869 {
1870 #if !defined(NETBSD_MSI_OR_MSIX)
1871 #else
1872 	struct 		ix_queue *que = adapter->queues;
1873 	int		rid;
1874 
1875 	/*
1876 	**  Release all msix queue resources:
1877 	*/
1878 	for (int i = 0; i < adapter->num_queues; i++, que++) {
1879 		rid = que->msix + 1;
1880 		if (que->res != NULL)
1881 			pci_intr_disestablish(adapter->osdep.pc,
1882 			    adapter->osdep.ihs[i]);
1883 	}
1884 
1885 
1886 	/* Clean the Legacy or Link interrupt last */
1887 	if (adapter->mbxvec) /* we are doing MSIX */
1888 		rid = adapter->mbxvec + 1;
1889 	else
1890 		rid = (adapter->msix != 0) ? 1 : 0;
1891 
1892 	if (adapter->osdep.ihs[rid] != NULL)
1893 		pci_intr_disestablish(adapter->osdep.pc,
1894 		    adapter->osdep.ihs[rid]);
1895 	adapter->osdep.ihs[rid] = NULL;
1896 
1897 #if defined(NETBSD_MSI_OR_MSIX)
1898 	pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs,
1899 	    adapter->osdep.nintrs);
1900 #endif
1901 
1902 	if (adapter->osdep.mem_size != 0) {
1903 		bus_space_unmap(adapter->osdep.mem_bus_space_tag,
1904 		    adapter->osdep.mem_bus_space_handle,
1905 		    adapter->osdep.mem_size);
1906 	}
1907 
1908 #endif
1909 	return;
1910 }
1911 
1912 /*********************************************************************
1913  *
1914  *  Setup networking device structure and register an interface.
1915  *
1916  **********************************************************************/
1917 static void
1918 ixv_setup_interface(device_t dev, struct adapter *adapter)
1919 {
1920 	struct ethercom *ec = &adapter->osdep.ec;
1921 	struct ifnet   *ifp;
1922 
1923 	INIT_DEBUGOUT("ixv_setup_interface: begin");
1924 
1925 	ifp = adapter->ifp = &ec->ec_if;
1926 	strlcpy(ifp->if_xname, device_xname(dev), IFNAMSIZ);
1927 	ifp->if_baudrate = 1000000000;
1928 	ifp->if_init = ixv_init;
1929 	ifp->if_stop = ixv_ifstop;
1930 	ifp->if_softc = adapter;
1931 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1932 	ifp->if_ioctl = ixv_ioctl;
1933 #if __FreeBSD_version >= 800000
1934 	ifp->if_transmit = ixv_mq_start;
1935 	ifp->if_qflush = ixv_qflush;
1936 #else
1937 	ifp->if_start = ixv_start;
1938 #endif
1939 	ifp->if_snd.ifq_maxlen = adapter->num_tx_desc - 2;
1940 
1941 	if_attach(ifp);
1942 	ether_ifattach(ifp, adapter->hw.mac.addr);
1943 	ether_set_ifflags_cb(ec, ixv_ifflags_cb);
1944 
1945 	adapter->max_frame_size =
1946 	    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
1947 
1948 	/*
1949 	 * Tell the upper layer(s) we support long frames.
1950 	 */
1951 	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
1952 
1953 	ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSOv4;
1954 	ifp->if_capenable = 0;
1955 
1956 	ec->ec_capabilities |= ETHERCAP_VLAN_HWCSUM;
1957 	ec->ec_capabilities |= ETHERCAP_JUMBO_MTU;
1958 	ec->ec_capabilities |= ETHERCAP_VLAN_HWTAGGING
1959 	    		| ETHERCAP_VLAN_MTU;
1960 	ec->ec_capenable = ec->ec_capabilities;
1961 
1962 	/* Don't enable LRO by default */
1963 	ifp->if_capabilities |= IFCAP_LRO;
1964 
1965 	/*
1966 	** Don't turn this on by default; if vlans are
1967 	** created on another pseudo device (eg. lagg)
1968 	** then vlan events are not passed thru, breaking
1969 	** operation, but with HW FILTER off it works. If
1970 	** using vlans directly on this driver you can
1971 	** enable this and get full hardware tag filtering.
1972 	*/
1973 	ec->ec_capabilities |= ETHERCAP_VLAN_HWFILTER;
1974 
1975 	/*
1976 	 * Specify the media types supported by this adapter and register
1977 	 * callbacks to update media and link information
1978 	 */
1979 	ifmedia_init(&adapter->media, IFM_IMASK, ixv_media_change,
1980 		     ixv_media_status);
1981 	ifmedia_add(&adapter->media, IFM_ETHER | IFM_FDX, 0, NULL);
1982 	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1983 	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
1984 
1985 	return;
1986 }
1987 
1988 static void
1989 ixv_config_link(struct adapter *adapter)
1990 {
1991 	struct ixgbe_hw *hw = &adapter->hw;
1992 	u32	autoneg, err = 0;
1993 
1994 	if (hw->mac.ops.check_link)
1995 		err = hw->mac.ops.check_link(hw, &autoneg,
1996 		    &adapter->link_up, FALSE);
1997 	if (err)
1998 		goto out;
1999 
2000 	if (hw->mac.ops.setup_link)
2001 		err = hw->mac.ops.setup_link(hw,
2002 		    autoneg, adapter->link_up);
2003 out:
2004 	return;
2005 }
2006 
2007 /********************************************************************
2008  * Manage DMA'able memory.
2009  *******************************************************************/
2010 
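/*
 * ixv_dma_malloc() follows the usual NetBSD bus_dma(9) sequence:
 * create a tag, allocate memory (bus_dmamem_alloc), map it into
 * kernel VA (bus_dmamem_map), create a map (bus_dmamap_create) and
 * load it (bus_dmamap_load); each failure label below unwinds the
 * steps already taken, in reverse order.
 */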
2011 static int
2012 ixv_dma_malloc(struct adapter *adapter, bus_size_t size,
2013 		struct ixv_dma_alloc *dma, int mapflags)
2014 {
2015 	device_t dev = adapter->dev;
2016 	int             r, rsegs;
2017 
2018 	r = ixgbe_dma_tag_create(adapter->osdep.dmat,	/* parent */
2019 			       DBA_ALIGN, 0,	/* alignment, bounds */
2020 			       size,	/* maxsize */
2021 			       1,	/* nsegments */
2022 			       size,	/* maxsegsize */
2023 			       BUS_DMA_ALLOCNOW,	/* flags */
2024 			       &dma->dma_tag);
2025 	if (r != 0) {
2026 		aprint_error_dev(dev,
2027 		    "ixv_dma_malloc: ixgbe_dma_tag_create failed; error %u\n", r);
2028 		goto fail_0;
2029 	}
2030 	r = bus_dmamem_alloc(dma->dma_tag->dt_dmat,
2031 		size,
2032 		dma->dma_tag->dt_alignment,
2033 		dma->dma_tag->dt_boundary,
2034 		&dma->dma_seg, 1, &rsegs, BUS_DMA_NOWAIT);
2035 	if (r != 0) {
2036 		aprint_error_dev(dev,
2037 		    "%s: bus_dmamem_alloc failed; error %u\n", __func__, r);
2038 		goto fail_1;
2039 	}
2040 
2041 	r = bus_dmamem_map(dma->dma_tag->dt_dmat, &dma->dma_seg, rsegs,
2042 	    size, &dma->dma_vaddr, BUS_DMA_NOWAIT);
2043 	if (r != 0) {
2044 		aprint_error_dev(dev, "%s: bus_dmamem_map failed; error %d\n",
2045 		    __func__, r);
2046 		goto fail_2;
2047 	}
2048 
2049 	r = ixgbe_dmamap_create(dma->dma_tag, 0, &dma->dma_map);
2050 	if (r != 0) {
2051 		aprint_error_dev(dev, "%s: ixgbe_dmamap_create failed; error %d\n",
2052 		    __func__, r);
2053 		goto fail_3;
2054 	}
2055 
2056 	r = bus_dmamap_load(dma->dma_tag->dt_dmat, dma->dma_map, dma->dma_vaddr,
2057 			    size,
2058 			    NULL,
2059 			    mapflags | BUS_DMA_NOWAIT);
2060 	if (r != 0) {
2061 		aprint_error_dev(dev,"%s: bus_dmamap_load failed; error %u\n",
2062 		    __func__, r);
2063 		goto fail_4;
2064 	}
2065 	dma->dma_paddr = dma->dma_map->dm_segs[0].ds_addr;
2066 	dma->dma_size = size;
2067 	return 0;
2068 fail_4:
2069 	ixgbe_dmamap_destroy(dma->dma_tag, dma->dma_map);
2070 fail_3:
2071 	bus_dmamem_unmap(dma->dma_tag->dt_dmat, dma->dma_vaddr, size);
2072 fail_2:
2073 	bus_dmamem_free(dma->dma_tag->dt_dmat, &dma->dma_seg, rsegs);
2074 fail_1:
2075 	ixgbe_dma_tag_destroy(dma->dma_tag);
2076 fail_0:
2077 	dma->dma_tag = NULL;
2078 	return (r);
2079 }
2080 
2081 static void
2082 ixv_dma_free(struct adapter *adapter, struct ixv_dma_alloc *dma)
2083 {
2084 	bus_dmamap_sync(dma->dma_tag->dt_dmat, dma->dma_map, 0, dma->dma_size,
2085 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2086 	ixgbe_dmamap_unload(dma->dma_tag, dma->dma_map);
2087 	bus_dmamem_free(dma->dma_tag->dt_dmat, &dma->dma_seg, 1);
2088 	ixgbe_dma_tag_destroy(dma->dma_tag);
2089 }
2090 
2091 
2092 /*********************************************************************
2093  *
2094  *  Allocate memory for the transmit and receive rings, and then
2095  *  the descriptors associated with each, called only once at attach.
2096  *
2097  **********************************************************************/
2098 static int
2099 ixv_allocate_queues(struct adapter *adapter)
2100 {
2101 	device_t	dev = adapter->dev;
2102 	struct ix_queue	*que;
2103 	struct tx_ring	*txr;
2104 	struct rx_ring	*rxr;
2105 	int rsize, tsize, error = 0;
2106 	int txconf = 0, rxconf = 0;
2107 
2108         /* First allocate the top level queue structs */
2109         if (!(adapter->queues =
2110             (struct ix_queue *) malloc(sizeof(struct ix_queue) *
2111             adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
2112                 aprint_error_dev(dev, "Unable to allocate queue memory\n");
2113                 error = ENOMEM;
2114                 goto fail;
2115         }
2116 
2117 	/* First allocate the TX ring struct memory */
2118 	if (!(adapter->tx_rings =
2119 	    (struct tx_ring *) malloc(sizeof(struct tx_ring) *
2120 	    adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
2121 		aprint_error_dev(dev, "Unable to allocate TX ring memory\n");
2122 		error = ENOMEM;
2123 		goto tx_fail;
2124 	}
2125 
2126 	/* Next allocate the RX */
2127 	if (!(adapter->rx_rings =
2128 	    (struct rx_ring *) malloc(sizeof(struct rx_ring) *
2129 	    adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
2130 		aprint_error_dev(dev, "Unable to allocate RX ring memory\n");
2131 		error = ENOMEM;
2132 		goto rx_fail;
2133 	}
2134 
2135 	/* For the ring itself */
2136 	tsize = roundup2(adapter->num_tx_desc *
2137 	    sizeof(union ixgbe_adv_tx_desc), DBA_ALIGN);
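	/*
	 * Rounding the ring size to DBA_ALIGN (and allocating the DMA
	 * memory with that alignment in ixv_dma_malloc) keeps the ring
	 * base address acceptable to the VFTDBA/VFRDBA registers.
	 */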
2138 
2139 	/*
2140 	 * Now set up the TX queues, txconf is needed to handle the
2141 	 * possibility that things fail midcourse and we need to
2142 	 * undo memory gracefully
2143 	 */
2144 	for (int i = 0; i < adapter->num_queues; i++, txconf++) {
2145 		/* Set up some basics */
2146 		txr = &adapter->tx_rings[i];
2147 		txr->adapter = adapter;
2148 		txr->me = i;
2149 
2150 		/* Initialize the TX side lock */
2151 		snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
2152 		    device_xname(dev), txr->me);
2153 		mutex_init(&txr->tx_mtx, MUTEX_DEFAULT, IPL_NET);
2154 
2155 		if (ixv_dma_malloc(adapter, tsize,
2156 			&txr->txdma, BUS_DMA_NOWAIT)) {
2157 			aprint_error_dev(dev,
2158 			    "Unable to allocate TX Descriptor memory\n");
2159 			error = ENOMEM;
2160 			goto err_tx_desc;
2161 		}
2162 		txr->tx_base = (union ixgbe_adv_tx_desc *)txr->txdma.dma_vaddr;
2163 		bzero((void *)txr->tx_base, tsize);
2164 
2165         	/* Now allocate transmit buffers for the ring */
2166         	if (ixv_allocate_transmit_buffers(txr)) {
2167 			aprint_error_dev(dev,
2168 			    "Critical Failure setting up transmit buffers\n");
2169 			error = ENOMEM;
2170 			goto err_tx_desc;
2171         	}
2172 #if __FreeBSD_version >= 800000
2173 		/* Allocate a buf ring */
2174 		txr->br = buf_ring_alloc(IXV_BR_SIZE, M_DEVBUF,
2175 		    M_WAITOK, &txr->tx_mtx);
2176 		if (txr->br == NULL) {
2177 			aprint_error_dev(dev,
2178 			    "Critical Failure setting up buf ring\n");
2179 			error = ENOMEM;
2180 			goto err_tx_desc;
2181 		}
2182 #endif
2183 	}
2184 
2185 	/*
2186 	 * Next the RX queues...
2187 	 */
2188 	rsize = roundup2(adapter->num_rx_desc *
2189 	    sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN);
2190 	for (int i = 0; i < adapter->num_queues; i++, rxconf++) {
2191 		rxr = &adapter->rx_rings[i];
2192 		/* Set up some basics */
2193 		rxr->adapter = adapter;
2194 		rxr->me = i;
2195 
2196 		/* Initialize the RX side lock */
2197 		snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
2198 		    device_xname(dev), rxr->me);
2199 		mutex_init(&rxr->rx_mtx, MUTEX_DEFAULT, IPL_NET);
2200 
2201 		if (ixv_dma_malloc(adapter, rsize,
2202 			&rxr->rxdma, BUS_DMA_NOWAIT)) {
2203 			aprint_error_dev(dev,
2204 			    "Unable to allocate RX Descriptor memory\n");
2205 			error = ENOMEM;
2206 			goto err_rx_desc;
2207 		}
2208 		rxr->rx_base = (union ixgbe_adv_rx_desc *)rxr->rxdma.dma_vaddr;
2209 		bzero((void *)rxr->rx_base, rsize);
2210 
2211         	/* Allocate receive buffers for the ring*/
2212 		if (ixv_allocate_receive_buffers(rxr)) {
2213 			aprint_error_dev(dev,
2214 			    "Critical Failure setting up receive buffers\n");
2215 			error = ENOMEM;
2216 			goto err_rx_desc;
2217 		}
2218 	}
2219 
2220 	/*
2221 	** Finally set up the queue holding structs
2222 	*/
2223 	for (int i = 0; i < adapter->num_queues; i++) {
2224 		que = &adapter->queues[i];
2225 		que->adapter = adapter;
2226 		que->txr = &adapter->tx_rings[i];
2227 		que->rxr = &adapter->rx_rings[i];
2228 	}
2229 
2230 	return (0);
2231 
2232 err_rx_desc:
2233 	for (rxr = adapter->rx_rings; rxconf > 0; rxr++, rxconf--)
2234 		ixv_dma_free(adapter, &rxr->rxdma);
2235 err_tx_desc:
2236 	for (txr = adapter->tx_rings; txconf > 0; txr++, txconf--)
2237 		ixv_dma_free(adapter, &txr->txdma);
2238 	free(adapter->rx_rings, M_DEVBUF);
2239 rx_fail:
2240 	free(adapter->tx_rings, M_DEVBUF);
2241 tx_fail:
2242 	free(adapter->queues, M_DEVBUF);
2243 fail:
2244 	return (error);
2245 }
2246 
2247 
2248 /*********************************************************************
2249  *
2250  *  Allocate memory for tx_buffer structures. The tx_buffer stores all
2251  *  the information needed to transmit a packet on the wire. This is
2252  *  called only once at attach, setup is done every reset.
2253  *
2254  **********************************************************************/
2255 static int
2256 ixv_allocate_transmit_buffers(struct tx_ring *txr)
2257 {
2258 	struct adapter *adapter = txr->adapter;
2259 	device_t dev = adapter->dev;
2260 	struct ixv_tx_buf *txbuf;
2261 	int error, i;
2262 
2263 	/*
2264 	 * Setup DMA descriptor areas.
2265 	 */
2266 	if ((error = ixgbe_dma_tag_create(adapter->osdep.dmat,	/* parent */
2267 			       1, 0,		/* alignment, bounds */
2268 			       IXV_TSO_SIZE,		/* maxsize */
2269 			       32,			/* nsegments */
2270 			       PAGE_SIZE,		/* maxsegsize */
2271 			       0,			/* flags */
2272 			       &txr->txtag))) {
2273 		aprint_error_dev(dev,"Unable to allocate TX DMA tag\n");
2274 		goto fail;
2275 	}
2276 
2277 	if (!(txr->tx_buffers =
2278 	    (struct ixv_tx_buf *) malloc(sizeof(struct ixv_tx_buf) *
2279 	    adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO))) {
2280 		aprint_error_dev(dev, "Unable to allocate tx_buffer memory\n");
2281 		error = ENOMEM;
2282 		goto fail;
2283 	}
2284 
2285         /* Create the descriptor buffer dma maps */
2286 	txbuf = txr->tx_buffers;
2287 	for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
2288 		error = ixgbe_dmamap_create(txr->txtag, 0, &txbuf->map);
2289 		if (error != 0) {
2290 			aprint_error_dev(dev, "Unable to create TX DMA map\n");
2291 			goto fail;
2292 		}
2293 	}
2294 
2295 	return 0;
2296 fail:
2297 	/* We free all, it handles case where we are in the middle */
2298 	ixv_free_transmit_structures(adapter);
2299 	return (error);
2300 }
2301 
2302 /*********************************************************************
2303  *
2304  *  Initialize a transmit ring.
2305  *
2306  **********************************************************************/
2307 static void
2308 ixv_setup_transmit_ring(struct tx_ring *txr)
2309 {
2310 	struct adapter *adapter = txr->adapter;
2311 	struct ixv_tx_buf *txbuf;
2312 	int i;
2313 
2314 	/* Clear the old ring contents */
2315 	IXV_TX_LOCK(txr);
2316 	bzero((void *)txr->tx_base,
2317 	      (sizeof(union ixgbe_adv_tx_desc)) * adapter->num_tx_desc);
2318 	/* Reset indices */
2319 	txr->next_avail_desc = 0;
2320 	txr->next_to_clean = 0;
2321 
2322 	/* Free any existing tx buffers. */
2323         txbuf = txr->tx_buffers;
2324 	for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
2325 		if (txbuf->m_head != NULL) {
2326 			bus_dmamap_sync(txr->txtag->dt_dmat, txbuf->map,
2327 			    0, txbuf->m_head->m_pkthdr.len,
2328 			    BUS_DMASYNC_POSTWRITE);
2329 			ixgbe_dmamap_unload(txr->txtag, txbuf->map);
2330 			m_freem(txbuf->m_head);
2331 			txbuf->m_head = NULL;
2332 		}
2333 		/* Clear the EOP index */
2334 		txbuf->eop_index = -1;
2335         }
2336 
2337 	/* Set number of descriptors available */
2338 	txr->tx_avail = adapter->num_tx_desc;
2339 
2340 	ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
2341 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2342 	IXV_TX_UNLOCK(txr);
2343 }
2344 
2345 /*********************************************************************
2346  *
2347  *  Initialize all transmit rings.
2348  *
2349  **********************************************************************/
2350 static int
2351 ixv_setup_transmit_structures(struct adapter *adapter)
2352 {
2353 	struct tx_ring *txr = adapter->tx_rings;
2354 
2355 	for (int i = 0; i < adapter->num_queues; i++, txr++)
2356 		ixv_setup_transmit_ring(txr);
2357 
2358 	return (0);
2359 }
2360 
2361 /*********************************************************************
2362  *
2363  *  Enable transmit unit.
2364  *
2365  **********************************************************************/
2366 static void
2367 ixv_initialize_transmit_units(struct adapter *adapter)
2368 {
2369 	struct tx_ring	*txr = adapter->tx_rings;
2370 	struct ixgbe_hw	*hw = &adapter->hw;
2371 
2372 
2373 	for (int i = 0; i < adapter->num_queues; i++, txr++) {
2374 		u64	tdba = txr->txdma.dma_paddr;
2375 		u32	txctrl, txdctl;
2376 
2377 		/* Set WTHRESH to 8, burst writeback */
2378 		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
2379 		txdctl |= (8 << 16);
2380 		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
2381 		/* Now enable */
2382 		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
2383 		txdctl |= IXGBE_TXDCTL_ENABLE;
2384 		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
2385 
2386 		/* Set the HW Tx Head and Tail indices */
2387 	    	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDH(i), 0);
2388 	    	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(i), 0);
2389 
2390 		/* Setup Transmit Descriptor Cmd Settings */
2391 		txr->txd_cmd = IXGBE_TXD_CMD_IFCS;
2392 		txr->watchdog_check = FALSE;
2393 
2394 		/* Set Ring parameters */
2395 		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(i),
2396 		       (tdba & 0x00000000ffffffffULL));
2397 		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(i), (tdba >> 32));
2398 		IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(i),
2399 		    adapter->num_tx_desc *
2400 		    sizeof(struct ixgbe_legacy_tx_desc));
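		/*
		 * Note: legacy and advanced TX descriptors are the same
		 * size (16 bytes), so VFTDLEN comes out the same either way.
		 */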
2401 		txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(i));
2402 		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
2403 		IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i), txctrl);
2404 		break;
2405 	}
2406 
2407 	return;
2408 }
2409 
2410 /*********************************************************************
2411  *
2412  *  Free all transmit rings.
2413  *
2414  **********************************************************************/
2415 static void
2416 ixv_free_transmit_structures(struct adapter *adapter)
2417 {
2418 	struct tx_ring *txr = adapter->tx_rings;
2419 
2420 	for (int i = 0; i < adapter->num_queues; i++, txr++) {
2421 		ixv_free_transmit_buffers(txr);
2422 		ixv_dma_free(adapter, &txr->txdma);
2423 		IXV_TX_LOCK_DESTROY(txr);
2424 	}
2425 	free(adapter->tx_rings, M_DEVBUF);
2426 }
2427 
2428 /*********************************************************************
2429  *
2430  *  Free transmit ring related data structures.
2431  *
2432  **********************************************************************/
2433 static void
2434 ixv_free_transmit_buffers(struct tx_ring *txr)
2435 {
2436 	struct adapter *adapter = txr->adapter;
2437 	struct ixv_tx_buf *tx_buffer;
2438 	int             i;
2439 
2440 	INIT_DEBUGOUT("free_transmit_ring: begin");
2441 
2442 	if (txr->tx_buffers == NULL)
2443 		return;
2444 
2445 	tx_buffer = txr->tx_buffers;
2446 	for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
2447 		if (tx_buffer->m_head != NULL) {
2448 			bus_dmamap_sync(txr->txtag->dt_dmat, tx_buffer->map,
2449 			    0, tx_buffer->m_head->m_pkthdr.len,
2450 			    BUS_DMASYNC_POSTWRITE);
2451 			ixgbe_dmamap_unload(txr->txtag, tx_buffer->map);
2452 			m_freem(tx_buffer->m_head);
2453 			tx_buffer->m_head = NULL;
2454 			if (tx_buffer->map != NULL) {
2455 				ixgbe_dmamap_destroy(txr->txtag,
2456 				    tx_buffer->map);
2457 				tx_buffer->map = NULL;
2458 			}
2459 		} else if (tx_buffer->map != NULL) {
2460 			ixgbe_dmamap_unload(txr->txtag, tx_buffer->map);
2461 			ixgbe_dmamap_destroy(txr->txtag, tx_buffer->map);
2462 			tx_buffer->map = NULL;
2463 		}
2464 	}
2465 #if __FreeBSD_version >= 800000
2466 	if (txr->br != NULL)
2467 		buf_ring_free(txr->br, M_DEVBUF);
2468 #endif
2469 	if (txr->tx_buffers != NULL) {
2470 		free(txr->tx_buffers, M_DEVBUF);
2471 		txr->tx_buffers = NULL;
2472 	}
2473 	if (txr->txtag != NULL) {
2474 		ixgbe_dma_tag_destroy(txr->txtag);
2475 		txr->txtag = NULL;
2476 	}
2477 	return;
2478 }
2479 
2480 /*********************************************************************
2481  *
2482  *  Advanced Context Descriptor setup for VLAN or CSUM
2483  *
2484  **********************************************************************/
2485 
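/*
 * The context descriptor consumes one slot in the TX ring; the olinfo
 * bits returned here (the IXSM/TXSM checksum hints) are expected to be
 * merged into the data descriptors by the transmit path that calls this.
 */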
2486 static u32
2487 ixv_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp)
2488 {
2489 	struct m_tag *mtag;
2490 	struct adapter *adapter = txr->adapter;
2491 	struct ethercom *ec = &adapter->osdep.ec;
2492 	struct ixgbe_adv_tx_context_desc *TXD;
2493 	struct ixv_tx_buf        *tx_buffer;
2494 	u32 olinfo = 0, vlan_macip_lens = 0, type_tucmd_mlhl = 0;
2495 	struct ether_vlan_header *eh;
2496 	struct ip ip;
2497 	struct ip6_hdr ip6;
2498 	int  ehdrlen, ip_hlen = 0;
2499 	u16	etype;
2500 	u8	ipproto __diagused = 0;
2501 	bool	offload;
2502 	int ctxd = txr->next_avail_desc;
2503 	u16 vtag = 0;
2504 
2505 
2506 	offload = ((mp->m_pkthdr.csum_flags & M_CSUM_OFFLOAD) != 0);
2507 
2508 	tx_buffer = &txr->tx_buffers[ctxd];
2509 	TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];
2510 
2511 	/*
2512 	** In advanced descriptors the vlan tag must
2513 	** be placed into the descriptor itself.
2514 	*/
2515 	if ((mtag = VLAN_OUTPUT_TAG(ec, mp)) != NULL) {
2516 		vtag = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
2517 		vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
2518 	} else if (!offload)
2519 		return 0;
2520 
2521 	/*
2522 	 * Determine where frame payload starts.
2523 	 * Jump over vlan headers if already present,
2524 	 * helpful for QinQ too.
2525 	 */
2526 	KASSERT(mp->m_len >= offsetof(struct ether_vlan_header, evl_tag));
2527 	eh = mtod(mp, struct ether_vlan_header *);
2528 	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
2529 		KASSERT(mp->m_len >= sizeof(struct ether_vlan_header));
2530 		etype = ntohs(eh->evl_proto);
2531 		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2532 	} else {
2533 		etype = ntohs(eh->evl_encap_proto);
2534 		ehdrlen = ETHER_HDR_LEN;
2535 	}
2536 
2537 	/* Set the ether header length */
2538 	vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
2539 
2540 	switch (etype) {
2541 	case ETHERTYPE_IP:
2542 		m_copydata(mp, ehdrlen, sizeof(ip), &ip);
2543 		ip_hlen = ip.ip_hl << 2;
2544 		ipproto = ip.ip_p;
2545 #if 0
2546 		ip.ip_sum = 0;
2547 		m_copyback(mp, ehdrlen, sizeof(ip), &ip);
2548 #else
2549 		KASSERT((mp->m_pkthdr.csum_flags & M_CSUM_IPv4) == 0 ||
2550 		    ip.ip_sum == 0);
2551 #endif
2552 		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
2553 		break;
2554 	case ETHERTYPE_IPV6:
2555 		m_copydata(mp, ehdrlen, sizeof(ip6), &ip6);
2556 		ip_hlen = sizeof(ip6);
2557 		ipproto = ip6.ip6_nxt;
2558 		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6;
2559 		break;
2560 	default:
2561 		break;
2562 	}
2563 
2564 	if ((mp->m_pkthdr.csum_flags & M_CSUM_IPv4) != 0)
2565 		olinfo |= IXGBE_TXD_POPTS_IXSM << 8;
2566 
2567 	vlan_macip_lens |= ip_hlen;
2568 	type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
2569 
2570 	if (mp->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_TCPv6)) {
2571 		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
2572 		olinfo |= IXGBE_TXD_POPTS_TXSM << 8;
2573 		KASSERT(ipproto == IPPROTO_TCP);
2574 	} else if (mp->m_pkthdr.csum_flags & (M_CSUM_UDPv4|M_CSUM_UDPv6)) {
2575 		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP;
2576 		olinfo |= IXGBE_TXD_POPTS_TXSM << 8;
2577 		KASSERT(ipproto == IPPROTO_UDP);
2578 	}
2579 
2580 	/* Now copy bits into descriptor */
2581 	TXD->vlan_macip_lens |= htole32(vlan_macip_lens);
2582 	TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl);
2583 	TXD->seqnum_seed = htole32(0);
2584 	TXD->mss_l4len_idx = htole32(0);
2585 
2586 	tx_buffer->m_head = NULL;
2587 	tx_buffer->eop_index = -1;
2588 
2589 	/* We've consumed the first desc, adjust counters */
2590 	if (++ctxd == adapter->num_tx_desc)
2591 		ctxd = 0;
2592 	txr->next_avail_desc = ctxd;
2593 	--txr->tx_avail;
2594 
2595         return olinfo;
2596 }
2597 
2598 /**********************************************************************
2599  *
2600  *  Setup work for hardware segmentation offload (TSO) on
2601  *  adapters using advanced tx descriptors
2602  *
2603  **********************************************************************/
2604 static bool
2605 ixv_tso_setup(struct tx_ring *txr, struct mbuf *mp, u32 *paylen)
2606 {
2607 	struct m_tag *mtag;
2608 	struct adapter *adapter = txr->adapter;
2609 	struct ethercom *ec = &adapter->osdep.ec;
2610 	struct ixgbe_adv_tx_context_desc *TXD;
2611 	struct ixv_tx_buf        *tx_buffer;
2612 	u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
2613 	u32 mss_l4len_idx = 0;
2614 	u16 vtag = 0;
2615 	int ctxd, ehdrlen,  hdrlen, ip_hlen, tcp_hlen;
2616 	struct ether_vlan_header *eh;
2617 	struct ip *ip;
2618 	struct tcphdr *th;
2619 
2620 
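	/*
	 * Only IPv4/TCP segmentation is handled here, matching the
	 * IFCAP_TSOv4 capability advertised in ixv_setup_interface();
	 * anything else makes us return FALSE.
	 */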
2621 	/*
2622 	 * Determine where frame payload starts.
2623 	 * Jump over vlan headers if already present
2624 	 */
2625 	eh = mtod(mp, struct ether_vlan_header *);
2626 	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN))
2627 		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2628 	else
2629 		ehdrlen = ETHER_HDR_LEN;
2630 
2631         /* Ensure we have at least the IP+TCP header in the first mbuf. */
2632         if (mp->m_len < ehdrlen + sizeof(struct ip) + sizeof(struct tcphdr))
2633 		return FALSE;
2634 
2635 	ctxd = txr->next_avail_desc;
2636 	tx_buffer = &txr->tx_buffers[ctxd];
2637 	TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];
2638 
2639 	ip = (struct ip *)(mp->m_data + ehdrlen);
2640 	if (ip->ip_p != IPPROTO_TCP)
2641 		return FALSE;   /* 0 */
2642 	ip->ip_sum = 0;
2643 	ip_hlen = ip->ip_hl << 2;
2644 	th = (struct tcphdr *)((char *)ip + ip_hlen);
2645 	/* XXX Educated guess: FreeBSD's in_pseudo == NetBSD's in_cksum_phdr */
2646 	th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
2647 	    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
2648 	tcp_hlen = th->th_off << 2;
2649 	hdrlen = ehdrlen + ip_hlen + tcp_hlen;
2650 
2651 	/* This is used in the transmit desc in encap */
2652 	*paylen = mp->m_pkthdr.len - hdrlen;
2653 
2654 	/* VLAN MACLEN IPLEN */
2655 	if ((mtag = VLAN_OUTPUT_TAG(ec, mp)) != NULL) {
2656 		vtag = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
2657                 vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
2658 	}
2659 
2660 	vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
2661 	vlan_macip_lens |= ip_hlen;
2662 	TXD->vlan_macip_lens |= htole32(vlan_macip_lens);
2663 
2664 	/* ADV DTYPE TUCMD */
2665 	type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
2666 	type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
2667 	type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
2668 	TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl);
2669 
2670 
2671 	/* MSS L4LEN IDX */
2672 	mss_l4len_idx |= (mp->m_pkthdr.segsz << IXGBE_ADVTXD_MSS_SHIFT);
2673 	mss_l4len_idx |= (tcp_hlen << IXGBE_ADVTXD_L4LEN_SHIFT);
2674 	TXD->mss_l4len_idx = htole32(mss_l4len_idx);
2675 
2676 	TXD->seqnum_seed = htole32(0);
2677 	tx_buffer->m_head = NULL;
2678 	tx_buffer->eop_index = -1;
2679 
2680 	if (++ctxd == adapter->num_tx_desc)
2681 		ctxd = 0;
2682 
2683 	txr->tx_avail--;
2684 	txr->next_avail_desc = ctxd;
2685 	return TRUE;
2686 }
2687 
2688 
2689 /**********************************************************************
2690  *
2691  *  Examine each tx_buffer in the used queue. If the hardware is done
2692  *  processing the packet then free associated resources. The
2693  *  tx_buffer is put back on the free queue.
2694  *
2695  **********************************************************************/
2696 static bool
2697 ixv_txeof(struct tx_ring *txr)
2698 {
2699 	struct adapter	*adapter = txr->adapter;
2700 	struct ifnet	*ifp = adapter->ifp;
2701 	u32	first, last, done;
2702 	struct ixv_tx_buf *tx_buffer;
2703 	struct ixgbe_legacy_tx_desc *tx_desc, *eop_desc;
2704 
2705 	KASSERT(mutex_owned(&txr->tx_mtx));
2706 
2707 	if (txr->tx_avail == adapter->num_tx_desc)
2708 		return false;
2709 
2710 	first = txr->next_to_clean;
2711 	tx_buffer = &txr->tx_buffers[first];
2712 	/* For cleanup we just use legacy struct */
2713 	tx_desc = (struct ixgbe_legacy_tx_desc *)&txr->tx_base[first];
2714 	last = tx_buffer->eop_index;
2715 	if (last == -1)
2716 		return false;
2717 	eop_desc = (struct ixgbe_legacy_tx_desc *)&txr->tx_base[last];
2718 
2719 	/*
2720 	** Get the index of the first descriptor
2721 	** BEYOND the EOP and call that 'done'.
2722 	** I do this so the comparison in the
2723 	** inner while loop below can be simple
2724 	*/
2725 	if (++last == adapter->num_tx_desc) last = 0;
2726 	done = last;
2727 
2728         ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
2729             BUS_DMASYNC_POSTREAD);
2730 	/*
2731 	** Only the EOP descriptor of a packet now has the DD
2732 	** bit set, this is what we look for...
2733 	*/
2734 	while (eop_desc->upper.fields.status & IXGBE_TXD_STAT_DD) {
2735 		/* We clean the range of the packet */
2736 		while (first != done) {
2737 			tx_desc->upper.data = 0;
2738 			tx_desc->lower.data = 0;
2739 			tx_desc->buffer_addr = 0;
2740 			++txr->tx_avail;
2741 
2742 			if (tx_buffer->m_head) {
2743 				bus_dmamap_sync(txr->txtag->dt_dmat,
2744 				    tx_buffer->map,
2745 				    0, tx_buffer->m_head->m_pkthdr.len,
2746 				    BUS_DMASYNC_POSTWRITE);
2747 				ixgbe_dmamap_unload(txr->txtag, tx_buffer->map);
2748 				m_freem(tx_buffer->m_head);
2749 				tx_buffer->m_head = NULL;
2750 				tx_buffer->map = NULL;
2751 			}
2752 			tx_buffer->eop_index = -1;
2753 			getmicrotime(&txr->watchdog_time);
2754 
2755 			if (++first == adapter->num_tx_desc)
2756 				first = 0;
2757 
2758 			tx_buffer = &txr->tx_buffers[first];
2759 			tx_desc =
2760 			    (struct ixgbe_legacy_tx_desc *)&txr->tx_base[first];
2761 		}
2762 		++ifp->if_opackets;
2763 		/* See if there is more work now */
2764 		last = tx_buffer->eop_index;
2765 		if (last != -1) {
2766 			eop_desc =
2767 			    (struct ixgbe_legacy_tx_desc *)&txr->tx_base[last];
2768 			/* Get next done point */
2769 			if (++last == adapter->num_tx_desc) last = 0;
2770 			done = last;
2771 		} else
2772 			break;
2773 	}
2774 	ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
2775 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2776 
2777 	txr->next_to_clean = first;
2778 
2779 	/*
2780 	 * If we have enough room, clear IFF_OACTIVE to tell the stack that
2781 	 * it is OK to send packets. If there are no pending descriptors,
2782 	 * clear the timeout. Otherwise, if some descriptors have been freed,
2783 	 * restart the timeout.
2784 	 */
2785 	if (txr->tx_avail > IXV_TX_CLEANUP_THRESHOLD) {
2786 		ifp->if_flags &= ~IFF_OACTIVE;
2787 		if (txr->tx_avail == adapter->num_tx_desc) {
2788 			txr->watchdog_check = FALSE;
2789 			return false;
2790 		}
2791 	}
2792 
2793 	return true;
2794 }
2795 
2796 /*********************************************************************
2797  *
2798  *  Refresh mbuf buffers for RX descriptor rings
2799  *   - now keeps its own state so discards due to resource
2800  *     exhaustion are unnecessary, if an mbuf cannot be obtained
2801  *     exhaustion are unnecessary; if an mbuf cannot be obtained
2802  *     be recalled to try again.
2803  *
2804  **********************************************************************/
2805 static void
2806 ixv_refresh_mbufs(struct rx_ring *rxr, int limit)
2807 {
2808 	struct adapter		*adapter = rxr->adapter;
2809 	struct ixv_rx_buf	*rxbuf;
2810 	struct mbuf		*mh, *mp;
2811 	int			i, j, error;
2812 	bool			refreshed = false;
2813 
2814 	i = j = rxr->next_to_refresh;
2815         /* Get the control variable, one beyond refresh point */
2816 	if (++j == adapter->num_rx_desc)
2817 		j = 0;
2818 	while (j != limit) {
2819 		rxbuf = &rxr->rx_buffers[i];
2820 		if ((rxbuf->m_head == NULL) && (rxr->hdr_split)) {
2821 			mh = m_gethdr(M_NOWAIT, MT_DATA);
2822 			if (mh == NULL)
2823 				goto update;
2824 			mh->m_pkthdr.len = mh->m_len = MHLEN;
2825 			mh->m_flags |= M_PKTHDR;
2826 			m_adj(mh, ETHER_ALIGN);
2827 			/* Get the memory mapping */
2828 			error = bus_dmamap_load_mbuf(rxr->htag->dt_dmat,
2829 			    rxbuf->hmap, mh, BUS_DMA_NOWAIT);
2830 			if (error != 0) {
2831 				printf("GET BUF: dmamap load"
2832 				    " failure - %d\n", error);
2833 				m_free(mh);
2834 				goto update;
2835 			}
2836 			rxbuf->m_head = mh;
2837 			ixgbe_dmamap_sync(rxr->htag, rxbuf->hmap,
2838 			    BUS_DMASYNC_PREREAD);
2839 			rxr->rx_base[i].read.hdr_addr =
2840 			    htole64(rxbuf->hmap->dm_segs[0].ds_addr);
2841 		}
2842 
2843 		if (rxbuf->m_pack == NULL) {
2844 			mp = ixgbe_getjcl(&adapter->jcl_head, M_NOWAIT,
2845 			    MT_DATA, M_PKTHDR, adapter->rx_mbuf_sz);
2846 			if (mp == NULL) {
2847 				rxr->no_jmbuf.ev_count++;
2848 				goto update;
2849 			}
2850 			/* use the newly allocated cluster */
2851 
2852 			mp->m_pkthdr.len = mp->m_len = adapter->rx_mbuf_sz;
2853 			/* Get the memory mapping */
2854 			error = bus_dmamap_load_mbuf(rxr->ptag->dt_dmat,
2855 			    rxbuf->pmap, mp, BUS_DMA_NOWAIT);
2856 			if (error != 0) {
2857 				printf("GET BUF: dmamap load"
2858 				    " failure - %d\n", error);
2859 				m_free(mp);
2860 				rxbuf->m_pack = NULL;
2861 				goto update;
2862 			}
2863 			rxbuf->m_pack = mp;
2864 			bus_dmamap_sync(rxr->ptag->dt_dmat, rxbuf->pmap,
2865 			    0, mp->m_pkthdr.len, BUS_DMASYNC_PREREAD);
2866 			rxr->rx_base[i].read.pkt_addr =
2867 			    htole64(rxbuf->pmap->dm_segs[0].ds_addr);
2868 		}
2869 
2870 		refreshed = true;
2871 		rxr->next_to_refresh = i = j;
2872 		/* Calculate next index */
2873 		if (++j == adapter->num_rx_desc)
2874 			j = 0;
2875 	}
2876 update:
2877 	if (refreshed) /* update tail index */
2878 		IXGBE_WRITE_REG(&adapter->hw,
2879 		    IXGBE_VFRDT(rxr->me), rxr->next_to_refresh);
2880 	return;
2881 }
2882 
2883 /*********************************************************************
2884  *
2885  *  Allocate memory for rx_buffer structures. Since we use one
2886  *  rx_buffer per received packet, the maximum number of rx_buffer's
2887  *  that we'll need is equal to the number of receive descriptors
2888  *  that we've allocated.
2889  *
2890  **********************************************************************/
2891 static int
2892 ixv_allocate_receive_buffers(struct rx_ring *rxr)
2893 {
2894 	struct	adapter 	*adapter = rxr->adapter;
2895 	device_t 		dev = adapter->dev;
2896 	struct ixv_rx_buf 	*rxbuf;
2897 	int             	i, bsize, error;
2898 
2899 	bsize = sizeof(struct ixv_rx_buf) * adapter->num_rx_desc;
2900 	if (!(rxr->rx_buffers =
2901 	    (struct ixv_rx_buf *) malloc(bsize,
2902 	    M_DEVBUF, M_NOWAIT | M_ZERO))) {
2903 		aprint_error_dev(dev, "Unable to allocate rx_buffer memory\n");
2904 		error = ENOMEM;
2905 		goto fail;
2906 	}
2907 
2908 	if ((error = ixgbe_dma_tag_create(adapter->osdep.dmat,	/* parent */
2909 				   1, 0,	/* alignment, bounds */
2910 				   MSIZE,		/* maxsize */
2911 				   1,			/* nsegments */
2912 				   MSIZE,		/* maxsegsize */
2913 				   0,			/* flags */
2914 				   &rxr->htag))) {
2915 		aprint_error_dev(dev, "Unable to create RX DMA tag\n");
2916 		goto fail;
2917 	}
2918 
2919 	if ((error = ixgbe_dma_tag_create(adapter->osdep.dmat,	/* parent */
2920 				   1, 0,	/* alignment, bounds */
2921 				   MJUMPAGESIZE,	/* maxsize */
2922 				   1,			/* nsegments */
2923 				   MJUMPAGESIZE,	/* maxsegsize */
2924 				   0,			/* flags */
2925 				   &rxr->ptag))) {
2926 		aprint_error_dev(dev, "Unable to create RX DMA tag\n");
2927 		goto fail;
2928 	}
2929 
2930 	for (i = 0; i < adapter->num_rx_desc; i++, rxbuf++) {
2931 		rxbuf = &rxr->rx_buffers[i];
2932 		error = ixgbe_dmamap_create(rxr->htag,
2933 		    BUS_DMA_NOWAIT, &rxbuf->hmap);
2934 		if (error) {
2935 			aprint_error_dev(dev, "Unable to create RX head map\n");
2936 			goto fail;
2937 		}
2938 		error = ixgbe_dmamap_create(rxr->ptag,
2939 		    BUS_DMA_NOWAIT, &rxbuf->pmap);
2940 		if (error) {
2941 			aprint_error_dev(dev, "Unable to create RX pkt map\n");
2942 			goto fail;
2943 		}
2944 	}
2945 
2946 	return (0);
2947 
2948 fail:
2949 	/* Frees all, but can handle partial completion */
2950 	ixv_free_receive_structures(adapter);
2951 	return (error);
2952 }
2953 
2954 static void
2955 ixv_free_receive_ring(struct rx_ring *rxr)
2956 {
2957 	struct  adapter         *adapter;
2958 	struct ixv_rx_buf       *rxbuf;
2959 	int i;
2960 
2961 	adapter = rxr->adapter;
2962 	for (i = 0; i < adapter->num_rx_desc; i++) {
2963 		rxbuf = &rxr->rx_buffers[i];
2964 		if (rxbuf->m_head != NULL) {
2965 			ixgbe_dmamap_sync(rxr->htag, rxbuf->hmap,
2966 			    BUS_DMASYNC_POSTREAD);
2967 			ixgbe_dmamap_unload(rxr->htag, rxbuf->hmap);
2968 			rxbuf->m_head->m_flags |= M_PKTHDR;
2969 			m_freem(rxbuf->m_head);
2970 		}
2971 		if (rxbuf->m_pack != NULL) {
2972 			/* XXX not ixgbe_ ? */
2973 			bus_dmamap_sync(rxr->ptag->dt_dmat, rxbuf->pmap,
2974 			    0, rxbuf->m_pack->m_pkthdr.len,
2975 			    BUS_DMASYNC_POSTREAD);
2976 			ixgbe_dmamap_unload(rxr->ptag, rxbuf->pmap);
2977 			rxbuf->m_pack->m_flags |= M_PKTHDR;
2978 			m_freem(rxbuf->m_pack);
2979 		}
2980 		rxbuf->m_head = NULL;
2981 		rxbuf->m_pack = NULL;
2982 	}
2983 }
2984 
2985 
2986 /*********************************************************************
2987  *
2988  *  Initialize a receive ring and its buffers.
2989  *
2990  **********************************************************************/
2991 static int
2992 ixv_setup_receive_ring(struct rx_ring *rxr)
2993 {
2994 	struct	adapter 	*adapter;
2995 	struct ixv_rx_buf	*rxbuf;
2996 #ifdef LRO
2997 	struct ifnet		*ifp;
2998 	struct lro_ctrl		*lro = &rxr->lro;
2999 #endif /* LRO */
3000 	int			rsize, error = 0;
3001 
3002 	adapter = rxr->adapter;
3003 #ifdef LRO
3004 	ifp = adapter->ifp;
3005 #endif /* LRO */
3006 
3007 	/* Clear the ring contents */
3008 	IXV_RX_LOCK(rxr);
3009 	rsize = roundup2(adapter->num_rx_desc *
3010 	    sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN);
3011 	bzero((void *)rxr->rx_base, rsize);
3012 
3013 	/* Free current RX buffer structs and their mbufs */
3014 	ixv_free_receive_ring(rxr);
3015 
3016 	IXV_RX_UNLOCK(rxr);
3017 
3018 	/* Now reinitialize our supply of jumbo mbufs.  The number
3019 	 * or size of jumbo mbufs may have changed.
3020 	 */
3021 	ixgbe_jcl_reinit(&adapter->jcl_head, rxr->ptag->dt_dmat,
3022 	    2 * adapter->num_rx_desc, adapter->rx_mbuf_sz);
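	/* The pool is sized at twice the descriptor count, presumably so
	 * clusters still held by the stack do not starve ring refills.
	 */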
3023 
3024 	IXV_RX_LOCK(rxr);
3025 
3026 	/* Configure header split? */
3027 	if (ixv_header_split)
3028 		rxr->hdr_split = TRUE;
3029 
3030 	/* Now replenish the mbufs */
3031 	for (int j = 0; j != adapter->num_rx_desc; ++j) {
3032 		struct mbuf	*mh, *mp;
3033 
3034 		rxbuf = &rxr->rx_buffers[j];
3035 		/*
3036 		** Don't allocate mbufs if not
3037 		** doing header split, it's wasteful
3038 		*/
3039 		if (rxr->hdr_split == FALSE)
3040 			goto skip_head;
3041 
3042 		/* First the header */
3043 		rxbuf->m_head = m_gethdr(M_DONTWAIT, MT_DATA);
3044 		if (rxbuf->m_head == NULL) {
3045 			error = ENOBUFS;
3046 			goto fail;
3047 		}
3048 		m_adj(rxbuf->m_head, ETHER_ALIGN);
3049 		mh = rxbuf->m_head;
3050 		mh->m_len = mh->m_pkthdr.len = MHLEN;
3051 		mh->m_flags |= M_PKTHDR;
3052 		/* Get the memory mapping */
3053 		error = bus_dmamap_load_mbuf(rxr->htag->dt_dmat,
3054 		    rxbuf->hmap, rxbuf->m_head, BUS_DMA_NOWAIT);
3055 		if (error != 0) /* Nothing elegant to do here */
3056 			goto fail;
3057 		bus_dmamap_sync(rxr->htag->dt_dmat, rxbuf->hmap,
3058 		    0, mh->m_pkthdr.len, BUS_DMASYNC_PREREAD);
3059 		/* Update descriptor */
3060 		rxr->rx_base[j].read.hdr_addr =
3061 		    htole64(rxbuf->hmap->dm_segs[0].ds_addr);
3062 
3063 skip_head:
3064 		/* Now the payload cluster */
3065 		rxbuf->m_pack = ixgbe_getjcl(&adapter->jcl_head, M_DONTWAIT,
3066 		    MT_DATA, M_PKTHDR, adapter->rx_mbuf_sz);
3067 		if (rxbuf->m_pack == NULL) {
3068 			error = ENOBUFS;
3069                         goto fail;
3070 		}
3071 		mp = rxbuf->m_pack;
3072 		mp->m_pkthdr.len = mp->m_len = adapter->rx_mbuf_sz;
3073 		/* Get the memory mapping */
3074 		error = bus_dmamap_load_mbuf(rxr->ptag->dt_dmat,
3075 		    rxbuf->pmap, mp, BUS_DMA_NOWAIT);
3076 		if (error != 0)
3077                         goto fail;
3078 		bus_dmamap_sync(rxr->ptag->dt_dmat, rxbuf->pmap,
3079 		    0, adapter->rx_mbuf_sz, BUS_DMASYNC_PREREAD);
3080 		/* Update descriptor */
3081 		rxr->rx_base[j].read.pkt_addr =
3082 		    htole64(rxbuf->pmap->dm_segs[0].ds_addr);
3083 	}
3084 
3085 
3086 	/* Setup our descriptor indices */
3087 	rxr->next_to_check = 0;
3088 	rxr->next_to_refresh = 0;
3089 	rxr->lro_enabled = FALSE;
3090 	rxr->rx_split_packets.ev_count = 0;
3091 	rxr->rx_bytes.ev_count = 0;
3092 	rxr->discard = FALSE;
3093 
3094 	ixgbe_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
3095 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3096 
3097 #ifdef LRO
3098 	/*
3099 	** Now set up the LRO interface:
3100 	*/
3101 	if (ifp->if_capenable & IFCAP_LRO) {
3102 		device_t dev = adapter->dev;
3103 		int err = tcp_lro_init(lro);
3104 		if (err) {
3105 			device_printf(dev, "LRO Initialization failed!\n");
3106 			goto fail;
3107 		}
3108 		INIT_DEBUGOUT("RX Soft LRO Initialized\n");
3109 		rxr->lro_enabled = TRUE;
3110 		lro->ifp = adapter->ifp;
3111 	}
3112 #endif /* LRO */
3113 
3114 	IXV_RX_UNLOCK(rxr);
3115 	return (0);
3116 
3117 fail:
3118 	ixv_free_receive_ring(rxr);
3119 	IXV_RX_UNLOCK(rxr);
3120 	return (error);
3121 }
3122 
3123 /*********************************************************************
3124  *
3125  *  Initialize all receive rings.
3126  *
3127  **********************************************************************/
3128 static int
3129 ixv_setup_receive_structures(struct adapter *adapter)
3130 {
3131 	struct rx_ring *rxr = adapter->rx_rings;
3132 	int j;
3133 
3134 	for (j = 0; j < adapter->num_queues; j++, rxr++)
3135 		if (ixv_setup_receive_ring(rxr))
3136 			goto fail;
3137 
3138 	return (0);
3139 fail:
3140 	/*
3141 	 * Free RX buffers allocated so far, we will only handle
3142 	 * the rings that completed, the failing case will have
3143 	 * cleaned up for itself. 'j' failed, so it's the terminus.
3144 	 */
3145 	for (int i = 0; i < j; ++i) {
3146 		rxr = &adapter->rx_rings[i];
3147 		ixv_free_receive_ring(rxr);
3148 	}
3149 
3150 	return (ENOBUFS);
3151 }
3152 
3153 /*********************************************************************
3154  *
3155  *  Setup receive registers and features.
3156  *
3157  **********************************************************************/
3158 #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
3159 
3160 static void
3161 ixv_initialize_receive_units(struct adapter *adapter)
3162 {
3163 	int i;
3164 	struct	rx_ring	*rxr = adapter->rx_rings;
3165 	struct ixgbe_hw	*hw = &adapter->hw;
3166 	struct ifnet   *ifp = adapter->ifp;
3167 	u32		bufsz, fctrl, rxcsum, hlreg;
3168 
3169 
3170 	/* Enable broadcasts */
3171 	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
3172 	fctrl |= IXGBE_FCTRL_BAM;
3173 	fctrl |= IXGBE_FCTRL_DPF;
3174 	fctrl |= IXGBE_FCTRL_PMCF;
3175 	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
3176 
3177 	/* Set for Jumbo Frames? */
3178 	hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
3179 	if (ifp->if_mtu > ETHERMTU) {
3180 		hlreg |= IXGBE_HLREG0_JUMBOEN;
3181 		bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
3182 	} else {
3183 		hlreg &= ~IXGBE_HLREG0_JUMBOEN;
3184 		bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
3185 	}
3186 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
3187 
3188 	for (i = 0; i < adapter->num_queues; i++, rxr++) {
3189 		u64 rdba = rxr->rxdma.dma_paddr;
3190 		u32 reg, rxdctl;
3191 
3192 		/* Do the queue enabling first */
3193 		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
3194 		rxdctl |= IXGBE_RXDCTL_ENABLE;
3195 		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
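		/*
		** Poll briefly (up to ~10ms) for the hardware to reflect
		** RXDCTL.ENABLE before programming the ring registers.
		*/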
3196 		for (int k = 0; k < 10; k++) {
3197 			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
3198 			    IXGBE_RXDCTL_ENABLE)
3199 				break;
3200 			else
3201 				msec_delay(1);
3202 		}
3203 		wmb();
3204 
3205 		/* Setup the Base and Length of the Rx Descriptor Ring */
3206 		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(i),
3207 		    (rdba & 0x00000000ffffffffULL));
3208 		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(i),
3209 		    (rdba >> 32));
3210 		IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(i),
3211 		    adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
3212 
3213 		/* Set up the SRRCTL register */
3214 		reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(i));
3215 		reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
3216 		reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
3217 		reg |= bufsz;
3218 		if (rxr->hdr_split) {
3219 			/* Use a standard mbuf for the header */
3220 			reg |= ((IXV_RX_HDR <<
3221 			    IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT)
3222 			    & IXGBE_SRRCTL_BSIZEHDR_MASK);
3223 			reg |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
3224 		} else
3225 			reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
3226 		IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), reg);
3227 
3228 		/* Setup the HW Rx Head and Tail Descriptor Pointers */
3229 		IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0);
3230 		IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me),
3231 		    adapter->num_rx_desc - 1);
3232 	}
3233 
3234 	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
3235 
3236 	if (ifp->if_capenable & IFCAP_RXCSUM)
3237 		rxcsum |= IXGBE_RXCSUM_PCSD;
3238 
3239 	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
3240 		rxcsum |= IXGBE_RXCSUM_IPPCSE;
3241 
3242 	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
3243 
3244 	return;
3245 }
3246 
3247 /*********************************************************************
3248  *
3249  *  Free all receive rings.
3250  *
3251  **********************************************************************/
3252 static void
3253 ixv_free_receive_structures(struct adapter *adapter)
3254 {
3255 	struct rx_ring *rxr = adapter->rx_rings;
3256 
3257 	for (int i = 0; i < adapter->num_queues; i++, rxr++) {
3258 #ifdef LRO
3259 		struct lro_ctrl		*lro = &rxr->lro;
3260 #endif /* LRO */
3261 		ixv_free_receive_buffers(rxr);
3262 #ifdef LRO
3263 		/* Free LRO memory */
3264 		tcp_lro_free(lro);
3265 #endif /* LRO */
3266 		/* Free the ring memory as well */
3267 		ixv_dma_free(adapter, &rxr->rxdma);
3268 		IXV_RX_LOCK_DESTROY(rxr);
3269 	}
3270 
3271 	free(adapter->rx_rings, M_DEVBUF);
3272 }
3273 
3274 
3275 /*********************************************************************
3276  *
3277  *  Free receive ring data structures
3278  *
3279  **********************************************************************/
3280 static void
3281 ixv_free_receive_buffers(struct rx_ring *rxr)
3282 {
3283 	struct adapter		*adapter = rxr->adapter;
3284 	struct ixv_rx_buf	*rxbuf;
3285 
3286 	INIT_DEBUGOUT("free_receive_structures: begin");
3287 
3288 	/* Cleanup any existing buffers */
3289 	if (rxr->rx_buffers != NULL) {
3290 		for (int i = 0; i < adapter->num_rx_desc; i++) {
3291 			rxbuf = &rxr->rx_buffers[i];
3292 			if (rxbuf->m_head != NULL) {
3293 				ixgbe_dmamap_sync(rxr->htag, rxbuf->hmap,
3294 				    BUS_DMASYNC_POSTREAD);
3295 				ixgbe_dmamap_unload(rxr->htag, rxbuf->hmap);
3296 				rxbuf->m_head->m_flags |= M_PKTHDR;
3297 				m_freem(rxbuf->m_head);
3298 			}
3299 			if (rxbuf->m_pack != NULL) {
3300 				/* XXX not ixgbe_* ? */
3301 				bus_dmamap_sync(rxr->ptag->dt_dmat, rxbuf->pmap,
3302 				    0, rxbuf->m_pack->m_pkthdr.len,
3303 				    BUS_DMASYNC_POSTREAD);
3304 				ixgbe_dmamap_unload(rxr->ptag, rxbuf->pmap);
3305 				rxbuf->m_pack->m_flags |= M_PKTHDR;
3306 				m_freem(rxbuf->m_pack);
3307 			}
3308 			rxbuf->m_head = NULL;
3309 			rxbuf->m_pack = NULL;
3310 			if (rxbuf->hmap != NULL) {
3311 				ixgbe_dmamap_destroy(rxr->htag, rxbuf->hmap);
3312 				rxbuf->hmap = NULL;
3313 			}
3314 			if (rxbuf->pmap != NULL) {
3315 				ixgbe_dmamap_destroy(rxr->ptag, rxbuf->pmap);
3316 				rxbuf->pmap = NULL;
3317 			}
3318 		}
3319 		if (rxr->rx_buffers != NULL) {
3320 			free(rxr->rx_buffers, M_DEVBUF);
3321 			rxr->rx_buffers = NULL;
3322 		}
3323 	}
3324 
3325 	if (rxr->htag != NULL) {
3326 		ixgbe_dma_tag_destroy(rxr->htag);
3327 		rxr->htag = NULL;
3328 	}
3329 	if (rxr->ptag != NULL) {
3330 		ixgbe_dma_tag_destroy(rxr->ptag);
3331 		rxr->ptag = NULL;
3332 	}
3333 
3334 	return;
3335 }
3336 
3337 static __inline void
3338 ixv_rx_input(struct rx_ring *rxr, struct ifnet *ifp, struct mbuf *m, u32 ptype)
3339 {
3340 	int s;
3341 
3342 #ifdef LRO
3343 	struct adapter	*adapter = ifp->if_softc;
3344 	struct ethercom *ec = &adapter->osdep.ec;
3345 
3346         /*
3347          * ATM LRO is only for IPv4/TCP packets and TCP checksum of the packet
3348          * should be computed by hardware. Also it should not have VLAN tag in
3349          * ethernet header.
3350          */
3351         if (rxr->lro_enabled &&
3352             (ec->ec_capenable & ETHERCAP_VLAN_HWTAGGING) != 0 &&
3353             (ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
3354             (ptype & (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP)) ==
3355             (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP) &&
3356             (m->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) ==
3357             (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) {
3358                 /*
3359                  * Send to the stack if:
3360                  **  - LRO not enabled, or
3361                  **  - no LRO resources, or
3362                  **  - lro enqueue fails
3363                  */
3364                 if (rxr->lro.lro_cnt != 0)
3365                         if (tcp_lro_rx(&rxr->lro, m, 0) == 0)
3366                                 return;
3367         }
3368 #endif /* LRO */
3369 
3370 	IXV_RX_UNLOCK(rxr);
3371 
3372 	s = splnet();
3373 	/* Pass this up to any BPF listeners. */
3374 	bpf_mtap(ifp, m);
3375 	if_percpuq_enqueue(ifp->if_percpuq, m);
3376 	splx(s);
3377 
3378 	IXV_RX_LOCK(rxr);
3379 }
3380 
3381 static __inline void
3382 ixv_rx_discard(struct rx_ring *rxr, int i)
3383 {
3384 	struct ixv_rx_buf	*rbuf;
3385 
3386 	rbuf = &rxr->rx_buffers[i];
3387 	if (rbuf->fmp != NULL) {/* Partial chain ? */
3388 		rbuf->fmp->m_flags |= M_PKTHDR;
3389 		m_freem(rbuf->fmp);
3390 		rbuf->fmp = NULL;
3391 	}
3392 
3393 	/*
3394 	** With advanced descriptors the writeback
3395 	** clobbers the buffer addrs, so it's easier
3396 	** to just free the existing mbufs and take
3397 	** the normal refresh path to get new buffers
3398 	** and mapping.
3399 	*/
3400 	if (rbuf->m_head) {
3401 		m_free(rbuf->m_head);
3402 		rbuf->m_head = NULL;
3403 	}
3404 
3405 	if (rbuf->m_pack) {
3406 		m_free(rbuf->m_pack);
3407 		rbuf->m_pack = NULL;
3408 	}
3409 
3410 	return;
3411 }
3412 
3413 
3414 /*********************************************************************
3415  *
3416  *  This routine executes in interrupt context. It replenishes
3417  *  the mbufs in the descriptor and sends data which has been
3418  *  dma'ed into host memory to upper layer.
3419  *
3420  *  We loop at most count times if count is > 0, or until done if
3421  *  count < 0.
3422  *
3423  *  Return TRUE for more work, FALSE for all clean.
3424  *********************************************************************/
3425 static bool
3426 ixv_rxeof(struct ix_queue *que, int count)
3427 {
3428 	struct adapter		*adapter = que->adapter;
3429 	struct rx_ring		*rxr = que->rxr;
3430 	struct ifnet		*ifp = adapter->ifp;
3431 #ifdef LRO
3432 	struct lro_ctrl		*lro = &rxr->lro;
3433 	struct lro_entry	*queued;
3434 #endif /* LRO */
3435 	int			i, nextp, processed = 0;
3436 	u32			staterr = 0;
3437 	union ixgbe_adv_rx_desc	*cur;
3438 	struct ixv_rx_buf	*rbuf, *nbuf;
3439 
3440 	IXV_RX_LOCK(rxr);
3441 
3442 	for (i = rxr->next_to_check; count != 0;) {
3443 		struct mbuf	*sendmp, *mh, *mp;
3444 		u32		ptype;
3445 		u16		hlen, plen, hdr, vtag;
3446 		bool		eop;
3447 
3448 		/* Sync the ring. */
3449 		ixgbe_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
3450 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3451 
3452 		cur = &rxr->rx_base[i];
3453 		staterr = le32toh(cur->wb.upper.status_error);
3454 
3455 		if ((staterr & IXGBE_RXD_STAT_DD) == 0)
3456 			break;
3457 		if ((ifp->if_flags & IFF_RUNNING) == 0)
3458 			break;
3459 
3460 		count--;
3461 		sendmp = NULL;
3462 		nbuf = NULL;
3463 		cur->wb.upper.status_error = 0;
3464 		rbuf = &rxr->rx_buffers[i];
3465 		mh = rbuf->m_head;
3466 		mp = rbuf->m_pack;
3467 
3468 		plen = le16toh(cur->wb.upper.length);
3469 		ptype = le32toh(cur->wb.lower.lo_dword.data) &
3470 		    IXGBE_RXDADV_PKTTYPE_MASK;
3471 		hdr = le16toh(cur->wb.lower.lo_dword.hs_rss.hdr_info);
3472 		vtag = le16toh(cur->wb.upper.vlan);
3473 		eop = ((staterr & IXGBE_RXD_STAT_EOP) != 0);
3474 
3475 		/* Make sure all parts of a bad packet are discarded */
3476 		if (((staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) != 0) ||
3477 		    (rxr->discard)) {
3478 			ifp->if_ierrors++;
3479 			rxr->rx_discarded.ev_count++;
3480 			if (!eop)
3481 				rxr->discard = TRUE;
3482 			else
3483 				rxr->discard = FALSE;
3484 			ixv_rx_discard(rxr, i);
3485 			goto next_desc;
3486 		}
3487 
3488 		if (!eop) {
3489 			nextp = i + 1;
3490 			if (nextp == adapter->num_rx_desc)
3491 				nextp = 0;
3492 			nbuf = &rxr->rx_buffers[nextp];
3493 			prefetch(nbuf);
3494 		}
3495 		/*
3496 		** The header mbuf is ONLY used when header
3497 		** split is enabled, otherwise we get normal
3498 		** behavior, i.e., both header and payload
3499 		** are DMA'd into the payload buffer.
3500 		**
3501 		** Rather than using the fmp/lmp global pointers
3502 		** we now keep the head of a packet chain in the
3503 		** buffer struct and pass this along from one
3504 		** descriptor to the next, until we get EOP.
3505 		*/
3506 		if (rxr->hdr_split && (rbuf->fmp == NULL)) {
3507 			/* This must be an initial descriptor */
3508 			hlen = (hdr & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
3509 			    IXGBE_RXDADV_HDRBUFLEN_SHIFT;
3510 			if (hlen > IXV_RX_HDR)
3511 				hlen = IXV_RX_HDR;
3512 			mh->m_len = hlen;
3513 			mh->m_flags |= M_PKTHDR;
3514 			mh->m_next = NULL;
3515 			mh->m_pkthdr.len = mh->m_len;
3516 			/* Null buf pointer so it is refreshed */
3517 			rbuf->m_head = NULL;
3518 			/*
3519 			** Check the payload length, this
3520 			** could be zero if it's a small
3521 			** packet.
3522 			*/
3523 			if (plen > 0) {
3524 				mp->m_len = plen;
3525 				mp->m_next = NULL;
3526 				mp->m_flags &= ~M_PKTHDR;
3527 				mh->m_next = mp;
3528 				mh->m_pkthdr.len += mp->m_len;
3529 				/* Null buf pointer so it is refreshed */
3530 				rbuf->m_pack = NULL;
3531 				rxr->rx_split_packets.ev_count++;
3532 			}
3533 			/*
3534 			** Now create the forward
3535 			** chain so when complete
3536 			** we won't have to.
3537 			*/
3538                         if (eop == 0) {
3539 				/* stash the chain head */
3540                                 nbuf->fmp = mh;
3541 				/* Make forward chain */
3542                                 if (plen)
3543                                         mp->m_next = nbuf->m_pack;
3544                                 else
3545                                         mh->m_next = nbuf->m_pack;
3546                         } else {
3547 				/* Singlet, prepare to send */
3548                                 sendmp = mh;
3549                                 if (VLAN_ATTACHED(&adapter->osdep.ec) &&
3550 				  (staterr & IXGBE_RXD_STAT_VP)) {
3551 					VLAN_INPUT_TAG(ifp, sendmp, vtag,
3552 					    printf("%s: could not apply VLAN "
3553 					        "tag", __func__));
3554                                 }
3555                         }
3556 		} else {
3557 			/*
3558 			** Either no header split, or a
3559 			** secondary piece of a fragmented
3560 			** split packet.
3561 			*/
3562 			mp->m_len = plen;
3563 			/*
3564 			** See if there is a stored head
3565 			** that determines what we are
3566 			*/
3567 			sendmp = rbuf->fmp;
3568 			rbuf->m_pack = rbuf->fmp = NULL;
3569 
3570 			if (sendmp != NULL) /* secondary frag */
3571 				sendmp->m_pkthdr.len += mp->m_len;
3572 			else {
3573 				/* first desc of a non-ps chain */
3574 				sendmp = mp;
3575 				sendmp->m_flags |= M_PKTHDR;
3576 				sendmp->m_pkthdr.len = mp->m_len;
3577 				if (staterr & IXGBE_RXD_STAT_VP) {
3578 					/* XXX Do something reasonable on
3579 					 * error.
3580 					 */
3581 					VLAN_INPUT_TAG(ifp, sendmp, vtag,
3582 					    printf("%s: could not apply VLAN "
3583 					        "tag", __func__));
3584 				}
3585                         }
3586 			/* Pass the head pointer on */
3587 			if (eop == 0) {
3588 				nbuf->fmp = sendmp;
3589 				sendmp = NULL;
3590 				mp->m_next = nbuf->m_pack;
3591 			}
3592 		}
3593 		++processed;
3594 		/* Sending this frame? */
3595 		if (eop) {
3596 			m_set_rcvif(sendmp, ifp);
3597 			ifp->if_ipackets++;
3598 			rxr->rx_packets.ev_count++;
3599 			/* capture data for AIM */
3600 			rxr->bytes += sendmp->m_pkthdr.len;
3601 			rxr->rx_bytes.ev_count += sendmp->m_pkthdr.len;
3602 			if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
3603 				ixv_rx_checksum(staterr, sendmp, ptype,
3604 				   &adapter->stats);
3605 			}
3606 #if __FreeBSD_version >= 800000
3607 			sendmp->m_pkthdr.flowid = que->msix;
3608 			sendmp->m_flags |= M_FLOWID;
3609 #endif
3610 		}
3611 next_desc:
3612 		ixgbe_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
3613 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3614 
3615 		/* Advance our pointers to the next descriptor. */
3616 		if (++i == adapter->num_rx_desc)
3617 			i = 0;
3618 
3619 		/* Now send to the stack or do LRO */
3620 		if (sendmp != NULL)
3621 			ixv_rx_input(rxr, ifp, sendmp, ptype);
3622 
3623 		/* Refresh mbufs every eight descriptors */
3624 		if (processed == 8) {
3625 			ixv_refresh_mbufs(rxr, i);
3626 			processed = 0;
3627 		}
3628 	}
3629 
3630 	/* Refresh any remaining buf structs */
3631 	if (ixv_rx_unrefreshed(rxr))
3632 		ixv_refresh_mbufs(rxr, i);
3633 
3634 	rxr->next_to_check = i;
3635 
3636 #ifdef LRO
3637 	/*
3638 	 * Flush any outstanding LRO work
3639 	 */
3640 	while ((queued = SLIST_FIRST(&lro->lro_active)) != NULL) {
3641 		SLIST_REMOVE_HEAD(&lro->lro_active, next);
3642 		tcp_lro_flush(lro, queued);
3643 	}
3644 #endif /* LRO */
3645 
3646 	IXV_RX_UNLOCK(rxr);
3647 
3648 	/*
3649 	** We still have cleaning to do?
3650 	** Schedule another interrupt if so.
3651 	*/
3652 	if ((staterr & IXGBE_RXD_STAT_DD) != 0) {
3653 		ixv_rearm_queues(adapter, (u64)(1ULL << que->msix));
3654 		return true;
3655 	}
3656 
3657 	return false;
3658 }
3659 
3660 
3661 /*********************************************************************
3662  *
3663  *  Verify that the hardware indicated that the checksum is valid.
3664  *  Inform the stack about the status of the checksum so that the
3665  *  stack doesn't spend time verifying it.
3666  *
3667  *********************************************************************/
3668 static void
3669 ixv_rx_checksum(u32 staterr, struct mbuf * mp, u32 ptype,
3670     struct ixgbevf_hw_stats *stats)
3671 {
3672 	u16	status = (u16) staterr;
3673 	u8	errors = (u8) (staterr >> 24);
3674 #if 0
3675 	bool	sctp = FALSE;
3676 
3677 	if ((ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
3678 	    (ptype & IXGBE_RXDADV_PKTTYPE_SCTP) != 0)
3679 		sctp = TRUE;
3680 #endif
3681 	if (status & IXGBE_RXD_STAT_IPCS) {
3682 		stats->ipcs.ev_count++;
3683 		if (!(errors & IXGBE_RXD_ERR_IPE)) {
3684 			/* IP Checksum Good */
3685 			mp->m_pkthdr.csum_flags |= M_CSUM_IPv4;
3686 
3687 		} else {
3688 			stats->ipcs_bad.ev_count++;
3689 			mp->m_pkthdr.csum_flags = M_CSUM_IPv4|M_CSUM_IPv4_BAD;
3690 		}
3691 	}
3692 	if (status & IXGBE_RXD_STAT_L4CS) {
3693 		stats->l4cs.ev_count++;
3694 		int type = M_CSUM_TCPv4|M_CSUM_TCPv6|M_CSUM_UDPv4|M_CSUM_UDPv6;
3695 		if (!(errors & IXGBE_RXD_ERR_TCPE)) {
3696 			mp->m_pkthdr.csum_flags |= type;
3697 		} else {
3698 			stats->l4cs_bad.ev_count++;
3699 			mp->m_pkthdr.csum_flags |= type | M_CSUM_TCP_UDP_BAD;
3700 		}
3701 	}
3702 	return;
3703 }
3704 
3705 static void
3706 ixv_setup_vlan_support(struct adapter *adapter)
3707 {
3708 	struct ixgbe_hw *hw = &adapter->hw;
3709 	u32		ctrl, vid, vfta, retry;
3710 
3711 
3712 	/*
3713 	** We get here via init_locked, meaning a soft reset;
3714 	** that has already cleared the VFTA and other state,
3715 	** so if no VLANs have been registered there is
3716 	** nothing to do.
3717 	*/
3718 	if (adapter->num_vlans == 0)
3719 		return;
3720 
3721 	/* Enable the queues */
3722 	for (int i = 0; i < adapter->num_queues; i++) {
3723 		ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
3724 		ctrl |= IXGBE_RXDCTL_VME;
3725 		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), ctrl);
3726 	}
3727 
3728 	/*
3729 	** A soft reset zeroes out the VFTA, so
3730 	** we need to repopulate it now.
3731 	*/
3732 	for (int i = 0; i < VFTA_SIZE; i++) {
3733 		if (ixv_shadow_vfta[i] == 0)
3734 			continue;
3735 		vfta = ixv_shadow_vfta[i];
3736 		/*
3737 		** Reconstruct the VLAN IDs from the
3738 		** bits set in each of the 32-bit
3739 		** words of the shadow VFTA.
3740 		*/
3741 		for (int j = 0; j < 32; j++) {
3742 			retry = 0;
3743 			if ((vfta & (1 << j)) == 0)
3744 				continue;
3745 			vid = (i * 32) + j;
3746 			/* Call the shared code mailbox routine */
3747 			while (ixgbe_set_vfta(hw, vid, 0, TRUE)) {
3748 				if (++retry > 5)
3749 					break;
3750 			}
3751 		}
3752 	}
3753 }
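/*
** Illustrative sketch only (not part of the driver logic above): the
** shadow VFTA is an array of 32-bit words with one bit per VLAN ID, so
** the loop above recovers vid = i * 32 + j from word i, bit j.  The
** reverse mapping, used when a VLAN is registered, is the usual bitmap
** arithmetic; for example VLAN ID 100 lands in word 3, bit 4.
*/
#if 0	/* example only, mirrors ixv_register_vlan() below */
	u16 index = (vtag >> 5) & 0x7F;		/* which 32-bit word */
	u16 bit = vtag & 0x1F;			/* which bit within that word */
	ixv_shadow_vfta[index] |= (1 << bit);
#endif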
3754 
3755 #if 0	/* XXX Badly need to overhaul vlan(4) on NetBSD. */
3756 /*
3757 ** This routine is run via a VLAN config EVENT; it
3758 ** enables us to use the HW Filter table since
3759 ** we can get the VLAN ID. This just creates the
3760 ** entry in the soft version of the VFTA; init will
3761 ** repopulate the real table.
3762 */
3763 static void
3764 ixv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3765 {
3766 	struct adapter	*adapter = ifp->if_softc;
3767 	u16		index, bit;
3768 
3769 	if (ifp->if_softc !=  arg)   /* Not our event */
3770 		return;
3771 
3772 	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
3773 		return;
3774 
3775 	IXV_CORE_LOCK(adapter);
3776 	index = (vtag >> 5) & 0x7F;
3777 	bit = vtag & 0x1F;
3778 	ixv_shadow_vfta[index] |= (1 << bit);
3779 	/* Re-init to load the changes */
3780 	ixv_init_locked(adapter);
3781 	IXV_CORE_UNLOCK(adapter);
3782 }
3783 
3784 /*
3785 ** This routine is run via a VLAN
3786 ** unconfig EVENT; it removes our
3787 ** entry from the soft VFTA.
3788 */
3789 static void
3790 ixv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3791 {
3792 	struct adapter	*adapter = ifp->if_softc;
3793 	u16		index, bit;
3794 
3795 	if (ifp->if_softc !=  arg)
3796 		return;
3797 
3798 	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
3799 		return;
3800 
3801 	IXV_CORE_LOCK(adapter);
3802 	index = (vtag >> 5) & 0x7F;
3803 	bit = vtag & 0x1F;
3804 	ixv_shadow_vfta[index] &= ~(1 << bit);
3805 	/* Re-init to load the changes */
3806 	ixv_init_locked(adapter);
3807 	IXV_CORE_UNLOCK(adapter);
3808 }
3809 #endif
3810 
3811 static void
3812 ixv_enable_intr(struct adapter *adapter)
3813 {
3814 	struct ixgbe_hw *hw = &adapter->hw;
3815 	struct ix_queue *que = adapter->queues;
3816 	u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
3817 
3818 
3819 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
3820 
3821 	mask = IXGBE_EIMS_ENABLE_MASK;
3822 	mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
3823 	IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);
3824 
3825         for (int i = 0; i < adapter->num_queues; i++, que++)
3826 		ixv_enable_queue(adapter, que->msix);
3827 
3828 	IXGBE_WRITE_FLUSH(hw);
3829 
3830 	return;
3831 }
3832 
3833 static void
3834 ixv_disable_intr(struct adapter *adapter)
3835 {
3836 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIAC, 0);
3837 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIMC, ~0);
3838 	IXGBE_WRITE_FLUSH(&adapter->hw);
3839 	return;
3840 }
3841 
3842 /*
3843 ** Setup the correct IVAR register for a particular MSIX interrupt
3844 **  - entry is the register array entry
3845 **  - vector is the MSIX vector for this queue
3846 **  - type is RX/TX/MISC
3847 */
3848 static void
3849 ixv_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
3850 {
3851 	struct ixgbe_hw *hw = &adapter->hw;
3852 	u32 ivar, index;
3853 
3854 	vector |= IXGBE_IVAR_ALLOC_VAL;
3855 
3856 	if (type == -1) { /* MISC IVAR */
3857 		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
3858 		ivar &= ~0xFF;
3859 		ivar |= vector;
3860 		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
3861 	} else {	/* RX/TX IVARS */
3862 		index = (16 * (entry & 1)) + (8 * type);
3863 		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1));
3864 		ivar &= ~(0xFF << index);
3865 		ivar |= (vector << index);
3866 		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar);
3867 	}
3868 }
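/*
** Worked example (illustrative only): each VTIVAR register carries four
** 8-bit entries covering two queues, with the RX vector in the low byte
** of each pair and the TX vector in the high byte.  Assuming queue 0 is
** on MSIX vector 1 and queue 1 on MSIX vector 2, the calls below would
** program VTIVAR(0) as follows:
*/
#if 0	/* example only, not compiled */
	ixv_set_ivar(adapter, 0, 1, 0);	/* q0 RX -> VTIVAR(0) bits  7:0  */
	ixv_set_ivar(adapter, 0, 1, 1);	/* q0 TX -> VTIVAR(0) bits 15:8  */
	ixv_set_ivar(adapter, 1, 2, 0);	/* q1 RX -> VTIVAR(0) bits 23:16 */
	ixv_set_ivar(adapter, 1, 2, 1);	/* q1 TX -> VTIVAR(0) bits 31:24 */
#endif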
3869 
3870 static void
3871 ixv_configure_ivars(struct adapter *adapter)
3872 {
3873 	struct  ix_queue *que = adapter->queues;
3874 
3875         for (int i = 0; i < adapter->num_queues; i++, que++) {
3876 		/* First the RX queue entry */
3877                 ixv_set_ivar(adapter, i, que->msix, 0);
3878 		/* ... and the TX */
3879 		ixv_set_ivar(adapter, i, que->msix, 1);
3880 		/* Set an initial value in EITR */
3881                 IXGBE_WRITE_REG(&adapter->hw,
3882                     IXGBE_VTEITR(que->msix), IXV_EITR_DEFAULT);
3883 	}
3884 
3885 	/* For the Link interrupt */
3886         ixv_set_ivar(adapter, 1, adapter->mbxvec, -1);
3887 }
3888 
3889 
3890 /*
3891 ** Tasklet handler for MSIX MBX interrupts
3892 **  - done outside interrupt context since it might sleep
3893 */
3894 static void
3895 ixv_handle_mbx(void *context)
3896 {
3897 	struct adapter  *adapter = context;
3898 
3899 	ixgbe_check_link(&adapter->hw,
3900 	    &adapter->link_speed, &adapter->link_up, 0);
3901 	ixv_update_link_status(adapter);
3902 }
3903 
3904 /*
3905 ** The VF stats registers never have a truly virgin
3906 ** starting point, so this routine tries to make an
3907 ** artificial one, marking ground zero on attach, as
3908 ** it were.
3909 */
3910 static void
3911 ixv_save_stats(struct adapter *adapter)
3912 {
3913 	if (adapter->stats.vfgprc || adapter->stats.vfgptc) {
3914 		adapter->stats.saved_reset_vfgprc +=
3915 		    adapter->stats.vfgprc - adapter->stats.base_vfgprc;
3916 		adapter->stats.saved_reset_vfgptc +=
3917 		    adapter->stats.vfgptc - adapter->stats.base_vfgptc;
3918 		adapter->stats.saved_reset_vfgorc +=
3919 		    adapter->stats.vfgorc - adapter->stats.base_vfgorc;
3920 		adapter->stats.saved_reset_vfgotc +=
3921 		    adapter->stats.vfgotc - adapter->stats.base_vfgotc;
3922 		adapter->stats.saved_reset_vfmprc +=
3923 		    adapter->stats.vfmprc - adapter->stats.base_vfmprc;
3924 	}
3925 }
3926 
3927 static void
3928 ixv_init_stats(struct adapter *adapter)
3929 {
3930 	struct ixgbe_hw *hw = &adapter->hw;
3931 
3932 	adapter->stats.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
3933 	adapter->stats.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
3934 	adapter->stats.last_vfgorc |=
3935 	    (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
3936 
3937 	adapter->stats.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
3938 	adapter->stats.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
3939 	adapter->stats.last_vfgotc |=
3940 	    (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
3941 
3942 	adapter->stats.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
3943 
3944 	adapter->stats.base_vfgprc = adapter->stats.last_vfgprc;
3945 	adapter->stats.base_vfgorc = adapter->stats.last_vfgorc;
3946 	adapter->stats.base_vfgptc = adapter->stats.last_vfgptc;
3947 	adapter->stats.base_vfgotc = adapter->stats.last_vfgotc;
3948 	adapter->stats.base_vfmprc = adapter->stats.last_vfmprc;
3949 }
3950 
3951 #define UPDATE_STAT_32(reg, last, count)		\
3952 {							\
3953 	u32 current = IXGBE_READ_REG(hw, reg);		\
3954 	if (current < last)				\
3955 		count += 0x100000000LL;			\
3956 	last = current;					\
3957 	count &= 0xFFFFFFFF00000000LL;			\
3958 	count |= current;				\
3959 }
3960 
3961 #define UPDATE_STAT_36(lsb, msb, last, count) 		\
3962 {							\
3963 	u64 cur_lsb = IXGBE_READ_REG(hw, lsb);		\
3964 	u64 cur_msb = IXGBE_READ_REG(hw, msb);		\
3965 	u64 current = ((cur_msb << 32) | cur_lsb);	\
3966 	if (current < last)				\
3967 		count += 0x1000000000LL;		\
3968 	last = current;					\
3969 	count &= 0xFFFFFFF000000000LL;			\
3970 	count |= current;				\
3971 }
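/*
** Worked example (illustrative): the VF counters are only 32 (or 36) bits
** wide in hardware, so the macros above keep a wider software total and
** add one full register span whenever the raw value wraps.  For the
** 32-bit case, if count = 0x00000000FFFFFFF0, last = 0xFFFFFFF0 and the
** new read is 0x00000010, then current < last, count gains 0x100000000,
** and the result is (count & 0xFFFFFFFF00000000) | current =
** 0x0000000100000010.
*/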
3972 
3973 /*
3974 ** ixv_update_stats - Update the board statistics counters.
3975 */
3976 void
3977 ixv_update_stats(struct adapter *adapter)
3978 {
3979         struct ixgbe_hw *hw = &adapter->hw;
3980 
3981         UPDATE_STAT_32(IXGBE_VFGPRC, adapter->stats.last_vfgprc,
3982 	    adapter->stats.vfgprc);
3983         UPDATE_STAT_32(IXGBE_VFGPTC, adapter->stats.last_vfgptc,
3984 	    adapter->stats.vfgptc);
3985         UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
3986 	    adapter->stats.last_vfgorc, adapter->stats.vfgorc);
3987         UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
3988 	    adapter->stats.last_vfgotc, adapter->stats.vfgotc);
3989         UPDATE_STAT_32(IXGBE_VFMPRC, adapter->stats.last_vfmprc,
3990 	    adapter->stats.vfmprc);
3991 }
3992 
3993 /**********************************************************************
3994  *
3995  *  This routine is reached via the stats sysctl handler and provides
3996  *  a way to take a look at important statistics maintained by the
3997  *  driver and hardware.
3998  *
3999  **********************************************************************/
4000 static void
4001 ixv_print_hw_stats(struct adapter * adapter)
4002 {
4003         device_t dev = adapter->dev;
4004 
4005         device_printf(dev,"Std Mbuf Failed = %"PRIu64"\n",
4006                adapter->mbuf_defrag_failed.ev_count);
4007         device_printf(dev,"Driver dropped packets = %"PRIu64"\n",
4008                adapter->dropped_pkts.ev_count);
4009         device_printf(dev, "watchdog timeouts = %"PRIu64"\n",
4010                adapter->watchdog_events.ev_count);
4011 
4012         device_printf(dev,"Good Packets Rcvd = %lld\n",
4013                (long long)adapter->stats.vfgprc);
4014         device_printf(dev,"Good Packets Xmtd = %lld\n",
4015                (long long)adapter->stats.vfgptc);
4016         device_printf(dev,"TSO Transmissions = %"PRIu64"\n",
4017                adapter->tso_tx.ev_count);
4018 
4019 }
4020 
4021 /**********************************************************************
4022  *
4023  *  This routine is reached via the debug sysctl handler and provides
4024  *  a way to take a look at important statistics maintained by the
4025  *  driver and hardware.
4026  *
4027  **********************************************************************/
4028 static void
4029 ixv_print_debug_info(struct adapter *adapter)
4030 {
4031         device_t dev = adapter->dev;
4032         struct ixgbe_hw         *hw = &adapter->hw;
4033         struct ix_queue         *que = adapter->queues;
4034         struct rx_ring          *rxr;
4035         struct tx_ring          *txr;
4036 #ifdef LRO
4037         struct lro_ctrl         *lro;
4038 #endif /* LRO */
4039 
4040         device_printf(dev,"Error Byte Count = %u \n",
4041             IXGBE_READ_REG(hw, IXGBE_ERRBC));
4042 
4043         for (int i = 0; i < adapter->num_queues; i++, que++) {
4044                 txr = que->txr;
4045                 rxr = que->rxr;
4046 #ifdef LRO
4047                 lro = &rxr->lro;
4048 #endif /* LRO */
4049                 device_printf(dev,"QUE(%d) IRQs Handled: %lu\n",
4050                     que->msix, (long)que->irqs);
4051                 device_printf(dev,"RX(%d) Packets Received: %lld\n",
4052                     rxr->me, (long long)rxr->rx_packets.ev_count);
4053                 device_printf(dev,"RX(%d) Split RX Packets: %lld\n",
4054                     rxr->me, (long long)rxr->rx_split_packets.ev_count);
4055                 device_printf(dev,"RX(%d) Bytes Received: %lu\n",
4056                     rxr->me, (long)rxr->rx_bytes.ev_count);
4057 #ifdef LRO
4058                 device_printf(dev,"RX(%d) LRO Queued= %d\n",
4059                     rxr->me, lro->lro_queued);
4060                 device_printf(dev,"RX(%d) LRO Flushed= %d\n",
4061                     rxr->me, lro->lro_flushed);
4062 #endif /* LRO */
4063                 device_printf(dev,"TX(%d) Packets Sent: %lu\n",
4064                     txr->me, (long)txr->total_packets.ev_count);
4065                 device_printf(dev,"TX(%d) NO Desc Avail: %lu\n",
4066                     txr->me, (long)txr->no_desc_avail.ev_count);
4067         }
4068 
4069         device_printf(dev,"MBX IRQ Handled: %lu\n",
4070             (long)adapter->mbx_irq.ev_count);
4071         return;
4072 }
4073 
4074 static int
4075 ixv_sysctl_stats(SYSCTLFN_ARGS)
4076 {
4077 	struct sysctlnode node;
4078 	int             error;
4079 	int		result;
4080 	struct adapter *adapter;
4081 
4082 	node = *rnode;
4083 	adapter = (struct adapter *)node.sysctl_data;
4084 	node.sysctl_data = &result;
4085 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
4086 	if (error != 0)
4087 		return error;
4088 
4089 	if (result == 1)
4090 		ixv_print_hw_stats(adapter);
4091 
4092 	return 0;
4093 }
4094 
4095 static int
4096 ixv_sysctl_debug(SYSCTLFN_ARGS)
4097 {
4098 	struct sysctlnode node;
4099 	int error, result;
4100 	struct adapter *adapter;
4101 
4102 	node = *rnode;
4103 	adapter = (struct adapter *)node.sysctl_data;
4104 	node.sysctl_data = &result;
4105 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
4106 
4107 	if (error)
4108 		return error;
4109 
4110 	if (result == 1)
4111 		ixv_print_debug_info(adapter);
4112 
4113 	return 0;
4114 }
4115 
4116 /*
4117 ** Set flow control using sysctl:
4118 ** Flow control values:
4119 ** 	0 - off
4120 **	1 - rx pause
4121 **	2 - tx pause
4122 **	3 - full
4123 */
4124 static int
4125 ixv_set_flowcntl(SYSCTLFN_ARGS)
4126 {
4127 	struct sysctlnode node;
4128 	int error;
4129 	struct adapter *adapter;
4130 
4131 	node = *rnode;
4132 	adapter = (struct adapter *)node.sysctl_data;
4133 	node.sysctl_data = &ixv_flow_control;
4134 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
4135 
4136 	if (error)
4137 		return (error);
4138 
4139 	switch (ixv_flow_control) {
4140 		case ixgbe_fc_rx_pause:
4141 		case ixgbe_fc_tx_pause:
4142 		case ixgbe_fc_full:
4143 			adapter->hw.fc.requested_mode = ixv_flow_control;
4144 			break;
4145 		case ixgbe_fc_none:
4146 		default:
4147 			adapter->hw.fc.requested_mode = ixgbe_fc_none;
4148 	}
4149 
4150 	ixgbe_fc_enable(&adapter->hw);
4151 	return error;
4152 }
4153 
4154 const struct sysctlnode *
4155 ixv_sysctl_instance(struct adapter *adapter)
4156 {
4157 	const char *dvname;
4158 	struct sysctllog **log;
4159 	int rc;
4160 	const struct sysctlnode *rnode;
4161 
4162 	log = &adapter->sysctllog;
4163 	dvname = device_xname(adapter->dev);
4164 
4165 	if ((rc = sysctl_createv(log, 0, NULL, &rnode,
4166 	    0, CTLTYPE_NODE, dvname,
4167 	    SYSCTL_DESCR("ixv information and settings"),
4168 	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
4169 		goto err;
4170 
4171 	return rnode;
4172 err:
4173 	printf("%s: sysctl_createv failed, rc = %d\n", __func__, rc);
4174 	return NULL;
4175 }
4176 
4177 static void
4178 ixv_add_rx_process_limit(struct adapter *adapter, const char *name,
4179         const char *description, int *limit, int value)
4180 {
4181 	const struct sysctlnode *rnode, *cnode;
4182 	struct sysctllog **log = &adapter->sysctllog;
4183 
4184         *limit = value;
4185 
4186 	if ((rnode = ixv_sysctl_instance(adapter)) == NULL)
4187 		aprint_error_dev(adapter->dev,
4188 		    "could not create sysctl root\n");
4189 	else if (sysctl_createv(log, 0, &rnode, &cnode,
4190 	    CTLFLAG_READWRITE,
4191 	    CTLTYPE_INT,
4192 	    name, SYSCTL_DESCR(description),
4193 	    NULL, 0, limit, 0,
4194 	    CTL_CREATE, CTL_EOL) != 0) {
4195 		aprint_error_dev(adapter->dev, "%s: could not create sysctl\n",
4196 		    __func__);
4197 	}
4198 }
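/*
** Typical use (illustrative sketch; the actual call, limit name and
** default value live in the attach path elsewhere in this driver):
*/
#if 0	/* example only, not compiled */
	ixv_add_rx_process_limit(adapter, "rx_processing_limit",
	    "max number of rx packets to process",
	    &adapter->rx_process_limit, ixv_rx_process_limit);
#endif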
4199 
4200