1 /*-
2 * Copyright (c) 2013 Tsubai Masanari
3 * Copyright (c) 2013 Bryan Venteicher <bryanv@FreeBSD.org>
4 *
5 * Permission to use, copy, modify, and distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16 *
17 * $OpenBSD: src/sys/dev/pci/if_vmx.c,v 1.11 2013/06/22 00:28:10 uebayasi Exp $
18 * $FreeBSD: head/sys/dev/vmware/vmxnet3/if_vmx.c 318867 2017-05-25 10:49:56Z avg $
19 */
20
21 /* Driver for VMware vmxnet3 virtual ethernet devices. */
22
23 #include <sys/param.h>
24 #include <sys/systm.h>
25 #include <sys/device.h>
26 #include <sys/eventhandler.h>
27 #include <sys/kernel.h>
28 #include <sys/endian.h>
29 #include <sys/sockio.h>
30 #include <sys/mbuf.h>
31 #include <sys/malloc.h>
32 #include <sys/module.h>
33 #include <sys/socket.h>
34 #include <sys/sysctl.h>
35 #include <sys/taskqueue.h>
36 #include <vm/vm.h>
37 #include <vm/pmap.h>
38
39 #include <net/ethernet.h>
40 #include <net/if.h>
41 #include <net/if_var.h>
42 #include <net/ifq_var.h>
43 #include <net/if_arp.h>
44 #include <net/if_dl.h>
45 #include <net/if_types.h>
46 #include <net/if_media.h>
47 #include <net/vlan/if_vlan_ether.h>
48 #include <net/vlan/if_vlan_var.h>
49
50 #include <net/bpf.h>
51
52 #include <netinet/in_systm.h>
53 #include <netinet/in.h>
54 #include <netinet/ip.h>
55 #include <netinet/ip6.h>
56 #include <netinet6/ip6_var.h>
57 #include <netinet/udp.h>
58 #include <netinet/tcp.h>
59
60 #include <sys/in_cksum.h>
61
62 #include <sys/bus.h>
63 #include <sys/rman.h>
64
65 #include <bus/pci/pcireg.h>
66 #include <bus/pci/pcivar.h>
67
68 #define VMXNET3_LEGACY_TX 1 /* XXX we need this at the moment */
69 #include "if_vmxreg.h"
70 #include "if_vmxvar.h"
71
72 #include "opt_inet.h"
73 #include "opt_inet6.h"
74
75 #ifdef VMXNET3_FAILPOINTS
76 #include <sys/fail.h>
77 static SYSCTL_NODE(DEBUG_FP, OID_AUTO, vmxnet3, CTLFLAG_RW, 0,
78 "vmxnet3 fail points");
79 #define VMXNET3_FP _debug_fail_point_vmxnet3
80 #endif
81
82 static int vmxnet3_probe(device_t);
83 static int vmxnet3_attach(device_t);
84 static int vmxnet3_detach(device_t);
85 static int vmxnet3_shutdown(device_t);
86
87 static int vmxnet3_alloc_resources(struct vmxnet3_softc *);
88 static void vmxnet3_free_resources(struct vmxnet3_softc *);
89 static int vmxnet3_check_version(struct vmxnet3_softc *);
90 static void vmxnet3_initial_config(struct vmxnet3_softc *);
91 static void vmxnet3_check_multiqueue(struct vmxnet3_softc *);
92
93 #ifdef __FreeBSD__
94 static int vmxnet3_alloc_msix_interrupts(struct vmxnet3_softc *);
95 static int vmxnet3_alloc_msi_interrupts(struct vmxnet3_softc *);
96 #else
97 static int vmxnet3_alloc_msi_interrupts(struct vmxnet3_softc *);
98 #endif
99 static int vmxnet3_alloc_legacy_interrupts(struct vmxnet3_softc *);
100 static int vmxnet3_alloc_interrupt(struct vmxnet3_softc *, int, int,
101 struct vmxnet3_interrupt *);
102 static int vmxnet3_alloc_intr_resources(struct vmxnet3_softc *);
103 #ifdef __FreeBSD__
104 static int vmxnet3_setup_msix_interrupts(struct vmxnet3_softc *);
105 #endif
106 static int vmxnet3_setup_legacy_interrupt(struct vmxnet3_softc *);
107 static int vmxnet3_setup_interrupts(struct vmxnet3_softc *);
108 static int vmxnet3_alloc_interrupts(struct vmxnet3_softc *);
109
110 static void vmxnet3_free_interrupt(struct vmxnet3_softc *,
111 struct vmxnet3_interrupt *);
112 static void vmxnet3_free_interrupts(struct vmxnet3_softc *);
113
114 #ifndef VMXNET3_LEGACY_TX
115 static int vmxnet3_alloc_taskqueue(struct vmxnet3_softc *);
116 static void vmxnet3_start_taskqueue(struct vmxnet3_softc *);
117 static void vmxnet3_drain_taskqueue(struct vmxnet3_softc *);
118 static void vmxnet3_free_taskqueue(struct vmxnet3_softc *);
119 #endif
120
121 static int vmxnet3_init_rxq(struct vmxnet3_softc *, int);
122 static int vmxnet3_init_txq(struct vmxnet3_softc *, int);
123 static int vmxnet3_alloc_rxtx_queues(struct vmxnet3_softc *);
124 static void vmxnet3_destroy_rxq(struct vmxnet3_rxqueue *);
125 static void vmxnet3_destroy_txq(struct vmxnet3_txqueue *);
126 static void vmxnet3_free_rxtx_queues(struct vmxnet3_softc *);
127
128 static int vmxnet3_alloc_shared_data(struct vmxnet3_softc *);
129 static void vmxnet3_free_shared_data(struct vmxnet3_softc *);
130 static int vmxnet3_alloc_txq_data(struct vmxnet3_softc *);
131 static void vmxnet3_free_txq_data(struct vmxnet3_softc *);
132 static int vmxnet3_alloc_rxq_data(struct vmxnet3_softc *);
133 static void vmxnet3_free_rxq_data(struct vmxnet3_softc *);
134 static int vmxnet3_alloc_queue_data(struct vmxnet3_softc *);
135 static void vmxnet3_free_queue_data(struct vmxnet3_softc *);
136 static int vmxnet3_alloc_mcast_table(struct vmxnet3_softc *);
137 static void vmxnet3_init_shared_data(struct vmxnet3_softc *);
138 static void vmxnet3_init_hwassist(struct vmxnet3_softc *);
139 static void vmxnet3_reinit_interface(struct vmxnet3_softc *);
140 static void vmxnet3_reinit_rss_shared_data(struct vmxnet3_softc *);
141 static void vmxnet3_reinit_shared_data(struct vmxnet3_softc *);
142 static int vmxnet3_alloc_data(struct vmxnet3_softc *);
143 static void vmxnet3_free_data(struct vmxnet3_softc *);
144 static int vmxnet3_setup_interface(struct vmxnet3_softc *);
145
146 static void vmxnet3_evintr(struct vmxnet3_softc *);
147 static void vmxnet3_txq_eof(struct vmxnet3_txqueue *);
148 static void vmxnet3_rx_csum(struct vmxnet3_rxcompdesc *, struct mbuf *);
149 static int vmxnet3_newbuf(struct vmxnet3_softc *, struct vmxnet3_rxring *);
150 static void vmxnet3_rxq_eof_discard(struct vmxnet3_rxqueue *,
151 struct vmxnet3_rxring *, int);
152 static void vmxnet3_rxq_eof(struct vmxnet3_rxqueue *);
153 static void vmxnet3_legacy_intr(void *);
154 #ifdef __FreeBSD__
155 static void vmxnet3_txq_intr(void *);
156 static void vmxnet3_rxq_intr(void *);
157 static void vmxnet3_event_intr(void *);
158 #endif
159
160 static void vmxnet3_txstop(struct vmxnet3_softc *, struct vmxnet3_txqueue *);
161 static void vmxnet3_rxstop(struct vmxnet3_softc *, struct vmxnet3_rxqueue *);
162 static void vmxnet3_stop(struct vmxnet3_softc *);
163
164 static void vmxnet3_txinit(struct vmxnet3_softc *, struct vmxnet3_txqueue *);
165 static int vmxnet3_rxinit(struct vmxnet3_softc *, struct vmxnet3_rxqueue *);
166 static int vmxnet3_reinit_queues(struct vmxnet3_softc *);
167 static int vmxnet3_enable_device(struct vmxnet3_softc *);
168 static void vmxnet3_reinit_rxfilters(struct vmxnet3_softc *);
169 static int vmxnet3_reinit(struct vmxnet3_softc *);
170 static void vmxnet3_init_locked(struct vmxnet3_softc *);
171 static void vmxnet3_init(void *);
172
173 static int vmxnet3_txq_offload_ctx(struct vmxnet3_txqueue *,struct mbuf *,
174 int *, int *, int *);
175 static int vmxnet3_txq_load_mbuf(struct vmxnet3_txqueue *, struct mbuf **,
176 bus_dmamap_t, bus_dma_segment_t [], int *);
177 static void vmxnet3_txq_unload_mbuf(struct vmxnet3_txqueue *, bus_dmamap_t);
178 static int vmxnet3_txq_encap(struct vmxnet3_txqueue *, struct mbuf **);
179 #ifdef VMXNET3_LEGACY_TX
180 static void vmxnet3_start_locked(struct ifnet *);
181 static void vmxnet3_start(struct ifnet *, struct ifaltq_subque *);
182 #else
183 static int vmxnet3_txq_mq_start_locked(struct vmxnet3_txqueue *,
184 struct mbuf *);
185 static int vmxnet3_txq_mq_start(struct ifnet *, struct mbuf *);
186 static void vmxnet3_txq_tq_deferred(void *, int);
187 #endif
188 static void vmxnet3_txq_start(struct vmxnet3_txqueue *);
189 static void vmxnet3_tx_start_all(struct vmxnet3_softc *);
190
191 static void vmxnet3_update_vlan_filter(struct vmxnet3_softc *, int,
192 uint16_t);
193 static void vmxnet3_register_vlan(void *, struct ifnet *, uint16_t);
194 static void vmxnet3_unregister_vlan(void *, struct ifnet *, uint16_t);
195 static void vmxnet3_set_rxfilter(struct vmxnet3_softc *);
196 static int vmxnet3_change_mtu(struct vmxnet3_softc *, int);
197 static int vmxnet3_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
198
199 #ifndef VMXNET3_LEGACY_TX
200 static void vmxnet3_qflush(struct ifnet *);
201 #endif
202
203 static int vmxnet3_watchdog(struct vmxnet3_txqueue *);
204 static void vmxnet3_refresh_host_stats(struct vmxnet3_softc *);
205 static void vmxnet3_txq_accum_stats(struct vmxnet3_txqueue *,
206 struct vmxnet3_txq_stats *);
207 static void vmxnet3_rxq_accum_stats(struct vmxnet3_rxqueue *,
208 struct vmxnet3_rxq_stats *);
209 static void vmxnet3_tick(void *);
210 static void vmxnet3_link_status(struct vmxnet3_softc *);
211 static void vmxnet3_media_status(struct ifnet *, struct ifmediareq *);
212 static int vmxnet3_media_change(struct ifnet *);
213 static void vmxnet3_set_lladdr(struct vmxnet3_softc *);
214 static void vmxnet3_get_lladdr(struct vmxnet3_softc *);
215
216 static void vmxnet3_setup_txq_sysctl(struct vmxnet3_txqueue *,
217 struct sysctl_ctx_list *, struct sysctl_oid_list *);
218 static void vmxnet3_setup_rxq_sysctl(struct vmxnet3_rxqueue *,
219 struct sysctl_ctx_list *, struct sysctl_oid_list *);
220 static void vmxnet3_setup_queue_sysctl(struct vmxnet3_softc *,
221 struct sysctl_ctx_list *, struct sysctl_oid_list *);
222 static void vmxnet3_setup_sysctl(struct vmxnet3_softc *);
223
224 static void vmxnet3_write_bar0(struct vmxnet3_softc *, bus_size_t,
225 uint32_t);
226 static uint32_t vmxnet3_read_bar1(struct vmxnet3_softc *, bus_size_t);
227 static void vmxnet3_write_bar1(struct vmxnet3_softc *, bus_size_t,
228 uint32_t);
229 static void vmxnet3_write_cmd(struct vmxnet3_softc *, uint32_t);
230 static uint32_t vmxnet3_read_cmd(struct vmxnet3_softc *, uint32_t);
231
232 static void vmxnet3_enable_intr(struct vmxnet3_softc *, int);
233 static void vmxnet3_disable_intr(struct vmxnet3_softc *, int);
234 static void vmxnet3_enable_all_intrs(struct vmxnet3_softc *);
235 static void vmxnet3_disable_all_intrs(struct vmxnet3_softc *);
236
237 static int vmxnet3_dma_malloc(struct vmxnet3_softc *, bus_size_t,
238 bus_size_t, struct vmxnet3_dma_alloc *);
239 static void vmxnet3_dma_free(struct vmxnet3_softc *,
240 struct vmxnet3_dma_alloc *);
241 static int vmxnet3_tunable_int(struct vmxnet3_softc *,
242 const char *, int);
243
244 typedef enum {
245 VMXNET3_BARRIER_RD,
246 VMXNET3_BARRIER_WR,
247 VMXNET3_BARRIER_RDWR,
248 } vmxnet3_barrier_t;
249
250 static void vmxnet3_barrier(struct vmxnet3_softc *, vmxnet3_barrier_t);
251
252 /* Tunables. */
253 static int vmxnet3_mq_disable = 0;
254 TUNABLE_INT("hw.vmx.mq_disable", &vmxnet3_mq_disable);
255 static int vmxnet3_default_txnqueue = VMXNET3_DEF_TX_QUEUES;
256 TUNABLE_INT("hw.vmx.txnqueue", &vmxnet3_default_txnqueue);
257 static int vmxnet3_default_rxnqueue = VMXNET3_DEF_RX_QUEUES;
258 TUNABLE_INT("hw.vmx.rxnqueue", &vmxnet3_default_rxnqueue);
259 static int vmxnet3_default_txndesc = VMXNET3_DEF_TX_NDESC;
260 TUNABLE_INT("hw.vmx.txndesc", &vmxnet3_default_txndesc);
261 static int vmxnet3_default_rxndesc = VMXNET3_DEF_RX_NDESC;
262 TUNABLE_INT("hw.vmx.rxndesc", &vmxnet3_default_rxndesc);
263
264 static device_method_t vmxnet3_methods[] = {
265 /* Device interface. */
266 DEVMETHOD(device_probe, vmxnet3_probe),
267 DEVMETHOD(device_attach, vmxnet3_attach),
268 DEVMETHOD(device_detach, vmxnet3_detach),
269 DEVMETHOD(device_shutdown, vmxnet3_shutdown),
270
271 DEVMETHOD_END
272 };
273
274 static driver_t vmxnet3_driver = {
275 "vmx", vmxnet3_methods, sizeof(struct vmxnet3_softc)
276 };
277
278 static devclass_t vmxnet3_devclass;
279 DRIVER_MODULE(vmx, pci, vmxnet3_driver, vmxnet3_devclass, NULL, NULL);
280
281 MODULE_DEPEND(vmx, pci, 1, 1, 1);
282 MODULE_DEPEND(vmx, ether, 1, 1, 1);
283
284 #define VMXNET3_VMWARE_VENDOR_ID 0x15AD
285 #define VMXNET3_VMWARE_DEVICE_ID 0x07B0
286
287 static int
vmxnet3_probe(device_t dev)288 vmxnet3_probe(device_t dev)
289 {
290
291 if (pci_get_vendor(dev) == VMXNET3_VMWARE_VENDOR_ID &&
292 pci_get_device(dev) == VMXNET3_VMWARE_DEVICE_ID) {
293 device_set_desc(dev, "VMware VMXNET3 Ethernet Adapter");
294 return (BUS_PROBE_DEFAULT);
295 }
296
297 return (ENXIO);
298 }
299
/*
 * Device attach: set up locking, map device resources, negotiate the
 * device version, then allocate queues, interrupts, shared data and
 * the network interface.
 *
 * Any failure falls through to the "fail" label, which calls
 * vmxnet3_detach() to unwind whatever was initialized so far.
 */
static int
vmxnet3_attach(device_t dev)
{
	struct vmxnet3_softc *sc;
	int error;

	sc = device_get_softc(dev);
	sc->vmx_dev = dev;

	pci_enable_busmaster(dev);

	VMXNET3_CORE_LOCK_INIT(sc, device_get_nameunit(dev));
	/* The tick callout runs with the core lock held. */
	callout_init_lk(&sc->vmx_tick, &sc->vmx_lock);

	/* Establish queue counts and ring sizes from tunables first. */
	vmxnet3_initial_config(sc);

	error = vmxnet3_alloc_resources(sc);
	if (error)
		goto fail;

	error = vmxnet3_check_version(sc);
	if (error)
		goto fail;

	error = vmxnet3_alloc_rxtx_queues(sc);
	if (error)
		goto fail;

#ifndef VMXNET3_LEGACY_TX
	error = vmxnet3_alloc_taskqueue(sc);
	if (error)
		goto fail;
#endif

	error = vmxnet3_alloc_interrupts(sc);
	if (error)
		goto fail;

	/* Final queue counts depend on the interrupt type chosen above. */
	vmxnet3_check_multiqueue(sc);

	error = vmxnet3_alloc_data(sc);
	if (error)
		goto fail;

	error = vmxnet3_setup_interface(sc);
	if (error)
		goto fail;

	error = vmxnet3_setup_interrupts(sc);
	if (error) {
		ether_ifdetach(sc->vmx_ifp);
		device_printf(dev, "could not set up interrupt\n");
		goto fail;
	}

	vmxnet3_setup_sysctl(sc);
#ifndef VMXNET3_LEGACY_TX
	vmxnet3_start_taskqueue(sc);
#endif

fail:
	if (error)
		vmxnet3_detach(dev);

	return (error);
}
366
/*
 * Device detach.  Also serves as the error-unwind path for
 * vmxnet3_attach(), so every teardown step must tolerate
 * partially-initialized state.
 */
static int
vmxnet3_detach(device_t dev)
{
	struct vmxnet3_softc *sc;
	struct ifnet *ifp;

	sc = device_get_softc(dev);
	ifp = sc->vmx_ifp;

	if (device_is_attached(dev)) {
		VMXNET3_CORE_LOCK(sc);
		vmxnet3_stop(sc);
		VMXNET3_CORE_UNLOCK(sc);

		/* Stop the tick callout and wait for it to complete. */
		callout_terminate(&sc->vmx_tick);
#ifndef VMXNET3_LEGACY_TX
		vmxnet3_drain_taskqueue(sc);
#endif

		ether_ifdetach(ifp);
	}

	/*
	 * Unhook the VLAN event handlers, if registered.
	 * NOTE(review): the detach handler is deregistered from
	 * vlan_config (not vlan_unconfig) — verify this matches the
	 * event it was registered against in vmxnet3_setup_interface().
	 */
	if (sc->vmx_vlan_attach != NULL) {
		EVENTHANDLER_DEREGISTER(vlan_config, sc->vmx_vlan_attach);
		sc->vmx_vlan_attach = NULL;
	}
	if (sc->vmx_vlan_detach != NULL) {
		EVENTHANDLER_DEREGISTER(vlan_config, sc->vmx_vlan_detach);
		sc->vmx_vlan_detach = NULL;
	}

#ifndef VMXNET3_LEGACY_TX
	vmxnet3_free_taskqueue(sc);
#endif
	vmxnet3_free_interrupts(sc);

	if (ifp != NULL) {
		if_free(ifp);
		sc->vmx_ifp = NULL;
	}

	ifmedia_removeall(&sc->vmx_media);

	vmxnet3_free_data(sc);
	vmxnet3_free_resources(sc);
	vmxnet3_free_rxtx_queues(sc);

	VMXNET3_CORE_LOCK_DESTROY(sc);

	return (0);
}
418
/* System shutdown hook: no controller state needs to be saved. */
static int
vmxnet3_shutdown(device_t dev)
{

	return (0);
}
425
/*
 * Map the device's PCI register windows: BAR0 and BAR1, plus BAR2
 * when an MSI-X capability is present (the MSI-X table lives there).
 * If BAR2 cannot be mapped, MSI-X use is disabled via
 * VMXNET3_FLAG_NO_MSIX.  Resources acquired here are released by
 * vmxnet3_free_resources().
 */
static int
vmxnet3_alloc_resources(struct vmxnet3_softc *sc)
{
	device_t dev;
	int rid;

	dev = sc->vmx_dev;

	rid = PCIR_BAR(0);
	sc->vmx_res0 = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (sc->vmx_res0 == NULL) {
		device_printf(dev,
		    "could not map BAR0 memory\n");
		return (ENXIO);
	}

	sc->vmx_iot0 = rman_get_bustag(sc->vmx_res0);
	sc->vmx_ioh0 = rman_get_bushandle(sc->vmx_res0);

	rid = PCIR_BAR(1);
	sc->vmx_res1 = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (sc->vmx_res1 == NULL) {
		device_printf(dev,
		    "could not map BAR1 memory\n");
		return (ENXIO);
	}

	sc->vmx_iot1 = rman_get_bustag(sc->vmx_res1);
	sc->vmx_ioh1 = rman_get_bushandle(sc->vmx_res1);

	/* Only map BAR2 when the device advertises an MSI-X capability. */
	if (pci_find_extcap(dev, PCIY_MSIX, NULL) == 0) {
		rid = PCIR_BAR(2);
		sc->vmx_msix_res = bus_alloc_resource_any(dev,
		    SYS_RES_MEMORY, &rid, RF_ACTIVE);
	}

	if (sc->vmx_msix_res == NULL)
		sc->vmx_flags |= VMXNET3_FLAG_NO_MSIX;

	return (0);
}
469
470 static void
vmxnet3_free_resources(struct vmxnet3_softc * sc)471 vmxnet3_free_resources(struct vmxnet3_softc *sc)
472 {
473 device_t dev;
474 int rid;
475
476 dev = sc->vmx_dev;
477
478 if (sc->vmx_res0 != NULL) {
479 rid = PCIR_BAR(0);
480 bus_release_resource(dev, SYS_RES_MEMORY, rid, sc->vmx_res0);
481 sc->vmx_res0 = NULL;
482 }
483
484 if (sc->vmx_res1 != NULL) {
485 rid = PCIR_BAR(1);
486 bus_release_resource(dev, SYS_RES_MEMORY, rid, sc->vmx_res1);
487 sc->vmx_res1 = NULL;
488 }
489
490 if (sc->vmx_msix_res != NULL) {
491 rid = PCIR_BAR(2);
492 bus_release_resource(dev, SYS_RES_MEMORY, rid,
493 sc->vmx_msix_res);
494 sc->vmx_msix_res = NULL;
495 }
496 }
497
/*
 * Negotiate the device and UPT API revisions.  Bit 0 of each
 * revision-report register indicates revision 1 support; we require
 * and select revision 1 of both by writing 1 back.
 */
static int
vmxnet3_check_version(struct vmxnet3_softc *sc)
{
	device_t dev;
	uint32_t version;

	dev = sc->vmx_dev;

	version = vmxnet3_read_bar1(sc, VMXNET3_BAR1_VRRS);
	if ((version & 0x01) == 0) {
		device_printf(dev, "unsupported hardware version %#x\n",
		    version);
		return (ENOTSUP);
	}
	/* Select device API revision 1. */
	vmxnet3_write_bar1(sc, VMXNET3_BAR1_VRRS, 1);

	version = vmxnet3_read_bar1(sc, VMXNET3_BAR1_UVRS);
	if ((version & 0x01) == 0) {
		device_printf(dev, "unsupported UPT version %#x\n", version);
		return (ENOTSUP);
	}
	/* Select UPT revision 1. */
	vmxnet3_write_bar1(sc, VMXNET3_BAR1_UVRS, 1);

	return (0);
}
523
/*
 * Round val down to the largest power of two <= val.
 *
 * The previous "1U << (fls(val) - 1)" form invoked undefined
 * behavior (shift by -1) when val == 0 and relied on the nonportable
 * fls().  Guard the degenerate range and compute the power portably;
 * callers pass clamped queue counts in [1, ncpus].
 */
static int
trunc_powerof2(int val)
{
	int r;

	if (val < 1)
		return (1);

	/* r doubles while it can without exceeding val; no overflow. */
	for (r = 1; r <= val / 2; r <<= 1)
		;
	return (r);
}
530
/*
 * Derive queue counts and descriptor-ring sizes from tunables,
 * clamped to the device limits.  Queue counts are rounded down to a
 * power of two and capped at ncpus; descriptor counts are aligned
 * down to the device's required multiple.
 */
static void
vmxnet3_initial_config(struct vmxnet3_softc *sc)
{
	int nqueue, ndesc;

	/* Maximum TX queues: clamp to [1, MAX] and ncpus. */
	nqueue = vmxnet3_tunable_int(sc, "txnqueue", vmxnet3_default_txnqueue);
	if (nqueue > VMXNET3_MAX_TX_QUEUES || nqueue < 1)
		nqueue = VMXNET3_DEF_TX_QUEUES;
	if (nqueue > ncpus)
		nqueue = ncpus;
	sc->vmx_max_ntxqueues = trunc_powerof2(nqueue);

	/* Maximum RX queues: same clamping as TX. */
	nqueue = vmxnet3_tunable_int(sc, "rxnqueue", vmxnet3_default_rxnqueue);
	if (nqueue > VMXNET3_MAX_RX_QUEUES || nqueue < 1)
		nqueue = VMXNET3_DEF_RX_QUEUES;
	if (nqueue > ncpus)
		nqueue = ncpus;
	sc->vmx_max_nrxqueues = trunc_powerof2(nqueue);

	/* "mq_disable" forces single-queue operation. */
	if (vmxnet3_tunable_int(sc, "mq_disable", vmxnet3_mq_disable)) {
		sc->vmx_max_nrxqueues = 1;
		sc->vmx_max_ntxqueues = 1;
	}

	/* TX descriptors: clamp to device range, align down to the mask. */
	ndesc = vmxnet3_tunable_int(sc, "txd", vmxnet3_default_txndesc);
	if (ndesc > VMXNET3_MAX_TX_NDESC || ndesc < VMXNET3_MIN_TX_NDESC)
		ndesc = VMXNET3_DEF_TX_NDESC;
	if (ndesc & VMXNET3_MASK_TX_NDESC)
		ndesc &= ~VMXNET3_MASK_TX_NDESC;
	sc->vmx_ntxdescs = ndesc;

	/* RX descriptors: likewise. */
	ndesc = vmxnet3_tunable_int(sc, "rxd", vmxnet3_default_rxndesc);
	if (ndesc > VMXNET3_MAX_RX_NDESC || ndesc < VMXNET3_MIN_RX_NDESC)
		ndesc = VMXNET3_DEF_RX_NDESC;
	if (ndesc & VMXNET3_MASK_RX_NDESC)
		ndesc &= ~VMXNET3_MASK_RX_NDESC;
	sc->vmx_nrxdescs = ndesc;
	sc->vmx_max_rxsegs = VMXNET3_MAX_RX_SEGS;
}
570
571 static void
vmxnet3_check_multiqueue(struct vmxnet3_softc * sc)572 vmxnet3_check_multiqueue(struct vmxnet3_softc *sc)
573 {
574
575 if (sc->vmx_intr_type != VMXNET3_IT_MSIX)
576 goto out;
577
578 /* BMV: Just use the maximum configured for now. */
579 sc->vmx_nrxqueues = sc->vmx_max_nrxqueues;
580 sc->vmx_ntxqueues = sc->vmx_max_ntxqueues;
581
582 if (sc->vmx_nrxqueues > 1)
583 sc->vmx_flags |= VMXNET3_FLAG_RSS;
584
585 return;
586
587 out:
588 sc->vmx_ntxqueues = 1;
589 sc->vmx_nrxqueues = 1;
590 }
591
592 #ifdef __FreeBSD__
/*
 * Try to allocate one MSI-X vector per TX queue and per RX queue,
 * plus one for events.  Returns 0 on success; any non-zero value
 * (not an errno) tells the caller to fall back to MSI.
 */
static int
vmxnet3_alloc_msix_interrupts(struct vmxnet3_softc *sc)
{
	device_t dev;
	int nmsix, cnt, required;

	dev = sc->vmx_dev;

	if (sc->vmx_flags & VMXNET3_FLAG_NO_MSIX)
		return (1);

	/* Allocate an additional vector for the events interrupt. */
	required = sc->vmx_max_nrxqueues + sc->vmx_max_ntxqueues + 1;

	nmsix = pci_msix_count(dev);
	if (nmsix < required)
		return (1);

	cnt = required;
	if (pci_alloc_msix(dev, &cnt) == 0 && cnt >= required) {
		sc->vmx_nintrs = required;
		return (0);
	} else
		pci_release_msi(dev);

	/* BMV TODO Fallback to sharing MSIX vectors if possible. */

	return (1);
}
622
/*
 * Try to allocate a single MSI message.  Returns 0 on success; any
 * non-zero value tells the caller to fall back to a legacy INTx.
 */
static int
vmxnet3_alloc_msi_interrupts(struct vmxnet3_softc *sc)
{
	device_t dev;
	int nmsi, cnt, required;

	dev = sc->vmx_dev;
	required = 1;

	nmsi = pci_msi_count(dev);
	if (nmsi < required)
		return (1);

	cnt = required;
	if (pci_alloc_msi(dev, &cnt) == 0 && cnt >= required) {
		sc->vmx_nintrs = 1;
		return (0);
	} else
		pci_release_msi(dev);

	return (1);
}
645 #else
/*
 * DragonFly variant: pci_alloc_1intr() picks MSI when available and
 * otherwise falls back to legacy INTx, reporting the chosen type and
 * the resource flags to use for the IRQ allocation.
 *
 * NOTE(review): the rid returned by pci_alloc_1intr() is discarded;
 * vmxnet3_alloc_intr_resources() recomputes it — confirm they agree.
 */
static int
vmxnet3_alloc_msi_interrupts(struct vmxnet3_softc *sc)
{
	int irq_flags, rid;
	int enable = 1;

	sc->vmx_irq_type = pci_alloc_1intr(sc->vmx_dev, enable, &rid,
	    &irq_flags);
	sc->vmx_irq_flags = irq_flags;
	sc->vmx_nintrs = 1;
	return (0);
}
658 #endif
659
/* Legacy INTx: a single shared line, nothing to allocate up front. */
static int
vmxnet3_alloc_legacy_interrupts(struct vmxnet3_softc *sc)
{

	sc->vmx_nintrs = 1;
	return (0);
}
667
/*
 * Allocate the bus IRQ resource for one interrupt and record it in
 * *intr.  Returns ENXIO if the resource cannot be obtained.
 *
 * NOTE(review): the "flags" parameter is unused; the allocation uses
 * sc->vmx_irq_flags (set by pci_alloc_1intr()) instead.  Verify that
 * vmx_irq_flags is also initialized on the legacy-interrupt path,
 * where pci_alloc_1intr() is not called.
 */
static int
vmxnet3_alloc_interrupt(struct vmxnet3_softc *sc, int rid, int flags,
    struct vmxnet3_interrupt *intr)
{
	struct resource *irq;

	irq = bus_alloc_resource_any(sc->vmx_dev, SYS_RES_IRQ, &rid,
	    sc->vmx_irq_flags);
	if (irq == NULL)
		return (ENXIO);

	intr->vmxi_irq = irq;
	intr->vmxi_rid = rid;

	return (0);
}
684
/*
 * Allocate IRQ resources for every configured interrupt.  Legacy
 * INTx uses rid 0 and is shareable; MSI/MSI-X vectors are numbered
 * from rid 1 upward.
 */
static int
vmxnet3_alloc_intr_resources(struct vmxnet3_softc *sc)
{
	int i, rid, flags, error;

	rid = 0;
	flags = RF_ACTIVE;

	if (sc->vmx_intr_type == VMXNET3_IT_LEGACY)
		flags |= RF_SHAREABLE;
	else
		rid = 1;

	for (i = 0; i < sc->vmx_nintrs; i++, rid++) {
		error = vmxnet3_alloc_interrupt(sc, rid, flags,
		    &sc->vmx_intrs[i]);
		if (error)
			return (error);
	}

	return (0);
}
707
708 #ifdef __FreeBSD__
/*
 * Wire up the MSI-X handlers: one vector per TX queue first, then
 * one per RX queue, and finally the event interrupt.  Each consumer
 * records its interrupt index (rid - 1) for later publication to the
 * device's shared-memory configuration.
 */
static int
vmxnet3_setup_msix_interrupts(struct vmxnet3_softc *sc)
{
	device_t dev;
	struct vmxnet3_txqueue *txq;
	struct vmxnet3_rxqueue *rxq;
	struct vmxnet3_interrupt *intr;
	int i, error;

	dev = sc->vmx_dev;
	intr = &sc->vmx_intrs[0];

	for (i = 0; i < sc->vmx_ntxqueues; i++, intr++) {
		txq = &sc->vmx_txq[i];
		error = bus_setup_intr(dev, intr->vmxi_irq, INTR_MPSAFE,
		    vmxnet3_txq_intr, txq, &intr->vmxi_handler, NULL);
		if (error)
			return (error);
		bus_describe_intr(dev, intr->vmxi_irq, intr->vmxi_handler,
		    "tq%d", i);
		txq->vxtxq_intr_idx = intr->vmxi_rid - 1;
	}

	for (i = 0; i < sc->vmx_nrxqueues; i++, intr++) {
		rxq = &sc->vmx_rxq[i];
		error = bus_setup_intr(dev, intr->vmxi_irq, INTR_MPSAFE,
		    vmxnet3_rxq_intr, rxq, &intr->vmxi_handler, NULL);
		if (error)
			return (error);
		bus_describe_intr(dev, intr->vmxi_irq, intr->vmxi_handler,
		    "rq%d", i);
		rxq->vxrxq_intr_idx = intr->vmxi_rid - 1;
	}

	/* The last vector is dedicated to device events. */
	error = bus_setup_intr(dev, intr->vmxi_irq, INTR_MPSAFE,
	    vmxnet3_event_intr, sc, &intr->vmxi_handler, NULL);
	if (error)
		return (error);
	bus_describe_intr(dev, intr->vmxi_irq, intr->vmxi_handler, "event");
	sc->vmx_event_intr_idx = intr->vmxi_rid - 1;

	return (0);
}
752 #endif
753
/*
 * Route every queue and the event interrupt through the single
 * MSI/INTx vector; all consumers share interrupt index 0.
 */
static int
vmxnet3_setup_legacy_interrupt(struct vmxnet3_softc *sc)
{
	struct vmxnet3_interrupt *intr;
	int i, error;

	intr = &sc->vmx_intrs[0];
	error = bus_setup_intr(sc->vmx_dev, intr->vmxi_irq,
	    INTR_MPSAFE, vmxnet3_legacy_intr, sc,
	    &intr->vmxi_handler, NULL);

	/* The indices are assigned even on error; caller checks error. */
	for (i = 0; i < sc->vmx_ntxqueues; i++)
		sc->vmx_txq[i].vxtxq_intr_idx = 0;
	for (i = 0; i < sc->vmx_nrxqueues; i++)
		sc->vmx_rxq[i].vxrxq_intr_idx = 0;
	sc->vmx_event_intr_idx = 0;

	return (error);
}
773
774 static void
vmxnet3_set_interrupt_idx(struct vmxnet3_softc * sc)775 vmxnet3_set_interrupt_idx(struct vmxnet3_softc *sc)
776 {
777 struct vmxnet3_txqueue *txq;
778 struct vmxnet3_txq_shared *txs;
779 struct vmxnet3_rxqueue *rxq;
780 struct vmxnet3_rxq_shared *rxs;
781 int i;
782
783 sc->vmx_ds->evintr = sc->vmx_event_intr_idx;
784
785 for (i = 0; i < sc->vmx_ntxqueues; i++) {
786 txq = &sc->vmx_txq[i];
787 txs = txq->vxtxq_ts;
788 txs->intr_idx = txq->vxtxq_intr_idx;
789 }
790
791 for (i = 0; i < sc->vmx_nrxqueues; i++) {
792 rxq = &sc->vmx_rxq[i];
793 rxs = rxq->vxrxq_rs;
794 rxs->intr_idx = rxq->vxrxq_intr_idx;
795 }
796 }
797
/*
 * Allocate the IRQ resources and hook up handlers according to the
 * interrupt type chosen by vmxnet3_alloc_interrupts(), then publish
 * the per-queue interrupt indices to the shared data area.
 */
static int
vmxnet3_setup_interrupts(struct vmxnet3_softc *sc)
{
	int error;

	error = vmxnet3_alloc_intr_resources(sc);
	if (error)
		return (error);

	switch (sc->vmx_intr_type) {
	case VMXNET3_IT_MSIX:
#ifdef __FreeBSD__
		error = vmxnet3_setup_msix_interrupts(sc);
#else
		/* MSI-X is never selected on this platform (see alloc path). */
		device_printf(sc->vmx_dev, "VMXNET3_IT_MSIX unsupported\n");
		error = ENXIO;
#endif
		break;
	case VMXNET3_IT_MSI:
	case VMXNET3_IT_LEGACY:
		error = vmxnet3_setup_legacy_interrupt(sc);
		break;
	default:
		panic("%s: invalid interrupt type %d", __func__,
		    sc->vmx_intr_type);
	}

	if (error == 0)
		vmxnet3_set_interrupt_idx(sc);

	return (error);
}
830
831 #ifdef __FreeBSD__
/*
 * Pick an interrupt type: start from the device's preference
 * (VMXNET3_CMD_GET_INTRCFG), then degrade MSI-X -> MSI -> legacy
 * INTx as each allocation attempt fails.
 */
static int
vmxnet3_alloc_interrupts(struct vmxnet3_softc *sc)
{
	device_t dev;
	uint32_t config;
	int error;

	dev = sc->vmx_dev;
	config = vmxnet3_read_cmd(sc, VMXNET3_CMD_GET_INTRCFG);

	/* Low two bits: interrupt type; next two: mask mode. */
	sc->vmx_intr_type = config & 0x03;
	sc->vmx_intr_mask_mode = (config >> 2) & 0x03;

	switch (sc->vmx_intr_type) {
	case VMXNET3_IT_AUTO:
		sc->vmx_intr_type = VMXNET3_IT_MSIX;
		/* FALLTHROUGH */
	case VMXNET3_IT_MSIX:
		error = vmxnet3_alloc_msix_interrupts(sc);
		if (error == 0)
			break;
		sc->vmx_intr_type = VMXNET3_IT_MSI;
		/* FALLTHROUGH */
	case VMXNET3_IT_MSI:
		error = vmxnet3_alloc_msi_interrupts(sc);
		if (error == 0)
			break;
		sc->vmx_intr_type = VMXNET3_IT_LEGACY;
		/* FALLTHROUGH */
	case VMXNET3_IT_LEGACY:
		error = vmxnet3_alloc_legacy_interrupts(sc);
		if (error == 0)
			break;
		/* FALLTHROUGH */
	default:
		sc->vmx_intr_type = -1;
		device_printf(dev, "cannot allocate any interrupt resources\n");
		return (ENXIO);
	}

	return (error);
}
874 #else
/*
 * DragonFly variant: MSI-X is not supported here, so the device's
 * AUTO/MSI-X preferences degrade to MSI and then to legacy INTx.
 */
static int
vmxnet3_alloc_interrupts(struct vmxnet3_softc *sc)
{
	device_t dev;
	uint32_t config;
	int error;

	dev = sc->vmx_dev;
	config = vmxnet3_read_cmd(sc, VMXNET3_CMD_GET_INTRCFG);

	/* Low two bits: interrupt type; next two: mask mode. */
	sc->vmx_intr_type = config & 0x03;
	sc->vmx_intr_mask_mode = (config >> 2) & 0x03;

	switch (sc->vmx_intr_type) {
	case VMXNET3_IT_AUTO:
		sc->vmx_intr_type = VMXNET3_IT_MSI;
		/* FALLTHROUGH */
	case VMXNET3_IT_MSI:
		error = vmxnet3_alloc_msi_interrupts(sc);
		if (error == 0)
			break;
		sc->vmx_intr_type = VMXNET3_IT_LEGACY;
		/* FALLTHROUGH */
	case VMXNET3_IT_LEGACY:
		error = vmxnet3_alloc_legacy_interrupts(sc);
		if (error == 0)
			break;
		/* FALLTHROUGH */
	case VMXNET3_IT_MSIX:
		/* FALLTHROUGH */
	default:
		sc->vmx_intr_type = -1;
		device_printf(dev, "cannot allocate any interrupt resources\n");
		return (ENXIO);
	}

	return (error);
}
912 #endif
913
914 static void
vmxnet3_free_interrupt(struct vmxnet3_softc * sc,struct vmxnet3_interrupt * intr)915 vmxnet3_free_interrupt(struct vmxnet3_softc *sc,
916 struct vmxnet3_interrupt *intr)
917 {
918 device_t dev;
919
920 dev = sc->vmx_dev;
921
922 if (intr->vmxi_handler != NULL) {
923 bus_teardown_intr(dev, intr->vmxi_irq, intr->vmxi_handler);
924 intr->vmxi_handler = NULL;
925 }
926
927 if (intr->vmxi_irq != NULL) {
928 bus_release_resource(dev, SYS_RES_IRQ, intr->vmxi_rid,
929 intr->vmxi_irq);
930 intr->vmxi_irq = NULL;
931 intr->vmxi_rid = -1;
932 }
933 }
934
935 #ifdef __FreeBSD__
936 static void
vmxnet3_free_interrupts(struct vmxnet3_softc * sc)937 vmxnet3_free_interrupts(struct vmxnet3_softc *sc)
938 {
939 int i;
940
941 for (i = 0; i < sc->vmx_nintrs; i++)
942 vmxnet3_free_interrupt(sc, &sc->vmx_intrs[i]);
943
944 if (sc->vmx_intr_type == VMXNET3_IT_MSI ||
945 sc->vmx_intr_type == VMXNET3_IT_MSIX)
946 pci_release_msi(sc->vmx_dev);
947 }
948 #else
949 static void
vmxnet3_free_interrupts(struct vmxnet3_softc * sc)950 vmxnet3_free_interrupts(struct vmxnet3_softc *sc)
951 {
952 int i;
953
954 for (i = 0; i < sc->vmx_nintrs; i++)
955 vmxnet3_free_interrupt(sc, &sc->vmx_intrs[i]);
956
957 if (sc->vmx_irq_type == PCI_INTR_TYPE_MSI)
958 pci_release_msi(sc->vmx_dev);
959 }
960 #endif
961
962 #ifndef VMXNET3_LEGACY_TX
/*
 * Create (but do not yet start) the taskqueue used for deferred
 * transmit work; threads are started later by
 * vmxnet3_start_taskqueue().
 */
static int
vmxnet3_alloc_taskqueue(struct vmxnet3_softc *sc)
{
	device_t dev;

	dev = sc->vmx_dev;

	sc->vmx_tq = taskqueue_create(device_get_nameunit(dev), M_NOWAIT,
	    taskqueue_thread_enqueue, &sc->vmx_tq);
	if (sc->vmx_tq == NULL)
		return (ENOMEM);

	return (0);
}
977
978 static void
vmxnet3_start_taskqueue(struct vmxnet3_softc * sc)979 vmxnet3_start_taskqueue(struct vmxnet3_softc *sc)
980 {
981 device_t dev;
982 int nthreads, error;
983
984 dev = sc->vmx_dev;
985
986 /*
987 * The taskqueue is typically not frequently used, so a dedicated
988 * thread for each queue is unnecessary.
989 */
990 nthreads = MAX(1, sc->vmx_ntxqueues / 2);
991
992 /*
993 * Most drivers just ignore the return value - it only fails
994 * with ENOMEM so an error is not likely. It is hard for us
995 * to recover from an error here.
996 */
997 error = taskqueue_start_threads(&sc->vmx_tq, nthreads, PI_NET,
998 "%s taskq", device_get_nameunit(dev));
999 if (error)
1000 device_printf(dev, "failed to start taskqueue: %d", error);
1001 }
1002
1003 static void
vmxnet3_drain_taskqueue(struct vmxnet3_softc * sc)1004 vmxnet3_drain_taskqueue(struct vmxnet3_softc *sc)
1005 {
1006 struct vmxnet3_txqueue *txq;
1007 int i;
1008
1009 if (sc->vmx_tq != NULL) {
1010 for (i = 0; i < sc->vmx_max_ntxqueues; i++) {
1011 txq = &sc->vmx_txq[i];
1012 taskqueue_drain(sc->vmx_tq, &txq->vxtxq_defrtask);
1013 }
1014 }
1015 }
1016
1017 static void
vmxnet3_free_taskqueue(struct vmxnet3_softc * sc)1018 vmxnet3_free_taskqueue(struct vmxnet3_softc *sc)
1019 {
1020 if (sc->vmx_tq != NULL) {
1021 taskqueue_free(sc->vmx_tq);
1022 sc->vmx_tq = NULL;
1023 }
1024 }
1025 #endif
1026
/*
 * Initialize the software state for RX queue "q": name, lock, and a
 * software rxbuf array per command ring.  The completion ring is
 * sized to cover all command rings combined.  Returns ENOMEM on
 * allocation failure; partial state is cleaned up by the caller's
 * unwind path.
 */
static int
vmxnet3_init_rxq(struct vmxnet3_softc *sc, int q)
{
	struct vmxnet3_rxqueue *rxq;
	struct vmxnet3_rxring *rxr;
	int i;

	rxq = &sc->vmx_rxq[q];

	ksnprintf(rxq->vxrxq_name, sizeof(rxq->vxrxq_name), "%s-rx%d",
	    device_get_nameunit(sc->vmx_dev), q);
	lockinit(&rxq->vxrxq_lock, rxq->vxrxq_name, 0, 0);

	rxq->vxrxq_sc = sc;
	rxq->vxrxq_id = q;

	for (i = 0; i < VMXNET3_RXRINGS_PERQ; i++) {
		rxr = &rxq->vxrxq_cmd_ring[i];
		rxr->vxrxr_rid = i;
		rxr->vxrxr_ndesc = sc->vmx_nrxdescs;
		/* One software rxbuf per command-ring descriptor. */
		rxr->vxrxr_rxbuf = kmalloc(rxr->vxrxr_ndesc *
		    sizeof(struct vmxnet3_rxbuf), M_DEVBUF, M_INTWAIT | M_ZERO);
		if (rxr->vxrxr_rxbuf == NULL)
			return (ENOMEM);

		/* Completion ring accumulates each command ring's size. */
		rxq->vxrxq_comp_ring.vxcr_ndesc += sc->vmx_nrxdescs;
	}

	return (0);
}
1057
1058 static int
vmxnet3_init_txq(struct vmxnet3_softc * sc,int q)1059 vmxnet3_init_txq(struct vmxnet3_softc *sc, int q)
1060 {
1061 struct vmxnet3_txqueue *txq;
1062 struct vmxnet3_txring *txr;
1063
1064 txq = &sc->vmx_txq[q];
1065 txr = &txq->vxtxq_cmd_ring;
1066
1067 ksnprintf(txq->vxtxq_name, sizeof(txq->vxtxq_name), "%s-tx%d",
1068 device_get_nameunit(sc->vmx_dev), q);
1069 lockinit(&txq->vxtxq_lock, txq->vxtxq_name, 0, 0);
1070
1071 txq->vxtxq_sc = sc;
1072 txq->vxtxq_id = q;
1073
1074 txr->vxtxr_ndesc = sc->vmx_ntxdescs;
1075 txr->vxtxr_txbuf = kmalloc(txr->vxtxr_ndesc *
1076 sizeof(struct vmxnet3_txbuf), M_DEVBUF, M_INTWAIT | M_ZERO);
1077 if (txr->vxtxr_txbuf == NULL)
1078 return (ENOMEM);
1079
1080 txq->vxtxq_comp_ring.vxcr_ndesc = sc->vmx_ntxdescs;
1081
1082 #ifndef VMXNET3_LEGACY_TX
1083 TASK_INIT(&txq->vxtxq_defrtask, 0, vmxnet3_txq_tq_deferred, txq);
1084
1085 txq->vxtxq_br = buf_ring_alloc(VMXNET3_DEF_BUFRING_SIZE, M_DEVBUF,
1086 M_NOWAIT, &txq->vxtxq_lock);
1087 if (txq->vxtxq_br == NULL)
1088 return (ENOMEM);
1089 #endif
1090
1091 return (0);
1092 }
1093
1094 static int
vmxnet3_alloc_rxtx_queues(struct vmxnet3_softc * sc)1095 vmxnet3_alloc_rxtx_queues(struct vmxnet3_softc *sc)
1096 {
1097 int i, error;
1098
1099 /*
1100 * Only attempt to create multiple queues if MSIX is available. MSIX is
1101 * disabled by default because its apparently broken for devices passed
1102 * through by at least ESXi 5.1. The hw.pci.honor_msi_blacklist tunable
1103 * must be set to zero for MSIX. This check prevents us from allocating
1104 * queue structures that we will not use.
1105 */
1106 if (sc->vmx_flags & VMXNET3_FLAG_NO_MSIX) {
1107 sc->vmx_max_nrxqueues = 1;
1108 sc->vmx_max_ntxqueues = 1;
1109 }
1110
1111 sc->vmx_rxq = kmalloc(sizeof(struct vmxnet3_rxqueue) *
1112 sc->vmx_max_nrxqueues, M_DEVBUF, M_INTWAIT | M_ZERO);
1113 sc->vmx_txq = kmalloc(sizeof(struct vmxnet3_txqueue) *
1114 sc->vmx_max_ntxqueues, M_DEVBUF, M_INTWAIT | M_ZERO);
1115 if (sc->vmx_rxq == NULL || sc->vmx_txq == NULL)
1116 return (ENOMEM);
1117
1118 for (i = 0; i < sc->vmx_max_nrxqueues; i++) {
1119 error = vmxnet3_init_rxq(sc, i);
1120 if (error)
1121 return (error);
1122 }
1123
1124 for (i = 0; i < sc->vmx_max_ntxqueues; i++) {
1125 error = vmxnet3_init_txq(sc, i);
1126 if (error)
1127 return (error);
1128 }
1129
1130 return (0);
1131 }
1132
1133 static void
vmxnet3_destroy_rxq(struct vmxnet3_rxqueue * rxq)1134 vmxnet3_destroy_rxq(struct vmxnet3_rxqueue *rxq)
1135 {
1136 struct vmxnet3_rxring *rxr;
1137 int i;
1138
1139 rxq->vxrxq_sc = NULL;
1140 rxq->vxrxq_id = -1;
1141
1142 for (i = 0; i < VMXNET3_RXRINGS_PERQ; i++) {
1143 rxr = &rxq->vxrxq_cmd_ring[i];
1144
1145 if (rxr->vxrxr_rxbuf != NULL) {
1146 kfree(rxr->vxrxr_rxbuf, M_DEVBUF);
1147 rxr->vxrxr_rxbuf = NULL;
1148 }
1149 }
1150
1151 #if 0 /* XXX */
1152 if (mtx_initialized(&rxq->vxrxq_lock) != 0)
1153 #endif
1154 lockuninit(&rxq->vxrxq_lock);
1155 }
1156
1157 static void
vmxnet3_destroy_txq(struct vmxnet3_txqueue * txq)1158 vmxnet3_destroy_txq(struct vmxnet3_txqueue *txq)
1159 {
1160 struct vmxnet3_txring *txr;
1161
1162 txr = &txq->vxtxq_cmd_ring;
1163
1164 txq->vxtxq_sc = NULL;
1165 txq->vxtxq_id = -1;
1166
1167 #ifndef VMXNET3_LEGACY_TX
1168 if (txq->vxtxq_br != NULL) {
1169 buf_ring_free(txq->vxtxq_br, M_DEVBUF);
1170 txq->vxtxq_br = NULL;
1171 }
1172 #endif
1173
1174 if (txr->vxtxr_txbuf != NULL) {
1175 kfree(txr->vxtxr_txbuf, M_DEVBUF);
1176 txr->vxtxr_txbuf = NULL;
1177 }
1178
1179 #if 0 /* XXX */
1180 if (mtx_initialized(&txq->vxtxq_lock) != 0)
1181 #endif
1182 lockuninit(&txq->vxtxq_lock);
1183 }
1184
1185 static void
vmxnet3_free_rxtx_queues(struct vmxnet3_softc * sc)1186 vmxnet3_free_rxtx_queues(struct vmxnet3_softc *sc)
1187 {
1188 int i;
1189
1190 if (sc->vmx_rxq != NULL) {
1191 for (i = 0; i < sc->vmx_max_nrxqueues; i++)
1192 vmxnet3_destroy_rxq(&sc->vmx_rxq[i]);
1193 kfree(sc->vmx_rxq, M_DEVBUF);
1194 sc->vmx_rxq = NULL;
1195 }
1196
1197 if (sc->vmx_txq != NULL) {
1198 for (i = 0; i < sc->vmx_max_ntxqueues; i++)
1199 vmxnet3_destroy_txq(&sc->vmx_txq[i]);
1200 kfree(sc->vmx_txq, M_DEVBUF);
1201 sc->vmx_txq = NULL;
1202 }
1203 }
1204
/*
 * Allocate the DMA regions shared with the device: the driver_shared
 * area, one combined region for the per-queue shared areas, and,
 * when RSS is enabled, the RSS shared area. Returns 0 or an errno
 * from vmxnet3_dma_malloc().
 */
static int
vmxnet3_alloc_shared_data(struct vmxnet3_softc *sc)
{
	device_t dev;
	uint8_t *kva;
	size_t size;
	int i, error;

	dev = sc->vmx_dev;

	size = sizeof(struct vmxnet3_driver_shared);
	error = vmxnet3_dma_malloc(sc, size, 1, &sc->vmx_ds_dma);
	if (error) {
		device_printf(dev, "cannot alloc shared memory\n");
		return (error);
	}
	sc->vmx_ds = (struct vmxnet3_driver_shared *) sc->vmx_ds_dma.dma_vaddr;

	/*
	 * One 128-byte-aligned allocation carved up below: all Tx queue
	 * shared areas first, then all Rx queue shared areas.
	 */
	size = sc->vmx_ntxqueues * sizeof(struct vmxnet3_txq_shared) +
	    sc->vmx_nrxqueues * sizeof(struct vmxnet3_rxq_shared);
	error = vmxnet3_dma_malloc(sc, size, 128, &sc->vmx_qs_dma);
	if (error) {
		device_printf(dev, "cannot alloc queue shared memory\n");
		return (error);
	}
	sc->vmx_qs = (void *) sc->vmx_qs_dma.dma_vaddr;
	kva = sc->vmx_qs;

	/* Hand out a slice of the combined region to each queue. */
	for (i = 0; i < sc->vmx_ntxqueues; i++) {
		sc->vmx_txq[i].vxtxq_ts = (struct vmxnet3_txq_shared *) kva;
		kva += sizeof(struct vmxnet3_txq_shared);
	}
	for (i = 0; i < sc->vmx_nrxqueues; i++) {
		sc->vmx_rxq[i].vxrxq_rs = (struct vmxnet3_rxq_shared *) kva;
		kva += sizeof(struct vmxnet3_rxq_shared);
	}

	if (sc->vmx_flags & VMXNET3_FLAG_RSS) {
		size = sizeof(struct vmxnet3_rss_shared);
		error = vmxnet3_dma_malloc(sc, size, 128, &sc->vmx_rss_dma);
		if (error) {
			device_printf(dev, "cannot alloc rss shared memory\n");
			return (error);
		}
		sc->vmx_rss =
		    (struct vmxnet3_rss_shared *) sc->vmx_rss_dma.dma_vaddr;
	}

	return (0);
}
1255
1256 static void
vmxnet3_free_shared_data(struct vmxnet3_softc * sc)1257 vmxnet3_free_shared_data(struct vmxnet3_softc *sc)
1258 {
1259
1260 if (sc->vmx_rss != NULL) {
1261 vmxnet3_dma_free(sc, &sc->vmx_rss_dma);
1262 sc->vmx_rss = NULL;
1263 }
1264
1265 if (sc->vmx_qs != NULL) {
1266 vmxnet3_dma_free(sc, &sc->vmx_qs_dma);
1267 sc->vmx_qs = NULL;
1268 }
1269
1270 if (sc->vmx_ds != NULL) {
1271 vmxnet3_dma_free(sc, &sc->vmx_ds_dma);
1272 sc->vmx_ds = NULL;
1273 }
1274 }
1275
/*
 * Allocate per-Tx-queue DMA resources: a buffer tag for mbuf
 * mappings, the command (descriptor) ring, the completion ring, and
 * one dmamap per descriptor slot. On failure, cleanup is left to
 * vmxnet3_free_txq_data() in the caller's error path.
 */
static int
vmxnet3_alloc_txq_data(struct vmxnet3_softc *sc)
{
	device_t dev;
	struct vmxnet3_txqueue *txq;
	struct vmxnet3_txring *txr;
	struct vmxnet3_comp_ring *txc;
	size_t descsz, compsz;
	int i, q, error;

	dev = sc->vmx_dev;

	for (q = 0; q < sc->vmx_ntxqueues; q++) {
		txq = &sc->vmx_txq[q];
		txr = &txq->vxtxq_cmd_ring;
		txc = &txq->vxtxq_comp_ring;

		/* Command and completion rings have the same depth. */
		descsz = txr->vxtxr_ndesc * sizeof(struct vmxnet3_txdesc);
		compsz = txr->vxtxr_ndesc * sizeof(struct vmxnet3_txcompdesc);

		error = bus_dma_tag_create(bus_get_dma_tag(dev),
		    1, 0,			/* alignment, boundary */
		    BUS_SPACE_MAXADDR,		/* lowaddr */
		    BUS_SPACE_MAXADDR,		/* highaddr */
		    VMXNET3_TX_MAXSIZE,		/* maxsize */
		    VMXNET3_TX_MAXSEGS,		/* nsegments */
		    VMXNET3_TX_MAXSEGSIZE,	/* maxsegsize */
		    0,				/* flags */
		    &txr->vxtxr_txtag);
		if (error) {
			device_printf(dev,
			    "unable to create Tx buffer tag for queue %d\n", q);
			return (error);
		}

		/* Device requires 512-byte-aligned descriptor rings. */
		error = vmxnet3_dma_malloc(sc, descsz, 512, &txr->vxtxr_dma);
		if (error) {
			device_printf(dev, "cannot alloc Tx descriptors for "
			    "queue %d error %d\n", q, error);
			return (error);
		}
		txr->vxtxr_txd =
		    (struct vmxnet3_txdesc *) txr->vxtxr_dma.dma_vaddr;

		error = vmxnet3_dma_malloc(sc, compsz, 512, &txc->vxcr_dma);
		if (error) {
			device_printf(dev, "cannot alloc Tx comp descriptors "
			    "for queue %d error %d\n", q, error);
			return (error);
		}
		txc->vxcr_u.txcd =
		    (struct vmxnet3_txcompdesc *) txc->vxcr_dma.dma_vaddr;

		/* One load/unload map per command-ring slot. */
		for (i = 0; i < txr->vxtxr_ndesc; i++) {
			error = bus_dmamap_create(txr->vxtxr_txtag, 0,
			    &txr->vxtxr_txbuf[i].vtxb_dmamap);
			if (error) {
				device_printf(dev, "unable to create Tx buf "
				    "dmamap for queue %d idx %d\n", q, i);
				return (error);
			}
		}
	}

	return (0);
}
1342
1343 static void
vmxnet3_free_txq_data(struct vmxnet3_softc * sc)1344 vmxnet3_free_txq_data(struct vmxnet3_softc *sc)
1345 {
1346 device_t dev;
1347 struct vmxnet3_txqueue *txq;
1348 struct vmxnet3_txring *txr;
1349 struct vmxnet3_comp_ring *txc;
1350 struct vmxnet3_txbuf *txb;
1351 int i, q;
1352
1353 dev = sc->vmx_dev;
1354
1355 for (q = 0; q < sc->vmx_ntxqueues; q++) {
1356 txq = &sc->vmx_txq[q];
1357 txr = &txq->vxtxq_cmd_ring;
1358 txc = &txq->vxtxq_comp_ring;
1359
1360 for (i = 0; i < txr->vxtxr_ndesc; i++) {
1361 txb = &txr->vxtxr_txbuf[i];
1362 if (txb->vtxb_dmamap != NULL) {
1363 bus_dmamap_destroy(txr->vxtxr_txtag,
1364 txb->vtxb_dmamap);
1365 txb->vtxb_dmamap = NULL;
1366 }
1367 }
1368
1369 if (txc->vxcr_u.txcd != NULL) {
1370 vmxnet3_dma_free(sc, &txc->vxcr_dma);
1371 txc->vxcr_u.txcd = NULL;
1372 }
1373
1374 if (txr->vxtxr_txd != NULL) {
1375 vmxnet3_dma_free(sc, &txr->vxtxr_dma);
1376 txr->vxtxr_txd = NULL;
1377 }
1378
1379 if (txr->vxtxr_txtag != NULL) {
1380 bus_dma_tag_destroy(txr->vxtxr_txtag);
1381 txr->vxtxr_txtag = NULL;
1382 }
1383 }
1384 }
1385
1386 static int
vmxnet3_alloc_rxq_data(struct vmxnet3_softc * sc)1387 vmxnet3_alloc_rxq_data(struct vmxnet3_softc *sc)
1388 {
1389 device_t dev;
1390 struct vmxnet3_rxqueue *rxq;
1391 struct vmxnet3_rxring *rxr;
1392 struct vmxnet3_comp_ring *rxc;
1393 int descsz, compsz;
1394 int i, j, q, error;
1395
1396 dev = sc->vmx_dev;
1397
1398 for (q = 0; q < sc->vmx_nrxqueues; q++) {
1399 rxq = &sc->vmx_rxq[q];
1400 rxc = &rxq->vxrxq_comp_ring;
1401 compsz = 0;
1402
1403 for (i = 0; i < VMXNET3_RXRINGS_PERQ; i++) {
1404 rxr = &rxq->vxrxq_cmd_ring[i];
1405
1406 descsz = rxr->vxrxr_ndesc *
1407 sizeof(struct vmxnet3_rxdesc);
1408 compsz += rxr->vxrxr_ndesc *
1409 sizeof(struct vmxnet3_rxcompdesc);
1410
1411 error = bus_dma_tag_create(bus_get_dma_tag(dev),
1412 1, 0, /* alignment, boundary */
1413 BUS_SPACE_MAXADDR, /* lowaddr */
1414 BUS_SPACE_MAXADDR, /* highaddr */
1415 MJUMPAGESIZE, /* maxsize */
1416 1, /* nsegments */
1417 MJUMPAGESIZE, /* maxsegsize */
1418 0, /* flags */
1419 &rxr->vxrxr_rxtag);
1420 if (error) {
1421 device_printf(dev,
1422 "unable to create Rx buffer tag for "
1423 "queue %d\n", q);
1424 return (error);
1425 }
1426
1427 error = vmxnet3_dma_malloc(sc, descsz, 512,
1428 &rxr->vxrxr_dma);
1429 if (error) {
1430 device_printf(dev, "cannot allocate Rx "
1431 "descriptors for queue %d/%d error %d\n",
1432 i, q, error);
1433 return (error);
1434 }
1435 rxr->vxrxr_rxd =
1436 (struct vmxnet3_rxdesc *) rxr->vxrxr_dma.dma_vaddr;
1437 }
1438
1439 error = vmxnet3_dma_malloc(sc, compsz, 512, &rxc->vxcr_dma);
1440 if (error) {
1441 device_printf(dev, "cannot alloc Rx comp descriptors "
1442 "for queue %d error %d\n", q, error);
1443 return (error);
1444 }
1445 rxc->vxcr_u.rxcd =
1446 (struct vmxnet3_rxcompdesc *) rxc->vxcr_dma.dma_vaddr;
1447
1448 for (i = 0; i < VMXNET3_RXRINGS_PERQ; i++) {
1449 rxr = &rxq->vxrxq_cmd_ring[i];
1450
1451 error = bus_dmamap_create(rxr->vxrxr_rxtag, 0,
1452 &rxr->vxrxr_spare_dmap);
1453 if (error) {
1454 device_printf(dev, "unable to create spare "
1455 "dmamap for queue %d/%d error %d\n",
1456 q, i, error);
1457 return (error);
1458 }
1459
1460 for (j = 0; j < rxr->vxrxr_ndesc; j++) {
1461 error = bus_dmamap_create(rxr->vxrxr_rxtag, 0,
1462 &rxr->vxrxr_rxbuf[j].vrxb_dmamap);
1463 if (error) {
1464 device_printf(dev, "unable to create "
1465 "dmamap for queue %d/%d slot %d "
1466 "error %d\n",
1467 q, i, j, error);
1468 return (error);
1469 }
1470 }
1471 }
1472 }
1473
1474 return (0);
1475 }
1476
1477 static void
vmxnet3_free_rxq_data(struct vmxnet3_softc * sc)1478 vmxnet3_free_rxq_data(struct vmxnet3_softc *sc)
1479 {
1480 device_t dev;
1481 struct vmxnet3_rxqueue *rxq;
1482 struct vmxnet3_rxring *rxr;
1483 struct vmxnet3_comp_ring *rxc;
1484 struct vmxnet3_rxbuf *rxb;
1485 int i, j, q;
1486
1487 dev = sc->vmx_dev;
1488
1489 for (q = 0; q < sc->vmx_nrxqueues; q++) {
1490 rxq = &sc->vmx_rxq[q];
1491 rxc = &rxq->vxrxq_comp_ring;
1492
1493 for (i = 0; i < VMXNET3_RXRINGS_PERQ; i++) {
1494 rxr = &rxq->vxrxq_cmd_ring[i];
1495
1496 if (rxr->vxrxr_spare_dmap != NULL) {
1497 bus_dmamap_destroy(rxr->vxrxr_rxtag,
1498 rxr->vxrxr_spare_dmap);
1499 rxr->vxrxr_spare_dmap = NULL;
1500 }
1501
1502 for (j = 0; j < rxr->vxrxr_ndesc; j++) {
1503 rxb = &rxr->vxrxr_rxbuf[j];
1504 if (rxb->vrxb_dmamap != NULL) {
1505 bus_dmamap_destroy(rxr->vxrxr_rxtag,
1506 rxb->vrxb_dmamap);
1507 rxb->vrxb_dmamap = NULL;
1508 }
1509 }
1510 }
1511
1512 if (rxc->vxcr_u.rxcd != NULL) {
1513 vmxnet3_dma_free(sc, &rxc->vxcr_dma);
1514 rxc->vxcr_u.rxcd = NULL;
1515 }
1516
1517 for (i = 0; i < VMXNET3_RXRINGS_PERQ; i++) {
1518 rxr = &rxq->vxrxq_cmd_ring[i];
1519
1520 if (rxr->vxrxr_rxd != NULL) {
1521 vmxnet3_dma_free(sc, &rxr->vxrxr_dma);
1522 rxr->vxrxr_rxd = NULL;
1523 }
1524
1525 if (rxr->vxrxr_rxtag != NULL) {
1526 bus_dma_tag_destroy(rxr->vxrxr_rxtag);
1527 rxr->vxrxr_rxtag = NULL;
1528 }
1529 }
1530 }
1531 }
1532
/*
 * Allocate DMA resources for all Tx queues, then all Rx queues.
 * Returns 0 or the first errno encountered.
 */
static int
vmxnet3_alloc_queue_data(struct vmxnet3_softc *sc)
{
	int error;

	if ((error = vmxnet3_alloc_txq_data(sc)) != 0)
		return (error);

	return (vmxnet3_alloc_rxq_data(sc));
}
1548
1549 static void
vmxnet3_free_queue_data(struct vmxnet3_softc * sc)1550 vmxnet3_free_queue_data(struct vmxnet3_softc *sc)
1551 {
1552
1553 if (sc->vmx_rxq != NULL)
1554 vmxnet3_free_rxq_data(sc);
1555
1556 if (sc->vmx_txq != NULL)
1557 vmxnet3_free_txq_data(sc);
1558 }
1559
1560 static int
vmxnet3_alloc_mcast_table(struct vmxnet3_softc * sc)1561 vmxnet3_alloc_mcast_table(struct vmxnet3_softc *sc)
1562 {
1563 int error;
1564
1565 error = vmxnet3_dma_malloc(sc, VMXNET3_MULTICAST_MAX * ETHER_ADDR_LEN,
1566 32, &sc->vmx_mcast_dma);
1567 if (error)
1568 device_printf(sc->vmx_dev, "unable to alloc multicast table\n");
1569 else
1570 sc->vmx_mcast = sc->vmx_mcast_dma.dma_vaddr;
1571
1572 return (error);
1573 }
1574
1575 static void
vmxnet3_free_mcast_table(struct vmxnet3_softc * sc)1576 vmxnet3_free_mcast_table(struct vmxnet3_softc *sc)
1577 {
1578
1579 if (sc->vmx_mcast != NULL) {
1580 vmxnet3_dma_free(sc, &sc->vmx_mcast_dma);
1581 sc->vmx_mcast = NULL;
1582 }
1583 }
1584
/*
 * Populate the driver_shared area and the per-queue shared areas
 * with the values that do not change across reinits: magic/version
 * info, DMA addresses of the rings and tables, and interrupt
 * configuration. Called once after all DMA memory is allocated.
 */
static void
vmxnet3_init_shared_data(struct vmxnet3_softc *sc)
{
	struct vmxnet3_driver_shared *ds;
	struct vmxnet3_txqueue *txq;
	struct vmxnet3_txq_shared *txs;
	struct vmxnet3_rxqueue *rxq;
	struct vmxnet3_rxq_shared *rxs;
	int i;

	ds = sc->vmx_ds;

	/*
	 * Initialize fields of the shared data that remains the same across
	 * reinits. Note the shared data is zero'd when allocated.
	 */

	ds->magic = VMXNET3_REV1_MAGIC;

	/* DriverInfo */
	ds->version = VMXNET3_DRIVER_VERSION;
	/* Report guest OS word size via the LP64 compile-time check. */
	ds->guest = VMXNET3_GOS_FREEBSD |
#ifdef __LP64__
	    VMXNET3_GOS_64BIT;
#else
	    VMXNET3_GOS_32BIT;
#endif
	ds->vmxnet3_revision = 1;
	ds->upt_version = 1;

	/* Misc. conf */
	ds->driver_data = vtophys(sc);
	ds->driver_data_len = sizeof(struct vmxnet3_softc);
	ds->queue_shared = sc->vmx_qs_dma.dma_paddr;
	ds->queue_shared_len = sc->vmx_qs_dma.dma_size;
	ds->nrxsg_max = sc->vmx_max_rxsegs;

	/* RSS conf */
	if (sc->vmx_flags & VMXNET3_FLAG_RSS) {
		ds->rss.version = 1;
		ds->rss.paddr = sc->vmx_rss_dma.dma_paddr;
		ds->rss.len = sc->vmx_rss_dma.dma_size;
	}

	/* Interrupt control. */
	ds->automask = sc->vmx_intr_mask_mode == VMXNET3_IMM_AUTO;
	ds->nintr = sc->vmx_nintrs;
	ds->evintr = sc->vmx_event_intr_idx;
	ds->ictrl = VMXNET3_ICTRL_DISABLE_ALL;

	for (i = 0; i < sc->vmx_nintrs; i++)
		ds->modlevel[i] = UPT1_IMOD_ADAPTIVE;

	/* Receive filter. */
	ds->mcast_table = sc->vmx_mcast_dma.dma_paddr;
	ds->mcast_tablelen = sc->vmx_mcast_dma.dma_size;

	/* Tx queues */
	for (i = 0; i < sc->vmx_ntxqueues; i++) {
		txq = &sc->vmx_txq[i];
		txs = txq->vxtxq_ts;

		txs->cmd_ring = txq->vxtxq_cmd_ring.vxtxr_dma.dma_paddr;
		txs->cmd_ring_len = txq->vxtxq_cmd_ring.vxtxr_ndesc;
		txs->comp_ring = txq->vxtxq_comp_ring.vxcr_dma.dma_paddr;
		txs->comp_ring_len = txq->vxtxq_comp_ring.vxcr_ndesc;
		txs->driver_data = vtophys(txq);
		txs->driver_data_len = sizeof(struct vmxnet3_txqueue);
	}

	/* Rx queues: each queue has two command rings plus one comp ring. */
	for (i = 0; i < sc->vmx_nrxqueues; i++) {
		rxq = &sc->vmx_rxq[i];
		rxs = rxq->vxrxq_rs;

		rxs->cmd_ring[0] = rxq->vxrxq_cmd_ring[0].vxrxr_dma.dma_paddr;
		rxs->cmd_ring_len[0] = rxq->vxrxq_cmd_ring[0].vxrxr_ndesc;
		rxs->cmd_ring[1] = rxq->vxrxq_cmd_ring[1].vxrxr_dma.dma_paddr;
		rxs->cmd_ring_len[1] = rxq->vxrxq_cmd_ring[1].vxrxr_ndesc;
		rxs->comp_ring = rxq->vxrxq_comp_ring.vxcr_dma.dma_paddr;
		rxs->comp_ring_len = rxq->vxrxq_comp_ring.vxcr_ndesc;
		rxs->driver_data = vtophys(rxq);
		rxs->driver_data_len = sizeof(struct vmxnet3_rxqueue);
	}
}
1670
1671 static void
vmxnet3_init_hwassist(struct vmxnet3_softc * sc)1672 vmxnet3_init_hwassist(struct vmxnet3_softc *sc)
1673 {
1674 struct ifnet *ifp = sc->vmx_ifp;
1675 uint64_t hwassist;
1676
1677 hwassist = 0;
1678 if (ifp->if_capenable & IFCAP_TXCSUM)
1679 hwassist |= VMXNET3_CSUM_OFFLOAD;
1680 if (ifp->if_capenable & IFCAP_TXCSUM_IPV6)
1681 hwassist |= VMXNET3_CSUM_OFFLOAD_IPV6;
1682 #if 0 /* XXX TSO */
1683 if (ifp->if_capenable & IFCAP_TSO4)
1684 hwassist |= CSUM_IP_TSO;
1685 if (ifp->if_capenable & IFCAP_TSO6)
1686 hwassist |= CSUM_IP6_TSO;
1687 #endif
1688 ifp->if_hwassist = hwassist;
1689 }
1690
1691 static void
vmxnet3_reinit_interface(struct vmxnet3_softc * sc)1692 vmxnet3_reinit_interface(struct vmxnet3_softc *sc)
1693 {
1694 struct ifnet *ifp;
1695
1696 ifp = sc->vmx_ifp;
1697
1698 /* Use the current MAC address. */
1699 bcopy(IF_LLADDR(sc->vmx_ifp), sc->vmx_lladdr, ETHER_ADDR_LEN);
1700 vmxnet3_set_lladdr(sc);
1701
1702 vmxnet3_init_hwassist(sc);
1703 }
1704
/*
 * Fill in the RSS shared area: hash types, Toeplitz key, and the
 * indirection table that spreads flows round-robin over the Rx
 * queues. Called on (re)init when VMXNET3_FLAG_RSS is set.
 */
static void
vmxnet3_reinit_rss_shared_data(struct vmxnet3_softc *sc)
{
	/*
	 * Use the same key as the Linux driver until FreeBSD can do
	 * RSS (presumably Toeplitz) in software.
	 */
	static const uint8_t rss_key[UPT1_RSS_MAX_KEY_SIZE] = {
	    0x3b, 0x56, 0xd1, 0x56, 0x13, 0x4a, 0xe7, 0xac,
	    0xe8, 0x79, 0x09, 0x75, 0xe8, 0x65, 0x79, 0x28,
	    0x35, 0x12, 0xb9, 0x56, 0x7c, 0x76, 0x4b, 0x70,
	    0xd8, 0x56, 0xa3, 0x18, 0x9b, 0x0a, 0xee, 0xf3,
	    0x96, 0xa6, 0x9f, 0x8f, 0x9e, 0x8c, 0x90, 0xc9,
	};

	struct vmxnet3_driver_shared *ds;
	struct vmxnet3_rss_shared *rss;
	int i;

	ds = sc->vmx_ds;
	rss = sc->vmx_rss;

	/* Hash IPv4/IPv6 headers and TCP over both. */
	rss->hash_type =
	    UPT1_RSS_HASH_TYPE_IPV4 | UPT1_RSS_HASH_TYPE_TCP_IPV4 |
	    UPT1_RSS_HASH_TYPE_IPV6 | UPT1_RSS_HASH_TYPE_TCP_IPV6;
	rss->hash_func = UPT1_RSS_HASH_FUNC_TOEPLITZ;
	rss->hash_key_size = UPT1_RSS_MAX_KEY_SIZE;
	rss->ind_table_size = UPT1_RSS_MAX_IND_TABLE_SIZE;
	memcpy(rss->hash_key, rss_key, UPT1_RSS_MAX_KEY_SIZE);

	/* Spread hash buckets evenly across the Rx queues. */
	for (i = 0; i < UPT1_RSS_MAX_IND_TABLE_SIZE; i++)
		rss->ind_table[i] = i % sc->vmx_nrxqueues;
}
1738
1739 static void
vmxnet3_reinit_shared_data(struct vmxnet3_softc * sc)1740 vmxnet3_reinit_shared_data(struct vmxnet3_softc *sc)
1741 {
1742 struct ifnet *ifp;
1743 struct vmxnet3_driver_shared *ds;
1744
1745 ifp = sc->vmx_ifp;
1746 ds = sc->vmx_ds;
1747
1748 ds->mtu = ifp->if_mtu;
1749 ds->ntxqueue = sc->vmx_ntxqueues;
1750 ds->nrxqueue = sc->vmx_nrxqueues;
1751
1752 ds->upt_features = 0;
1753 if (ifp->if_capenable & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6))
1754 ds->upt_features |= UPT1_F_CSUM;
1755 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
1756 ds->upt_features |= UPT1_F_VLAN;
1757 #if 0 /* XXX LRO */
1758 if (ifp->if_capenable & IFCAP_LRO)
1759 ds->upt_features |= UPT1_F_LRO;
1760 #endif
1761
1762 if (sc->vmx_flags & VMXNET3_FLAG_RSS) {
1763 ds->upt_features |= UPT1_F_RSS;
1764 vmxnet3_reinit_rss_shared_data(sc);
1765 }
1766
1767 vmxnet3_write_bar1(sc, VMXNET3_BAR1_DSL, sc->vmx_ds_dma.dma_paddr);
1768 vmxnet3_write_bar1(sc, VMXNET3_BAR1_DSH,
1769 (uint64_t) sc->vmx_ds_dma.dma_paddr >> 32);
1770 }
1771
/*
 * Allocate all device-visible data structures (shared areas, queue
 * rings, multicast table) and populate the static shared fields.
 * Returns 0 or the first errno encountered.
 */
static int
vmxnet3_alloc_data(struct vmxnet3_softc *sc)
{
	int error;

	if ((error = vmxnet3_alloc_shared_data(sc)) != 0)
		return (error);
	if ((error = vmxnet3_alloc_queue_data(sc)) != 0)
		return (error);
	if ((error = vmxnet3_alloc_mcast_table(sc)) != 0)
		return (error);

	vmxnet3_init_shared_data(sc);
	return (0);
}
1793
/*
 * Release everything vmxnet3_alloc_data() allocated, in reverse
 * order of allocation.
 */
static void
vmxnet3_free_data(struct vmxnet3_softc *sc)
{
	vmxnet3_free_mcast_table(sc);
	vmxnet3_free_queue_data(sc);
	vmxnet3_free_shared_data(sc);
}
1802
1803 static int
vmxnet3_setup_interface(struct vmxnet3_softc * sc)1804 vmxnet3_setup_interface(struct vmxnet3_softc *sc)
1805 {
1806 device_t dev;
1807 struct ifnet *ifp;
1808
1809 dev = sc->vmx_dev;
1810
1811 ifp = sc->vmx_ifp = if_alloc(IFT_ETHER);
1812 if (ifp == NULL) {
1813 device_printf(dev, "cannot allocate ifnet structure\n");
1814 return (ENOSPC);
1815 }
1816
1817 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1818 ifp->if_baudrate = IF_Gbps(10ULL);
1819 ifp->if_softc = sc;
1820 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1821 ifp->if_init = vmxnet3_init;
1822 ifp->if_ioctl = vmxnet3_ioctl;
1823 #if 0 /* XXX TSO */
1824 ifp->if_hw_tsomax = 65536 - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
1825 ifp->if_hw_tsomaxsegcount = VMXNET3_TX_MAXSEGS;
1826 ifp->if_hw_tsomaxsegsize = VMXNET3_TX_MAXSEGSIZE;
1827 #endif
1828
1829 #ifdef VMXNET3_LEGACY_TX
1830 ifp->if_start = vmxnet3_start;
1831 ifq_set_maxlen(&ifp->if_snd, sc->vmx_ntxdescs - 1);
1832 ifq_set_ready(&ifp->if_snd);
1833 #else
1834 ifp->if_transmit = vmxnet3_txq_mq_start;
1835 ifp->if_qflush = vmxnet3_qflush;
1836 #endif
1837
1838 vmxnet3_get_lladdr(sc);
1839 ether_ifattach(ifp, sc->vmx_lladdr, NULL);
1840
1841 ifp->if_capabilities |= IFCAP_RXCSUM | IFCAP_TXCSUM;
1842 ifp->if_capabilities |= IFCAP_RXCSUM_IPV6 | IFCAP_TXCSUM_IPV6;
1843 #if 0 /* XXX TSO */
1844 ifp->if_capabilities |= IFCAP_TSO4 | IFCAP_TSO6;
1845 #endif
1846 ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING |
1847 IFCAP_VLAN_HWCSUM;
1848 ifp->if_capenable = ifp->if_capabilities;
1849
1850 #if 0 /* XXX LRO / VLAN_HWFILTER */
1851 /* These capabilities are not enabled by default. */
1852 ifp->if_capabilities |= /* IFCAP_LRO | */ IFCAP_VLAN_HWFILTER;
1853 #endif
1854
1855 sc->vmx_vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
1856 vmxnet3_register_vlan, sc, EVENTHANDLER_PRI_FIRST);
1857 sc->vmx_vlan_detach = EVENTHANDLER_REGISTER(vlan_config,
1858 vmxnet3_unregister_vlan, sc, EVENTHANDLER_PRI_FIRST);
1859
1860 ifmedia_init(&sc->vmx_media, 0, vmxnet3_media_change,
1861 vmxnet3_media_status);
1862 ifmedia_add(&sc->vmx_media, IFM_ETHER | IFM_AUTO, 0, NULL);
1863 ifmedia_set(&sc->vmx_media, IFM_ETHER | IFM_AUTO);
1864
1865 return (0);
1866 }
1867
/*
 * Handle a device event interrupt: acknowledge the pending events,
 * refresh link state, report queue errors, and reinitialize the
 * device if a Tx/Rx queue error was signalled. Runs under the core
 * lock.
 */
static void
vmxnet3_evintr(struct vmxnet3_softc *sc)
{
	device_t dev;
	struct ifnet *ifp;
	struct vmxnet3_txq_shared *ts;
	struct vmxnet3_rxq_shared *rs;
	uint32_t event;
	int reset;

	dev = sc->vmx_dev;
	ifp = sc->vmx_ifp;
	reset = 0;

	VMXNET3_CORE_LOCK(sc);

	/* Clear events. */
	event = sc->vmx_ds->event;
	vmxnet3_write_bar1(sc, VMXNET3_BAR1_EVENT, event);

	if (event & VMXNET3_EVENT_LINK) {
		vmxnet3_link_status(sc);
		/* Kick the Tx queues now that the link is back up. */
		if (sc->vmx_link_active != 0)
			vmxnet3_tx_start_all(sc);
	}

	if (event & (VMXNET3_EVENT_TQERROR | VMXNET3_EVENT_RQERROR)) {
		reset = 1;
		/* GET_STATUS refreshes the per-queue shared error info. */
		vmxnet3_read_cmd(sc, VMXNET3_CMD_GET_STATUS);
		ts = sc->vmx_txq[0].vxtxq_ts;
		if (ts->stopped != 0)
			device_printf(dev, "Tx queue error %#x\n", ts->error);
		rs = sc->vmx_rxq[0].vxrxq_rs;
		if (rs->stopped != 0)
			device_printf(dev, "Rx queue error %#x\n", rs->error);
		device_printf(dev, "Rx/Tx queue error event ... resetting\n");
	}

	if (event & VMXNET3_EVENT_DIC)
		device_printf(dev, "device implementation change event\n");
	if (event & VMXNET3_EVENT_DEBUG)
		device_printf(dev, "debug event\n");

	if (reset != 0) {
		/* Clearing IFF_RUNNING forces a full reinit below. */
		ifp->if_flags &= ~IFF_RUNNING;
		vmxnet3_init_locked(sc);
	}

	VMXNET3_CORE_UNLOCK(sc);
}
1918
/*
 * Reap completed transmit descriptors: walk the completion ring
 * until the generation bit no longer matches, unloading and freeing
 * the mbuf of each finished packet and advancing the command ring's
 * next-to-clean index. Clears the watchdog when the ring drains.
 * Called with the Tx queue lock held.
 */
static void
vmxnet3_txq_eof(struct vmxnet3_txqueue *txq)
{
	struct vmxnet3_softc *sc;
	struct ifnet *ifp;
	struct vmxnet3_txring *txr;
	struct vmxnet3_comp_ring *txc;
	struct vmxnet3_txcompdesc *txcd;
	struct vmxnet3_txbuf *txb;
	struct mbuf *m;
	u_int sop;

	sc = txq->vxtxq_sc;
	ifp = sc->vmx_ifp;
	txr = &txq->vxtxq_cmd_ring;
	txc = &txq->vxtxq_comp_ring;

	VMXNET3_TXQ_LOCK_ASSERT(txq);

	for (;;) {
		txcd = &txc->vxcr_u.txcd[txc->vxcr_next];
		/* A stale generation bit means no more completions. */
		if (txcd->gen != txc->vxcr_gen)
			break;
		/* Read barrier: gen bit must be read before the payload. */
		vmxnet3_barrier(sc, VMXNET3_BARRIER_RD);

		/* Generation flips each time the completion ring wraps. */
		if (++txc->vxcr_next == txc->vxcr_ndesc) {
			txc->vxcr_next = 0;
			txc->vxcr_gen ^= 1;
		}

		sop = txr->vxtxr_next;
		txb = &txr->vxtxr_txbuf[sop];

		if ((m = txb->vtxb_m) != NULL) {
			bus_dmamap_sync(txr->vxtxr_txtag, txb->vtxb_dmamap,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(txr->vxtxr_txtag, txb->vtxb_dmamap);

			txq->vxtxq_stats.vmtxs_opackets++;
			txq->vxtxq_stats.vmtxs_obytes += m->m_pkthdr.len;
			if (m->m_flags & M_MCAST)
				txq->vxtxq_stats.vmtxs_omcasts++;

			m_freem(m);
			txb->vtxb_m = NULL;
		}

		/* Skip past all descriptors of the completed packet. */
		txr->vxtxr_next = (txcd->eop_idx + 1) % txr->vxtxr_ndesc;
	}

	/* Ring fully drained: disarm the transmit watchdog. */
	if (txr->vxtxr_head == txr->vxtxr_next)
		txq->vxtxq_watchdog = 0;
}
1972
/*
 * Allocate and DMA-map a fresh receive buffer for the ring's current
 * fill slot, swapping in the ring's spare dmamap so the old buffer's
 * map (if any) becomes the new spare. On success the descriptor is
 * published to the host and the fill index advanced.
 * Returns 0, ENOBUFS, or a busdma error.
 */
static int
vmxnet3_newbuf(struct vmxnet3_softc *sc, struct vmxnet3_rxring *rxr)
{
	struct ifnet *ifp;
	struct mbuf *m;
	struct vmxnet3_rxdesc *rxd;
	struct vmxnet3_rxbuf *rxb;
	bus_dma_tag_t tag;
	bus_dmamap_t dmap;
	bus_dma_segment_t segs[1];
	int idx, clsize, btype, flags, nsegs, error;

	ifp = sc->vmx_ifp;
	tag = rxr->vxrxr_rxtag;
	dmap = rxr->vxrxr_spare_dmap;
	idx = rxr->vxrxr_fill;
	rxd = &rxr->vxrxr_rxd[idx];
	rxb = &rxr->vxrxr_rxbuf[idx];

#ifdef VMXNET3_FAILPOINTS
	KFAIL_POINT_CODE(VMXNET3_FP, newbuf, return ENOBUFS);
	if (rxr->vxrxr_rid != 0)
		KFAIL_POINT_CODE(VMXNET3_FP, newbuf_body_only, return ENOBUFS);
#endif

	/*
	 * Ring 0 slots at chain boundaries get HEAD buffers (normal
	 * cluster); everything else gets a jumbo-page BODY buffer.
	 * Both are allocated with M_PKTHDR (the header flag is
	 * stripped for BODY buffers after the DMA load below).
	 */
	if (rxr->vxrxr_rid == 0 && (idx % sc->vmx_rx_max_chain) == 0) {
		flags = M_PKTHDR;
		clsize = MCLBYTES;
		btype = VMXNET3_BTYPE_HEAD;
	} else {
		flags = M_PKTHDR;
		clsize = MJUMPAGESIZE;
		btype = VMXNET3_BTYPE_BODY;
	}

	m = m_getjcl(M_NOWAIT, MT_DATA, flags, clsize);
	if (m == NULL) {
		sc->vmx_stats.vmst_mgetcl_failed++;
		return (ENOBUFS);
	}

	if (btype == VMXNET3_BTYPE_HEAD) {
		m->m_len = m->m_pkthdr.len = clsize;
		/* Align the IP header of the received frame. */
		m_adj(m, ETHER_ALIGN);
	} else
		m->m_len = clsize;

	/* Load into the spare map; the old buffer's map stays intact. */
	error = bus_dmamap_load_mbuf_segment(tag, dmap, m, &segs[0], 1, &nsegs,
	    BUS_DMA_NOWAIT);
	if (error) {
		m_freem(m);
		sc->vmx_stats.vmst_mbuf_load_failed++;
		return (error);
	}
	KASSERT(nsegs == 1,
	    ("%s: mbuf %p with too many segments %d", __func__, m, nsegs));
	if (btype == VMXNET3_BTYPE_BODY)
		m->m_flags &= ~M_PKTHDR;

	/* Retire the previous buffer in this slot, if present. */
	if (rxb->vrxb_m != NULL) {
		bus_dmamap_sync(tag, rxb->vrxb_dmamap, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(tag, rxb->vrxb_dmamap);
	}

	/* Swap maps: old slot map becomes the new spare. */
	rxr->vxrxr_spare_dmap = rxb->vrxb_dmamap;
	rxb->vrxb_dmamap = dmap;
	rxb->vrxb_m = m;

	/* Publish the descriptor; gen bit hands it to the host. */
	rxd->addr = segs[0].ds_addr;
	rxd->len = segs[0].ds_len;
	rxd->btype = btype;
	rxd->gen = rxr->vxrxr_gen;

	vmxnet3_rxr_increment_fill(rxr);
	return (0);
}
2049
2050 static void
vmxnet3_rxq_eof_discard(struct vmxnet3_rxqueue * rxq,struct vmxnet3_rxring * rxr,int idx)2051 vmxnet3_rxq_eof_discard(struct vmxnet3_rxqueue *rxq,
2052 struct vmxnet3_rxring *rxr, int idx)
2053 {
2054 struct vmxnet3_rxdesc *rxd;
2055
2056 rxd = &rxr->vxrxr_rxd[idx];
2057 rxd->gen = rxr->vxrxr_gen;
2058 vmxnet3_rxr_increment_fill(rxr);
2059 }
2060
/*
 * Discard the remainder of an in-progress receive chain: consume
 * completion descriptors and recycle the matching command-ring
 * descriptors until the end-of-packet completion is seen.
 */
static void
vmxnet3_rxq_discard_chain(struct vmxnet3_rxqueue *rxq)
{
	struct vmxnet3_softc *sc;
	struct vmxnet3_rxring *rxr;
	struct vmxnet3_comp_ring *rxc;
	struct vmxnet3_rxcompdesc *rxcd;
	int idx, eof;

	sc = rxq->vxrxq_sc;
	rxc = &rxq->vxrxq_comp_ring;

	do {
		rxcd = &rxc->vxcr_u.rxcd[rxc->vxcr_next];
		if (rxcd->gen != rxc->vxcr_gen)
			break;		/* Not expected. */
		/* Read barrier: gen bit must be read before the payload. */
		vmxnet3_barrier(sc, VMXNET3_BARRIER_RD);

		/* Generation flips each time the completion ring wraps. */
		if (++rxc->vxcr_next == rxc->vxcr_ndesc) {
			rxc->vxcr_next = 0;
			rxc->vxcr_gen ^= 1;
		}

		idx = rxcd->rxd_idx;
		eof = rxcd->eop;
		/* qid selects which of the two command rings this was. */
		if (rxcd->qid < sc->vmx_nrxqueues)
			rxr = &rxq->vxrxq_cmd_ring[0];
		else
			rxr = &rxq->vxrxq_cmd_ring[1];
		vmxnet3_rxq_eof_discard(rxq, rxr, idx);
	} while (!eof);
}
2093
2094 static void
vmxnet3_rx_csum(struct vmxnet3_rxcompdesc * rxcd,struct mbuf * m)2095 vmxnet3_rx_csum(struct vmxnet3_rxcompdesc *rxcd, struct mbuf *m)
2096 {
2097
2098 if (rxcd->ipv4) {
2099 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
2100 if (rxcd->ipcsum_ok)
2101 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2102 }
2103
2104 if (!rxcd->fragment) {
2105 if (rxcd->csum_ok && (rxcd->tcp || rxcd->udp)) {
2106 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
2107 CSUM_PSEUDO_HDR;
2108 m->m_pkthdr.csum_data = 0xFFFF;
2109 }
2110 }
2111 }
2112
/*
 * Hand a fully assembled receive mbuf chain to the network stack,
 * after applying checksum and VLAN metadata from the completion
 * descriptor. Consumes 'm' in all cases. Called with the Rx queue
 * lock held; the lock is dropped around if_input.
 */
static void
vmxnet3_rxq_input(struct vmxnet3_rxqueue *rxq,
    struct vmxnet3_rxcompdesc *rxcd, struct mbuf *m)
{
	struct vmxnet3_softc *sc;
	struct ifnet *ifp;

	sc = rxq->vxrxq_sc;
	ifp = sc->vmx_ifp;

	if (rxcd->error) {
		rxq->vxrxq_stats.vmrxs_ierrors++;
		m_freem(m);
		return;
	}

#if 0
#ifdef notyet
	/* RSS flowid assignment — disabled pending stack support. */
	switch (rxcd->rss_type) {
	case VMXNET3_RCD_RSS_TYPE_IPV4:
		m->m_pkthdr.flowid = rxcd->rss_hash;
		M_HASHTYPE_SET(m, M_HASHTYPE_RSS_IPV4);
		break;
	case VMXNET3_RCD_RSS_TYPE_TCPIPV4:
		m->m_pkthdr.flowid = rxcd->rss_hash;
		M_HASHTYPE_SET(m, M_HASHTYPE_RSS_TCP_IPV4);
		break;
	case VMXNET3_RCD_RSS_TYPE_IPV6:
		m->m_pkthdr.flowid = rxcd->rss_hash;
		M_HASHTYPE_SET(m, M_HASHTYPE_RSS_IPV6);
		break;
	case VMXNET3_RCD_RSS_TYPE_TCPIPV6:
		m->m_pkthdr.flowid = rxcd->rss_hash;
		M_HASHTYPE_SET(m, M_HASHTYPE_RSS_TCP_IPV6);
		break;
	default: /* VMXNET3_RCD_RSS_TYPE_NONE */
		m->m_pkthdr.flowid = rxq->vxrxq_id;
		M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE);
		break;
	}
#else
	m->m_pkthdr.flowid = rxq->vxrxq_id;
	M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE);
#endif
#endif

	/* no_csum set means the device did not verify checksums. */
	if (!rxcd->no_csum)
		vmxnet3_rx_csum(rxcd, m);
	if (rxcd->vlan) {
		m->m_flags |= M_VLANTAG;
		m->m_pkthdr.ether_vlantag = rxcd->vtag;
	}

	rxq->vxrxq_stats.vmrxs_ipackets++;
	rxq->vxrxq_stats.vmrxs_ibytes += m->m_pkthdr.len;

	/* Drop the queue lock across the stack call to avoid recursion. */
	VMXNET3_RXQ_UNLOCK(rxq);
	(*ifp->if_input)(ifp, m, NULL, -1);
	VMXNET3_RXQ_LOCK(rxq);
}
2173
/*
 * Process the Rx completion ring: walk completion descriptors until we
 * hit one still owned by the host (generation bit mismatch), chaining
 * multi-descriptor frames and delivering each completed frame to the
 * stack.  A partially assembled chain is parked in vxrxq_mhead/mtail
 * across calls.  Must be called with the Rx queue lock held.
 */
static void
vmxnet3_rxq_eof(struct vmxnet3_rxqueue *rxq)
{
	struct vmxnet3_softc *sc;
	struct ifnet *ifp;
	struct vmxnet3_rxring *rxr;
	struct vmxnet3_comp_ring *rxc;
	struct vmxnet3_rxdesc *rxd;
	struct vmxnet3_rxcompdesc *rxcd;
	struct mbuf *m, *m_head, *m_tail;
	int idx, length;

	sc = rxq->vxrxq_sc;
	ifp = sc->vmx_ifp;
	rxc = &rxq->vxrxq_comp_ring;

	VMXNET3_RXQ_LOCK_ASSERT(rxq);

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;

	/* Resume any frame left partially assembled by the last call. */
	m_head = rxq->vxrxq_mhead;
	rxq->vxrxq_mhead = NULL;
	m_tail = rxq->vxrxq_mtail;
	rxq->vxrxq_mtail = NULL;
	KKASSERT(m_head == NULL || m_tail != NULL);

	for (;;) {
		rxcd = &rxc->vxcr_u.rxcd[rxc->vxcr_next];
		if (rxcd->gen != rxc->vxcr_gen) {
			/*
			 * Descriptor still owned by the host; save any
			 * in-progress chain for the next call.
			 */
			rxq->vxrxq_mhead = m_head;
			rxq->vxrxq_mtail = m_tail;
			break;
		}
		/* Read the descriptor body only after the gen check. */
		vmxnet3_barrier(sc, VMXNET3_BARRIER_RD);

		/* Advance; flip our generation bit on wrap-around. */
		if (++rxc->vxcr_next == rxc->vxcr_ndesc) {
			rxc->vxcr_next = 0;
			rxc->vxcr_gen ^= 1;
		}

		idx = rxcd->rxd_idx;
		length = rxcd->len;
		/* qid selects command ring 0 or 1 of this queue. */
		if (rxcd->qid < sc->vmx_nrxqueues)
			rxr = &rxq->vxrxq_cmd_ring[0];
		else
			rxr = &rxq->vxrxq_cmd_ring[1];
		rxd = &rxr->vxrxr_rxd[idx];

		m = rxr->vxrxr_rxbuf[idx].vrxb_m;
		KASSERT(m != NULL, ("%s: queue %d idx %d without mbuf",
		    __func__, rxcd->qid, idx));

		/*
		 * The host may skip descriptors. We detect this when this
		 * descriptor does not match the previous fill index. Catch
		 * up with the host now.
		 */
		if (__predict_false(rxr->vxrxr_fill != idx)) {
			while (rxr->vxrxr_fill != idx) {
				rxr->vxrxr_rxd[rxr->vxrxr_fill].gen =
				    rxr->vxrxr_gen;
				vmxnet3_rxr_increment_fill(rxr);
			}
		}

		if (rxcd->sop) {
			/* Start of a new frame; must land on a head buffer. */
			KASSERT(rxd->btype == VMXNET3_BTYPE_HEAD,
			    ("%s: start of frame w/o head buffer", __func__));
			KASSERT(rxr == &rxq->vxrxq_cmd_ring[0],
			    ("%s: start of frame not in ring 0", __func__));
			KASSERT((idx % sc->vmx_rx_max_chain) == 0,
			    ("%s: start of frame at unexcepted index %d (%d)",
			    __func__, idx, sc->vmx_rx_max_chain));
			KASSERT(m_head == NULL,
			    ("%s: duplicate start of frame?", __func__));

			if (length == 0) {
				/* Just ignore this descriptor. */
				vmxnet3_rxq_eof_discard(rxq, rxr, idx);
				goto nextp;
			}

			/*
			 * Replace the buffer before taking ownership of
			 * the old mbuf; on failure drop the whole frame.
			 */
			if (vmxnet3_newbuf(sc, rxr) != 0) {
				rxq->vxrxq_stats.vmrxs_iqdrops++;
				vmxnet3_rxq_eof_discard(rxq, rxr, idx);
				if (!rxcd->eop)
					vmxnet3_rxq_discard_chain(rxq);
				goto nextp;
			}

			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = m->m_len = length;
			m->m_pkthdr.csum_flags = 0;
			m_head = m_tail = m;

		} else {
			/* Continuation segment of the current frame. */
			KASSERT(rxd->btype == VMXNET3_BTYPE_BODY,
			    ("%s: non start of frame w/o body buffer", __func__));

			if (m_head == NULL && m_tail == NULL) {
				/*
				 * This is a continuation of a packet that we
				 * started to drop, but could not drop entirely
				 * because this segment was still owned by the
				 * host. So, drop the remainder now.
				 */
				vmxnet3_rxq_eof_discard(rxq, rxr, idx);
				if (!rxcd->eop)
					vmxnet3_rxq_discard_chain(rxq);
				goto nextp;
			}

			KASSERT(m_head != NULL,
			    ("%s: frame not started?", __func__));

			if (vmxnet3_newbuf(sc, rxr) != 0) {
				rxq->vxrxq_stats.vmrxs_iqdrops++;
				vmxnet3_rxq_eof_discard(rxq, rxr, idx);
				if (!rxcd->eop)
					vmxnet3_rxq_discard_chain(rxq);
				m_freem(m_head);
				m_head = m_tail = NULL;
				goto nextp;
			}

			/* Append this segment to the chain. */
			m->m_len = length;
			m_head->m_pkthdr.len += length;
			m_tail->m_next = m;
			m_tail = m;
		}

		if (rxcd->eop) {
			/* Frame complete; pass it up (drops/retakes lock). */
			vmxnet3_rxq_input(rxq, rxcd, m_head);
			m_head = m_tail = NULL;

			/* Must recheck after dropping the Rx lock. */
			if ((ifp->if_flags & IFF_RUNNING) == 0)
				break;
		}

nextp:
		/* Some hosts require explicit Rx head pointer updates. */
		if (__predict_false(rxq->vxrxq_rs->update_rxhead)) {
			int qid = rxcd->qid;
			bus_size_t r;

			idx = (idx + 1) % rxr->vxrxr_ndesc;
			if (qid >= sc->vmx_nrxqueues) {
				qid -= sc->vmx_nrxqueues;
				r = VMXNET3_BAR0_RXH2(qid);
			} else
				r = VMXNET3_BAR0_RXH1(qid);
			vmxnet3_write_bar0(sc, r, idx);
		}
	}
}
2330
2331 static void
vmxnet3_legacy_intr(void * xsc)2332 vmxnet3_legacy_intr(void *xsc)
2333 {
2334 struct vmxnet3_softc *sc;
2335 struct vmxnet3_rxqueue *rxq;
2336 struct vmxnet3_txqueue *txq;
2337
2338 sc = xsc;
2339 rxq = &sc->vmx_rxq[0];
2340 txq = &sc->vmx_txq[0];
2341
2342 if (sc->vmx_intr_type == VMXNET3_IT_LEGACY) {
2343 if (vmxnet3_read_bar1(sc, VMXNET3_BAR1_INTR) == 0)
2344 return;
2345 }
2346 if (sc->vmx_intr_mask_mode == VMXNET3_IMM_ACTIVE)
2347 vmxnet3_disable_all_intrs(sc);
2348
2349 if (sc->vmx_ds->event != 0)
2350 vmxnet3_evintr(sc);
2351
2352 VMXNET3_RXQ_LOCK(rxq);
2353 vmxnet3_rxq_eof(rxq);
2354 VMXNET3_RXQ_UNLOCK(rxq);
2355
2356 VMXNET3_TXQ_LOCK(txq);
2357 vmxnet3_txq_eof(txq);
2358 vmxnet3_txq_start(txq);
2359 VMXNET3_TXQ_UNLOCK(txq);
2360
2361 vmxnet3_enable_all_intrs(sc);
2362 }
2363
2364 #ifdef __FreeBSD__
2365 static void
vmxnet3_txq_intr(void * xtxq)2366 vmxnet3_txq_intr(void *xtxq)
2367 {
2368 struct vmxnet3_softc *sc;
2369 struct vmxnet3_txqueue *txq;
2370
2371 txq = xtxq;
2372 sc = txq->vxtxq_sc;
2373
2374 if (sc->vmx_intr_mask_mode == VMXNET3_IMM_ACTIVE)
2375 vmxnet3_disable_intr(sc, txq->vxtxq_intr_idx);
2376
2377 VMXNET3_TXQ_LOCK(txq);
2378 vmxnet3_txq_eof(txq);
2379 vmxnet3_txq_start(txq);
2380 VMXNET3_TXQ_UNLOCK(txq);
2381
2382 vmxnet3_enable_intr(sc, txq->vxtxq_intr_idx);
2383 }
2384
2385 static void
vmxnet3_rxq_intr(void * xrxq)2386 vmxnet3_rxq_intr(void *xrxq)
2387 {
2388 struct vmxnet3_softc *sc;
2389 struct vmxnet3_rxqueue *rxq;
2390
2391 rxq = xrxq;
2392 sc = rxq->vxrxq_sc;
2393
2394 if (sc->vmx_intr_mask_mode == VMXNET3_IMM_ACTIVE)
2395 vmxnet3_disable_intr(sc, rxq->vxrxq_intr_idx);
2396
2397 VMXNET3_RXQ_LOCK(rxq);
2398 vmxnet3_rxq_eof(rxq);
2399 VMXNET3_RXQ_UNLOCK(rxq);
2400
2401 vmxnet3_enable_intr(sc, rxq->vxrxq_intr_idx);
2402 }
2403
2404 static void
vmxnet3_event_intr(void * xsc)2405 vmxnet3_event_intr(void *xsc)
2406 {
2407 struct vmxnet3_softc *sc;
2408
2409 sc = xsc;
2410
2411 if (sc->vmx_intr_mask_mode == VMXNET3_IMM_ACTIVE)
2412 vmxnet3_disable_intr(sc, sc->vmx_event_intr_idx);
2413
2414 if (sc->vmx_ds->event != 0)
2415 vmxnet3_evintr(sc);
2416
2417 vmxnet3_enable_intr(sc, sc->vmx_event_intr_idx);
2418 }
2419 #endif
2420
2421 static void
vmxnet3_txstop(struct vmxnet3_softc * sc,struct vmxnet3_txqueue * txq)2422 vmxnet3_txstop(struct vmxnet3_softc *sc, struct vmxnet3_txqueue *txq)
2423 {
2424 struct vmxnet3_txring *txr;
2425 struct vmxnet3_txbuf *txb;
2426 int i;
2427
2428 txr = &txq->vxtxq_cmd_ring;
2429
2430 for (i = 0; i < txr->vxtxr_ndesc; i++) {
2431 txb = &txr->vxtxr_txbuf[i];
2432
2433 if (txb->vtxb_m == NULL)
2434 continue;
2435
2436 bus_dmamap_sync(txr->vxtxr_txtag, txb->vtxb_dmamap,
2437 BUS_DMASYNC_POSTWRITE);
2438 bus_dmamap_unload(txr->vxtxr_txtag, txb->vtxb_dmamap);
2439 m_freem(txb->vtxb_m);
2440 txb->vtxb_m = NULL;
2441 }
2442 }
2443
2444 static void
vmxnet3_rxstop(struct vmxnet3_softc * sc,struct vmxnet3_rxqueue * rxq)2445 vmxnet3_rxstop(struct vmxnet3_softc *sc, struct vmxnet3_rxqueue *rxq)
2446 {
2447 struct vmxnet3_rxring *rxr;
2448 struct vmxnet3_rxbuf *rxb;
2449 int i, j;
2450
2451 if (rxq->vxrxq_mhead != NULL) {
2452 m_freem(rxq->vxrxq_mhead);
2453 rxq->vxrxq_mhead = NULL;
2454 rxq->vxrxq_mtail = NULL;
2455 }
2456
2457 for (i = 0; i < VMXNET3_RXRINGS_PERQ; i++) {
2458 rxr = &rxq->vxrxq_cmd_ring[i];
2459
2460 for (j = 0; j < rxr->vxrxr_ndesc; j++) {
2461 rxb = &rxr->vxrxr_rxbuf[j];
2462
2463 if (rxb->vrxb_m == NULL)
2464 continue;
2465
2466 bus_dmamap_sync(rxr->vxrxr_rxtag, rxb->vrxb_dmamap,
2467 BUS_DMASYNC_POSTREAD);
2468 bus_dmamap_unload(rxr->vxrxr_rxtag, rxb->vrxb_dmamap);
2469 m_freem(rxb->vrxb_m);
2470 rxb->vrxb_m = NULL;
2471 }
2472 }
2473 }
2474
2475 static void
vmxnet3_stop_rendezvous(struct vmxnet3_softc * sc)2476 vmxnet3_stop_rendezvous(struct vmxnet3_softc *sc)
2477 {
2478 struct vmxnet3_rxqueue *rxq;
2479 struct vmxnet3_txqueue *txq;
2480 int i;
2481
2482 for (i = 0; i < sc->vmx_nrxqueues; i++) {
2483 rxq = &sc->vmx_rxq[i];
2484 VMXNET3_RXQ_LOCK(rxq);
2485 VMXNET3_RXQ_UNLOCK(rxq);
2486 }
2487
2488 for (i = 0; i < sc->vmx_ntxqueues; i++) {
2489 txq = &sc->vmx_txq[i];
2490 VMXNET3_TXQ_LOCK(txq);
2491 VMXNET3_TXQ_UNLOCK(txq);
2492 }
2493 }
2494
/*
 * Stop the interface: mark it down, quiesce interrupts and the device,
 * rendezvous with in-flight handlers, release all ring buffers and
 * finally reset the device.  Called with the core lock held.
 */
static void
vmxnet3_stop(struct vmxnet3_softc *sc)
{
	struct ifnet *ifp;
	int q;

	ifp = sc->vmx_ifp;
	VMXNET3_CORE_LOCK_ASSERT(sc);

	ifp->if_flags &= ~IFF_RUNNING;
	sc->vmx_link_active = 0;
	callout_stop(&sc->vmx_tick);

	/* Disable interrupts. */
	vmxnet3_disable_all_intrs(sc);
	vmxnet3_write_cmd(sc, VMXNET3_CMD_DISABLE);

	/* Wait for any running queue handlers to drain. */
	vmxnet3_stop_rendezvous(sc);

	/* Safe to free ring buffers only after the rendezvous. */
	for (q = 0; q < sc->vmx_ntxqueues; q++)
		vmxnet3_txstop(sc, &sc->vmx_txq[q]);
	for (q = 0; q < sc->vmx_nrxqueues; q++)
		vmxnet3_rxstop(sc, &sc->vmx_rxq[q]);

	vmxnet3_write_cmd(sc, VMXNET3_CMD_RESET);
}
2521
2522 static void
vmxnet3_txinit(struct vmxnet3_softc * sc,struct vmxnet3_txqueue * txq)2523 vmxnet3_txinit(struct vmxnet3_softc *sc, struct vmxnet3_txqueue *txq)
2524 {
2525 struct vmxnet3_txring *txr;
2526 struct vmxnet3_comp_ring *txc;
2527
2528 txr = &txq->vxtxq_cmd_ring;
2529 txr->vxtxr_head = 0;
2530 txr->vxtxr_next = 0;
2531 txr->vxtxr_gen = VMXNET3_INIT_GEN;
2532 bzero(txr->vxtxr_txd,
2533 txr->vxtxr_ndesc * sizeof(struct vmxnet3_txdesc));
2534
2535 txc = &txq->vxtxq_comp_ring;
2536 txc->vxcr_next = 0;
2537 txc->vxcr_gen = VMXNET3_INIT_GEN;
2538 bzero(txc->vxcr_u.txcd,
2539 txc->vxcr_ndesc * sizeof(struct vmxnet3_txcompdesc));
2540 }
2541
/*
 * Initialize a Rx queue's command rings and completion ring and
 * populate the active command ring(s) with fresh buffers.
 * Returns 0 on success or an errno from buffer allocation.
 */
static int
vmxnet3_rxinit(struct vmxnet3_softc *sc, struct vmxnet3_rxqueue *rxq)
{
	struct ifnet *ifp;
	struct vmxnet3_rxring *rxr;
	struct vmxnet3_comp_ring *rxc;
	int i, populate, idx, frame_size, error;

	ifp = sc->vmx_ifp;
	/* Worst-case on-wire frame size for the current MTU. */
	frame_size = ETHER_ALIGN + sizeof(struct ether_vlan_header) +
	    ifp->if_mtu;

	/*
	 * If the MTU causes us to exceed what a regular sized cluster can
	 * handle, we allocate a second MJUMPAGESIZE cluster after it in
	 * ring 0. If in use, ring 1 always contains MJUMPAGESIZE clusters.
	 *
	 * Keep rx_max_chain a divisor of the maximum Rx ring size to make
	 * our life easier. We do not support changing the ring size after
	 * the attach.
	 */
	if (frame_size <= MCLBYTES)
		sc->vmx_rx_max_chain = 1;
	else
		sc->vmx_rx_max_chain = 2;

	/*
	 * Only populate ring 1 if the configuration will take advantage
	 * of it. That is either when LRO is enabled or the frame size
	 * exceeds what ring 0 can contain.
	 */
#if 0 /* XXX LRO */
	if ((ifp->if_capenable & IFCAP_LRO) == 0 &&
#else
	if (
#endif
	    frame_size <= MCLBYTES + MJUMPAGESIZE)
		populate = 1;
	else
		populate = VMXNET3_RXRINGS_PERQ;

	/* Initialize and fill the active command ring(s). */
	for (i = 0; i < populate; i++) {
		rxr = &rxq->vxrxq_cmd_ring[i];
		rxr->vxrxr_fill = 0;
		rxr->vxrxr_gen = VMXNET3_INIT_GEN;
		bzero(rxr->vxrxr_rxd,
		    rxr->vxrxr_ndesc * sizeof(struct vmxnet3_rxdesc));

		for (idx = 0; idx < rxr->vxrxr_ndesc; idx++) {
			error = vmxnet3_newbuf(sc, rxr);
			if (error)
				return (error);
		}
	}

	/* Unused rings are zeroed with generation 0 (never host-owned). */
	for (/**/; i < VMXNET3_RXRINGS_PERQ; i++) {
		rxr = &rxq->vxrxq_cmd_ring[i];
		rxr->vxrxr_fill = 0;
		rxr->vxrxr_gen = 0;
		bzero(rxr->vxrxr_rxd,
		    rxr->vxrxr_ndesc * sizeof(struct vmxnet3_rxdesc));
	}

	rxc = &rxq->vxrxq_comp_ring;
	rxc->vxcr_next = 0;
	rxc->vxcr_gen = VMXNET3_INIT_GEN;
	bzero(rxc->vxcr_u.rxcd,
	    rxc->vxcr_ndesc * sizeof(struct vmxnet3_rxcompdesc));

	return (0);
}
2613
2614 static int
vmxnet3_reinit_queues(struct vmxnet3_softc * sc)2615 vmxnet3_reinit_queues(struct vmxnet3_softc *sc)
2616 {
2617 device_t dev;
2618 int q, error;
2619
2620 dev = sc->vmx_dev;
2621
2622 for (q = 0; q < sc->vmx_ntxqueues; q++)
2623 vmxnet3_txinit(sc, &sc->vmx_txq[q]);
2624
2625 for (q = 0; q < sc->vmx_nrxqueues; q++) {
2626 error = vmxnet3_rxinit(sc, &sc->vmx_rxq[q]);
2627 if (error) {
2628 device_printf(dev, "cannot populate Rx queue %d\n", q);
2629 return (error);
2630 }
2631 }
2632
2633 return (0);
2634 }
2635
2636 static int
vmxnet3_enable_device(struct vmxnet3_softc * sc)2637 vmxnet3_enable_device(struct vmxnet3_softc *sc)
2638 {
2639 int q;
2640
2641 if (vmxnet3_read_cmd(sc, VMXNET3_CMD_ENABLE) != 0) {
2642 device_printf(sc->vmx_dev, "device enable command failed!\n");
2643 return (1);
2644 }
2645
2646 /* Reset the Rx queue heads. */
2647 for (q = 0; q < sc->vmx_nrxqueues; q++) {
2648 vmxnet3_write_bar0(sc, VMXNET3_BAR0_RXH1(q), 0);
2649 vmxnet3_write_bar0(sc, VMXNET3_BAR0_RXH2(q), 0);
2650 }
2651
2652 return (0);
2653 }
2654
/*
 * Reprogram the device Rx filters after a (re)init: the unicast/
 * multicast filter and the VLAN filter table.  Hardware VLAN
 * filtering is disabled in this port, so the table is always cleared.
 */
static void
vmxnet3_reinit_rxfilters(struct vmxnet3_softc *sc)
{
	struct ifnet *ifp;

	ifp = sc->vmx_ifp;

	vmxnet3_set_rxfilter(sc);

#if 0 /* VLAN_HWFILTER */
	if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
		bcopy(sc->vmx_vlan_filter, sc->vmx_ds->vlan_filter,
		    sizeof(sc->vmx_ds->vlan_filter));
	else
#endif
		/* NB: with the block above disabled, this always runs. */
		bzero(sc->vmx_ds->vlan_filter,
		    sizeof(sc->vmx_ds->vlan_filter));
	vmxnet3_write_cmd(sc, VMXNET3_CMD_VLAN_FILTER);
}
2674
2675 static int
vmxnet3_reinit(struct vmxnet3_softc * sc)2676 vmxnet3_reinit(struct vmxnet3_softc *sc)
2677 {
2678
2679 vmxnet3_reinit_interface(sc);
2680 vmxnet3_reinit_shared_data(sc);
2681
2682 if (vmxnet3_reinit_queues(sc) != 0)
2683 return (ENXIO);
2684
2685 if (vmxnet3_enable_device(sc) != 0)
2686 return (ENXIO);
2687
2688 vmxnet3_reinit_rxfilters(sc);
2689
2690 return (0);
2691 }
2692
2693 static void
vmxnet3_init_locked(struct vmxnet3_softc * sc)2694 vmxnet3_init_locked(struct vmxnet3_softc *sc)
2695 {
2696 struct ifnet *ifp;
2697
2698 ifp = sc->vmx_ifp;
2699
2700 if (ifp->if_flags & IFF_RUNNING)
2701 return;
2702
2703 vmxnet3_stop(sc);
2704
2705 if (vmxnet3_reinit(sc) != 0) {
2706 vmxnet3_stop(sc);
2707 return;
2708 }
2709
2710 ifp->if_flags |= IFF_RUNNING;
2711 vmxnet3_link_status(sc);
2712
2713 vmxnet3_enable_all_intrs(sc);
2714 callout_reset(&sc->vmx_tick, hz, vmxnet3_tick, sc);
2715 }
2716
/* if_init entry point: take the core lock and run the locked init. */
static void
vmxnet3_init(void *xsc)
{
	struct vmxnet3_softc *sc = xsc;

	VMXNET3_CORE_LOCK(sc);
	vmxnet3_init_locked(sc);
	VMXNET3_CORE_UNLOCK(sc);
}
2728
2729 /*
2730 * BMV: Much of this can go away once we finally have offsets in
2731 * the mbuf packet header. Bug andre@.
2732 */
2733 static int
vmxnet3_txq_offload_ctx(struct vmxnet3_txqueue * txq,struct mbuf * m,int * etype,int * proto,int * start)2734 vmxnet3_txq_offload_ctx(struct vmxnet3_txqueue *txq, struct mbuf *m,
2735 int *etype, int *proto, int *start)
2736 {
2737 struct ether_vlan_header *evh;
2738 int offset;
2739 #if defined(INET)
2740 struct ip *ip = NULL;
2741 #endif
2742 #if defined(INET6)
2743 struct ip6_hdr *ip6 = NULL;
2744 #endif
2745
2746 evh = mtod(m, struct ether_vlan_header *);
2747 if (evh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
2748 /* BMV: We should handle nested VLAN tags too. */
2749 *etype = ntohs(evh->evl_proto);
2750 offset = sizeof(struct ether_vlan_header);
2751 } else {
2752 *etype = ntohs(evh->evl_encap_proto);
2753 offset = sizeof(struct ether_header);
2754 }
2755
2756 switch (*etype) {
2757 #if defined(INET)
2758 case ETHERTYPE_IP:
2759 if (__predict_false(m->m_len < offset + sizeof(struct ip))) {
2760 m = m_pullup(m, offset + sizeof(struct ip));
2761 if (m == NULL)
2762 return (EINVAL);
2763 }
2764
2765 ip = (struct ip *)(mtod(m, uint8_t *) + offset);
2766 *proto = ip->ip_p;
2767 *start = offset + (ip->ip_hl << 2);
2768 break;
2769 #endif
2770 #if defined(INET6)
2771 case ETHERTYPE_IPV6:
2772 if (__predict_false(m->m_len <
2773 offset + sizeof(struct ip6_hdr))) {
2774 m = m_pullup(m, offset + sizeof(struct ip6_hdr));
2775 if (m == NULL)
2776 return (EINVAL);
2777 }
2778
2779 ip6 = (struct ip6_hdr *)(mtod(m, uint8_t *) + offset);
2780 *proto = -1;
2781 *start = ip6_lasthdr(m, offset, IPPROTO_IPV6, proto);
2782 /* Assert the network stack sent us a valid packet. */
2783 KASSERT(*start > offset,
2784 ("%s: mbuf %p start %d offset %d proto %d", __func__, m,
2785 *start, offset, *proto));
2786 break;
2787 #endif
2788 default:
2789 return (EINVAL);
2790 }
2791
2792 #if 0 /* XXX TSO */
2793 if (m->m_pkthdr.csum_flags & CSUM_TSO) {
2794 struct tcphdr *tcp;
2795
2796 if (__predict_false(*proto != IPPROTO_TCP)) {
2797 /* Likely failed to correctly parse the mbuf. */
2798 return (EINVAL);
2799 }
2800
2801 if (m->m_len < *start + sizeof(struct tcphdr)) {
2802 m = m_pullup(m, *start + sizeof(struct tcphdr));
2803 if (m == NULL)
2804 return (EINVAL);
2805 }
2806
2807 tcp = (struct tcphdr *)(mtod(m, uint8_t *) + *start);
2808 *start += (tcp->th_off << 2);
2809
2810 txq->vxtxq_stats.vmtxs_tso++;
2811 } else
2812 #endif
2813 txq->vxtxq_stats.vmtxs_csum++;
2814
2815 return (0);
2816 }
2817
2818 static int
vmxnet3_txq_load_mbuf(struct vmxnet3_txqueue * txq,struct mbuf ** m0,bus_dmamap_t dmap,bus_dma_segment_t segs[],int * nsegs)2819 vmxnet3_txq_load_mbuf(struct vmxnet3_txqueue *txq, struct mbuf **m0,
2820 bus_dmamap_t dmap, bus_dma_segment_t segs[], int *nsegs)
2821 {
2822 struct vmxnet3_txring *txr;
2823 struct mbuf *m;
2824 bus_dma_tag_t tag;
2825 int error;
2826
2827 txr = &txq->vxtxq_cmd_ring;
2828 m = *m0;
2829 tag = txr->vxtxr_txtag;
2830
2831 error = bus_dmamap_load_mbuf_segment(tag, dmap, m, segs, 1, nsegs,
2832 BUS_DMA_NOWAIT);
2833 if (error == 0 || error != EFBIG)
2834 return (error);
2835
2836 m = m_defrag(m, M_NOWAIT);
2837 if (m != NULL) {
2838 *m0 = m;
2839 error = bus_dmamap_load_mbuf_segment(tag, dmap, m, segs,
2840 1, nsegs, BUS_DMA_NOWAIT);
2841 } else
2842 error = ENOBUFS;
2843
2844 if (error) {
2845 m_freem(*m0);
2846 *m0 = NULL;
2847 txq->vxtxq_sc->vmx_stats.vmst_defrag_failed++;
2848 } else
2849 txq->vxtxq_sc->vmx_stats.vmst_defragged++;
2850
2851 return (error);
2852 }
2853
2854 static void
vmxnet3_txq_unload_mbuf(struct vmxnet3_txqueue * txq,bus_dmamap_t dmap)2855 vmxnet3_txq_unload_mbuf(struct vmxnet3_txqueue *txq, bus_dmamap_t dmap)
2856 {
2857 struct vmxnet3_txring *txr;
2858
2859 txr = &txq->vxtxq_cmd_ring;
2860 bus_dmamap_unload(txr->vxtxr_txtag, dmap);
2861 }
2862
/*
 * Encapsulate an mbuf chain into Tx descriptors.
 *
 * Loads the chain for DMA, checks ring space, computes any checksum
 * offload context, fills one descriptor per DMA segment, and finally
 * hands ownership to the device by flipping the SOP generation bit.
 * On ENOSPC the chain is left in *m0 for the caller to requeue; on
 * offload failure the chain is freed and *m0 is NULL.
 */
static int
vmxnet3_txq_encap(struct vmxnet3_txqueue *txq, struct mbuf **m0)
{
	struct vmxnet3_softc *sc;
	struct vmxnet3_txring *txr;
	struct vmxnet3_txdesc *txd, *sop;
	struct mbuf *m;
	bus_dmamap_t dmap;
	bus_dma_segment_t segs[VMXNET3_TX_MAXSEGS];
	int i, gen, nsegs, etype, proto, start, error;

	sc = txq->vxtxq_sc;
	start = 0;
	txd = NULL;
	txr = &txq->vxtxq_cmd_ring;
	dmap = txr->vxtxr_txbuf[txr->vxtxr_head].vtxb_dmamap;

	error = vmxnet3_txq_load_mbuf(txq, m0, dmap, segs, &nsegs);
	if (error)
		return (error);

	m = *m0;
	M_ASSERTPKTHDR(m);
	KASSERT(nsegs <= VMXNET3_TX_MAXSEGS,
	    ("%s: mbuf %p with too many segments %d", __func__, m, nsegs));

	if (VMXNET3_TXRING_AVAIL(txr) < nsegs) {
		/* Ring full; caller keeps the mbuf and may retry. */
		txq->vxtxq_stats.vmtxs_full++;
		vmxnet3_txq_unload_mbuf(txq, dmap);
		return (ENOSPC);
	} else if (m->m_pkthdr.csum_flags & VMXNET3_CSUM_ALL_OFFLOAD) {
		/* Parse headers for the checksum offload context. */
		error = vmxnet3_txq_offload_ctx(txq, m, &etype, &proto, &start);
		if (error) {
			txq->vxtxq_stats.vmtxs_offload_failed++;
			vmxnet3_txq_unload_mbuf(txq, dmap);
			m_freem(m);
			*m0 = NULL;
			return (error);
		}
	}

	txr->vxtxr_txbuf[txr->vxtxr_head].vtxb_m = m;
	sop = &txr->vxtxr_txd[txr->vxtxr_head];
	gen = txr->vxtxr_gen ^ 1;	/* Owned by cpu (yet) */

	/* One descriptor per DMA segment. */
	for (i = 0; i < nsegs; i++) {
		txd = &txr->vxtxr_txd[txr->vxtxr_head];

		txd->addr = segs[i].ds_addr;
		txd->len = segs[i].ds_len;
		txd->gen = gen;
		txd->dtype = 0;
		txd->offload_mode = VMXNET3_OM_NONE;
		txd->offload_pos = 0;
		txd->hlen = 0;
		txd->eop = 0;
		txd->compreq = 0;
		txd->vtag_mode = 0;
		txd->vtag = 0;

		if (++txr->vxtxr_head == txr->vxtxr_ndesc) {
			txr->vxtxr_head = 0;
			txr->vxtxr_gen ^= 1;
		}
		/* Descriptors after the first use the live generation. */
		gen = txr->vxtxr_gen;
	}
	/* Last descriptor ends the packet and requests a completion. */
	txd->eop = 1;
	txd->compreq = 1;

	if (m->m_flags & M_VLANTAG) {
		sop->vtag_mode = 1;
		sop->vtag = m->m_pkthdr.ether_vlantag;
	}


#if 0 /* XXX TSO */
	if (m->m_pkthdr.csum_flags & CSUM_TSO) {
		sop->offload_mode = VMXNET3_OM_TSO;
		sop->hlen = start;
		sop->offload_pos = m->m_pkthdr.tso_segsz;
	} else
#endif
	if (m->m_pkthdr.csum_flags & (VMXNET3_CSUM_OFFLOAD |
	    VMXNET3_CSUM_OFFLOAD_IPV6)) {
		sop->offload_mode = VMXNET3_OM_CSUM;
		sop->hlen = start;
		sop->offload_pos = start + m->m_pkthdr.csum_data;
	}

	/* Finally, change the ownership. */
	vmxnet3_barrier(sc, VMXNET3_BARRIER_WR);
	sop->gen ^= 1;

	/* Kick the device once enough descriptors have accumulated. */
	txq->vxtxq_ts->npending += nsegs;
	if (txq->vxtxq_ts->npending >= txq->vxtxq_ts->intr_threshold) {
		txq->vxtxq_ts->npending = 0;
		vmxnet3_write_bar0(sc, VMXNET3_BAR0_TXH(txq->vxtxq_id),
		    txr->vxtxr_head);
	}

	return (0);
}
2965
2966 #ifdef VMXNET3_LEGACY_TX
2967
/*
 * Legacy transmit path: drain the interface send queue onto Tx queue 0.
 * Must be called with the Tx queue lock held.  Packets that cannot be
 * encapsulated are prepended back onto the send queue.
 */
static void
vmxnet3_start_locked(struct ifnet *ifp)
{
	struct vmxnet3_softc *sc;
	struct vmxnet3_txqueue *txq;
	struct vmxnet3_txring *txr;
	struct mbuf *m_head;
	int tx, avail;

	sc = ifp->if_softc;
	txq = &sc->vmx_txq[0];
	txr = &txq->vxtxq_cmd_ring;
	tx = 0;

	VMXNET3_TXQ_LOCK_ASSERT(txq);

	if ((ifp->if_flags & IFF_RUNNING) == 0 ||
	    sc->vmx_link_active == 0)
		return;

	while (!ifq_is_empty(&ifp->if_snd)) {
		/* Need at least two free descriptors to make progress. */
		if ((avail = VMXNET3_TXRING_AVAIL(txr)) < 2)
			break;

		m_head = ifq_dequeue(&ifp->if_snd);
		if (m_head == NULL)
			break;

		/* Assume worse case if this mbuf is the head of a chain. */
		if (m_head->m_next != NULL && avail < VMXNET3_TX_MAXSEGS) {
			ifq_prepend(&ifp->if_snd, m_head);
			break;
		}

		if (vmxnet3_txq_encap(txq, &m_head) != 0) {
			/* encap kept the mbuf only on ENOSPC; requeue it. */
			if (m_head != NULL)
				ifq_prepend(&ifp->if_snd, m_head);
			break;
		}

		tx++;
		ETHER_BPF_MTAP(ifp, m_head);
	}

	/* Arm the watchdog if we queued anything. */
	if (tx > 0)
		txq->vxtxq_watchdog = VMXNET3_WATCHDOG_TIMEOUT;
}
3015
3016 static void
vmxnet3_start(struct ifnet * ifp,struct ifaltq_subque * ifsq)3017 vmxnet3_start(struct ifnet *ifp, struct ifaltq_subque *ifsq)
3018 {
3019 struct vmxnet3_softc *sc;
3020 struct vmxnet3_txqueue *txq;
3021
3022 sc = ifp->if_softc;
3023 txq = &sc->vmx_txq[0];
3024
3025 VMXNET3_TXQ_LOCK(txq);
3026 vmxnet3_start_locked(ifp);
3027 VMXNET3_TXQ_UNLOCK(txq);
3028 }
3029
3030 #else /* !VMXNET3_LEGACY_TX */
3031
/*
 * Multiqueue transmit path (lock held): enqueue m (if non-NULL) on the
 * queue's buf_ring, then drain the ring into Tx descriptors using the
 * peek/putback/advance protocol.  Returns 0 or a drbr_enqueue errno.
 */
static int
vmxnet3_txq_mq_start_locked(struct vmxnet3_txqueue *txq, struct mbuf *m)
{
	struct vmxnet3_softc *sc;
	struct vmxnet3_txring *txr;
	struct buf_ring *br;
	struct ifnet *ifp;
	int tx, avail, error;

	sc = txq->vxtxq_sc;
	br = txq->vxtxq_br;
	ifp = sc->vmx_ifp;
	txr = &txq->vxtxq_cmd_ring;
	tx = 0;
	error = 0;

	VMXNET3_TXQ_LOCK_ASSERT(txq);

	/* Interface down or no link: just park the mbuf on the ring. */
	if ((ifp->if_flags & IFF_RUNNING) == 0 ||
	    sc->vmx_link_active == 0) {
		if (m != NULL)
			error = drbr_enqueue(ifp, br, m);
		return (error);
	}

	if (m != NULL) {
		error = drbr_enqueue(ifp, br, m);
		if (error)
			return (error);
	}

	/* Need at least two free descriptors to make progress. */
	while ((avail = VMXNET3_TXRING_AVAIL(txr)) >= 2) {
		m = drbr_peek(ifp, br);
		if (m == NULL)
			break;

		/* Assume worse case if this mbuf is the head of a chain. */
		if (m->m_next != NULL && avail < VMXNET3_TX_MAXSEGS) {
			drbr_putback(ifp, br, m);
			break;
		}

		if (vmxnet3_txq_encap(txq, &m) != 0) {
			/* ENOSPC keeps the mbuf: put it back; else drop it. */
			if (m != NULL)
				drbr_putback(ifp, br, m);
			else
				drbr_advance(ifp, br);
			break;
		}
		drbr_advance(ifp, br);

		tx++;
		ETHER_BPF_MTAP(ifp, m);
	}

	/* Arm the watchdog if we queued anything. */
	if (tx > 0)
		txq->vxtxq_watchdog = VMXNET3_WATCHDOG_TIMEOUT;

	return (0);
}
3092
3093 static int
vmxnet3_txq_mq_start(struct ifnet * ifp,struct mbuf * m)3094 vmxnet3_txq_mq_start(struct ifnet *ifp, struct mbuf *m)
3095 {
3096 struct vmxnet3_softc *sc;
3097 struct vmxnet3_txqueue *txq;
3098 int i, ntxq, error;
3099
3100 sc = ifp->if_softc;
3101 ntxq = sc->vmx_ntxqueues;
3102
3103 /* check if flowid is set */
3104 if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
3105 i = m->m_pkthdr.flowid % ntxq;
3106 else
3107 i = curcpu % ntxq;
3108
3109 txq = &sc->vmx_txq[i];
3110
3111 if (VMXNET3_TXQ_TRYLOCK(txq) != 0) {
3112 error = vmxnet3_txq_mq_start_locked(txq, m);
3113 VMXNET3_TXQ_UNLOCK(txq);
3114 } else {
3115 error = drbr_enqueue(ifp, txq->vxtxq_br, m);
3116 taskqueue_enqueue(sc->vmx_tq, &txq->vxtxq_defrtask);
3117 }
3118
3119 return (error);
3120 }
3121
3122 static void
vmxnet3_txq_tq_deferred(void * xtxq,int pending)3123 vmxnet3_txq_tq_deferred(void *xtxq, int pending)
3124 {
3125 struct vmxnet3_softc *sc;
3126 struct vmxnet3_txqueue *txq;
3127
3128 txq = xtxq;
3129 sc = txq->vxtxq_sc;
3130
3131 VMXNET3_TXQ_LOCK(txq);
3132 if (!drbr_empty(sc->vmx_ifp, txq->vxtxq_br))
3133 vmxnet3_txq_mq_start_locked(txq, NULL);
3134 VMXNET3_TXQ_UNLOCK(txq);
3135 }
3136
3137 #endif /* VMXNET3_LEGACY_TX */
3138
/*
 * Restart transmission on a queue if pending packets are waiting.
 * Must be called with the Tx queue lock held.  The source of pending
 * packets depends on which transmit path is compiled in.
 */
static void
vmxnet3_txq_start(struct vmxnet3_txqueue *txq)
{
	struct vmxnet3_softc *sc;
	struct ifnet *ifp;

	sc = txq->vxtxq_sc;
	ifp = sc->vmx_ifp;

#ifdef VMXNET3_LEGACY_TX
	if (!ifq_is_empty(&ifp->if_snd))
		vmxnet3_start_locked(ifp);
#else
	if (!drbr_empty(ifp, txq->vxtxq_br))
		vmxnet3_txq_mq_start_locked(txq, NULL);
#endif
}
3156
3157 static void
vmxnet3_tx_start_all(struct vmxnet3_softc * sc)3158 vmxnet3_tx_start_all(struct vmxnet3_softc *sc)
3159 {
3160 struct vmxnet3_txqueue *txq;
3161 int i;
3162
3163 VMXNET3_CORE_LOCK_ASSERT(sc);
3164
3165 for (i = 0; i < sc->vmx_ntxqueues; i++) {
3166 txq = &sc->vmx_txq[i];
3167
3168 VMXNET3_TXQ_LOCK(txq);
3169 vmxnet3_txq_start(txq);
3170 VMXNET3_TXQ_UNLOCK(txq);
3171 }
3172 }
3173
3174 static void
vmxnet3_update_vlan_filter(struct vmxnet3_softc * sc,int add,uint16_t tag)3175 vmxnet3_update_vlan_filter(struct vmxnet3_softc *sc, int add, uint16_t tag)
3176 {
3177 struct ifnet *ifp;
3178 int idx, bit;
3179
3180 ifp = sc->vmx_ifp;
3181 idx = (tag >> 5) & 0x7F;
3182 bit = tag & 0x1F;
3183
3184 if (tag == 0 || tag > 4095)
3185 return;
3186
3187 VMXNET3_CORE_LOCK(sc);
3188
3189 /* Update our private VLAN bitvector. */
3190 if (add)
3191 sc->vmx_vlan_filter[idx] |= (1 << bit);
3192 else
3193 sc->vmx_vlan_filter[idx] &= ~(1 << bit);
3194
3195 #if 0 /* VLAN_HWFILTER */
3196 if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) {
3197 if (add)
3198 sc->vmx_ds->vlan_filter[idx] |= (1 << bit);
3199 else
3200 sc->vmx_ds->vlan_filter[idx] &= ~(1 << bit);
3201 vmxnet3_write_cmd(sc, VMXNET3_CMD_VLAN_FILTER);
3202 }
3203 #endif
3204
3205 VMXNET3_CORE_UNLOCK(sc);
3206 }
3207
3208 static void
vmxnet3_register_vlan(void * arg,struct ifnet * ifp,uint16_t tag)3209 vmxnet3_register_vlan(void *arg, struct ifnet *ifp, uint16_t tag)
3210 {
3211
3212 if (ifp->if_softc == arg)
3213 vmxnet3_update_vlan_filter(arg, 1, tag);
3214 }
3215
3216 static void
vmxnet3_unregister_vlan(void * arg,struct ifnet * ifp,uint16_t tag)3217 vmxnet3_unregister_vlan(void *arg, struct ifnet *ifp, uint16_t tag)
3218 {
3219
3220 if (ifp->if_softc == arg)
3221 vmxnet3_update_vlan_filter(arg, 0, tag);
3222 }
3223
/*
 * Program the device Rx mode and multicast table from the interface
 * flags and multicast address list.  If the list exceeds the device's
 * table capacity, fall back to receiving all multicast traffic.
 */
static void
vmxnet3_set_rxfilter(struct vmxnet3_softc *sc)
{
	struct ifnet *ifp;
	struct vmxnet3_driver_shared *ds;
	struct ifmultiaddr *ifma;
	u_int mode;

	ifp = sc->vmx_ifp;
	ds = sc->vmx_ds;

	/* Unicast and broadcast are always accepted. */
	mode = VMXNET3_RXMODE_UCAST | VMXNET3_RXMODE_BCAST;
	if (ifp->if_flags & IFF_PROMISC)
		mode |= VMXNET3_RXMODE_PROMISC;
	if (ifp->if_flags & IFF_ALLMULTI)
		mode |= VMXNET3_RXMODE_ALLMULTI;
	else {
		int cnt = 0, overflow = 0;

		/* Copy link-layer multicast addresses into the table. */
		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;
			else if (cnt == VMXNET3_MULTICAST_MAX) {
				overflow = 1;
				break;
			}

			bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
			    &sc->vmx_mcast[cnt*ETHER_ADDR_LEN], ETHER_ADDR_LEN);
			cnt++;
		}

		if (overflow != 0) {
			/* Table too small: accept all multicast instead. */
			cnt = 0;
			mode |= VMXNET3_RXMODE_ALLMULTI;
		} else if (cnt > 0)
			mode |= VMXNET3_RXMODE_MCAST;
		ds->mcast_tablelen = cnt * ETHER_ADDR_LEN;
	}

	ds->rxmode = mode;

	vmxnet3_write_cmd(sc, VMXNET3_CMD_SET_FILTER);
	vmxnet3_write_cmd(sc, VMXNET3_CMD_SET_RXMODE);
}
3269
3270 static int
vmxnet3_change_mtu(struct vmxnet3_softc * sc,int mtu)3271 vmxnet3_change_mtu(struct vmxnet3_softc *sc, int mtu)
3272 {
3273 struct ifnet *ifp;
3274
3275 ifp = sc->vmx_ifp;
3276
3277 if (mtu < VMXNET3_MIN_MTU || mtu > VMXNET3_MAX_MTU)
3278 return (EINVAL);
3279
3280 ifp->if_mtu = mtu;
3281
3282 if (ifp->if_flags & IFF_RUNNING) {
3283 ifp->if_flags &= ~IFF_RUNNING;
3284 vmxnet3_init_locked(sc);
3285 }
3286
3287 return (0);
3288 }
3289
/*
 * Interface ioctl handler: MTU changes, interface flags, multicast
 * list updates, media queries and capability toggles.  Takes the core
 * lock around state changes; unknown requests fall through to
 * ether_ioctl().  Returns 0 or an errno.
 */
static int
vmxnet3_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cred)
{
	struct vmxnet3_softc *sc;
	struct ifreq *ifr;
	int reinit, mask, error;

	sc = ifp->if_softc;
	ifr = (struct ifreq *) data;
	error = 0;

	switch (cmd) {
	case SIOCSIFMTU:
		if (ifp->if_mtu != ifr->ifr_mtu) {
			VMXNET3_CORE_LOCK(sc);
			error = vmxnet3_change_mtu(sc, ifr->ifr_mtu);
			VMXNET3_CORE_UNLOCK(sc);
		}
		break;

	case SIOCSIFFLAGS:
		VMXNET3_CORE_LOCK(sc);
		if (ifp->if_flags & IFF_UP) {
			if ((ifp->if_flags & IFF_RUNNING)) {
				/*
				 * Only reprogram the filter when the
				 * promisc/allmulti bits actually changed.
				 */
				if ((ifp->if_flags ^ sc->vmx_if_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI)) {
					vmxnet3_set_rxfilter(sc);
				}
			} else
				vmxnet3_init_locked(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				vmxnet3_stop(sc);
		}
		sc->vmx_if_flags = ifp->if_flags;
		VMXNET3_CORE_UNLOCK(sc);
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		VMXNET3_CORE_LOCK(sc);
		if (ifp->if_flags & IFF_RUNNING)
			vmxnet3_set_rxfilter(sc);
		VMXNET3_CORE_UNLOCK(sc);
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->vmx_media, cmd);
		break;

	case SIOCSIFCAP:
		VMXNET3_CORE_LOCK(sc);
		/* mask holds the capability bits being toggled. */
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;

		if (mask & IFCAP_TXCSUM)
			ifp->if_capenable ^= IFCAP_TXCSUM;
		if (mask & IFCAP_TXCSUM_IPV6)
			ifp->if_capenable ^= IFCAP_TXCSUM_IPV6;
#if 0 /* XXX TSO */
		if (mask & IFCAP_TSO4)
			ifp->if_capenable ^= IFCAP_TSO4;
		if (mask & IFCAP_TSO6)
			ifp->if_capenable ^= IFCAP_TSO6;
#endif

		if (mask & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6 | /* IFCAP_LRO | */
		    IFCAP_VLAN_HWTAGGING /* | IFCAP_VLAN_HWFILTER */)) {
			/* Changing these features requires us to reinit. */
			reinit = 1;

			if (mask & IFCAP_RXCSUM)
				ifp->if_capenable ^= IFCAP_RXCSUM;
			if (mask & IFCAP_RXCSUM_IPV6)
				ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
#if 0 /* XXX LRO */
			if (mask & IFCAP_LRO)
				ifp->if_capenable ^= IFCAP_LRO;
#endif
			if (mask & IFCAP_VLAN_HWTAGGING)
				ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
#if 0 /* XXX VLAN_HWFILTER */
			if (mask & IFCAP_VLAN_HWFILTER)
				ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
#endif
		} else
			reinit = 0;

#if 0 /* XXX TSO */
		if (mask & IFCAP_VLAN_HWTSO)
			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
#endif

		if (reinit && (ifp->if_flags & IFF_RUNNING)) {
			ifp->if_flags &= ~IFF_RUNNING;
			vmxnet3_init_locked(sc);
		} else {
			vmxnet3_init_hwassist(sc);
		}

		VMXNET3_CORE_UNLOCK(sc);
#if 0 /* XXX */
		VLAN_CAPABILITIES(ifp);
#endif
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}

	VMXNET3_CORE_LOCK_ASSERT_NOTOWNED(sc);

	return (error);
}
3405
3406 #ifndef VMXNET3_LEGACY_TX
3407 static void
vmxnet3_qflush(struct ifnet * ifp)3408 vmxnet3_qflush(struct ifnet *ifp)
3409 {
3410 struct vmxnet3_softc *sc;
3411 struct vmxnet3_txqueue *txq;
3412 struct mbuf *m;
3413 int i;
3414
3415 sc = ifp->if_softc;
3416
3417 for (i = 0; i < sc->vmx_ntxqueues; i++) {
3418 txq = &sc->vmx_txq[i];
3419
3420 VMXNET3_TXQ_LOCK(txq);
3421 while ((m = buf_ring_dequeue_sc(txq->vxtxq_br)) != NULL)
3422 m_freem(m);
3423 VMXNET3_TXQ_UNLOCK(txq);
3424 }
3425
3426 if_qflush(ifp);
3427 }
3428 #endif
3429
/*
 * Per-queue transmit watchdog, run once per tick.
 *
 * vxtxq_watchdog is a countdown: 0 means the watchdog is disarmed
 * (no pending transmits).  Otherwise it is decremented here, and only
 * when it reaches zero on this tick is a timeout declared.
 *
 * Returns 1 on timeout (caller resets the interface), 0 otherwise.
 */
static int
vmxnet3_watchdog(struct vmxnet3_txqueue *txq)
{
	struct vmxnet3_softc *sc;

	sc = txq->vxtxq_sc;

	VMXNET3_TXQ_LOCK(txq);
	/* Disarmed, or armed but not yet expired after the decrement. */
	if (txq->vxtxq_watchdog == 0 || --txq->vxtxq_watchdog) {
		VMXNET3_TXQ_UNLOCK(txq);
		return (0);
	}
	VMXNET3_TXQ_UNLOCK(txq);

	if_printf(sc->vmx_ifp, "watchdog timeout on queue %d\n",
	    txq->vxtxq_id);
	return (1);
}
3448
/*
 * Ask the device to refresh the host-visible statistics blocks
 * (read later via the per-queue "hstats" sysctl nodes).
 */
static void
vmxnet3_refresh_host_stats(struct vmxnet3_softc *sc)
{

	vmxnet3_write_cmd(sc, VMXNET3_CMD_GET_STATS);
}
3455
3456 static void
vmxnet3_txq_accum_stats(struct vmxnet3_txqueue * txq,struct vmxnet3_txq_stats * accum)3457 vmxnet3_txq_accum_stats(struct vmxnet3_txqueue *txq,
3458 struct vmxnet3_txq_stats *accum)
3459 {
3460 struct vmxnet3_txq_stats *st;
3461
3462 st = &txq->vxtxq_stats;
3463
3464 accum->vmtxs_opackets += st->vmtxs_opackets;
3465 accum->vmtxs_obytes += st->vmtxs_obytes;
3466 accum->vmtxs_omcasts += st->vmtxs_omcasts;
3467 accum->vmtxs_csum += st->vmtxs_csum;
3468 accum->vmtxs_tso += st->vmtxs_tso;
3469 accum->vmtxs_full += st->vmtxs_full;
3470 accum->vmtxs_offload_failed += st->vmtxs_offload_failed;
3471 }
3472
3473 static void
vmxnet3_rxq_accum_stats(struct vmxnet3_rxqueue * rxq,struct vmxnet3_rxq_stats * accum)3474 vmxnet3_rxq_accum_stats(struct vmxnet3_rxqueue *rxq,
3475 struct vmxnet3_rxq_stats *accum)
3476 {
3477 struct vmxnet3_rxq_stats *st;
3478
3479 st = &rxq->vxrxq_stats;
3480
3481 accum->vmrxs_ipackets += st->vmrxs_ipackets;
3482 accum->vmrxs_ibytes += st->vmrxs_ibytes;
3483 accum->vmrxs_iqdrops += st->vmrxs_iqdrops;
3484 accum->vmrxs_ierrors += st->vmrxs_ierrors;
3485 }
3486
3487 static void
vmxnet3_accumulate_stats(struct vmxnet3_softc * sc)3488 vmxnet3_accumulate_stats(struct vmxnet3_softc *sc)
3489 {
3490 struct ifnet *ifp;
3491 struct vmxnet3_statistics *st;
3492 struct vmxnet3_txq_stats txaccum;
3493 struct vmxnet3_rxq_stats rxaccum;
3494 int i;
3495
3496 ifp = sc->vmx_ifp;
3497 st = &sc->vmx_stats;
3498
3499 bzero(&txaccum, sizeof(struct vmxnet3_txq_stats));
3500 bzero(&rxaccum, sizeof(struct vmxnet3_rxq_stats));
3501
3502 for (i = 0; i < sc->vmx_ntxqueues; i++)
3503 vmxnet3_txq_accum_stats(&sc->vmx_txq[i], &txaccum);
3504 for (i = 0; i < sc->vmx_nrxqueues; i++)
3505 vmxnet3_rxq_accum_stats(&sc->vmx_rxq[i], &rxaccum);
3506
3507 /*
3508 * With the exception of if_ierrors, these ifnet statistics are
3509 * only updated in the driver, so just set them to our accumulated
3510 * values. if_ierrors is updated in ether_input() for malformed
3511 * frames that we should have already discarded.
3512 */
3513 ifp->if_ipackets = rxaccum.vmrxs_ipackets;
3514 ifp->if_iqdrops = rxaccum.vmrxs_iqdrops;
3515 ifp->if_ierrors = rxaccum.vmrxs_ierrors;
3516 ifp->if_opackets = txaccum.vmtxs_opackets;
3517 #ifndef VMXNET3_LEGACY_TX
3518 ifp->if_obytes = txaccum.vmtxs_obytes;
3519 ifp->if_omcasts = txaccum.vmtxs_omcasts;
3520 #endif
3521 }
3522
/*
 * Periodic (1 Hz) callout handler; runs with the core lock held.
 *
 * Refreshes statistics and polls every Tx queue's watchdog.  On a
 * watchdog timeout the interface is reinitialized instead of the
 * callout being rescheduled (presumably vmxnet3_init_locked() restarts
 * the tick — confirm against the init path).
 */
static void
vmxnet3_tick(void *xsc)
{
	struct vmxnet3_softc *sc;
	struct ifnet *ifp;
	int i, timedout;

	sc = xsc;
	ifp = sc->vmx_ifp;
	timedout = 0;

	VMXNET3_CORE_LOCK_ASSERT(sc);

	vmxnet3_accumulate_stats(sc);
	vmxnet3_refresh_host_stats(sc);

	for (i = 0; i < sc->vmx_ntxqueues; i++)
		timedout |= vmxnet3_watchdog(&sc->vmx_txq[i]);

	if (timedout != 0) {
		/* Force a full reinit to recover the stuck queue(s). */
		ifp->if_flags &= ~IFF_RUNNING;
		vmxnet3_init_locked(sc);
	} else
		callout_reset(&sc->vmx_tick, hz, vmxnet3_tick, sc);
}
3548
3549 static int
vmxnet3_link_is_up(struct vmxnet3_softc * sc)3550 vmxnet3_link_is_up(struct vmxnet3_softc *sc)
3551 {
3552 uint32_t status;
3553
3554 /* Also update the link speed while here. */
3555 status = vmxnet3_read_cmd(sc, VMXNET3_CMD_GET_LINK);
3556 sc->vmx_link_speed = status >> 16;
3557 return !!(status & 0x1);
3558 }
3559
3560 static void
vmxnet3_link_status(struct vmxnet3_softc * sc)3561 vmxnet3_link_status(struct vmxnet3_softc *sc)
3562 {
3563 struct ifnet *ifp;
3564 int link;
3565
3566 ifp = sc->vmx_ifp;
3567 link = vmxnet3_link_is_up(sc);
3568
3569 if (link != 0 && sc->vmx_link_active == 0) {
3570 sc->vmx_link_active = 1;
3571 ifp->if_link_state = LINK_STATE_UP;
3572 if_link_state_change(ifp);
3573 } else if (link == 0 && sc->vmx_link_active != 0) {
3574 sc->vmx_link_active = 0;
3575 ifp->if_link_state = LINK_STATE_DOWN;
3576 if_link_state_change(ifp);
3577 }
3578 }
3579
3580 static void
vmxnet3_media_status(struct ifnet * ifp,struct ifmediareq * ifmr)3581 vmxnet3_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
3582 {
3583 struct vmxnet3_softc *sc;
3584
3585 sc = ifp->if_softc;
3586
3587 ifmr->ifm_active = IFM_ETHER | IFM_AUTO;
3588 ifmr->ifm_status = IFM_AVALID;
3589
3590 VMXNET3_CORE_LOCK(sc);
3591 if (vmxnet3_link_is_up(sc) != 0)
3592 ifmr->ifm_status |= IFM_ACTIVE;
3593 else
3594 ifmr->ifm_status |= IFM_NONE;
3595 VMXNET3_CORE_UNLOCK(sc);
3596 }
3597
/*
 * ifmedia change callback.  The paravirtual device has no selectable
 * media, so media changes are accepted and ignored.
 */
static int
vmxnet3_media_change(struct ifnet *ifp)
{

	/* Ignore. */
	return (0);
}
3605
3606 static void
vmxnet3_set_lladdr(struct vmxnet3_softc * sc)3607 vmxnet3_set_lladdr(struct vmxnet3_softc *sc)
3608 {
3609 uint32_t ml, mh;
3610
3611 ml = sc->vmx_lladdr[0];
3612 ml |= sc->vmx_lladdr[1] << 8;
3613 ml |= sc->vmx_lladdr[2] << 16;
3614 ml |= sc->vmx_lladdr[3] << 24;
3615 vmxnet3_write_bar1(sc, VMXNET3_BAR1_MACL, ml);
3616
3617 mh = sc->vmx_lladdr[4];
3618 mh |= sc->vmx_lladdr[5] << 8;
3619 vmxnet3_write_bar1(sc, VMXNET3_BAR1_MACH, mh);
3620 }
3621
3622 static void
vmxnet3_get_lladdr(struct vmxnet3_softc * sc)3623 vmxnet3_get_lladdr(struct vmxnet3_softc *sc)
3624 {
3625 uint32_t ml, mh;
3626
3627 ml = vmxnet3_read_cmd(sc, VMXNET3_CMD_GET_MACL);
3628 mh = vmxnet3_read_cmd(sc, VMXNET3_CMD_GET_MACH);
3629
3630 sc->vmx_lladdr[0] = ml;
3631 sc->vmx_lladdr[1] = ml >> 8;
3632 sc->vmx_lladdr[2] = ml >> 16;
3633 sc->vmx_lladdr[3] = ml >> 24;
3634 sc->vmx_lladdr[4] = mh;
3635 sc->vmx_lladdr[5] = mh >> 8;
3636 }
3637
/*
 * Create the sysctl subtree for one Tx queue ("txqN"): the driver's
 * software counters, plus a "hstats" child node exposing the
 * device-maintained UPT1 statistics (refreshed once per second by
 * vmxnet3_refresh_host_stats()).
 */
static void
vmxnet3_setup_txq_sysctl(struct vmxnet3_txqueue *txq,
    struct sysctl_ctx_list *ctx, struct sysctl_oid_list *child)
{
	struct sysctl_oid *node, *txsnode;
	struct sysctl_oid_list *list, *txslist;
	struct vmxnet3_txq_stats *stats;
	struct UPT1_TxStats *txstats;
	char namebuf[16];

	stats = &txq->vxtxq_stats;
	txstats = &txq->vxtxq_ts->stats;

	ksnprintf(namebuf, sizeof(namebuf), "txq%d", txq->vxtxq_id);
	node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf, CTLFLAG_RD,
	    NULL, "Transmit Queue");
	/* Remember the list so the debug subtree can hang off it later. */
	txq->vxtxq_sysctl = list = SYSCTL_CHILDREN(node);

	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "opackets", CTLFLAG_RD,
	    &stats->vmtxs_opackets, 0, "Transmit packets");
	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "obytes", CTLFLAG_RD,
	    &stats->vmtxs_obytes, 0, "Transmit bytes");
	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "omcasts", CTLFLAG_RD,
	    &stats->vmtxs_omcasts, 0, "Transmit multicasts");
	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "csum", CTLFLAG_RD,
	    &stats->vmtxs_csum, 0, "Transmit checksum offloaded");
	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "tso", CTLFLAG_RD,
	    &stats->vmtxs_tso, 0, "Transmit TCP segmentation offloaded");
	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "ringfull", CTLFLAG_RD,
	    &stats->vmtxs_full, 0, "Transmit ring full");
	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "offload_failed", CTLFLAG_RD,
	    &stats->vmtxs_offload_failed, 0, "Transmit checksum offload failed");

	/*
	 * Add statistics reported by the host. These are updated once
	 * per second.
	 */
	txsnode = SYSCTL_ADD_NODE(ctx, list, OID_AUTO, "hstats", CTLFLAG_RD,
	    NULL, "Host Statistics");
	txslist = SYSCTL_CHILDREN(txsnode);
#if 0 /* XXX TSO */
	SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "tso_packets", CTLFLAG_RD,
	    &txstats->TSO_packets, 0, "TSO packets");
	SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "tso_bytes", CTLFLAG_RD,
	    &txstats->TSO_bytes, 0, "TSO bytes");
#endif
	SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "ucast_packets", CTLFLAG_RD,
	    &txstats->ucast_packets, 0, "Unicast packets");
	SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "unicast_bytes", CTLFLAG_RD,
	    &txstats->ucast_bytes, 0, "Unicast bytes");
	SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "mcast_packets", CTLFLAG_RD,
	    &txstats->mcast_packets, 0, "Multicast packets");
	SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "mcast_bytes", CTLFLAG_RD,
	    &txstats->mcast_bytes, 0, "Multicast bytes");
	SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "error", CTLFLAG_RD,
	    &txstats->error, 0, "Errors");
	SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "discard", CTLFLAG_RD,
	    &txstats->discard, 0, "Discards");
}
3697
/*
 * Create the sysctl subtree for one Rx queue ("rxqN"): the driver's
 * software counters, plus a "hstats" child node exposing the
 * device-maintained UPT1 statistics (refreshed once per second by
 * vmxnet3_refresh_host_stats()).
 */
static void
vmxnet3_setup_rxq_sysctl(struct vmxnet3_rxqueue *rxq,
    struct sysctl_ctx_list *ctx, struct sysctl_oid_list *child)
{
	struct sysctl_oid *node, *rxsnode;
	struct sysctl_oid_list *list, *rxslist;
	struct vmxnet3_rxq_stats *stats;
	struct UPT1_RxStats *rxstats;
	char namebuf[16];

	stats = &rxq->vxrxq_stats;
	rxstats = &rxq->vxrxq_rs->stats;

	ksnprintf(namebuf, sizeof(namebuf), "rxq%d", rxq->vxrxq_id);
	node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf, CTLFLAG_RD,
	    NULL, "Receive Queue");
	/* Remember the list so the debug subtree can hang off it later. */
	rxq->vxrxq_sysctl = list = SYSCTL_CHILDREN(node);

	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "ipackets", CTLFLAG_RD,
	    &stats->vmrxs_ipackets, 0, "Receive packets");
	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "ibytes", CTLFLAG_RD,
	    &stats->vmrxs_ibytes, 0, "Receive bytes");
	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "iqdrops", CTLFLAG_RD,
	    &stats->vmrxs_iqdrops, 0, "Receive drops");
	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "ierrors", CTLFLAG_RD,
	    &stats->vmrxs_ierrors, 0, "Receive errors");

	/*
	 * Add statistics reported by the host. These are updated once
	 * per second.
	 */
	rxsnode = SYSCTL_ADD_NODE(ctx, list, OID_AUTO, "hstats", CTLFLAG_RD,
	    NULL, "Host Statistics");
	rxslist = SYSCTL_CHILDREN(rxsnode);
#if 0 /* XXX LRO */
	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "lro_packets", CTLFLAG_RD,
	    &rxstats->LRO_packets, 0, "LRO packets");
	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "lro_bytes", CTLFLAG_RD,
	    &rxstats->LRO_bytes, 0, "LRO bytes");
#endif
	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "ucast_packets", CTLFLAG_RD,
	    &rxstats->ucast_packets, 0, "Unicast packets");
	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "unicast_bytes", CTLFLAG_RD,
	    &rxstats->ucast_bytes, 0, "Unicast bytes");
	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "mcast_packets", CTLFLAG_RD,
	    &rxstats->mcast_packets, 0, "Multicast packets");
	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "mcast_bytes", CTLFLAG_RD,
	    &rxstats->mcast_bytes, 0, "Multicast bytes");
	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "bcast_packets", CTLFLAG_RD,
	    &rxstats->bcast_packets, 0, "Broadcast packets");
	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "bcast_bytes", CTLFLAG_RD,
	    &rxstats->bcast_bytes, 0, "Broadcast bytes");
	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "nobuffer", CTLFLAG_RD,
	    &rxstats->nobuffer, 0, "No buffer");
	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "error", CTLFLAG_RD,
	    &rxstats->error, 0, "Errors");
}
3755
/*
 * Add a "debug" sysctl node under each queue's subtree exposing the
 * raw ring indices, sizes and generation bits.  Requires the per-queue
 * vxtxq_sysctl/vxrxq_sysctl lists created by the setup_*q_sysctl
 * routines above.
 */
static void
vmxnet3_setup_debug_sysctl(struct vmxnet3_softc *sc,
    struct sysctl_ctx_list *ctx, struct sysctl_oid_list *child)
{
	struct sysctl_oid *node;
	struct sysctl_oid_list *list;
	int i;

	for (i = 0; i < sc->vmx_ntxqueues; i++) {
		struct vmxnet3_txqueue *txq = &sc->vmx_txq[i];

		node = SYSCTL_ADD_NODE(ctx, txq->vxtxq_sysctl, OID_AUTO,
		    "debug", CTLFLAG_RD, NULL, "");
		list = SYSCTL_CHILDREN(node);

		SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "cmd_head", CTLFLAG_RD,
		    &txq->vxtxq_cmd_ring.vxtxr_head, 0, "");
		SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "cmd_next", CTLFLAG_RD,
		    &txq->vxtxq_cmd_ring.vxtxr_next, 0, "");
		SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "cmd_ndesc", CTLFLAG_RD,
		    &txq->vxtxq_cmd_ring.vxtxr_ndesc, 0, "");
		SYSCTL_ADD_INT(ctx, list, OID_AUTO, "cmd_gen", CTLFLAG_RD,
		    &txq->vxtxq_cmd_ring.vxtxr_gen, 0, "");
		SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "comp_next", CTLFLAG_RD,
		    &txq->vxtxq_comp_ring.vxcr_next, 0, "");
		SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "comp_ndesc", CTLFLAG_RD,
		    &txq->vxtxq_comp_ring.vxcr_ndesc, 0,"");
		SYSCTL_ADD_INT(ctx, list, OID_AUTO, "comp_gen", CTLFLAG_RD,
		    &txq->vxtxq_comp_ring.vxcr_gen, 0, "");
	}

	for (i = 0; i < sc->vmx_nrxqueues; i++) {
		struct vmxnet3_rxqueue *rxq = &sc->vmx_rxq[i];

		node = SYSCTL_ADD_NODE(ctx, rxq->vxrxq_sysctl, OID_AUTO,
		    "debug", CTLFLAG_RD, NULL, "");
		list = SYSCTL_CHILDREN(node);

		/* Each Rx queue has two command rings (body + fragments). */
		SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "cmd0_fill", CTLFLAG_RD,
		    &rxq->vxrxq_cmd_ring[0].vxrxr_fill, 0, "");
		SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "cmd0_ndesc", CTLFLAG_RD,
		    &rxq->vxrxq_cmd_ring[0].vxrxr_ndesc, 0, "");
		SYSCTL_ADD_INT(ctx, list, OID_AUTO, "cmd0_gen", CTLFLAG_RD,
		    &rxq->vxrxq_cmd_ring[0].vxrxr_gen, 0, "");
		SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "cmd1_fill", CTLFLAG_RD,
		    &rxq->vxrxq_cmd_ring[1].vxrxr_fill, 0, "");
		SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "cmd1_ndesc", CTLFLAG_RD,
		    &rxq->vxrxq_cmd_ring[1].vxrxr_ndesc, 0, "");
		SYSCTL_ADD_INT(ctx, list, OID_AUTO, "cmd1_gen", CTLFLAG_RD,
		    &rxq->vxrxq_cmd_ring[1].vxrxr_gen, 0, "");
		SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "comp_next", CTLFLAG_RD,
		    &rxq->vxrxq_comp_ring.vxcr_next, 0, "");
		SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "comp_ndesc", CTLFLAG_RD,
		    &rxq->vxrxq_comp_ring.vxcr_ndesc, 0,"");
		SYSCTL_ADD_INT(ctx, list, OID_AUTO, "comp_gen", CTLFLAG_RD,
		    &rxq->vxrxq_comp_ring.vxcr_gen, 0, "");
	}
}
3814
3815 static void
vmxnet3_setup_queue_sysctl(struct vmxnet3_softc * sc,struct sysctl_ctx_list * ctx,struct sysctl_oid_list * child)3816 vmxnet3_setup_queue_sysctl(struct vmxnet3_softc *sc,
3817 struct sysctl_ctx_list *ctx, struct sysctl_oid_list *child)
3818 {
3819 int i;
3820
3821 for (i = 0; i < sc->vmx_ntxqueues; i++)
3822 vmxnet3_setup_txq_sysctl(&sc->vmx_txq[i], ctx, child);
3823 for (i = 0; i < sc->vmx_nrxqueues; i++)
3824 vmxnet3_setup_rxq_sysctl(&sc->vmx_rxq[i], ctx, child);
3825
3826 vmxnet3_setup_debug_sysctl(sc, ctx, child);
3827 }
3828
/*
 * Create the device's top-level sysctl tree: queue-count knobs, the
 * driver-wide error counters, and the per-queue subtrees.
 */
static void
vmxnet3_setup_sysctl(struct vmxnet3_softc *sc)
{
	device_t dev;
	struct vmxnet3_statistics *stats;
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *tree;
	struct sysctl_oid_list *child;

	dev = sc->vmx_dev;
	ctx = device_get_sysctl_ctx(dev);
	tree = device_get_sysctl_tree(dev);
	child = SYSCTL_CHILDREN(tree);

	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "max_ntxqueues", CTLFLAG_RD,
	    &sc->vmx_max_ntxqueues, 0, "Maximum number of Tx queues");
	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "max_nrxqueues", CTLFLAG_RD,
	    &sc->vmx_max_nrxqueues, 0, "Maximum number of Rx queues");
	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "ntxqueues", CTLFLAG_RD,
	    &sc->vmx_ntxqueues, 0, "Number of Tx queues");
	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "nrxqueues", CTLFLAG_RD,
	    &sc->vmx_nrxqueues, 0, "Number of Rx queues");

	/* Driver-wide Tx allocation/defrag failure counters. */
	stats = &sc->vmx_stats;
	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "defragged", CTLFLAG_RD,
	    &stats->vmst_defragged, 0, "Tx mbuf chains defragged");
	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "defrag_failed", CTLFLAG_RD,
	    &stats->vmst_defrag_failed, 0,
	    "Tx mbuf dropped because defrag failed");
	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "mgetcl_failed", CTLFLAG_RD,
	    &stats->vmst_mgetcl_failed, 0, "mbuf cluster allocation failed");
	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "mbuf_load_failed", CTLFLAG_RD,
	    &stats->vmst_mbuf_load_failed, 0, "mbuf load segments failed");

	vmxnet3_setup_queue_sysctl(sc, ctx, child);
}
3865
/* Write a 32-bit value to a BAR0 (data path) register. */
static void
vmxnet3_write_bar0(struct vmxnet3_softc *sc, bus_size_t r, uint32_t v)
{

	bus_space_write_4(sc->vmx_iot0, sc->vmx_ioh0, r, v);
}
3872
/* Read a 32-bit value from a BAR1 (control path) register. */
static uint32_t
vmxnet3_read_bar1(struct vmxnet3_softc *sc, bus_size_t r)
{

	return (bus_space_read_4(sc->vmx_iot1, sc->vmx_ioh1, r));
}
3879
/* Write a 32-bit value to a BAR1 (control path) register. */
static void
vmxnet3_write_bar1(struct vmxnet3_softc *sc, bus_size_t r, uint32_t v)
{

	bus_space_write_4(sc->vmx_iot1, sc->vmx_ioh1, r, v);
}
3886
/* Issue a command to the device via the BAR1 command register. */
static void
vmxnet3_write_cmd(struct vmxnet3_softc *sc, uint32_t cmd)
{

	vmxnet3_write_bar1(sc, VMXNET3_BAR1_CMD, cmd);
}
3893
/*
 * Issue a command and read back its 32-bit result from the command
 * register.  The bus-space barrier keeps the write ordered ahead of
 * the result read.
 */
static uint32_t
vmxnet3_read_cmd(struct vmxnet3_softc *sc, uint32_t cmd)
{

	vmxnet3_write_cmd(sc, cmd);
	bus_space_barrier(sc->vmx_iot1, sc->vmx_ioh1, 0, 0,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
	return (vmxnet3_read_bar1(sc, VMXNET3_BAR1_CMD));
}
3903
/* Unmask one interrupt vector (0 in the IMASK register = enabled). */
static void
vmxnet3_enable_intr(struct vmxnet3_softc *sc, int irq)
{

	vmxnet3_write_bar0(sc, VMXNET3_BAR0_IMASK(irq), 0);
}
3910
/* Mask one interrupt vector (1 in the IMASK register = disabled). */
static void
vmxnet3_disable_intr(struct vmxnet3_softc *sc, int irq)
{

	vmxnet3_write_bar0(sc, VMXNET3_BAR0_IMASK(irq), 1);
}
3917
3918 static void
vmxnet3_enable_all_intrs(struct vmxnet3_softc * sc)3919 vmxnet3_enable_all_intrs(struct vmxnet3_softc *sc)
3920 {
3921 int i;
3922
3923 sc->vmx_ds->ictrl &= ~VMXNET3_ICTRL_DISABLE_ALL;
3924 for (i = 0; i < sc->vmx_nintrs; i++)
3925 vmxnet3_enable_intr(sc, i);
3926 }
3927
3928 static void
vmxnet3_disable_all_intrs(struct vmxnet3_softc * sc)3929 vmxnet3_disable_all_intrs(struct vmxnet3_softc *sc)
3930 {
3931 int i;
3932
3933 sc->vmx_ds->ictrl |= VMXNET3_ICTRL_DISABLE_ALL;
3934 for (i = 0; i < sc->vmx_nintrs; i++)
3935 vmxnet3_disable_intr(sc, i);
3936 }
3937
3938 static void
vmxnet3_dmamap_cb(void * arg,bus_dma_segment_t * segs,int nseg,int error)3939 vmxnet3_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
3940 {
3941 bus_addr_t *baddr = arg;
3942
3943 if (error == 0)
3944 *baddr = segs->ds_addr;
3945 }
3946
/*
 * Allocate a single physically-contiguous, zeroed DMA buffer of
 * 'size' bytes with the given alignment, filling in 'dma' (tag, map,
 * kva, bus address, size).  On any failure all partial state is torn
 * down via vmxnet3_dma_free() and the bus_dma error is returned.
 */
static int
vmxnet3_dma_malloc(struct vmxnet3_softc *sc, bus_size_t size, bus_size_t align,
    struct vmxnet3_dma_alloc *dma)
{
	device_t dev;
	int error;

	dev = sc->vmx_dev;
	bzero(dma, sizeof(struct vmxnet3_dma_alloc));

	error = bus_dma_tag_create(bus_get_dma_tag(dev),
	    align, 0,		/* alignment, bounds */
	    BUS_SPACE_MAXADDR,	/* lowaddr */
	    BUS_SPACE_MAXADDR,	/* highaddr */
	    size,		/* maxsize */
	    1,			/* nsegments */
	    size,		/* maxsegsize */
	    BUS_DMA_ALLOCNOW,	/* flags */
	    &dma->dma_tag);
	if (error) {
		device_printf(dev, "bus_dma_tag_create failed: %d\n", error);
		goto fail;
	}

	error = bus_dmamem_alloc(dma->dma_tag, (void **)&dma->dma_vaddr,
	    BUS_DMA_ZERO | BUS_DMA_NOWAIT, &dma->dma_map);
	if (error) {
		device_printf(dev, "bus_dmamem_alloc failed: %d\n", error);
		goto fail;
	}

	/* Record the bus address via the vmxnet3_dmamap_cb() callback. */
	error = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
	    size, vmxnet3_dmamap_cb, &dma->dma_paddr, BUS_DMA_NOWAIT);
	if (error) {
		device_printf(dev, "bus_dmamap_load failed: %d\n", error);
		goto fail;
	}

	dma->dma_size = size;

fail:
	/* vmxnet3_dma_free() safely unwinds whatever was set up above. */
	if (error)
		vmxnet3_dma_free(sc, dma);

	return (error);
}
3993
/*
 * Release a DMA allocation made by vmxnet3_dma_malloc().  Safe to call
 * on a partially-initialized descriptor: each teardown step is guarded
 * by the corresponding field being set.  Rezeroes 'dma' when done.
 */
static void
vmxnet3_dma_free(struct vmxnet3_softc *sc, struct vmxnet3_dma_alloc *dma)
{

	if (dma->dma_tag != NULL) {
		if (dma->dma_paddr != 0) {
			bus_dmamap_sync(dma->dma_tag, dma->dma_map,
			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(dma->dma_tag, dma->dma_map);
		}

		if (dma->dma_vaddr != NULL) {
			bus_dmamem_free(dma->dma_tag, dma->dma_vaddr,
			    dma->dma_map);
		}

		bus_dma_tag_destroy(dma->dma_tag);
	}
	bzero(dma, sizeof(struct vmxnet3_dma_alloc));
}
4014
/*
 * Fetch the per-unit loader tunable "hw.vmx.<unit>.<knob>", returning
 * 'def' unchanged when the tunable is not set.
 */
static int
vmxnet3_tunable_int(struct vmxnet3_softc *sc, const char *knob, int def)
{
	char path[64];

	ksnprintf(path, sizeof(path),
	    "hw.vmx.%d.%s", device_get_unit(sc->vmx_dev), knob);
	/* Overwrites 'def' only if the tunable exists. */
	TUNABLE_INT_FETCH(path, &def);

	return (def);
}
4026
/*
 * x86 fence instructions used as full/store/load memory barriers by
 * vmxnet3_barrier() below; the "memory" clobber also stops compiler
 * reordering.
 */
#define mb() __asm volatile("mfence" ::: "memory")
#define wmb() __asm volatile("sfence" ::: "memory")
#define rmb() __asm volatile("lfence" ::: "memory")
4030
4031 /*
4032 * Since this is a purely paravirtualized device, we do not have
4033 * to worry about DMA coherency. But at times, we must make sure
4034 * both the compiler and CPU do not reorder memory operations.
4035 */
/*
 * Issue the requested memory barrier (read, write, or full) between
 * accesses to the shared rings; panics on an unknown type.
 */
static inline void
vmxnet3_barrier(struct vmxnet3_softc *sc, vmxnet3_barrier_t type)
{

	switch (type) {
	case VMXNET3_BARRIER_RD:
		rmb();
		break;
	case VMXNET3_BARRIER_WR:
		wmb();
		break;
	case VMXNET3_BARRIER_RDWR:
		mb();
		break;
	default:
		panic("%s: bad barrier type %d", __func__, type);
	}
}
4054