/*	$OpenBSD: if_vmx.c,v 1.20 2014/08/26 23:55:28 dlg Exp $	*/

/*
 * Copyright (c) 2013 Tsubai Masanari
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "bpfilter.h"
#include "vlan.h"

#include <sys/param.h>
#include <sys/device.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/systm.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_media.h>
#include <net/if_types.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>

#include <net/if_vlan_var.h>

#include <machine/bus.h>

#include <dev/pci/if_vmxreg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcidevs.h>

#define NRXQUEUE 1
#define NTXQUEUE 1

#define NTXDESC 128 /* tx ring size */
#define NTXSEGS 8 /* tx descriptors per packet */
#define NRXDESC 128
#define NTXCOMPDESC NTXDESC
#define NRXCOMPDESC (NRXDESC * 2)	/* ring1 + ring2 */

#define VMXNET3_DRIVER_VERSION 0x00010000

struct vmxnet3_txring {
	struct mbuf *m[NTXDESC];
	bus_dmamap_t dmap[NTXDESC];
	struct vmxnet3_txdesc *txd;
	u_int head;
	u_int next;
	u_int8_t gen;
};

struct vmxnet3_rxring {
	struct mbuf *m[NRXDESC];
	bus_dmamap_t dmap[NRXDESC];
	struct if_rxring rxr;
	struct vmxnet3_rxdesc *rxd;
	u_int fill;
	u_int8_t gen;
	u_int8_t rid;
};

struct vmxnet3_comp_ring {
	union {
		struct vmxnet3_txcompdesc *txcd;
		struct vmxnet3_rxcompdesc *rxcd;
	};
	u_int next;
	u_int8_t gen;
};

struct vmxnet3_txqueue {
	struct vmxnet3_txring cmd_ring;
	struct vmxnet3_comp_ring comp_ring;
	struct vmxnet3_txq_shared *ts;
};

struct vmxnet3_rxqueue {
	struct vmxnet3_rxring cmd_ring[2];
	struct vmxnet3_comp_ring comp_ring;
	struct vmxnet3_rxq_shared *rs;
};

struct vmxnet3_softc {
	struct device sc_dev;
	struct arpcom sc_arpcom;
	struct ifmedia sc_media;

	bus_space_tag_t	sc_iot0;
	bus_space_tag_t	sc_iot1;
	bus_space_handle_t sc_ioh0;
	bus_space_handle_t sc_ioh1;
	bus_dma_tag_t sc_dmat;

	struct vmxnet3_txqueue sc_txq[NTXQUEUE];
	struct vmxnet3_rxqueue sc_rxq[NRXQUEUE];
	struct vmxnet3_driver_shared *sc_ds;
	u_int8_t *sc_mcast;
};

#define VMXNET3_STAT

#ifdef VMXNET3_STAT
struct {
	u_int ntxdesc;
	u_int nrxdesc;
	u_int txhead;
	u_int txdone;
	u_int maxtxlen;
	u_int rxdone;
	u_int rxfill;
	u_int intr;
} vmxstat = {
	NTXDESC, NRXDESC
};
#endif

#define JUMBO_LEN (1024 * 9)
#define DMAADDR(map) ((map)->dm_segs[0].ds_addr)

#define READ_BAR0(sc, reg) bus_space_read_4((sc)->sc_iot0, (sc)->sc_ioh0, reg)
#define READ_BAR1(sc, reg) bus_space_read_4((sc)->sc_iot1, (sc)->sc_ioh1, reg)
#define WRITE_BAR0(sc, reg, val) \
	bus_space_write_4((sc)->sc_iot0, (sc)->sc_ioh0, reg, val)
#define WRITE_BAR1(sc, reg, val) \
	bus_space_write_4((sc)->sc_iot1, (sc)->sc_ioh1, reg, val)
#define WRITE_CMD(sc, cmd) WRITE_BAR1(sc, VMXNET3_BAR1_CMD, cmd)
#define vtophys(va) 0		/* XXX ok? */

int vmxnet3_match(struct device *, void *, void *);
void vmxnet3_attach(struct device *, struct device *, void *);
int vmxnet3_dma_init(struct vmxnet3_softc *);
int vmxnet3_alloc_txring(struct vmxnet3_softc *, int);
int vmxnet3_alloc_rxring(struct vmxnet3_softc *, int);
void vmxnet3_txinit(struct vmxnet3_softc *, struct vmxnet3_txqueue *);
void vmxnet3_rxinit(struct vmxnet3_softc *, struct vmxnet3_rxqueue *);
void vmxnet3_txstop(struct vmxnet3_softc *, struct vmxnet3_txqueue *);
void vmxnet3_rxstop(struct vmxnet3_softc *, struct vmxnet3_rxqueue *);
void vmxnet3_link_state(struct vmxnet3_softc *);
void vmxnet3_enable_all_intrs(struct vmxnet3_softc *);
void vmxnet3_disable_all_intrs(struct vmxnet3_softc *);
int vmxnet3_intr(void *);
void vmxnet3_evintr(struct vmxnet3_softc *);
void vmxnet3_txintr(struct vmxnet3_softc *, struct vmxnet3_txqueue *);
void vmxnet3_rxintr(struct vmxnet3_softc *, struct vmxnet3_rxqueue *);
void vmxnet3_iff(struct vmxnet3_softc *);
void vmxnet3_rx_csum(struct vmxnet3_rxcompdesc *, struct mbuf *);
int vmxnet3_getbuf(struct vmxnet3_softc *, struct vmxnet3_rxring *);
void vmxnet3_stop(struct ifnet *);
void vmxnet3_reset(struct ifnet *);
int vmxnet3_init(struct vmxnet3_softc *);
int vmxnet3_ioctl(struct ifnet *, u_long, caddr_t);
void vmxnet3_start(struct ifnet *);
int vmxnet3_load_mbuf(struct vmxnet3_softc *, struct mbuf *);
void vmxnet3_watchdog(struct ifnet *);
void vmxnet3_media_status(struct ifnet *, struct ifmediareq *);
int vmxnet3_media_change(struct ifnet *);
void *vmxnet3_dma_allocmem(struct vmxnet3_softc *, u_int, u_int, bus_addr_t *);

const struct pci_matchid vmx_devices[] = {
	{ PCI_VENDOR_VMWARE, PCI_PRODUCT_VMWARE_NET_3 }
};

struct cfattach vmx_ca = {
	sizeof(struct vmxnet3_softc), vmxnet3_match, vmxnet3_attach
};

struct cfdriver vmx_cd = {
	NULL, "vmx", DV_IFNET
};

int
vmxnet3_match(struct device *parent, void *match, void *aux)
{
	return (pci_matchbyid(aux, vmx_devices, nitems(vmx_devices)));
}

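/*
 * Map BAR0 (queue doorbells) and BAR1 (command/status registers),
 * check that the device supports revision 1 of the vmxnet3 and UPT
 * interfaces, set up the shared DMA state, establish the interrupt,
 * fetch the station address and attach the usual ifnet/ifmedia glue.
 */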
void
vmxnet3_attach(struct device *parent, struct device *self, void *aux)
{
	struct vmxnet3_softc *sc = (void *)self;
	struct pci_attach_args *pa = aux;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	pci_intr_handle_t ih;
	const char *intrstr;
	u_int memtype, ver, macl, mach;
	u_char enaddr[ETHER_ADDR_LEN];

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, 0x10);
	if (pci_mapreg_map(pa, 0x10, memtype, 0, &sc->sc_iot0, &sc->sc_ioh0,
	    NULL, NULL, 0)) {
		printf(": failed to map BAR0\n");
		return;
	}
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, 0x14);
	if (pci_mapreg_map(pa, 0x14, memtype, 0, &sc->sc_iot1, &sc->sc_ioh1,
	    NULL, NULL, 0)) {
		printf(": failed to map BAR1\n");
		return;
	}

	ver = READ_BAR1(sc, VMXNET3_BAR1_VRRS);
	if ((ver & 0x1) == 0) {
		printf(": unsupported hardware version 0x%x\n", ver);
		return;
	}
	WRITE_BAR1(sc, VMXNET3_BAR1_VRRS, 1);

	ver = READ_BAR1(sc, VMXNET3_BAR1_UVRS);
	if ((ver & 0x1) == 0) {
		printf(": incompatible UPT version 0x%x\n", ver);
		return;
	}
	WRITE_BAR1(sc, VMXNET3_BAR1_UVRS, 1);

	sc->sc_dmat = pa->pa_dmat;
	if (vmxnet3_dma_init(sc)) {
		printf(": failed to setup DMA\n");
		return;
	}

	if (pci_intr_map(pa, &ih)) {
		printf(": failed to map interrupt\n");
		return;
	}
	pci_intr_establish(pa->pa_pc, ih, IPL_NET, vmxnet3_intr, sc,
	    self->dv_xname);
	intrstr = pci_intr_string(pa->pa_pc, ih);
	if (intrstr)
		printf(": %s", intrstr);

	WRITE_CMD(sc, VMXNET3_CMD_GET_MACL);
	macl = READ_BAR1(sc, VMXNET3_BAR1_CMD);
	enaddr[0] = macl;
	enaddr[1] = macl >> 8;
	enaddr[2] = macl >> 16;
	enaddr[3] = macl >> 24;
	WRITE_CMD(sc, VMXNET3_CMD_GET_MACH);
	mach = READ_BAR1(sc, VMXNET3_BAR1_CMD);
	enaddr[4] = mach;
	enaddr[5] = mach >> 8;

	WRITE_BAR1(sc, VMXNET3_BAR1_MACL, macl);
	WRITE_BAR1(sc, VMXNET3_BAR1_MACH, mach);
	printf(", address %s\n", ether_sprintf(enaddr));

	bcopy(enaddr, sc->sc_arpcom.ac_enaddr, ETHER_ADDR_LEN);
	strlcpy(ifp->if_xname, self->dv_xname, IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_MULTICAST | IFF_SIMPLEX;
	ifp->if_ioctl = vmxnet3_ioctl;
	ifp->if_start = vmxnet3_start;
	ifp->if_watchdog = vmxnet3_watchdog;
	ifp->if_hardmtu = VMXNET3_MAX_MTU;
	ifp->if_capabilities = IFCAP_VLAN_MTU;
	if (sc->sc_ds->upt_features & UPT1_F_CSUM)
		ifp->if_capabilities |= IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;
	if (sc->sc_ds->upt_features & UPT1_F_VLAN)
		ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;

	IFQ_SET_MAXLEN(&ifp->if_snd, NTXDESC);
	IFQ_SET_READY(&ifp->if_snd);

	ifmedia_init(&sc->sc_media, IFM_IMASK, vmxnet3_media_change,
	    vmxnet3_media_status);
	ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_AUTO, 0, NULL);
	ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_10G_T|IFM_FDX, 0, NULL);
	ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_10G_T, 0, NULL);
	ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_1000_T|IFM_FDX, 0, NULL);
	ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_1000_T, 0, NULL);
	ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_AUTO);

	if_attach(ifp);
	ether_ifattach(ifp);
	vmxnet3_link_state(sc);
}

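/*
 * Set up all DMA state shared with the device: the per-queue shared
 * areas, the tx/rx rings, the multicast filter table and the
 * driver_shared structure, whose physical address is finally handed
 * to the device through the DSL/DSH registers.
 */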
int
vmxnet3_dma_init(struct vmxnet3_softc *sc)
{
	struct vmxnet3_driver_shared *ds;
	struct vmxnet3_txq_shared *ts;
	struct vmxnet3_rxq_shared *rs;
	bus_addr_t ds_pa, qs_pa, mcast_pa;
	int i, queue, qs_len;
	u_int major, minor, release_code, rev;

	qs_len = NTXQUEUE * sizeof *ts + NRXQUEUE * sizeof *rs;
	ts = vmxnet3_dma_allocmem(sc, qs_len, VMXNET3_DMADESC_ALIGN, &qs_pa);
	if (ts == NULL)
		return -1;
	for (queue = 0; queue < NTXQUEUE; queue++)
		sc->sc_txq[queue].ts = ts++;
	rs = (void *)ts;
	for (queue = 0; queue < NRXQUEUE; queue++)
		sc->sc_rxq[queue].rs = rs++;

	for (queue = 0; queue < NTXQUEUE; queue++)
		if (vmxnet3_alloc_txring(sc, queue))
			return -1;
	for (queue = 0; queue < NRXQUEUE; queue++)
		if (vmxnet3_alloc_rxring(sc, queue))
			return -1;

	sc->sc_mcast = vmxnet3_dma_allocmem(sc, 682 * ETHER_ADDR_LEN, 32,
	    &mcast_pa);
	if (sc->sc_mcast == NULL)
		return -1;

	ds = vmxnet3_dma_allocmem(sc, sizeof *sc->sc_ds, 8, &ds_pa);
	if (ds == NULL)
		return -1;
	sc->sc_ds = ds;
	ds->magic = VMXNET3_REV1_MAGIC;
	ds->version = VMXNET3_DRIVER_VERSION;

	/*
	 * XXX The FreeBSD version uses the following values:
	 * (Does the device behavior depend on them?)
	 *
	 * major = __FreeBSD_version / 100000;
	 * minor = (__FreeBSD_version / 1000) % 100;
	 * release_code = (__FreeBSD_version / 100) % 10;
	 * rev = __FreeBSD_version % 100;
	 */
	major = 0;
	minor = 0;
	release_code = 0;
	rev = 0;
#ifdef __LP64__
	ds->guest = release_code << 30 | rev << 22 | major << 14 | minor << 6
	    | VMXNET3_GOS_FREEBSD | VMXNET3_GOS_64BIT;
#else
	ds->guest = release_code << 30 | rev << 22 | major << 14 | minor << 6
	    | VMXNET3_GOS_FREEBSD | VMXNET3_GOS_32BIT;
#endif
	ds->vmxnet3_revision = 1;
	ds->upt_version = 1;
	ds->upt_features = UPT1_F_CSUM | UPT1_F_VLAN;
	ds->driver_data = vtophys(sc);
	ds->driver_data_len = sizeof(struct vmxnet3_softc);
	ds->queue_shared = qs_pa;
	ds->queue_shared_len = qs_len;
	ds->mtu = VMXNET3_MAX_MTU;
	ds->ntxqueue = NTXQUEUE;
	ds->nrxqueue = NRXQUEUE;
	ds->mcast_table = mcast_pa;
	ds->automask = 1;
	ds->nintr = VMXNET3_NINTR;
	ds->evintr = 0;
	ds->ictrl = VMXNET3_ICTRL_DISABLE_ALL;
	for (i = 0; i < VMXNET3_NINTR; i++)
		ds->modlevel[i] = UPT1_IMOD_ADAPTIVE;
	WRITE_BAR1(sc, VMXNET3_BAR1_DSL, ds_pa);
	WRITE_BAR1(sc, VMXNET3_BAR1_DSH, (u_int64_t)ds_pa >> 32);
	return 0;
}

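/*
 * Allocate one tx command ring and its completion ring, create a DMA
 * map per descriptor for later mbuf loads, and describe both rings
 * in the queue's shared area.
 */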
int
vmxnet3_alloc_txring(struct vmxnet3_softc *sc, int queue)
{
	struct vmxnet3_txqueue *tq = &sc->sc_txq[queue];
	struct vmxnet3_txq_shared *ts;
	struct vmxnet3_txring *ring = &tq->cmd_ring;
	struct vmxnet3_comp_ring *comp_ring = &tq->comp_ring;
	bus_addr_t pa, comp_pa;
	int idx;

	ring->txd = vmxnet3_dma_allocmem(sc, NTXDESC * sizeof ring->txd[0],
	    512, &pa);
	if (ring->txd == NULL)
		return -1;
	comp_ring->txcd = vmxnet3_dma_allocmem(sc,
	    NTXCOMPDESC * sizeof comp_ring->txcd[0], 512, &comp_pa);
	if (comp_ring->txcd == NULL)
		return -1;

	for (idx = 0; idx < NTXDESC; idx++) {
		if (bus_dmamap_create(sc->sc_dmat, JUMBO_LEN, NTXSEGS,
		    JUMBO_LEN, 0, BUS_DMA_NOWAIT, &ring->dmap[idx]))
			return -1;
	}

	ts = tq->ts;
	bzero(ts, sizeof *ts);
	ts->npending = 0;
	ts->intr_threshold = 1;
	ts->cmd_ring = pa;
	ts->cmd_ring_len = NTXDESC;
	ts->comp_ring = comp_pa;
	ts->comp_ring_len = NTXCOMPDESC;
	ts->driver_data = vtophys(tq);
	ts->driver_data_len = sizeof *tq;
	ts->intr_idx = 0;
	ts->stopped = 1;
	ts->error = 0;
	return 0;
}

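/*
 * Allocate the two rx command rings (ring 1 and ring 2) and a
 * completion ring large enough to drain both, and record them in the
 * rx queue's shared area.  Rx buffers are single clusters, so each
 * slot gets a one-segment DMA map.
 */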
int
vmxnet3_alloc_rxring(struct vmxnet3_softc *sc, int queue)
{
	struct vmxnet3_rxqueue *rq = &sc->sc_rxq[queue];
	struct vmxnet3_rxq_shared *rs;
	struct vmxnet3_rxring *ring;
	struct vmxnet3_comp_ring *comp_ring;
	bus_addr_t pa[2], comp_pa;
	int i, idx;

	for (i = 0; i < 2; i++) {
		ring = &rq->cmd_ring[i];
		ring->rxd = vmxnet3_dma_allocmem(sc,
		    NRXDESC * sizeof ring->rxd[0], 512, &pa[i]);
		if (ring->rxd == NULL)
			return -1;
	}
	comp_ring = &rq->comp_ring;
	comp_ring->rxcd = vmxnet3_dma_allocmem(sc,
	    NRXCOMPDESC * sizeof comp_ring->rxcd[0], 512, &comp_pa);
	if (comp_ring->rxcd == NULL)
		return -1;

	for (i = 0; i < 2; i++) {
		ring = &rq->cmd_ring[i];
		ring->rid = i;
		for (idx = 0; idx < NRXDESC; idx++) {
			if (bus_dmamap_create(sc->sc_dmat, JUMBO_LEN, 1,
			    JUMBO_LEN, 0, BUS_DMA_NOWAIT, &ring->dmap[idx]))
				return -1;
		}
	}

	rs = rq->rs;
	bzero(rs, sizeof *rs);
	rs->cmd_ring[0] = pa[0];
	rs->cmd_ring[1] = pa[1];
	rs->cmd_ring_len[0] = NRXDESC;
	rs->cmd_ring_len[1] = NRXDESC;
	rs->comp_ring = comp_pa;
	rs->comp_ring_len = NRXCOMPDESC;
	rs->driver_data = vtophys(rq);
	rs->driver_data_len = sizeof *rq;
	rs->intr_idx = 0;
	rs->stopped = 1;
	rs->error = 0;
	return 0;
}

void
vmxnet3_txinit(struct vmxnet3_softc *sc, struct vmxnet3_txqueue *tq)
{
	struct vmxnet3_txring *ring = &tq->cmd_ring;
	struct vmxnet3_comp_ring *comp_ring = &tq->comp_ring;

	ring->head = ring->next = 0;
	ring->gen = 1;
	comp_ring->next = 0;
	comp_ring->gen = 1;
	bzero(ring->txd, NTXDESC * sizeof ring->txd[0]);
	bzero(comp_ring->txcd, NTXCOMPDESC * sizeof comp_ring->txcd[0]);
}

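/*
 * Reset an rx queue: clear both command rings and refill them with
 * fresh clusters, letting if_rxr account for how many slots actually
 * received a buffer.
 */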
void
vmxnet3_rxinit(struct vmxnet3_softc *sc, struct vmxnet3_rxqueue *rq)
{
	struct vmxnet3_rxring *ring;
	struct vmxnet3_comp_ring *comp_ring;
	int i;
	u_int slots;

	for (i = 0; i < 2; i++) {
		ring = &rq->cmd_ring[i];
		ring->fill = 0;
		ring->gen = 1;
		bzero(ring->rxd, NRXDESC * sizeof ring->rxd[0]);
		if_rxr_init(&ring->rxr, 2, NRXDESC - 1);
		for (slots = if_rxr_get(&ring->rxr, NRXDESC);
		    slots > 0; slots--) {
			if (vmxnet3_getbuf(sc, ring))
				break;
		}
		if_rxr_put(&ring->rxr, slots);
	}
	comp_ring = &rq->comp_ring;
	comp_ring->next = 0;
	comp_ring->gen = 1;
	bzero(comp_ring->rxcd, NRXCOMPDESC * sizeof comp_ring->rxcd[0]);
}

void
vmxnet3_txstop(struct vmxnet3_softc *sc, struct vmxnet3_txqueue *tq)
{
	struct vmxnet3_txring *ring = &tq->cmd_ring;
	int idx;

	for (idx = 0; idx < NTXDESC; idx++) {
		if (ring->m[idx]) {
			bus_dmamap_unload(sc->sc_dmat, ring->dmap[idx]);
			m_freem(ring->m[idx]);
			ring->m[idx] = NULL;
		}
	}
}

void
vmxnet3_rxstop(struct vmxnet3_softc *sc, struct vmxnet3_rxqueue *rq)
{
	struct vmxnet3_rxring *ring;
	int i, idx;

	for (i = 0; i < 2; i++) {
		ring = &rq->cmd_ring[i];
		for (idx = 0; idx < NRXDESC; idx++) {
			if (ring->m[idx]) {
				m_freem(ring->m[idx]);
				ring->m[idx] = NULL;
			}
		}
	}
}

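/*
 * VMXNET3_CMD_GET_LINK reports the link state in bit 0 of the
 * command register and the speed, in Mbps, in the upper 16 bits.
 */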
void
vmxnet3_link_state(struct vmxnet3_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	u_int x, link, speed;

	WRITE_CMD(sc, VMXNET3_CMD_GET_LINK);
	x = READ_BAR1(sc, VMXNET3_BAR1_CMD);
	speed = x >> 16;
	if (x & 1) {
		ifp->if_baudrate = IF_Mbps(speed);
		link = LINK_STATE_UP;
	} else
		link = LINK_STATE_DOWN;

	if (ifp->if_link_state != link) {
		ifp->if_link_state = link;
		if_link_state_change(ifp);
	}
}

static inline void
vmxnet3_enable_intr(struct vmxnet3_softc *sc, int irq)
{
	WRITE_BAR0(sc, VMXNET3_BAR0_IMASK(irq), 0);
}

static inline void
vmxnet3_disable_intr(struct vmxnet3_softc *sc, int irq)
{
	WRITE_BAR0(sc, VMXNET3_BAR0_IMASK(irq), 1);
}

void
vmxnet3_enable_all_intrs(struct vmxnet3_softc *sc)
{
	int i;

	sc->sc_ds->ictrl &= ~VMXNET3_ICTRL_DISABLE_ALL;
	for (i = 0; i < VMXNET3_NINTR; i++)
		vmxnet3_enable_intr(sc, i);
}

void
vmxnet3_disable_all_intrs(struct vmxnet3_softc *sc)
{
	int i;

	sc->sc_ds->ictrl |= VMXNET3_ICTRL_DISABLE_ALL;
	for (i = 0; i < VMXNET3_NINTR; i++)
		vmxnet3_disable_intr(sc, i);
}

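/*
 * Interrupt handler.  VMXNET3_BAR1_INTR reads as nonzero when the
 * device has an interrupt pending for us.  With ds->automask set the
 * device masks the interrupt as it fires, so unmask it again on the
 * way out.
 */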
int
vmxnet3_intr(void *arg)
{
	struct vmxnet3_softc *sc = arg;

	if (READ_BAR1(sc, VMXNET3_BAR1_INTR) == 0)
		return 0;
	if (sc->sc_ds->event)
		vmxnet3_evintr(sc);
	vmxnet3_rxintr(sc, &sc->sc_rxq[0]);
	vmxnet3_txintr(sc, &sc->sc_txq[0]);
#ifdef VMXNET3_STAT
	vmxstat.intr++;
#endif
	vmxnet3_enable_intr(sc, 0);
	return 1;
}

void
vmxnet3_evintr(struct vmxnet3_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	u_int event = sc->sc_ds->event;
	struct vmxnet3_txq_shared *ts;
	struct vmxnet3_rxq_shared *rs;

	/* Clear events. */
	WRITE_BAR1(sc, VMXNET3_BAR1_EVENT, event);

	/* Link state change? */
	if (event & VMXNET3_EVENT_LINK)
		vmxnet3_link_state(sc);

	/* Queue error? */
	if (event & (VMXNET3_EVENT_TQERROR | VMXNET3_EVENT_RQERROR)) {
		WRITE_CMD(sc, VMXNET3_CMD_GET_STATUS);

		ts = sc->sc_txq[0].ts;
		if (ts->stopped)
			printf("%s: TX error 0x%x\n", ifp->if_xname, ts->error);
		rs = sc->sc_rxq[0].rs;
		if (rs->stopped)
			printf("%s: RX error 0x%x\n", ifp->if_xname, rs->error);
		vmxnet3_reset(ifp);
	}

	if (event & VMXNET3_EVENT_DIC)
		printf("%s: device implementation change event\n",
		    ifp->if_xname);
	if (event & VMXNET3_EVENT_DEBUG)
		printf("%s: debug event\n", ifp->if_xname);
}

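/*
 * Reclaim transmitted packets.  A completion entry is valid once its
 * generation bit matches the completion ring's; it names the
 * packet's end-of-packet descriptor, so the mbuf and DMA map can be
 * released and the command ring advanced past it.
 */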
void
vmxnet3_txintr(struct vmxnet3_softc *sc, struct vmxnet3_txqueue *tq)
{
	struct vmxnet3_txring *ring = &tq->cmd_ring;
	struct vmxnet3_comp_ring *comp_ring = &tq->comp_ring;
	struct vmxnet3_txcompdesc *txcd;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	u_int sop;

	for (;;) {
		txcd = &comp_ring->txcd[comp_ring->next];

		if (letoh32((txcd->txc_word3 >> VMXNET3_TXC_GEN_S) &
		    VMXNET3_TXC_GEN_M) != comp_ring->gen)
			break;

		comp_ring->next++;
		if (comp_ring->next == NTXCOMPDESC) {
			comp_ring->next = 0;
			comp_ring->gen ^= 1;
		}

		sop = ring->next;
		if (ring->m[sop] == NULL)
			panic("vmxnet3_txintr");
		m_freem(ring->m[sop]);
		ring->m[sop] = NULL;
		bus_dmamap_unload(sc->sc_dmat, ring->dmap[sop]);
		ring->next = (letoh32((txcd->txc_word0 >>
		    VMXNET3_TXC_EOPIDX_S) & VMXNET3_TXC_EOPIDX_M) + 1)
		    % NTXDESC;

		ifp->if_flags &= ~IFF_OACTIVE;
	}
	if (ring->head == ring->next)
		ifp->if_timer = 0;
	vmxnet3_start(ifp);
}

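/*
 * Receive interrupt.  Walk the completion ring while the generation
 * bits match, hand good packets to the stack, then refill ring 1
 * with fresh buffers.
 */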
void
vmxnet3_rxintr(struct vmxnet3_softc *sc, struct vmxnet3_rxqueue *rq)
{
	struct vmxnet3_comp_ring *comp_ring = &rq->comp_ring;
	struct vmxnet3_rxring *ring;
	struct vmxnet3_rxdesc *rxd;
	struct vmxnet3_rxcompdesc *rxcd;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct mbuf *m;
	int idx, len;
	u_int slots;

	for (;;) {
		rxcd = &comp_ring->rxcd[comp_ring->next];
		if (letoh32((rxcd->rxc_word3 >> VMXNET3_RXC_GEN_S) &
		    VMXNET3_RXC_GEN_M) != comp_ring->gen)
			break;

		comp_ring->next++;
		if (comp_ring->next == NRXCOMPDESC) {
			comp_ring->next = 0;
			comp_ring->gen ^= 1;
		}

		idx = letoh32((rxcd->rxc_word0 >> VMXNET3_RXC_IDX_S) &
		    VMXNET3_RXC_IDX_M);
		if (letoh32((rxcd->rxc_word0 >> VMXNET3_RXC_QID_S) &
		    VMXNET3_RXC_QID_M) < NRXQUEUE)
			ring = &rq->cmd_ring[0];
		else
			ring = &rq->cmd_ring[1];
		rxd = &ring->rxd[idx];
		len = letoh32((rxcd->rxc_word2 >> VMXNET3_RXC_LEN_S) &
		    VMXNET3_RXC_LEN_M);
		m = ring->m[idx];
		ring->m[idx] = NULL;
		if_rxr_put(&ring->rxr, 1);
		bus_dmamap_unload(sc->sc_dmat, ring->dmap[idx]);

		if (m == NULL)
			panic("NULL mbuf");

		if (letoh32((rxd->rx_word2 >> VMXNET3_RX_BTYPE_S) &
		    VMXNET3_RX_BTYPE_M) != VMXNET3_BTYPE_HEAD) {
			m_freem(m);
			goto skip_buffer;
		}
		if (letoh32(rxcd->rxc_word2 & VMXNET3_RXC_ERROR)) {
			ifp->if_ierrors++;
			m_freem(m);
			goto skip_buffer;
		}
		if (len < VMXNET3_MIN_MTU) {
			printf("%s: short packet (%d)\n", ifp->if_xname, len);
			m_freem(m);
			goto skip_buffer;
		}

		ifp->if_ipackets++;
		ifp->if_ibytes += len;

		vmxnet3_rx_csum(rxcd, m);
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = len;
		if (letoh32(rxcd->rxc_word2 & VMXNET3_RXC_VLAN)) {
			m->m_flags |= M_VLANTAG;
			m->m_pkthdr.ether_vtag = letoh32((rxcd->rxc_word2 >>
			    VMXNET3_RXC_VLANTAG_S) & VMXNET3_RXC_VLANTAG_M);
		}

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_IN);
#endif
		ether_input_mbuf(ifp, m);

skip_buffer:
#ifdef VMXNET3_STAT
		vmxstat.rxdone = idx;
#endif
		if (rq->rs->update_rxhead) {
			u_int qid = letoh32((rxcd->rxc_word0 >>
			    VMXNET3_RXC_QID_S) & VMXNET3_RXC_QID_M);

			idx = (idx + 1) % NRXDESC;
			if (qid < NRXQUEUE) {
				WRITE_BAR0(sc, VMXNET3_BAR0_RXH1(qid), idx);
			} else {
				qid -= NRXQUEUE;
				WRITE_BAR0(sc, VMXNET3_BAR0_RXH2(qid), idx);
			}
		}
	}

	/* XXX Should we (try to) allocate buffers for ring 2 too? */
	ring = &rq->cmd_ring[0];
	for (slots = if_rxr_get(&ring->rxr, NRXDESC); slots > 0; slots--) {
		if (vmxnet3_getbuf(sc, ring))
			break;
	}
	if_rxr_put(&ring->rxr, slots);
}

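/*
 * Program the receive filter.  The multicast table allocated in
 * vmxnet3_dma_init() holds 682 addresses (682 * ETHER_ADDR_LEN =
 * 4092 bytes, just under a 4KB page); anything beyond that falls
 * back to ALLMULTI.
 */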
void
vmxnet3_iff(struct vmxnet3_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct arpcom *ac = &sc->sc_arpcom;
	struct vmxnet3_driver_shared *ds = sc->sc_ds;
	struct ether_multi *enm;
	struct ether_multistep step;
	u_int mode;
	u_int8_t *p;

	ds->mcast_tablelen = 0;
	CLR(ifp->if_flags, IFF_ALLMULTI);

	/*
	 * Always accept broadcast frames.
	 * Always accept frames destined to our station address.
	 */
	mode = VMXNET3_RXMODE_BCAST | VMXNET3_RXMODE_UCAST;

	if (ISSET(ifp->if_flags, IFF_PROMISC) || ac->ac_multirangecnt > 0 ||
	    ac->ac_multicnt > 682) {
		SET(ifp->if_flags, IFF_ALLMULTI);
		SET(mode, (VMXNET3_RXMODE_ALLMULTI | VMXNET3_RXMODE_MCAST));
		if (ifp->if_flags & IFF_PROMISC)
			SET(mode, VMXNET3_RXMODE_PROMISC);
	} else {
		p = sc->sc_mcast;
		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			bcopy(enm->enm_addrlo, p, ETHER_ADDR_LEN);

			p += ETHER_ADDR_LEN;

			ETHER_NEXT_MULTI(step, enm);
		}

		if (ac->ac_multicnt > 0) {
			SET(mode, VMXNET3_RXMODE_MCAST);
			ds->mcast_tablelen = p - sc->sc_mcast;
		}
	}

	WRITE_CMD(sc, VMXNET3_CMD_SET_FILTER);
	ds->rxmode = mode;
	WRITE_CMD(sc, VMXNET3_CMD_SET_RXMODE);
}

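/*
 * Translate the checksum bits of an rx completion descriptor into
 * mbuf csum_flags.  Fragments only get the IP header verdict; the
 * TCP/UDP checksum cannot be validated on a single fragment.
 */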
void
vmxnet3_rx_csum(struct vmxnet3_rxcompdesc *rxcd, struct mbuf *m)
{
	if (letoh32(rxcd->rxc_word0 & VMXNET3_RXC_NOCSUM))
		return;

	if ((rxcd->rxc_word3 & (VMXNET3_RXC_IPV4 | VMXNET3_RXC_IPSUM_OK)) ==
	    (VMXNET3_RXC_IPV4 | VMXNET3_RXC_IPSUM_OK))
		m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;

	if (rxcd->rxc_word3 & VMXNET3_RXC_FRAGMENT)
		return;

	if (rxcd->rxc_word3 & (VMXNET3_RXC_TCP | VMXNET3_RXC_UDP)) {
		if (rxcd->rxc_word3 & VMXNET3_RXC_CSUM_OK)
			m->m_pkthdr.csum_flags |=
			    M_TCP_CSUM_IN_OK | M_UDP_CSUM_IN_OK;
	}
}

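/*
 * Attach a fresh mbuf cluster to the rx descriptor at ring->fill and
 * pass it to the device by writing the ring's current generation bit
 * into the descriptor.
 */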
int
vmxnet3_getbuf(struct vmxnet3_softc *sc, struct vmxnet3_rxring *ring)
{
	int idx = ring->fill;
	struct vmxnet3_rxdesc *rxd = &ring->rxd[idx];
	struct mbuf *m;
	int btype;

	if (ring->m[idx])
		panic("vmxnet3_getbuf: buffer has mbuf");

#if 1
	/* XXX Don't allocate buffers for ring 2 for now. */
	if (ring->rid != 0)
		return -1;
	btype = VMXNET3_BTYPE_HEAD;
#else
	if (ring->rid == 0)
		btype = VMXNET3_BTYPE_HEAD;
	else
		btype = VMXNET3_BTYPE_BODY;
#endif

	m = MCLGETI(NULL, M_DONTWAIT, NULL, JUMBO_LEN);
	if (m == NULL)
		return -1;

	m->m_pkthdr.len = m->m_len = JUMBO_LEN;
	m_adj(m, ETHER_ALIGN);
	ring->m[idx] = m;

	if (bus_dmamap_load_mbuf(sc->sc_dmat, ring->dmap[idx], m,
	    BUS_DMA_NOWAIT))
		panic("load mbuf");
	rxd->rx_addr = htole64(DMAADDR(ring->dmap[idx]));
	rxd->rx_word2 = htole32(((m->m_pkthdr.len & VMXNET3_RX_LEN_M) <<
	    VMXNET3_RX_LEN_S) | ((btype & VMXNET3_RX_BTYPE_M) <<
	    VMXNET3_RX_BTYPE_S) | ((ring->gen & VMXNET3_RX_GEN_M) <<
	    VMXNET3_RX_GEN_S));
	idx++;
	if (idx == NRXDESC) {
		idx = 0;
		ring->gen ^= 1;
	}
	ring->fill = idx;
#ifdef VMXNET3_STAT
	vmxstat.rxfill = ring->fill;
#endif
	return 0;
}

void
vmxnet3_stop(struct ifnet *ifp)
{
	struct vmxnet3_softc *sc = ifp->if_softc;
	int queue;

	vmxnet3_disable_all_intrs(sc);
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;

	WRITE_CMD(sc, VMXNET3_CMD_DISABLE);

	for (queue = 0; queue < NTXQUEUE; queue++)
		vmxnet3_txstop(sc, &sc->sc_txq[queue]);
	for (queue = 0; queue < NRXQUEUE; queue++)
		vmxnet3_rxstop(sc, &sc->sc_rxq[queue]);
}

void
vmxnet3_reset(struct ifnet *ifp)
{
	struct vmxnet3_softc *sc = ifp->if_softc;

	vmxnet3_stop(ifp);
	WRITE_CMD(sc, VMXNET3_CMD_RESET);
	vmxnet3_init(sc);
}

int
vmxnet3_init(struct vmxnet3_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	int queue;

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	for (queue = 0; queue < NTXQUEUE; queue++)
		vmxnet3_txinit(sc, &sc->sc_txq[queue]);
	for (queue = 0; queue < NRXQUEUE; queue++)
		vmxnet3_rxinit(sc, &sc->sc_rxq[queue]);

	WRITE_CMD(sc, VMXNET3_CMD_ENABLE);
	if (READ_BAR1(sc, VMXNET3_BAR1_CMD)) {
		printf("%s: failed to initialize\n", ifp->if_xname);
		vmxnet3_stop(ifp);
		return EIO;
	}

	for (queue = 0; queue < NRXQUEUE; queue++) {
		WRITE_BAR0(sc, VMXNET3_BAR0_RXH1(queue), 0);
		WRITE_BAR0(sc, VMXNET3_BAR0_RXH2(queue), 0);
	}

	vmxnet3_iff(sc);
	vmxnet3_enable_all_intrs(sc);
	vmxnet3_link_state(sc);
	return 0;
}

int
vmxnet3_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct vmxnet3_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct ifaddr *ifa = (struct ifaddr *)data;
	int error = 0, s;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if ((ifp->if_flags & IFF_RUNNING) == 0)
			error = vmxnet3_init(sc);
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			arp_ifinit(&sc->sc_arpcom, ifa);
#endif
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				error = vmxnet3_init(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				vmxnet3_stop(ifp);
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
		break;
	case SIOCGIFRXR:
		error = if_rxr_ioctl((struct if_rxrinfo *)ifr->ifr_data,
		    NULL, JUMBO_LEN, &sc->sc_rxq[0].cmd_ring[0].rxr);
		break;
	default:
		error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data);
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			vmxnet3_iff(sc);
		error = 0;
	}

	splx(s);
	return error;
}

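/*
 * Transmit start routine.  A single packet can consume up to NTXSEGS
 * descriptors, so stop queueing as soon as fewer than that remain
 * free; the TXH doorbell is rung once per burst.
 */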
void
vmxnet3_start(struct ifnet *ifp)
{
	struct vmxnet3_softc *sc = ifp->if_softc;
	struct vmxnet3_txqueue *tq = &sc->sc_txq[0];
	struct vmxnet3_txring *ring = &tq->cmd_ring;
	struct mbuf *m;
	int n = 0;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	for (;;) {
		IFQ_POLL(&ifp->if_snd, m);
		if (m == NULL)
			break;
		if ((ring->next - ring->head - 1) % NTXDESC < NTXSEGS) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (vmxnet3_load_mbuf(sc, m) != 0) {
			ifp->if_oerrors++;
			continue;
		}
#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif
		ifp->if_timer = 5;
		ifp->if_opackets++;
		n++;
	}

	if (n > 0)
		WRITE_BAR0(sc, VMXNET3_BAR0_TXH(0), ring->head);
#ifdef VMXNET3_STAT
	vmxstat.txhead = ring->head;
	vmxstat.txdone = ring->next;
	vmxstat.maxtxlen =
	    max(vmxstat.maxtxlen, (ring->head - ring->next) % NTXDESC);
#endif
}

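/*
 * Map a packet into tx descriptors.  For TCP/UDP checksum offload
 * the device is told the header length and the checksum field
 * offset, so the headers are first made contiguous with
 * m_pulldown().  All descriptors are written with the inverted
 * generation bit; flipping the first descriptor's bit at the very
 * end is what hands the whole packet to the device.
 */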
int
vmxnet3_load_mbuf(struct vmxnet3_softc *sc, struct mbuf *m)
{
	struct vmxnet3_txqueue *tq = &sc->sc_txq[0];
	struct vmxnet3_txring *ring = &tq->cmd_ring;
	struct vmxnet3_txdesc *txd, *sop;
	struct mbuf *mp;
	struct ip *ip;
	bus_dmamap_t map = ring->dmap[ring->head];
	u_int hlen = ETHER_HDR_LEN, csum_off;
	int offp, gen, i;

#if 0
	if (m->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT) {
		printf("%s: IP checksum offloading is not supported\n",
		    sc->sc_dev.dv_xname);
		return -1;
	}
#endif
	if (m->m_pkthdr.csum_flags & (M_TCP_CSUM_OUT|M_UDP_CSUM_OUT)) {
		if (m->m_pkthdr.csum_flags & M_TCP_CSUM_OUT)
			csum_off = offsetof(struct tcphdr, th_sum);
		else
			csum_off = offsetof(struct udphdr, uh_sum);

		mp = m_pulldown(m, hlen, sizeof(*ip), &offp);
		if (mp == NULL)
			return (-1);

		ip = (struct ip *)(mp->m_data + offp);
		hlen += ip->ip_hl << 2;

		mp = m_pulldown(m, 0, hlen + csum_off + 2, &offp);
		if (mp == NULL)
			return (-1);
	}

	switch (bus_dmamap_load_mbuf(sc->sc_dmat, map, m, BUS_DMA_NOWAIT)) {
	case 0:
		break;
	case EFBIG:
		if (m_defrag(m, M_DONTWAIT) == 0 &&
		    bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
		     BUS_DMA_NOWAIT) == 0)
			break;

		/* FALLTHROUGH */
	default:
		m_freem(m);
		return -1;
	}

	ring->m[ring->head] = m;
	sop = &ring->txd[ring->head];
	gen = ring->gen ^ 1;		/* still owned by the CPU */
	for (i = 0; i < map->dm_nsegs; i++) {
		txd = &ring->txd[ring->head];
		txd->tx_addr = htole64(map->dm_segs[i].ds_addr);
		txd->tx_word2 = htole32(((map->dm_segs[i].ds_len &
		    VMXNET3_TX_LEN_M) << VMXNET3_TX_LEN_S) |
		    ((gen & VMXNET3_TX_GEN_M) << VMXNET3_TX_GEN_S));
		txd->tx_word3 = 0;
		ring->head++;
		if (ring->head == NTXDESC) {
			ring->head = 0;
			ring->gen ^= 1;
		}
		gen = ring->gen;
	}
	txd->tx_word3 |= htole32(VMXNET3_TX_EOP | VMXNET3_TX_COMPREQ);

	if (m->m_flags & M_VLANTAG) {
		sop->tx_word3 |= htole32(VMXNET3_TX_VTAG_MODE);
		sop->tx_word3 |= htole32((m->m_pkthdr.ether_vtag &
		    VMXNET3_TX_VLANTAG_M) << VMXNET3_TX_VLANTAG_S);
	}
	if (m->m_pkthdr.csum_flags & (M_TCP_CSUM_OUT|M_UDP_CSUM_OUT)) {
		sop->tx_word2 |= htole32(((hlen + csum_off) &
		    VMXNET3_TX_OP_M) << VMXNET3_TX_OP_S);
		sop->tx_word3 |= htole32(((hlen & VMXNET3_TX_HLEN_M) <<
		    VMXNET3_TX_HLEN_S) | (VMXNET3_OM_CSUM << VMXNET3_TX_OM_S));
	}

	/* Change the ownership by flipping the "generation" bit. */
	sop->tx_word2 ^= htole32(VMXNET3_TX_GEN_M << VMXNET3_TX_GEN_S);

	return (0);
}

void
vmxnet3_watchdog(struct ifnet *ifp)
{
	struct vmxnet3_softc *sc = ifp->if_softc;
	int s;

	printf("%s: device timeout\n", ifp->if_xname);
	s = splnet();
	vmxnet3_stop(ifp);
	vmxnet3_init(sc);
	splx(s);
}

void
vmxnet3_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct vmxnet3_softc *sc = ifp->if_softc;

	vmxnet3_link_state(sc);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (ifp->if_link_state != LINK_STATE_UP)
		return;

	ifmr->ifm_status |= IFM_ACTIVE;

	if (ifp->if_baudrate >= IF_Gbps(10))
		ifmr->ifm_active |= IFM_10G_T;
}

int
vmxnet3_media_change(struct ifnet *ifp)
{
	return 0;
}

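/*
 * Allocate physically contiguous, zeroed DMA-safe memory and return
 * its kernel va, with the bus address in *pa.  The map is created
 * only to learn the physical address and is destroyed again; the
 * memory itself is never freed, as all callers allocate once at
 * attach time.
 */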
void *
vmxnet3_dma_allocmem(struct vmxnet3_softc *sc, u_int size, u_int align,
    bus_addr_t *pa)
{
	bus_dma_tag_t t = sc->sc_dmat;
	bus_dma_segment_t segs[1];
	bus_dmamap_t map;
	caddr_t va;
	int n;

	if (bus_dmamem_alloc(t, size, align, 0, segs, 1, &n, BUS_DMA_NOWAIT))
		return NULL;
	if (bus_dmamem_map(t, segs, 1, size, &va, BUS_DMA_NOWAIT))
		return NULL;
	if (bus_dmamap_create(t, size, 1, size, 0, BUS_DMA_NOWAIT, &map))
		return NULL;
	if (bus_dmamap_load(t, map, va, size, NULL, BUS_DMA_NOWAIT))
		return NULL;
	bzero(va, size);
	*pa = DMAADDR(map);
	bus_dmamap_unload(t, map);
	bus_dmamap_destroy(t, map);
	return va;
}