/*	$OpenBSD: if_vmx.c,v 1.44 2016/04/13 10:34:32 mpi Exp $	*/

/*
 * Copyright (c) 2013 Tsubai Masanari
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/device.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/systm.h>
#include <sys/atomic.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/if_media.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>

#include <machine/bus.h>

#include <dev/pci/if_vmxreg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#define NRXQUEUE 1
#define NTXQUEUE 1

#define NTXDESC 512 /* tx ring size */
#define NTXSEGS 8 /* tx descriptors per packet */
#define NRXDESC 512
#define NTXCOMPDESC NTXDESC
#define NRXCOMPDESC (NRXDESC * 2)	/* ring1 + ring2 */

#define VMXNET3_DRIVER_VERSION 0x00010000

struct vmxnet3_txring {
	struct mbuf *m[NTXDESC];
	bus_dmamap_t dmap[NTXDESC];
	struct vmxnet3_txdesc *txd;
	u_int prod;
	u_int cons;
	u_int free;
	u_int8_t gen;
};

struct vmxnet3_rxring {
	struct mbuf *m[NRXDESC];
	bus_dmamap_t dmap[NRXDESC];
	struct if_rxring rxr;
	struct vmxnet3_rxdesc *rxd;
	u_int fill;
	u_int8_t gen;
	u_int8_t rid;
};

struct vmxnet3_comp_ring {
	union {
		struct vmxnet3_txcompdesc *txcd;
		struct vmxnet3_rxcompdesc *rxcd;
	};
	u_int next;
	u_int8_t gen;
};
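
/*
 * Descriptor ownership is tracked with generation bits: the driver's gen
 * value starts at 1 and is flipped each time a ring wraps, and a
 * completion descriptor is treated as valid only while its gen field
 * matches the ring's current gen (see vmxnet3_txintr/vmxnet3_rxintr).
 * Since the rings are zeroed at init time, stale entries carry gen 0 and
 * never match the initial gen of 1.
 */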

struct vmxnet3_txqueue {
	struct vmxnet3_txring cmd_ring;
	struct vmxnet3_comp_ring comp_ring;
	struct vmxnet3_txq_shared *ts;
};

struct vmxnet3_rxqueue {
	struct vmxnet3_rxring cmd_ring[2];
	struct vmxnet3_comp_ring comp_ring;
	struct vmxnet3_rxq_shared *rs;
};

struct vmxnet3_softc {
	struct device sc_dev;
	struct arpcom sc_arpcom;
	struct ifmedia sc_media;

	bus_space_tag_t	sc_iot0;
	bus_space_tag_t	sc_iot1;
	bus_space_handle_t sc_ioh0;
	bus_space_handle_t sc_ioh1;
	bus_dma_tag_t sc_dmat;
	void *sc_ih;

	struct vmxnet3_txqueue sc_txq[NTXQUEUE];
	struct vmxnet3_rxqueue sc_rxq[NRXQUEUE];
	struct vmxnet3_driver_shared *sc_ds;
	u_int8_t *sc_mcast;
};

#define VMXNET3_STAT

#ifdef VMXNET3_STAT
struct {
	u_int ntxdesc;
	u_int nrxdesc;
	u_int txhead;
	u_int txdone;
	u_int maxtxlen;
	u_int rxdone;
	u_int rxfill;
	u_int intr;
} vmxstat = {
	NTXDESC, NRXDESC
};
#endif

#define JUMBO_LEN (1024 * 9)
#define DMAADDR(map) ((map)->dm_segs[0].ds_addr)

#define READ_BAR0(sc, reg) bus_space_read_4((sc)->sc_iot0, (sc)->sc_ioh0, reg)
#define READ_BAR1(sc, reg) bus_space_read_4((sc)->sc_iot1, (sc)->sc_ioh1, reg)
#define WRITE_BAR0(sc, reg, val) \
	bus_space_write_4((sc)->sc_iot0, (sc)->sc_ioh0, reg, val)
#define WRITE_BAR1(sc, reg, val) \
	bus_space_write_4((sc)->sc_iot1, (sc)->sc_ioh1, reg, val)
#define WRITE_CMD(sc, cmd) WRITE_BAR1(sc, VMXNET3_BAR1_CMD, cmd)
#define vtophys(va) 0		/* XXX ok? */
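/*
 * Note on the vtophys() stub: it is only used for the driver_data fields
 * in the shared structures, which appear to be opaque debugging cookies
 * the device merely echoes back, so handing it 0 looks harmless (hence
 * the XXX above).
 */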

int vmxnet3_match(struct device *, void *, void *);
void vmxnet3_attach(struct device *, struct device *, void *);
int vmxnet3_dma_init(struct vmxnet3_softc *);
int vmxnet3_alloc_txring(struct vmxnet3_softc *, int);
int vmxnet3_alloc_rxring(struct vmxnet3_softc *, int);
void vmxnet3_txinit(struct vmxnet3_softc *, struct vmxnet3_txqueue *);
void vmxnet3_rxinit(struct vmxnet3_softc *, struct vmxnet3_rxqueue *);
void vmxnet3_txstop(struct vmxnet3_softc *, struct vmxnet3_txqueue *);
void vmxnet3_rxstop(struct vmxnet3_softc *, struct vmxnet3_rxqueue *);
void vmxnet3_link_state(struct vmxnet3_softc *);
void vmxnet3_enable_all_intrs(struct vmxnet3_softc *);
void vmxnet3_disable_all_intrs(struct vmxnet3_softc *);
int vmxnet3_intr(void *);
void vmxnet3_evintr(struct vmxnet3_softc *);
void vmxnet3_txintr(struct vmxnet3_softc *, struct vmxnet3_txqueue *);
void vmxnet3_rxintr(struct vmxnet3_softc *, struct vmxnet3_rxqueue *);
void vmxnet3_iff(struct vmxnet3_softc *);
void vmxnet3_rx_csum(struct vmxnet3_rxcompdesc *, struct mbuf *);
int vmxnet3_getbuf(struct vmxnet3_softc *, struct vmxnet3_rxring *);
void vmxnet3_stop(struct ifnet *);
void vmxnet3_reset(struct vmxnet3_softc *);
int vmxnet3_init(struct vmxnet3_softc *);
int vmxnet3_ioctl(struct ifnet *, u_long, caddr_t);
void vmxnet3_start(struct ifnet *);
int vmxnet3_load_mbuf(struct vmxnet3_softc *, struct vmxnet3_txring *,
    struct mbuf **);
void vmxnet3_watchdog(struct ifnet *);
void vmxnet3_media_status(struct ifnet *, struct ifmediareq *);
int vmxnet3_media_change(struct ifnet *);
void *vmxnet3_dma_allocmem(struct vmxnet3_softc *, u_int, u_int, bus_addr_t *);

const struct pci_matchid vmx_devices[] = {
	{ PCI_VENDOR_VMWARE, PCI_PRODUCT_VMWARE_NET_3 }
};

struct cfattach vmx_ca = {
	sizeof(struct vmxnet3_softc), vmxnet3_match, vmxnet3_attach
};

struct cfdriver vmx_cd = {
	NULL, "vmx", DV_IFNET
};

int
vmxnet3_match(struct device *parent, void *match, void *aux)
{
	return (pci_matchbyid(aux, vmx_devices, nitems(vmx_devices)));
}

void
vmxnet3_attach(struct device *parent, struct device *self, void *aux)
{
	struct vmxnet3_softc *sc = (void *)self;
	struct pci_attach_args *pa = aux;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	pci_intr_handle_t ih;
	const char *intrstr;
	u_int memtype, ver, macl, mach;
	u_char enaddr[ETHER_ADDR_LEN];

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, 0x10);
	if (pci_mapreg_map(pa, 0x10, memtype, 0, &sc->sc_iot0, &sc->sc_ioh0,
	    NULL, NULL, 0)) {
		printf(": failed to map BAR0\n");
		return;
	}
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, 0x14);
	if (pci_mapreg_map(pa, 0x14, memtype, 0, &sc->sc_iot1, &sc->sc_ioh1,
	    NULL, NULL, 0)) {
		printf(": failed to map BAR1\n");
		return;
	}

	ver = READ_BAR1(sc, VMXNET3_BAR1_VRRS);
	if ((ver & 0x1) == 0) {
		printf(": unsupported hardware version 0x%x\n", ver);
		return;
	}
	WRITE_BAR1(sc, VMXNET3_BAR1_VRRS, 1);

	ver = READ_BAR1(sc, VMXNET3_BAR1_UVRS);
	if ((ver & 0x1) == 0) {
		printf(": incompatible UPT version 0x%x\n", ver);
		return;
	}
	WRITE_BAR1(sc, VMXNET3_BAR1_UVRS, 1);

	sc->sc_dmat = pa->pa_dmat;
	if (vmxnet3_dma_init(sc)) {
		printf(": failed to set up DMA\n");
		return;
	}

	if (pci_intr_map(pa, &ih)) {
		printf(": failed to map interrupt\n");
		return;
	}
	sc->sc_ih = pci_intr_establish(pa->pa_pc, ih, IPL_NET | IPL_MPSAFE,
	    vmxnet3_intr, sc, self->dv_xname);
	intrstr = pci_intr_string(pa->pa_pc, ih);
	if (intrstr)
		printf(": %s", intrstr);

	WRITE_CMD(sc, VMXNET3_CMD_GET_MACL);
	macl = READ_BAR1(sc, VMXNET3_BAR1_CMD);
	enaddr[0] = macl;
	enaddr[1] = macl >> 8;
	enaddr[2] = macl >> 16;
	enaddr[3] = macl >> 24;
	WRITE_CMD(sc, VMXNET3_CMD_GET_MACH);
	mach = READ_BAR1(sc, VMXNET3_BAR1_CMD);
	enaddr[4] = mach;
	enaddr[5] = mach >> 8;

	WRITE_BAR1(sc, VMXNET3_BAR1_MACL, macl);
	WRITE_BAR1(sc, VMXNET3_BAR1_MACH, mach);
	printf(", address %s\n", ether_sprintf(enaddr));

	bcopy(enaddr, sc->sc_arpcom.ac_enaddr, 6);
	strlcpy(ifp->if_xname, self->dv_xname, IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_MULTICAST | IFF_SIMPLEX;
	ifp->if_ioctl = vmxnet3_ioctl;
	ifp->if_start = vmxnet3_start;
	ifp->if_watchdog = vmxnet3_watchdog;
	ifp->if_hardmtu = VMXNET3_MAX_MTU;
	ifp->if_capabilities = IFCAP_VLAN_MTU;
	if (sc->sc_ds->upt_features & UPT1_F_CSUM)
		ifp->if_capabilities |= IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;
	if (sc->sc_ds->upt_features & UPT1_F_VLAN)
		ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;

	IFQ_SET_MAXLEN(&ifp->if_snd, NTXDESC);

	ifmedia_init(&sc->sc_media, IFM_IMASK, vmxnet3_media_change,
	    vmxnet3_media_status);
	ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_AUTO, 0, NULL);
	ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_10G_T|IFM_FDX, 0, NULL);
	ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_10G_T, 0, NULL);
	ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_1000_T|IFM_FDX, 0, NULL);
	ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_1000_T, 0, NULL);
	ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_AUTO);

	if_attach(ifp);
	ether_ifattach(ifp);
	vmxnet3_link_state(sc);
}

int
vmxnet3_dma_init(struct vmxnet3_softc *sc)
{
	struct vmxnet3_driver_shared *ds;
	struct vmxnet3_txq_shared *ts;
	struct vmxnet3_rxq_shared *rs;
	bus_addr_t ds_pa, qs_pa, mcast_pa;
	int i, queue, qs_len;
	u_int major, minor, release_code, rev;

	qs_len = NTXQUEUE * sizeof *ts + NRXQUEUE * sizeof *rs;
	ts = vmxnet3_dma_allocmem(sc, qs_len, VMXNET3_DMADESC_ALIGN, &qs_pa);
	if (ts == NULL)
		return -1;
	for (queue = 0; queue < NTXQUEUE; queue++)
		sc->sc_txq[queue].ts = ts++;
	rs = (void *)ts;
	for (queue = 0; queue < NRXQUEUE; queue++)
		sc->sc_rxq[queue].rs = rs++;

	for (queue = 0; queue < NTXQUEUE; queue++)
		if (vmxnet3_alloc_txring(sc, queue))
			return -1;
	for (queue = 0; queue < NRXQUEUE; queue++)
		if (vmxnet3_alloc_rxring(sc, queue))
			return -1;

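	/*
	 * 682 multicast entries: 682 * ETHER_ADDR_LEN is 4092 bytes,
	 * presumably chosen as the largest whole number of addresses
	 * that fits in a 4KB page.
	 */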
	sc->sc_mcast = vmxnet3_dma_allocmem(sc, 682 * ETHER_ADDR_LEN, 32, &mcast_pa);
	if (sc->sc_mcast == NULL)
		return -1;

	ds = vmxnet3_dma_allocmem(sc, sizeof *sc->sc_ds, 8, &ds_pa);
	if (ds == NULL)
		return -1;
	sc->sc_ds = ds;
	ds->magic = VMXNET3_REV1_MAGIC;
	ds->version = VMXNET3_DRIVER_VERSION;

	/*
	 * XXX FreeBSD version uses following values:
	 * (Does the device behavior depend on them?)
	 *
	 * major = __FreeBSD_version / 100000;
	 * minor = (__FreeBSD_version / 1000) % 100;
	 * release_code = (__FreeBSD_version / 100) % 10;
	 * rev = __FreeBSD_version % 100;
	 */
	major = 0;
	minor = 0;
	release_code = 0;
	rev = 0;
#ifdef __LP64__
	ds->guest = release_code << 30 | rev << 22 | major << 14 | minor << 6
	    | VMXNET3_GOS_FREEBSD | VMXNET3_GOS_64BIT;
#else
	ds->guest = release_code << 30 | rev << 22 | major << 14 | minor << 6
	    | VMXNET3_GOS_FREEBSD | VMXNET3_GOS_32BIT;
#endif
	ds->vmxnet3_revision = 1;
	ds->upt_version = 1;
	ds->upt_features = UPT1_F_CSUM | UPT1_F_VLAN;
	ds->driver_data = vtophys(sc);
	ds->driver_data_len = sizeof(struct vmxnet3_softc);
	ds->queue_shared = qs_pa;
	ds->queue_shared_len = qs_len;
	ds->mtu = VMXNET3_MAX_MTU;
	ds->ntxqueue = NTXQUEUE;
	ds->nrxqueue = NRXQUEUE;
	ds->mcast_table = mcast_pa;
	ds->automask = 1;
	ds->nintr = VMXNET3_NINTR;
	ds->evintr = 0;
	ds->ictrl = VMXNET3_ICTRL_DISABLE_ALL;
	for (i = 0; i < VMXNET3_NINTR; i++)
		ds->modlevel[i] = UPT1_IMOD_ADAPTIVE;
	WRITE_BAR1(sc, VMXNET3_BAR1_DSL, ds_pa);
	WRITE_BAR1(sc, VMXNET3_BAR1_DSH, (u_int64_t)ds_pa >> 32);
	return 0;
}

int
vmxnet3_alloc_txring(struct vmxnet3_softc *sc, int queue)
{
	struct vmxnet3_txqueue *tq = &sc->sc_txq[queue];
	struct vmxnet3_txq_shared *ts;
	struct vmxnet3_txring *ring = &tq->cmd_ring;
	struct vmxnet3_comp_ring *comp_ring = &tq->comp_ring;
	bus_addr_t pa, comp_pa;
	int idx;

	ring->txd = vmxnet3_dma_allocmem(sc, NTXDESC * sizeof ring->txd[0], 512, &pa);
	if (ring->txd == NULL)
		return -1;
	comp_ring->txcd = vmxnet3_dma_allocmem(sc,
	    NTXCOMPDESC * sizeof comp_ring->txcd[0], 512, &comp_pa);
	if (comp_ring->txcd == NULL)
		return -1;

	for (idx = 0; idx < NTXDESC; idx++) {
		if (bus_dmamap_create(sc->sc_dmat, JUMBO_LEN, NTXSEGS,
		    JUMBO_LEN, 0, BUS_DMA_NOWAIT, &ring->dmap[idx]))
			return -1;
	}

	ts = tq->ts;
	bzero(ts, sizeof *ts);
	ts->npending = 0;
	ts->intr_threshold = 1;
	ts->cmd_ring = pa;
	ts->cmd_ring_len = NTXDESC;
	ts->comp_ring = comp_pa;
	ts->comp_ring_len = NTXCOMPDESC;
	ts->driver_data = vtophys(tq);
	ts->driver_data_len = sizeof *tq;
	ts->intr_idx = 0;
	ts->stopped = 1;
	ts->error = 0;
	return 0;
}

int
vmxnet3_alloc_rxring(struct vmxnet3_softc *sc, int queue)
{
	struct vmxnet3_rxqueue *rq = &sc->sc_rxq[queue];
	struct vmxnet3_rxq_shared *rs;
	struct vmxnet3_rxring *ring;
	struct vmxnet3_comp_ring *comp_ring;
	bus_addr_t pa[2], comp_pa;
	int i, idx;

	for (i = 0; i < 2; i++) {
		ring = &rq->cmd_ring[i];
		ring->rxd = vmxnet3_dma_allocmem(sc, NRXDESC * sizeof ring->rxd[0],
		    512, &pa[i]);
		if (ring->rxd == NULL)
			return -1;
	}
	comp_ring = &rq->comp_ring;
	comp_ring->rxcd = vmxnet3_dma_allocmem(sc,
	    NRXCOMPDESC * sizeof comp_ring->rxcd[0], 512, &comp_pa);
	if (comp_ring->rxcd == NULL)
		return -1;

	for (i = 0; i < 2; i++) {
		ring = &rq->cmd_ring[i];
		ring->rid = i;
		for (idx = 0; idx < NRXDESC; idx++) {
			if (bus_dmamap_create(sc->sc_dmat, JUMBO_LEN, 1,
			    JUMBO_LEN, 0, BUS_DMA_NOWAIT, &ring->dmap[idx]))
				return -1;
		}
	}

	rs = rq->rs;
	bzero(rs, sizeof *rs);
	rs->cmd_ring[0] = pa[0];
	rs->cmd_ring[1] = pa[1];
	rs->cmd_ring_len[0] = NRXDESC;
	rs->cmd_ring_len[1] = NRXDESC;
	rs->comp_ring = comp_pa;
	rs->comp_ring_len = NRXCOMPDESC;
	rs->driver_data = vtophys(rq);
	rs->driver_data_len = sizeof *rq;
	rs->intr_idx = 0;
	rs->stopped = 1;
	rs->error = 0;
	return 0;
}

void
vmxnet3_txinit(struct vmxnet3_softc *sc, struct vmxnet3_txqueue *tq)
{
	struct vmxnet3_txring *ring = &tq->cmd_ring;
	struct vmxnet3_comp_ring *comp_ring = &tq->comp_ring;

	ring->cons = ring->prod = 0;
	ring->free = NTXDESC;
	ring->gen = 1;
	comp_ring->next = 0;
	comp_ring->gen = 1;
	bzero(ring->txd, NTXDESC * sizeof ring->txd[0]);
	bzero(comp_ring->txcd, NTXCOMPDESC * sizeof comp_ring->txcd[0]);
}

void
vmxnet3_rxinit(struct vmxnet3_softc *sc, struct vmxnet3_rxqueue *rq)
{
	struct vmxnet3_rxring *ring;
	struct vmxnet3_comp_ring *comp_ring;
	int i;
	u_int slots;

	for (i = 0; i < 2; i++) {
		ring = &rq->cmd_ring[i];
		ring->fill = 0;
		ring->gen = 1;
		bzero(ring->rxd, NRXDESC * sizeof ring->rxd[0]);
		if_rxr_init(&ring->rxr, 2, NRXDESC - 1);
		for (slots = if_rxr_get(&ring->rxr, NRXDESC);
		    slots > 0; slots--) {
			if (vmxnet3_getbuf(sc, ring))
				break;
		}
		if_rxr_put(&ring->rxr, slots);
	}
	comp_ring = &rq->comp_ring;
	comp_ring->next = 0;
	comp_ring->gen = 1;
	bzero(comp_ring->rxcd, NRXCOMPDESC * sizeof comp_ring->rxcd[0]);
}

void
vmxnet3_txstop(struct vmxnet3_softc *sc, struct vmxnet3_txqueue *tq)
{
	struct vmxnet3_txring *ring = &tq->cmd_ring;
	int idx;

	for (idx = 0; idx < NTXDESC; idx++) {
		if (ring->m[idx]) {
			bus_dmamap_unload(sc->sc_dmat, ring->dmap[idx]);
			m_freem(ring->m[idx]);
			ring->m[idx] = NULL;
		}
	}
}

void
vmxnet3_rxstop(struct vmxnet3_softc *sc, struct vmxnet3_rxqueue *rq)
{
	struct vmxnet3_rxring *ring;
	int i, idx;

	for (i = 0; i < 2; i++) {
		ring = &rq->cmd_ring[i];
		for (idx = 0; idx < NRXDESC; idx++) {
			if (ring->m[idx]) {
				m_freem(ring->m[idx]);
				ring->m[idx] = NULL;
			}
		}
	}
}

void
vmxnet3_link_state(struct vmxnet3_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	u_int x, link, speed;

	WRITE_CMD(sc, VMXNET3_CMD_GET_LINK);
	x = READ_BAR1(sc, VMXNET3_BAR1_CMD);
	speed = x >> 16;
	if (x & 1) {
		ifp->if_baudrate = IF_Mbps(speed);
		link = LINK_STATE_UP;
	} else
		link = LINK_STATE_DOWN;

	if (ifp->if_link_state != link) {
		ifp->if_link_state = link;
		if_link_state_change(ifp);
	}
}

static inline void
vmxnet3_enable_intr(struct vmxnet3_softc *sc, int irq)
{
	WRITE_BAR0(sc, VMXNET3_BAR0_IMASK(irq), 0);
}

static inline void
vmxnet3_disable_intr(struct vmxnet3_softc *sc, int irq)
{
	WRITE_BAR0(sc, VMXNET3_BAR0_IMASK(irq), 1);
}
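
/*
 * Writing 1 to an IMASK register masks the interrupt, 0 unmasks it.
 * Since ds->automask is set in vmxnet3_dma_init(), the device masks an
 * interrupt when it raises it, and vmxnet3_intr() re-enables it once the
 * queues have been serviced.
 */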

void
vmxnet3_enable_all_intrs(struct vmxnet3_softc *sc)
{
	int i;

	sc->sc_ds->ictrl &= ~VMXNET3_ICTRL_DISABLE_ALL;
	for (i = 0; i < VMXNET3_NINTR; i++)
		vmxnet3_enable_intr(sc, i);
}

void
vmxnet3_disable_all_intrs(struct vmxnet3_softc *sc)
{
	int i;

	sc->sc_ds->ictrl |= VMXNET3_ICTRL_DISABLE_ALL;
	for (i = 0; i < VMXNET3_NINTR; i++)
		vmxnet3_disable_intr(sc, i);
}

int
vmxnet3_intr(void *arg)
{
	struct vmxnet3_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;

	if (READ_BAR1(sc, VMXNET3_BAR1_INTR) == 0)
		return 0;

	if (sc->sc_ds->event) {
		KERNEL_LOCK();
		vmxnet3_evintr(sc);
		KERNEL_UNLOCK();
	}

	if (ifp->if_flags & IFF_RUNNING) {
		vmxnet3_rxintr(sc, &sc->sc_rxq[0]);
		vmxnet3_txintr(sc, &sc->sc_txq[0]);
		vmxnet3_enable_intr(sc, 0);
	}

	return 1;
}

void
vmxnet3_evintr(struct vmxnet3_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	u_int event = sc->sc_ds->event;
	struct vmxnet3_txq_shared *ts;
	struct vmxnet3_rxq_shared *rs;

	/* Clear events. */
	WRITE_BAR1(sc, VMXNET3_BAR1_EVENT, event);

	/* Link state change? */
	if (event & VMXNET3_EVENT_LINK)
		vmxnet3_link_state(sc);

	/* Queue error? */
	if (event & (VMXNET3_EVENT_TQERROR | VMXNET3_EVENT_RQERROR)) {
		WRITE_CMD(sc, VMXNET3_CMD_GET_STATUS);

		ts = sc->sc_txq[0].ts;
		if (ts->stopped)
			printf("%s: TX error 0x%x\n", ifp->if_xname, ts->error);
		rs = sc->sc_rxq[0].rs;
		if (rs->stopped)
			printf("%s: RX error 0x%x\n", ifp->if_xname, rs->error);
		vmxnet3_init(sc);
	}

	if (event & VMXNET3_EVENT_DIC)
		printf("%s: device implementation change event\n",
		    ifp->if_xname);
	if (event & VMXNET3_EVENT_DEBUG)
		printf("%s: debug event\n", ifp->if_xname);
}

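/*
 * Tx completion: each completion descriptor names the EOP (end-of-packet)
 * command descriptor of one transmitted packet, so the consumer index can
 * jump straight to EOP + 1 and reclaim all of that packet's descriptors
 * in one step.
 */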
void
vmxnet3_txintr(struct vmxnet3_softc *sc, struct vmxnet3_txqueue *tq)
{
	struct vmxnet3_txring *ring = &tq->cmd_ring;
	struct vmxnet3_comp_ring *comp_ring = &tq->comp_ring;
	struct vmxnet3_txcompdesc *txcd;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	bus_dmamap_t map;
	struct mbuf *m;
	u_int cons;
	u_int free = 0;

	cons = ring->cons;

	for (;;) {
		txcd = &comp_ring->txcd[comp_ring->next];

		if (letoh32((txcd->txc_word3 >> VMXNET3_TXC_GEN_S) &
		    VMXNET3_TXC_GEN_M) != comp_ring->gen)
			break;

		comp_ring->next++;
		if (comp_ring->next == NTXCOMPDESC) {
			comp_ring->next = 0;
			comp_ring->gen ^= 1;
		}

		m = ring->m[cons];
		ring->m[cons] = NULL;

		KASSERT(m != NULL);

		map = ring->dmap[cons];
		free += map->dm_nsegs;
		bus_dmamap_unload(sc->sc_dmat, map);
		m_freem(m);

		cons = (letoh32((txcd->txc_word0 >>
		    VMXNET3_TXC_EOPIDX_S) & VMXNET3_TXC_EOPIDX_M) + 1)
		    % NTXDESC;
	}

	ring->cons = cons;

	if (atomic_add_int_nv(&ring->free, free) == NTXDESC)
		ifp->if_timer = 0;

	if (ifq_is_oactive(&ifp->if_snd)) {
		KERNEL_LOCK();
		ifq_clr_oactive(&ifp->if_snd);
		vmxnet3_start(ifp);
		KERNEL_UNLOCK();
	}
}

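/*
 * Rx completion: the QID field says whether a buffer came from command
 * ring 1 or ring 2.  Only ring 1 is ever filled (see vmxnet3_getbuf), so
 * every accepted packet must arrive in a single HEAD buffer; anything
 * else is dropped.
 */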
void
vmxnet3_rxintr(struct vmxnet3_softc *sc, struct vmxnet3_rxqueue *rq)
{
	struct vmxnet3_comp_ring *comp_ring = &rq->comp_ring;
	struct vmxnet3_rxring *ring;
	struct vmxnet3_rxdesc *rxd;
	struct vmxnet3_rxcompdesc *rxcd;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct mbuf *m;
	int idx, len;
	u_int slots;

	for (;;) {
		rxcd = &comp_ring->rxcd[comp_ring->next];
		if (letoh32((rxcd->rxc_word3 >> VMXNET3_RXC_GEN_S) &
		    VMXNET3_RXC_GEN_M) != comp_ring->gen)
			break;

		comp_ring->next++;
		if (comp_ring->next == NRXCOMPDESC) {
			comp_ring->next = 0;
			comp_ring->gen ^= 1;
		}

		idx = letoh32((rxcd->rxc_word0 >> VMXNET3_RXC_IDX_S) &
		    VMXNET3_RXC_IDX_M);
		if (letoh32((rxcd->rxc_word0 >> VMXNET3_RXC_QID_S) &
		    VMXNET3_RXC_QID_M) < NRXQUEUE)
			ring = &rq->cmd_ring[0];
		else
			ring = &rq->cmd_ring[1];
		rxd = &ring->rxd[idx];
		len = letoh32((rxcd->rxc_word2 >> VMXNET3_RXC_LEN_S) &
		    VMXNET3_RXC_LEN_M);
		m = ring->m[idx];
		ring->m[idx] = NULL;
		if_rxr_put(&ring->rxr, 1);
		bus_dmamap_unload(sc->sc_dmat, ring->dmap[idx]);

		if (m == NULL)
			panic("%s: NULL ring->m[%u]", __func__, idx);

		if (letoh32((rxd->rx_word2 >> VMXNET3_RX_BTYPE_S) &
		    VMXNET3_RX_BTYPE_M) != VMXNET3_BTYPE_HEAD) {
			m_freem(m);
			goto skip_buffer;
		}
		if (letoh32(rxcd->rxc_word2 & VMXNET3_RXC_ERROR)) {
			ifp->if_ierrors++;
			m_freem(m);
			goto skip_buffer;
		}
		if (len < VMXNET3_MIN_MTU) {
			m_freem(m);
			goto skip_buffer;
		}

		vmxnet3_rx_csum(rxcd, m);
		m->m_pkthdr.len = m->m_len = len;
		if (letoh32(rxcd->rxc_word2 & VMXNET3_RXC_VLAN)) {
			m->m_flags |= M_VLANTAG;
			m->m_pkthdr.ether_vtag = letoh32((rxcd->rxc_word2 >>
			    VMXNET3_RXC_VLANTAG_S) & VMXNET3_RXC_VLANTAG_M);
		}

		ml_enqueue(&ml, m);

skip_buffer:
#ifdef VMXNET3_STAT
		vmxstat.rxdone = idx;
#endif
		if (rq->rs->update_rxhead) {
			u_int qid = letoh32((rxcd->rxc_word0 >>
			    VMXNET3_RXC_QID_S) & VMXNET3_RXC_QID_M);

			idx = (idx + 1) % NRXDESC;
			if (qid < NRXQUEUE) {
				WRITE_BAR0(sc, VMXNET3_BAR0_RXH1(qid), idx);
			} else {
				qid -= NRXQUEUE;
				WRITE_BAR0(sc, VMXNET3_BAR0_RXH2(qid), idx);
			}
		}
	}

	if_input(ifp, &ml);

	/* XXX Should we (try to) allocate buffers for ring 2 too? */
	ring = &rq->cmd_ring[0];
	for (slots = if_rxr_get(&ring->rxr, NRXDESC); slots > 0; slots--) {
		if (vmxnet3_getbuf(sc, ring))
			break;
	}
	if_rxr_put(&ring->rxr, slots);
}

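/*
 * Program the Rx filter.  The 682-entry ceiling matches the multicast
 * table allocated in vmxnet3_dma_init(); beyond it, or with address
 * ranges (which the table cannot express), fall back to ALLMULTI.
 */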
void
vmxnet3_iff(struct vmxnet3_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct arpcom *ac = &sc->sc_arpcom;
	struct vmxnet3_driver_shared *ds = sc->sc_ds;
	struct ether_multi *enm;
	struct ether_multistep step;
	u_int mode;
	u_int8_t *p;

	ds->mcast_tablelen = 0;
	CLR(ifp->if_flags, IFF_ALLMULTI);

	/*
	 * Always accept broadcast frames.
	 * Always accept frames destined to our station address.
	 */
	mode = VMXNET3_RXMODE_BCAST | VMXNET3_RXMODE_UCAST;

	if (ISSET(ifp->if_flags, IFF_PROMISC) || ac->ac_multirangecnt > 0 ||
	    ac->ac_multicnt > 682) {
		SET(ifp->if_flags, IFF_ALLMULTI);
		SET(mode, (VMXNET3_RXMODE_ALLMULTI | VMXNET3_RXMODE_MCAST));
		if (ifp->if_flags & IFF_PROMISC)
			SET(mode, VMXNET3_RXMODE_PROMISC);
	} else {
		p = sc->sc_mcast;
		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			bcopy(enm->enm_addrlo, p, ETHER_ADDR_LEN);

			p += ETHER_ADDR_LEN;

			ETHER_NEXT_MULTI(step, enm);
		}

		if (ac->ac_multicnt > 0) {
			SET(mode, VMXNET3_RXMODE_MCAST);
			ds->mcast_tablelen = p - sc->sc_mcast;
		}
	}

	WRITE_CMD(sc, VMXNET3_CMD_SET_FILTER);
	ds->rxmode = mode;
	WRITE_CMD(sc, VMXNET3_CMD_SET_RXMODE);
}

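/*
 * Map the device's Rx checksum bits onto mbuf csum_flags.  For IP
 * fragments only the IP header checksum is taken, since the L4 checksum
 * cannot be validated on a per-fragment basis.
 */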
void
vmxnet3_rx_csum(struct vmxnet3_rxcompdesc *rxcd, struct mbuf *m)
{
	if (letoh32(rxcd->rxc_word0 & VMXNET3_RXC_NOCSUM))
		return;

	if ((rxcd->rxc_word3 & (VMXNET3_RXC_IPV4 | VMXNET3_RXC_IPSUM_OK)) ==
	    (VMXNET3_RXC_IPV4 | VMXNET3_RXC_IPSUM_OK))
		m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;

	if (rxcd->rxc_word3 & VMXNET3_RXC_FRAGMENT)
		return;

	if (rxcd->rxc_word3 & (VMXNET3_RXC_TCP | VMXNET3_RXC_UDP)) {
		if (rxcd->rxc_word3 & VMXNET3_RXC_CSUM_OK)
			m->m_pkthdr.csum_flags |=
			    M_TCP_CSUM_IN_OK | M_UDP_CSUM_IN_OK;
	}
}

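/*
 * Post one receive buffer on a command ring.  Ring 2 (BODY buffers, used
 * by the device to continue packets that do not fit in one buffer) is
 * deliberately left empty for now, so every packet must fit in a single
 * HEAD buffer.
 */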
int
vmxnet3_getbuf(struct vmxnet3_softc *sc, struct vmxnet3_rxring *ring)
{
	int idx = ring->fill;
	struct vmxnet3_rxdesc *rxd = &ring->rxd[idx];
	struct mbuf *m;
	int btype;

	if (ring->m[idx])
		panic("vmxnet3_getbuf: buffer has mbuf");

#if 1
	/* XXX Don't allocate buffers for ring 2 for now. */
	if (ring->rid != 0)
		return -1;
	btype = VMXNET3_BTYPE_HEAD;
#else
	if (ring->rid == 0)
		btype = VMXNET3_BTYPE_HEAD;
	else
		btype = VMXNET3_BTYPE_BODY;
#endif

	m = MCLGETI(NULL, M_DONTWAIT, NULL, JUMBO_LEN);
	if (m == NULL)
		return -1;

	m->m_pkthdr.len = m->m_len = JUMBO_LEN;
	m_adj(m, ETHER_ALIGN);
	ring->m[idx] = m;

	if (bus_dmamap_load_mbuf(sc->sc_dmat, ring->dmap[idx], m,
	    BUS_DMA_NOWAIT))
		panic("load mbuf");
	rxd->rx_addr = htole64(DMAADDR(ring->dmap[idx]));
	rxd->rx_word2 = htole32(((m->m_pkthdr.len & VMXNET3_RX_LEN_M) <<
	    VMXNET3_RX_LEN_S) | ((btype & VMXNET3_RX_BTYPE_M) <<
	    VMXNET3_RX_BTYPE_S) | ((ring->gen & VMXNET3_RX_GEN_M) <<
	    VMXNET3_RX_GEN_S));
	idx++;
	if (idx == NRXDESC) {
		idx = 0;
		ring->gen ^= 1;
	}
	ring->fill = idx;
#ifdef VMXNET3_STAT
	vmxstat.rxfill = ring->fill;
#endif
	return 0;
}

void
vmxnet3_stop(struct ifnet *ifp)
{
	struct vmxnet3_softc *sc = ifp->if_softc;
	int queue;

	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);
	ifp->if_timer = 0;

	vmxnet3_disable_all_intrs(sc);

	WRITE_CMD(sc, VMXNET3_CMD_DISABLE);

	intr_barrier(sc->sc_ih);

	for (queue = 0; queue < NTXQUEUE; queue++)
		vmxnet3_txstop(sc, &sc->sc_txq[queue]);
	for (queue = 0; queue < NRXQUEUE; queue++)
		vmxnet3_rxstop(sc, &sc->sc_rxq[queue]);
}

void
vmxnet3_reset(struct vmxnet3_softc *sc)
{
	WRITE_CMD(sc, VMXNET3_CMD_RESET);
}

int
vmxnet3_init(struct vmxnet3_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	int queue;

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	vmxnet3_stop(ifp);

#if 0
	/* Put controller into known state. */
	vmxnet3_reset(sc);
#endif

	for (queue = 0; queue < NTXQUEUE; queue++)
		vmxnet3_txinit(sc, &sc->sc_txq[queue]);
	for (queue = 0; queue < NRXQUEUE; queue++)
		vmxnet3_rxinit(sc, &sc->sc_rxq[queue]);

	for (queue = 0; queue < NRXQUEUE; queue++) {
		WRITE_BAR0(sc, VMXNET3_BAR0_RXH1(queue), 0);
		WRITE_BAR0(sc, VMXNET3_BAR0_RXH2(queue), 0);
	}

	WRITE_CMD(sc, VMXNET3_CMD_ENABLE);
	if (READ_BAR1(sc, VMXNET3_BAR1_CMD)) {
		printf("%s: failed to initialize\n", ifp->if_xname);
		vmxnet3_stop(ifp);
		return EIO;
	}

	/* Program promiscuous mode and multicast filters. */
	vmxnet3_iff(sc);

	vmxnet3_enable_all_intrs(sc);

	vmxnet3_link_state(sc);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	return 0;
}

int
vmxnet3_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct vmxnet3_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int error = 0, s;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if ((ifp->if_flags & IFF_RUNNING) == 0)
			error = vmxnet3_init(sc);
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				error = vmxnet3_init(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				vmxnet3_stop(ifp);
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
		break;
	case SIOCGIFRXR:
		error = if_rxr_ioctl((struct if_rxrinfo *)ifr->ifr_data,
		    NULL, JUMBO_LEN, &sc->sc_rxq[0].cmd_ring[0].rxr);
		break;
	default:
		error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data);
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			vmxnet3_iff(sc);
		error = 0;
	}

	splx(s);
	return error;
}

void
vmxnet3_start(struct ifnet *ifp)
{
	struct vmxnet3_softc *sc = ifp->if_softc;
	struct vmxnet3_txqueue *tq = sc->sc_txq;
	struct vmxnet3_txring *ring = &tq->cmd_ring;
	struct vmxnet3_txdesc *txd;
	struct mbuf *m;
	u_int free, used;
	int n;

	if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd))
		return;

	free = ring->free;
	used = 0;

	for (;;) {
		if (used + NTXSEGS > free) {
			ifq_set_oactive(&ifp->if_snd);
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;

		txd = &ring->txd[ring->prod];

		n = vmxnet3_load_mbuf(sc, ring, &m);
		if (n == -1) {
			ifp->if_oerrors++;
			continue;
		}

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif

		/* Change the ownership by flipping the "generation" bit */
		txd->tx_word2 ^= htole32(VMXNET3_TX_GEN_M << VMXNET3_TX_GEN_S);

		ifp->if_opackets++;
		used += n;
	}

	if (used > 0) {
		ifp->if_timer = 5;
		atomic_sub_int(&ring->free, used);
		WRITE_BAR0(sc, VMXNET3_BAR0_TXH(0), ring->prod);
	}
}

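/*
 * Fill tx descriptors for an mbuf chain.  The SOP descriptor is written
 * with the inverted generation bit, so the device ignores the packet
 * until vmxnet3_start() flips it back once every segment is in place.
 */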
int
vmxnet3_load_mbuf(struct vmxnet3_softc *sc, struct vmxnet3_txring *ring,
    struct mbuf **mp)
{
	struct vmxnet3_txdesc *txd, *sop;
	struct mbuf *n, *m = *mp;
	bus_dmamap_t map;
	u_int hlen = ETHER_HDR_LEN, csum_off;
	u_int prod;
	int gen, i;

	prod = ring->prod;
	map = ring->dmap[prod];
#if 0
	if (m->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT) {
		printf("%s: IP checksum offloading is not supported\n",
		    sc->sc_dev.dv_xname);
		return -1;
	}
#endif
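	/*
	 * For TCP/UDP checksum offload the device is given the offset of
	 * the checksum field (hlen + csum_off, programmed into the SOP
	 * descriptor below), so the headers up to and including that
	 * field must be contiguous in the first mbuf.
	 */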
	if (m->m_pkthdr.csum_flags & (M_TCP_CSUM_OUT|M_UDP_CSUM_OUT)) {
		struct ip *ip;
		int offp;

		if (m->m_pkthdr.csum_flags & M_TCP_CSUM_OUT)
			csum_off = offsetof(struct tcphdr, th_sum);
		else
			csum_off = offsetof(struct udphdr, uh_sum);

		n = m_pulldown(m, hlen, sizeof(*ip), &offp);
		if (n == NULL)
			return (-1);

		ip = (struct ip *)(n->m_data + offp);
		hlen += ip->ip_hl << 2;

		*mp = m_pullup(m, hlen + csum_off + 2);
		if (*mp == NULL)
			return (-1);
		m = *mp;
	}

	switch (bus_dmamap_load_mbuf(sc->sc_dmat, map, m, BUS_DMA_NOWAIT)) {
	case 0:
		break;
	case EFBIG:
		if (m_defrag(m, M_DONTWAIT) == 0 &&
		    bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
		     BUS_DMA_NOWAIT) == 0)
			break;

		/* FALLTHROUGH */
	default:
		m_freem(m);
		return -1;
	}

	ring->m[prod] = m;

	sop = &ring->txd[prod];
	gen = ring->gen ^ 1;		/* still owned by cpu */

	for (i = 0; i < map->dm_nsegs; i++) {
		txd = &ring->txd[prod];
		txd->tx_addr = htole64(map->dm_segs[i].ds_addr);
		txd->tx_word2 = htole32(((map->dm_segs[i].ds_len &
		    VMXNET3_TX_LEN_M) << VMXNET3_TX_LEN_S) |
		    ((gen & VMXNET3_TX_GEN_M) << VMXNET3_TX_GEN_S));
		txd->tx_word3 = 0;

		if (++prod == NTXDESC) {
			prod = 0;
			ring->gen ^= 1;
		}

		gen = ring->gen;
	}
	txd->tx_word3 |= htole32(VMXNET3_TX_EOP | VMXNET3_TX_COMPREQ);

	if (m->m_flags & M_VLANTAG) {
		sop->tx_word3 |= htole32(VMXNET3_TX_VTAG_MODE);
		sop->tx_word3 |= htole32((m->m_pkthdr.ether_vtag &
		    VMXNET3_TX_VLANTAG_M) << VMXNET3_TX_VLANTAG_S);
	}
	if (m->m_pkthdr.csum_flags & (M_TCP_CSUM_OUT|M_UDP_CSUM_OUT)) {
		sop->tx_word2 |= htole32(((hlen + csum_off) &
		    VMXNET3_TX_OP_M) << VMXNET3_TX_OP_S);
		sop->tx_word3 |= htole32(((hlen & VMXNET3_TX_HLEN_M) <<
		    VMXNET3_TX_HLEN_S) | (VMXNET3_OM_CSUM << VMXNET3_TX_OM_S));
	}

	/* dmamap_sync map */

	ring->prod = prod;

	return (map->dm_nsegs);
}

void
vmxnet3_watchdog(struct ifnet *ifp)
{
	struct vmxnet3_softc *sc = ifp->if_softc;
	int s;

	printf("%s: device timeout\n", ifp->if_xname);
	s = splnet();
	vmxnet3_init(sc);
	splx(s);
}

void
vmxnet3_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct vmxnet3_softc *sc = ifp->if_softc;

	vmxnet3_link_state(sc);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (ifp->if_link_state != LINK_STATE_UP)
		return;

	ifmr->ifm_status |= IFM_ACTIVE;

	if (ifp->if_baudrate >= IF_Gbps(10))
		ifmr->ifm_active |= IFM_10G_T;
}

int
vmxnet3_media_change(struct ifnet *ifp)
{
	return 0;
}

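/*
 * Allocate wired, physically contiguous DMA memory and return its kernel
 * virtual address, with the physical address in *pa.  The map is created
 * only to learn the physical address and is torn down again; the memory
 * itself lives for the lifetime of the driver and is never freed.
 */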
void *
vmxnet3_dma_allocmem(struct vmxnet3_softc *sc, u_int size, u_int align, bus_addr_t *pa)
{
	bus_dma_tag_t t = sc->sc_dmat;
	bus_dma_segment_t segs[1];
	bus_dmamap_t map;
	caddr_t va;
	int n;

	if (bus_dmamem_alloc(t, size, align, 0, segs, 1, &n, BUS_DMA_NOWAIT))
		return NULL;
	if (bus_dmamem_map(t, segs, 1, size, &va, BUS_DMA_NOWAIT))
		return NULL;
	if (bus_dmamap_create(t, size, 1, size, 0, BUS_DMA_NOWAIT, &map))
		return NULL;
	if (bus_dmamap_load(t, map, va, size, NULL, BUS_DMA_NOWAIT))
		return NULL;
	bzero(va, size);
	*pa = DMAADDR(map);
	bus_dmamap_unload(t, map);
	bus_dmamap_destroy(t, map);
	return va;
}