/*	$OpenBSD: if_vic.c,v 1.102 2020/12/12 11:48:53 jan Exp $	*/

/*
 * Copyright (c) 2006 Reyk Floeter <reyk@openbsd.org>
 * Copyright (c) 2006 David Gwynne <dlg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * Driver for the VMware Virtual NIC ("vmxnet")
 */
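
/*
 * The device exposes a small register window (either natively, or after
 * "morphing" an emulated AMD PCnet/Lance device into vmxnet mode) and
 * exchanges packets through a single shared DMA region that holds a
 * struct vic_data header, the rx descriptor rings, and the tx ring.
 */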

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/malloc.h>
#include <sys/timeout.h>
#include <sys/device.h>

#include <machine/bus.h>
#include <machine/intr.h>

#include <net/if.h>
#include <net/if_media.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <netinet/in.h>
#include <netinet/if_ether.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#define VIC_PCI_BAR		PCI_MAPREG_START /* Base Address Register */

#define VIC_LANCE_SIZE		0x20
#define VIC_MORPH_SIZE		0x04
#define  VIC_MORPH_MASK			0xffff
#define  VIC_MORPH_LANCE		0x2934
#define  VIC_MORPH_VMXNET		0x4392
#define VIC_VMXNET_SIZE		0x40
#define VIC_LANCE_MINLEN	(VIC_LANCE_SIZE + VIC_MORPH_SIZE + \
				    VIC_VMXNET_SIZE)

#define VIC_MAGIC		0xbabe864f

/* Register address offsets */
#define VIC_DATA_ADDR		0x0000		/* Shared data address */
#define VIC_DATA_LENGTH		0x0004		/* Shared data length */
#define VIC_Tx_ADDR		0x0008		/* Tx pointer address */

/* Command register */
#define VIC_CMD			0x000c		/* Command register */
#define  VIC_CMD_INTR_ACK	0x0001	/* Acknowledge interrupt */
#define  VIC_CMD_MCASTFIL	0x0002	/* Multicast address filter */
#define   VIC_CMD_MCASTFIL_LENGTH	2
#define  VIC_CMD_IFF		0x0004	/* Interface flags */
#define   VIC_CMD_IFF_PROMISC	0x0001		/* Promiscuous enabled */
#define   VIC_CMD_IFF_BROADCAST	0x0002		/* Broadcast enabled */
#define   VIC_CMD_IFF_MULTICAST	0x0004		/* Multicast enabled */
#define  VIC_CMD_INTR_DISABLE	0x0020	/* Disable interrupts */
#define  VIC_CMD_INTR_ENABLE	0x0040	/* Enable interrupts */
#define  VIC_CMD_Tx_DONE	0x0100	/* Tx done register */
#define  VIC_CMD_NUM_Rx_BUF	0x0200	/* Number of Rx buffers */
#define  VIC_CMD_NUM_Tx_BUF	0x0400	/* Number of Tx buffers */
#define  VIC_CMD_NUM_PINNED_BUF	0x0800	/* Number of pinned buffers */
#define  VIC_CMD_HWCAP		0x1000	/* Capability register */
#define   VIC_CMD_HWCAP_SG		(1<<0) /* Scatter-gather transmits */
#define   VIC_CMD_HWCAP_CSUM_IPv4	(1<<1) /* TCP/UDP cksum */
#define   VIC_CMD_HWCAP_CSUM_ALL	(1<<3) /* Hardware cksum */
#define   VIC_CMD_HWCAP_CSUM \
	(VIC_CMD_HWCAP_CSUM_IPv4 | VIC_CMD_HWCAP_CSUM_ALL)
#define   VIC_CMD_HWCAP_DMA_HIGH		(1<<4) /* High DMA mapping */
#define   VIC_CMD_HWCAP_TOE		(1<<5) /* TCP offload engine */
#define   VIC_CMD_HWCAP_TSO		(1<<6) /* TCP segmentation offload */
#define   VIC_CMD_HWCAP_TSO_SW		(1<<7) /* Software TCP segmentation */
#define   VIC_CMD_HWCAP_VPROM		(1<<8) /* Virtual PROM available */
#define   VIC_CMD_HWCAP_VLAN_Tx		(1<<9) /* Hardware VLAN MTU Tx */
#define   VIC_CMD_HWCAP_VLAN_Rx		(1<<10) /* Hardware VLAN MTU Rx */
#define   VIC_CMD_HWCAP_VLAN_SW		(1<<11)	/* Software VLAN MTU */
#define   VIC_CMD_HWCAP_VLAN \
	(VIC_CMD_HWCAP_VLAN_Tx | VIC_CMD_HWCAP_VLAN_Rx | \
	VIC_CMD_HWCAP_VLAN_SW)
#define  VIC_CMD_HWCAP_BITS \
	"\20\01SG\02CSUM4\03CSUM\04HDMA\05TOE\06TSO" \
	"\07TSOSW\10VPROM\13VLANTx\14VLANRx\15VLANSW"
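
#if 0
	/*
	 * Illustrative only: VIC_CMD_HWCAP_BITS is a printf(9) "%b"
	 * format string, so a capability word read from the device can
	 * be decoded for debugging along these lines ("sc" stands in
	 * for a vic_softc pointer):
	 */
	printf("%s: cap %b\n", DEVNAME(sc), sc->sc_cap, VIC_CMD_HWCAP_BITS);
#endif
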
#define  VIC_CMD_FEATURE	0x2000	/* Additional feature register */
#define   VIC_CMD_FEATURE_0_Tx		(1<<0)
#define   VIC_CMD_FEATURE_TSO		(1<<1)

#define VIC_LLADDR		0x0010		/* MAC address register */
#define VIC_VERSION_MINOR	0x0018		/* Minor version register */
#define VIC_VERSION_MAJOR	0x001c		/* Major version register */
#define VIC_VERSION_MAJOR_M	0xffff0000

/* Status register */
#define VIC_STATUS		0x0020
#define  VIC_STATUS_CONNECTED		(1<<0)
#define  VIC_STATUS_ENABLED		(1<<1)

#define VIC_TOE_ADDR		0x0024		/* TCP offload address */

/* Virtual PROM address */
#define VIC_VPROM		0x0028
#define VIC_VPROM_LENGTH	6

/* Shared DMA data structures */

struct vic_sg {
	u_int32_t	sg_addr_low;
	u_int16_t	sg_addr_high;
	u_int16_t	sg_length;
} __packed;

#define VIC_SG_MAX		6
#define VIC_SG_ADDR_MACH	0
#define VIC_SG_ADDR_PHYS	1
#define VIC_SG_ADDR_VIRT	3

struct vic_sgarray {
	u_int16_t	sa_addr_type;
	u_int16_t	sa_length;
	struct vic_sg	sa_sg[VIC_SG_MAX];
} __packed;
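
#if 0
	/*
	 * Illustrative sketch, inferred from the field layout rather
	 * than from the driver (which only ever fills sg_addr_low):
	 * a 48-bit bus address "addr" would be split across the two
	 * address fields of a struct vic_sg "sg" like this:
	 */
	sg->sg_addr_low = addr & 0xffffffff;
	sg->sg_addr_high = (addr >> 32) & 0xffff;
#endif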

struct vic_rxdesc {
	u_int64_t	rx_physaddr;
	u_int32_t	rx_buflength;
	u_int32_t	rx_length;
	u_int16_t	rx_owner;
	u_int16_t	rx_flags;
	u_int32_t	rx_priv;
} __packed;

#define VIC_RX_FLAGS_CSUMHW_OK	0x0001

struct vic_txdesc {
	u_int16_t		tx_flags;
	u_int16_t		tx_owner;
	u_int32_t		tx_priv;
	u_int32_t		tx_tsomss;
	struct vic_sgarray	tx_sa;
} __packed;

#define VIC_TX_FLAGS_KEEP	0x0001
#define VIC_TX_FLAGS_TXURN	0x0002
#define VIC_TX_FLAGS_CSUMHW	0x0004
#define VIC_TX_FLAGS_TSO	0x0008
#define VIC_TX_FLAGS_PINNED	0x0010
#define VIC_TX_FLAGS_QRETRY	0x1000

struct vic_stats {
	u_int32_t		vs_tx_count;
	u_int32_t		vs_tx_packets;
	u_int32_t		vs_tx_0copy;
	u_int32_t		vs_tx_copy;
	u_int32_t		vs_tx_maxpending;
	u_int32_t		vs_tx_stopped;
	u_int32_t		vs_tx_overrun;
	u_int32_t		vs_intr;
	u_int32_t		vs_rx_packets;
	u_int32_t		vs_rx_underrun;
} __packed;

#define VIC_NRXRINGS		2

struct vic_data {
	u_int32_t		vd_magic;

	struct {
		u_int32_t		length;
		u_int32_t		nextidx;
	}			vd_rx[VIC_NRXRINGS];

	u_int32_t		vd_irq;
	u_int32_t		vd_iff;

	u_int32_t		vd_mcastfil[VIC_CMD_MCASTFIL_LENGTH];

	u_int32_t		vd_reserved1[1];

	u_int32_t		vd_tx_length;
	u_int32_t		vd_tx_curidx;
	u_int32_t		vd_tx_nextidx;
	u_int32_t		vd_tx_stopped;
	u_int32_t		vd_tx_triggerlvl;
	u_int32_t		vd_tx_queued;
	u_int32_t		vd_tx_minlength;

	u_int32_t		vd_reserved2[6];

	u_int32_t		vd_rx_saved_nextidx[VIC_NRXRINGS];
	u_int32_t		vd_tx_saved_nextidx;

	u_int32_t		vd_length;
	u_int32_t		vd_rx_offset[VIC_NRXRINGS];
	u_int32_t		vd_tx_offset;
	u_int32_t		vd_debug;
	u_int32_t		vd_tx_physaddr;
	u_int32_t		vd_tx_physaddr_length;
	u_int32_t		vd_tx_maxlength;

	struct vic_stats	vd_stats;
} __packed;

#define VIC_OWNER_DRIVER	0
#define VIC_OWNER_DRIVER_PEND	1
#define VIC_OWNER_NIC		2
#define VIC_OWNER_NIC_PEND	3

#define VIC_JUMBO_FRAMELEN	9018
#define VIC_JUMBO_MTU		(VIC_JUMBO_FRAMELEN - ETHER_HDR_LEN - ETHER_CRC_LEN)

#define VIC_NBUF		100
#define VIC_NBUF_MAX		128
#define VIC_MAX_SCATTER		1	/* 8? */
#define VIC_QUEUE_SIZE		VIC_NBUF_MAX
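/*
 * Advance a ring index with wraparound, e.g. VIC_INC(idx, 100) takes
 * idx from 99 back to 0.
 */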
#define VIC_INC(_x, _y)		(_x) = ((_x) + 1) % (_y)
#define VIC_TX_TIMEOUT		5

#define VIC_MIN_FRAMELEN	(ETHER_MIN_LEN - ETHER_CRC_LEN)

#define VIC_TXURN_WARN(_sc)	((_sc)->sc_txpending >= ((_sc)->sc_ntxbuf - 5))
#define VIC_TXURN(_sc)		((_sc)->sc_txpending >= (_sc)->sc_ntxbuf)

struct vic_rxbuf {
	bus_dmamap_t		rxb_dmamap;
	struct mbuf		*rxb_m;
};

struct vic_txbuf {
	bus_dmamap_t		txb_dmamap;
	struct mbuf		*txb_m;
};

struct vic_softc {
	struct device		sc_dev;

	pci_chipset_tag_t	sc_pc;
	pcitag_t		sc_tag;

	bus_space_tag_t		sc_iot;
	bus_space_handle_t	sc_ioh;
	bus_size_t		sc_ios;
	bus_dma_tag_t		sc_dmat;

	void			*sc_ih;

	struct timeout		sc_tick;

	struct arpcom		sc_ac;
	struct ifmedia		sc_media;

	u_int32_t		sc_nrxbuf;
	u_int32_t		sc_ntxbuf;
	u_int32_t		sc_cap;
	u_int32_t		sc_feature;
	u_int8_t		sc_lladdr[ETHER_ADDR_LEN];

	bus_dmamap_t		sc_dma_map;
	bus_dma_segment_t	sc_dma_seg;
	size_t			sc_dma_size;
	caddr_t			sc_dma_kva;
#define VIC_DMA_DVA(_sc)	((_sc)->sc_dma_map->dm_segs[0].ds_addr)
#define VIC_DMA_KVA(_sc)	((void *)(_sc)->sc_dma_kva)

	struct vic_data		*sc_data;

	struct {
		struct if_rxring	ring;
		struct vic_rxbuf	*bufs;
		struct vic_rxdesc	*slots;
		int			end;
		u_int			pktlen;
	}			sc_rxq[VIC_NRXRINGS];

	struct vic_txbuf	*sc_txbuf;
	struct vic_txdesc	*sc_txq;
	volatile u_int		sc_txpending;
};

struct cfdriver vic_cd = {
	NULL, "vic", DV_IFNET
};

int		vic_match(struct device *, void *, void *);
void		vic_attach(struct device *, struct device *, void *);

struct cfattach vic_ca = {
	sizeof(struct vic_softc), vic_match, vic_attach
};

int		vic_intr(void *);

int		vic_query(struct vic_softc *);
int		vic_alloc_data(struct vic_softc *);
int		vic_init_data(struct vic_softc *sc);
int		vic_uninit_data(struct vic_softc *sc);

u_int32_t	vic_read(struct vic_softc *, bus_size_t);
void		vic_write(struct vic_softc *, bus_size_t, u_int32_t);

u_int32_t	vic_read_cmd(struct vic_softc *, u_int32_t);

int		vic_alloc_dmamem(struct vic_softc *);
void		vic_free_dmamem(struct vic_softc *);

void		vic_link_state(struct vic_softc *);
void		vic_rx_fill(struct vic_softc *, int);
void		vic_rx_proc(struct vic_softc *, int);
void		vic_tx_proc(struct vic_softc *);
void		vic_iff(struct vic_softc *);
void		vic_getlladdr(struct vic_softc *);
void		vic_setlladdr(struct vic_softc *);
int		vic_media_change(struct ifnet *);
void		vic_media_status(struct ifnet *, struct ifmediareq *);
void		vic_start(struct ifnet *);
int		vic_load_txb(struct vic_softc *, struct vic_txbuf *,
		    struct mbuf *);
void		vic_watchdog(struct ifnet *);
int		vic_ioctl(struct ifnet *, u_long, caddr_t);
int		vic_rxrinfo(struct vic_softc *, struct if_rxrinfo *);
void		vic_init(struct ifnet *);
void		vic_stop(struct ifnet *);
void		vic_tick(void *);

#define DEVNAME(_s)	((_s)->sc_dev.dv_xname)

struct mbuf *vic_alloc_mbuf(struct vic_softc *, bus_dmamap_t, u_int);

const struct pci_matchid vic_devices[] = {
	{ PCI_VENDOR_VMWARE, PCI_PRODUCT_VMWARE_NET }
};

int
vic_match(struct device *parent, void *match, void *aux)
{
	struct pci_attach_args		*pa = aux;
	pcireg_t			memtype;
	bus_size_t			pcisize;
	bus_addr_t			pciaddr;

	switch (pa->pa_id) {
	case PCI_ID_CODE(PCI_VENDOR_VMWARE, PCI_PRODUCT_VMWARE_NET):
		return (1);

	case PCI_ID_CODE(PCI_VENDOR_AMD, PCI_PRODUCT_AMD_PCNET_PCI):
		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, VIC_PCI_BAR);
		if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, VIC_PCI_BAR,
		    memtype, &pciaddr, &pcisize, NULL) != 0)
			break;

		if (pcisize > VIC_LANCE_MINLEN)
			return (2);

		break;
	}

	return (0);
}

void
vic_attach(struct device *parent, struct device *self, void *aux)
{
	struct vic_softc		*sc = (struct vic_softc *)self;
	struct pci_attach_args		*pa = aux;
	bus_space_handle_t		ioh;
	pcireg_t			r;
	pci_intr_handle_t		ih;
	struct ifnet			*ifp;

	sc->sc_pc = pa->pa_pc;
	sc->sc_tag = pa->pa_tag;
	sc->sc_dmat = pa->pa_dmat;

	r = pci_mapreg_type(sc->sc_pc, sc->sc_tag, VIC_PCI_BAR);
	if (pci_mapreg_map(pa, VIC_PCI_BAR, r, 0, &sc->sc_iot,
	    &ioh, NULL, &sc->sc_ios, 0) != 0) {
		printf(": unable to map system interface register\n");
		return;
	}

	switch (pa->pa_id) {
	case PCI_ID_CODE(PCI_VENDOR_VMWARE, PCI_PRODUCT_VMWARE_NET):
		if (bus_space_subregion(sc->sc_iot, ioh, 0, sc->sc_ios,
		    &sc->sc_ioh) != 0) {
			printf(": unable to map register window\n");
			goto unmap;
		}
		break;

	case PCI_ID_CODE(PCI_VENDOR_AMD, PCI_PRODUCT_AMD_PCNET_PCI):
		if (bus_space_subregion(sc->sc_iot, ioh,
		    VIC_LANCE_SIZE + VIC_MORPH_SIZE, VIC_VMXNET_SIZE,
		    &sc->sc_ioh) != 0) {
			printf(": unable to map register window\n");
			goto unmap;
		}

		bus_space_barrier(sc->sc_iot, ioh, VIC_LANCE_SIZE, 4,
		    BUS_SPACE_BARRIER_READ);
		r = bus_space_read_4(sc->sc_iot, ioh, VIC_LANCE_SIZE);

		if ((r & VIC_MORPH_MASK) == VIC_MORPH_VMXNET)
			break;
		if ((r & VIC_MORPH_MASK) != VIC_MORPH_LANCE) {
			printf(": unexpected morph value (0x%08x)\n", r);
			goto unmap;
		}

		r &= ~VIC_MORPH_MASK;
		r |= VIC_MORPH_VMXNET;

		bus_space_write_4(sc->sc_iot, ioh, VIC_LANCE_SIZE, r);
		bus_space_barrier(sc->sc_iot, ioh, VIC_LANCE_SIZE, 4,
		    BUS_SPACE_BARRIER_WRITE);

		bus_space_barrier(sc->sc_iot, ioh, VIC_LANCE_SIZE, 4,
		    BUS_SPACE_BARRIER_READ);
		r = bus_space_read_4(sc->sc_iot, ioh, VIC_LANCE_SIZE);

		if ((r & VIC_MORPH_MASK) != VIC_MORPH_VMXNET) {
			printf(": unable to morph vlance chip\n");
			goto unmap;
		}

		break;
	}

	if (pci_intr_map(pa, &ih) != 0) {
		printf(": unable to map interrupt\n");
		goto unmap;
	}

	sc->sc_ih = pci_intr_establish(pa->pa_pc, ih, IPL_NET,
	    vic_intr, sc, DEVNAME(sc));
	if (sc->sc_ih == NULL) {
		printf(": unable to establish interrupt\n");
		goto unmap;
	}

	if (vic_query(sc) != 0) {
		/* error printed by vic_query */
		goto unmap;
	}

	if (vic_alloc_data(sc) != 0) {
		/* error printed by vic_alloc_data */
		goto unmap;
	}

	timeout_set(&sc->sc_tick, vic_tick, sc);

	bcopy(sc->sc_lladdr, sc->sc_ac.ac_enaddr, ETHER_ADDR_LEN);

	ifp = &sc->sc_ac.ac_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = vic_ioctl;
	ifp->if_start = vic_start;
	ifp->if_watchdog = vic_watchdog;
	ifp->if_hardmtu = VIC_JUMBO_MTU;
	strlcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);
	ifq_set_maxlen(&ifp->if_snd, sc->sc_ntxbuf - 1);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

#if 0
	/* XXX interface capabilities */
	if (sc->sc_cap & VIC_CMD_HWCAP_VLAN)
		ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
	if (sc->sc_cap & VIC_CMD_HWCAP_CSUM)
		ifp->if_capabilities |= IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 |
		    IFCAP_CSUM_UDPv4;
#endif

	ifmedia_init(&sc->sc_media, 0, vic_media_change, vic_media_status);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);

	if_attach(ifp);
	ether_ifattach(ifp);

	printf(": %s, address %s\n", pci_intr_string(pa->pa_pc, ih),
	    ether_sprintf(sc->sc_lladdr));

#ifdef VIC_DEBUG
	printf("%s: feature 0x%08x, cap 0x%08x, rx/txbuf %d/%d\n", DEVNAME(sc),
	    sc->sc_feature, sc->sc_cap, sc->sc_nrxbuf, sc->sc_ntxbuf);
#endif

	return;

unmap:
	bus_space_unmap(sc->sc_iot, ioh, sc->sc_ios);
	sc->sc_ios = 0;
}

int
vic_query(struct vic_softc *sc)
{
	u_int32_t			major, minor;

	major = vic_read(sc, VIC_VERSION_MAJOR);
	minor = vic_read(sc, VIC_VERSION_MINOR);

	/* Check for a supported version */
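	/*
	 * The upper 16 bits of the major version register must carry
	 * the upper 16 bits of VIC_MAGIC; the low bits hold the actual
	 * version number.
	 */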
	if ((major & VIC_VERSION_MAJOR_M) !=
	    (VIC_MAGIC & VIC_VERSION_MAJOR_M)) {
		printf(": magic mismatch\n");
		return (1);
	}

	if (VIC_MAGIC > major || VIC_MAGIC < minor) {
		printf(": unsupported version (%X)\n",
		    major & ~VIC_VERSION_MAJOR_M);
		return (1);
	}

	sc->sc_nrxbuf = vic_read_cmd(sc, VIC_CMD_NUM_Rx_BUF);
	sc->sc_ntxbuf = vic_read_cmd(sc, VIC_CMD_NUM_Tx_BUF);
	sc->sc_feature = vic_read_cmd(sc, VIC_CMD_FEATURE);
	sc->sc_cap = vic_read_cmd(sc, VIC_CMD_HWCAP);

	vic_getlladdr(sc);

	if (sc->sc_nrxbuf > VIC_NBUF_MAX || sc->sc_nrxbuf == 0)
		sc->sc_nrxbuf = VIC_NBUF;
	if (sc->sc_ntxbuf > VIC_NBUF_MAX || sc->sc_ntxbuf == 0)
		sc->sc_ntxbuf = VIC_NBUF;

	return (0);
}

int
vic_alloc_data(struct vic_softc *sc)
{
	u_int8_t			*kva;
	u_int				offset;
	struct vic_rxdesc		*rxd;
	int				i, q;

	sc->sc_rxq[0].pktlen = MCLBYTES;
	sc->sc_rxq[1].pktlen = 4096;

	for (q = 0; q < VIC_NRXRINGS; q++) {
		sc->sc_rxq[q].bufs = mallocarray(sc->sc_nrxbuf,
		    sizeof(struct vic_rxbuf), M_DEVBUF, M_NOWAIT | M_ZERO);
		if (sc->sc_rxq[q].bufs == NULL) {
			printf(": unable to allocate rxbuf for ring %d\n", q);
			goto freerx;
		}
	}

	sc->sc_txbuf = mallocarray(sc->sc_ntxbuf, sizeof(struct vic_txbuf),
	    M_DEVBUF, M_NOWAIT);
	if (sc->sc_txbuf == NULL) {
		printf(": unable to allocate txbuf\n");
		goto freerx;
	}

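	/*
	 * The shared DMA region holds the vic_data header, followed by
	 * VIC_NRXRINGS rings of rx descriptors, followed by the tx
	 * descriptor ring.
	 */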
	sc->sc_dma_size = sizeof(struct vic_data) +
	    (sc->sc_nrxbuf * VIC_NRXRINGS) * sizeof(struct vic_rxdesc) +
	    sc->sc_ntxbuf * sizeof(struct vic_txdesc);

	if (vic_alloc_dmamem(sc) != 0) {
		printf(": unable to allocate dma region\n");
		goto freetx;
	}
	kva = VIC_DMA_KVA(sc);

	/* set up basic vic data */
	sc->sc_data = VIC_DMA_KVA(sc);

	sc->sc_data->vd_magic = VIC_MAGIC;
	sc->sc_data->vd_length = sc->sc_dma_size;

	offset = sizeof(struct vic_data);

	/* set up the rx rings */

	for (q = 0; q < VIC_NRXRINGS; q++) {
		sc->sc_rxq[q].slots = (struct vic_rxdesc *)&kva[offset];
		sc->sc_data->vd_rx_offset[q] = offset;
		sc->sc_data->vd_rx[q].length = sc->sc_nrxbuf;

		for (i = 0; i < sc->sc_nrxbuf; i++) {
			rxd = &sc->sc_rxq[q].slots[i];

			rxd->rx_physaddr = 0;
			rxd->rx_buflength = 0;
			rxd->rx_length = 0;
			rxd->rx_owner = VIC_OWNER_DRIVER;

			offset += sizeof(struct vic_rxdesc);
		}
	}

	/* set up the tx ring */
	sc->sc_txq = (struct vic_txdesc *)&kva[offset];

	sc->sc_data->vd_tx_offset = offset;
	sc->sc_data->vd_tx_length = sc->sc_ntxbuf;

	return (0);
freetx:
	free(sc->sc_txbuf, M_DEVBUF, 0);
	q = VIC_NRXRINGS;
freerx:
	while (q--)
		free(sc->sc_rxq[q].bufs, M_DEVBUF, 0);

	return (1);
}

void
vic_rx_fill(struct vic_softc *sc, int q)
{
	struct vic_rxbuf		*rxb;
	struct vic_rxdesc		*rxd;
	u_int				slots;

	for (slots = if_rxr_get(&sc->sc_rxq[q].ring, sc->sc_nrxbuf);
	    slots > 0; slots--) {
		rxb = &sc->sc_rxq[q].bufs[sc->sc_rxq[q].end];
		rxd = &sc->sc_rxq[q].slots[sc->sc_rxq[q].end];

		rxb->rxb_m = vic_alloc_mbuf(sc, rxb->rxb_dmamap,
		    sc->sc_rxq[q].pktlen);
		if (rxb->rxb_m == NULL)
			break;

		bus_dmamap_sync(sc->sc_dmat, rxb->rxb_dmamap, 0,
		    rxb->rxb_m->m_pkthdr.len, BUS_DMASYNC_PREREAD);

		rxd->rx_physaddr = rxb->rxb_dmamap->dm_segs[0].ds_addr;
		rxd->rx_buflength = rxb->rxb_m->m_pkthdr.len;
		rxd->rx_length = 0;
		rxd->rx_owner = VIC_OWNER_NIC;

		VIC_INC(sc->sc_rxq[q].end, sc->sc_data->vd_rx[q].length);
	}
	if_rxr_put(&sc->sc_rxq[q].ring, slots);
}

int
vic_init_data(struct vic_softc *sc)
{
	struct vic_rxbuf		*rxb;
	struct vic_rxdesc		*rxd;
	struct vic_txbuf		*txb;

	int				q, i;

	for (q = 0; q < VIC_NRXRINGS; q++) {
		for (i = 0; i < sc->sc_nrxbuf; i++) {
			rxb = &sc->sc_rxq[q].bufs[i];
			rxd = &sc->sc_rxq[q].slots[i];

			if (bus_dmamap_create(sc->sc_dmat,
			    sc->sc_rxq[q].pktlen, 1, sc->sc_rxq[q].pktlen, 0,
			    BUS_DMA_NOWAIT, &rxb->rxb_dmamap) != 0) {
				printf("%s: unable to create dmamap for "
				    "ring %d slot %d\n", DEVNAME(sc), q, i);
				goto freerxbs;
			}

			/* scrub the ring */
			rxd->rx_physaddr = 0;
			rxd->rx_buflength = 0;
			rxd->rx_length = 0;
			rxd->rx_owner = VIC_OWNER_DRIVER;
		}
		sc->sc_rxq[q].end = 0;

		if_rxr_init(&sc->sc_rxq[q].ring, 2, sc->sc_nrxbuf - 1);
		vic_rx_fill(sc, q);
	}

	for (i = 0; i < sc->sc_ntxbuf; i++) {
		txb = &sc->sc_txbuf[i];
		if (bus_dmamap_create(sc->sc_dmat, VIC_JUMBO_FRAMELEN,
		    (sc->sc_cap & VIC_CMD_HWCAP_SG) ? VIC_SG_MAX : 1,
		    VIC_JUMBO_FRAMELEN, 0, BUS_DMA_NOWAIT,
		    &txb->txb_dmamap) != 0) {
			printf("%s: unable to create dmamap for tx %d\n",
			    DEVNAME(sc), i);
			goto freetxbs;
		}
		txb->txb_m = NULL;
	}

	return (0);

freetxbs:
	while (i--) {
		txb = &sc->sc_txbuf[i];
		bus_dmamap_destroy(sc->sc_dmat, txb->txb_dmamap);
	}

	i = sc->sc_nrxbuf;
	q = VIC_NRXRINGS - 1;
freerxbs:
	while (q >= 0) {
		while (i--) {
			rxb = &sc->sc_rxq[q].bufs[i];

			if (rxb->rxb_m != NULL) {
				bus_dmamap_sync(sc->sc_dmat, rxb->rxb_dmamap,
				    0, rxb->rxb_m->m_pkthdr.len,
				    BUS_DMASYNC_POSTREAD);
				bus_dmamap_unload(sc->sc_dmat, rxb->rxb_dmamap);
				m_freem(rxb->rxb_m);
				rxb->rxb_m = NULL;
			}
			bus_dmamap_destroy(sc->sc_dmat, rxb->rxb_dmamap);
		}
		q--;
		/* earlier rings were fully populated; free all slots */
		i = sc->sc_nrxbuf;
	}

	return (1);
}

int
vic_uninit_data(struct vic_softc *sc)
{
	struct vic_rxbuf		*rxb;
	struct vic_rxdesc		*rxd;
	struct vic_txbuf		*txb;

	int				i, q;

	for (q = 0; q < VIC_NRXRINGS; q++) {
		for (i = 0; i < sc->sc_nrxbuf; i++) {
			rxb = &sc->sc_rxq[q].bufs[i];
			rxd = &sc->sc_rxq[q].slots[i];

			if (rxb->rxb_m != NULL) {
				bus_dmamap_sync(sc->sc_dmat, rxb->rxb_dmamap,
				    0, rxb->rxb_m->m_pkthdr.len,
				    BUS_DMASYNC_POSTREAD);
				bus_dmamap_unload(sc->sc_dmat, rxb->rxb_dmamap);
				m_freem(rxb->rxb_m);
				rxb->rxb_m = NULL;
			}
			bus_dmamap_destroy(sc->sc_dmat, rxb->rxb_dmamap);
		}
	}

	for (i = 0; i < sc->sc_ntxbuf; i++) {
		txb = &sc->sc_txbuf[i];
		bus_dmamap_destroy(sc->sc_dmat, txb->txb_dmamap);
	}

	return (0);
}

void
vic_link_state(struct vic_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	u_int32_t status;
	int link_state = LINK_STATE_DOWN;

	status = vic_read(sc, VIC_STATUS);
	if (status & VIC_STATUS_CONNECTED)
		link_state = LINK_STATE_FULL_DUPLEX;
	if (ifp->if_link_state != link_state) {
		ifp->if_link_state = link_state;
		if_link_state_change(ifp);
	}
}

int
vic_intr(void *arg)
{
	struct vic_softc *sc = (struct vic_softc *)arg;
	int q;

	vic_write(sc, VIC_CMD, VIC_CMD_INTR_ACK);

	for (q = 0; q < VIC_NRXRINGS; q++)
		vic_rx_proc(sc, q);
	vic_tx_proc(sc);

	return (-1);
}

void
vic_rx_proc(struct vic_softc *sc, int q)
{
	struct ifnet			*ifp = &sc->sc_ac.ac_if;
	struct vic_rxdesc		*rxd;
	struct vic_rxbuf		*rxb;
	struct mbuf_list		 ml = MBUF_LIST_INITIALIZER();
	struct mbuf			*m;
	int				len, idx;

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_map, 0, sc->sc_dma_size,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	while (if_rxr_inuse(&sc->sc_rxq[q].ring) > 0) {
		idx = sc->sc_data->vd_rx[q].nextidx;
		if (idx >= sc->sc_data->vd_rx[q].length) {
			ifp->if_ierrors++;
			if (ifp->if_flags & IFF_DEBUG)
				printf("%s: receive index error\n",
				    sc->sc_dev.dv_xname);
			break;
		}

		rxd = &sc->sc_rxq[q].slots[idx];
		if (rxd->rx_owner != VIC_OWNER_DRIVER)
			break;

		rxb = &sc->sc_rxq[q].bufs[idx];

		if (rxb->rxb_m == NULL) {
			ifp->if_ierrors++;
			printf("%s: rxb %d has no mbuf\n", DEVNAME(sc), idx);
			break;
		}

		bus_dmamap_sync(sc->sc_dmat, rxb->rxb_dmamap, 0,
		    rxb->rxb_m->m_pkthdr.len, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, rxb->rxb_dmamap);

		m = rxb->rxb_m;
		rxb->rxb_m = NULL;
		len = rxd->rx_length;

		if (len < VIC_MIN_FRAMELEN) {
			m_freem(m);

			ifp->if_iqdrops++;
			goto nextp;
		}

		m->m_pkthdr.len = m->m_len = len;

		ml_enqueue(&ml, m);

 nextp:
		if_rxr_put(&sc->sc_rxq[q].ring, 1);
		VIC_INC(sc->sc_data->vd_rx[q].nextidx, sc->sc_nrxbuf);
	}

	if (ifiq_input(&ifp->if_rcv, &ml))
		if_rxr_livelocked(&sc->sc_rxq[q].ring);

	vic_rx_fill(sc, q);

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_map, 0, sc->sc_dma_size,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

void
vic_tx_proc(struct vic_softc *sc)
{
	struct ifnet			*ifp = &sc->sc_ac.ac_if;
	struct vic_txdesc		*txd;
	struct vic_txbuf		*txb;
	int				idx;

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_map, 0, sc->sc_dma_size,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	while (sc->sc_txpending > 0) {
		idx = sc->sc_data->vd_tx_curidx;
		if (idx >= sc->sc_data->vd_tx_length) {
			ifp->if_oerrors++;
			break;
		}

		txd = &sc->sc_txq[idx];
		if (txd->tx_owner != VIC_OWNER_DRIVER)
			break;

		txb = &sc->sc_txbuf[idx];
		if (txb->txb_m == NULL) {
			printf("%s: tx ring is corrupt\n", DEVNAME(sc));
			ifp->if_oerrors++;
			break;
		}

		bus_dmamap_sync(sc->sc_dmat, txb->txb_dmamap, 0,
		    txb->txb_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, txb->txb_dmamap);

		m_freem(txb->txb_m);
		txb->txb_m = NULL;
		ifq_clr_oactive(&ifp->if_snd);

		sc->sc_txpending--;
		sc->sc_data->vd_tx_stopped = 0;

		VIC_INC(sc->sc_data->vd_tx_curidx, sc->sc_data->vd_tx_length);
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_map, 0, sc->sc_dma_size,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	vic_start(ifp);
}

void
vic_iff(struct vic_softc *sc)
{
	struct arpcom *ac = &sc->sc_ac;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	u_int32_t crc;
	u_int16_t *mcastfil = (u_int16_t *)sc->sc_data->vd_mcastfil;
	u_int flags;

	ifp->if_flags &= ~IFF_ALLMULTI;

	/* Always accept broadcast frames. */
	flags = VIC_CMD_IFF_BROADCAST;

	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
		ifp->if_flags |= IFF_ALLMULTI;
		if (ifp->if_flags & IFF_PROMISC)
			flags |= VIC_CMD_IFF_PROMISC;
		else
			flags |= VIC_CMD_IFF_MULTICAST;
		memset(&sc->sc_data->vd_mcastfil, 0xff,
		    sizeof(sc->sc_data->vd_mcastfil));
	} else {
		flags |= VIC_CMD_IFF_MULTICAST;

		bzero(&sc->sc_data->vd_mcastfil,
		    sizeof(sc->sc_data->vd_mcastfil));

		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			crc = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN);

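			/*
			 * The top 6 bits of the little-endian CRC pick
			 * one of 64 bits in the multicast hash filter.
			 */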
			crc >>= 26;

			mcastfil[crc >> 4] |= htole16(1 << (crc & 0xf));

			ETHER_NEXT_MULTI(step, enm);
		}
	}

	vic_write(sc, VIC_CMD, VIC_CMD_MCASTFIL);
	sc->sc_data->vd_iff = flags;
	vic_write(sc, VIC_CMD, VIC_CMD_IFF);
}

void
vic_getlladdr(struct vic_softc *sc)
{
	u_int32_t reg;

	/* Get MAC address */
	reg = (sc->sc_cap & VIC_CMD_HWCAP_VPROM) ? VIC_VPROM : VIC_LLADDR;

	bus_space_barrier(sc->sc_iot, sc->sc_ioh, reg, ETHER_ADDR_LEN,
	    BUS_SPACE_BARRIER_READ);
	bus_space_read_region_1(sc->sc_iot, sc->sc_ioh, reg, sc->sc_lladdr,
	    ETHER_ADDR_LEN);

	/* Update the MAC address register */
	if (reg == VIC_VPROM)
		vic_setlladdr(sc);
}

void
vic_setlladdr(struct vic_softc *sc)
{
	bus_space_write_region_1(sc->sc_iot, sc->sc_ioh, VIC_LLADDR,
	    sc->sc_lladdr, ETHER_ADDR_LEN);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, VIC_LLADDR, ETHER_ADDR_LEN,
	    BUS_SPACE_BARRIER_WRITE);
}

int
vic_media_change(struct ifnet *ifp)
{
	/* Ignore */
	return (0);
}

void
vic_media_status(struct ifnet *ifp, struct ifmediareq *imr)
{
	struct vic_softc *sc = (struct vic_softc *)ifp->if_softc;

	imr->ifm_active = IFM_ETHER | IFM_AUTO;
	imr->ifm_status = IFM_AVALID;

	vic_link_state(sc);

	if (LINK_STATE_IS_UP(ifp->if_link_state) &&
	    ifp->if_flags & IFF_UP)
		imr->ifm_status |= IFM_ACTIVE;
}

void
vic_start(struct ifnet *ifp)
{
	struct vic_softc		*sc;
	struct mbuf			*m;
	struct vic_txbuf		*txb;
	struct vic_txdesc		*txd;
	struct vic_sg			*sge;
	bus_dmamap_t			dmap;
	int				i, idx;
	int				tx = 0;

	if (!(ifp->if_flags & IFF_RUNNING))
		return;

	if (ifq_is_oactive(&ifp->if_snd))
		return;

	if (ifq_empty(&ifp->if_snd))
		return;

	sc = (struct vic_softc *)ifp->if_softc;

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_map, 0, sc->sc_dma_size,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	for (;;) {
		if (VIC_TXURN(sc)) {
			ifq_set_oactive(&ifp->if_snd);
			break;
		}

		idx = sc->sc_data->vd_tx_nextidx;
		if (idx >= sc->sc_data->vd_tx_length) {
			printf("%s: tx idx is corrupt\n", DEVNAME(sc));
			ifp->if_oerrors++;
			break;
		}

		txd = &sc->sc_txq[idx];
		txb = &sc->sc_txbuf[idx];

		if (txb->txb_m != NULL) {
			printf("%s: tx ring is corrupt\n", DEVNAME(sc));
			sc->sc_data->vd_tx_stopped = 1;
			ifp->if_oerrors++;
			break;
		}

		m = ifq_dequeue(&ifp->if_snd);
		if (m == NULL)
			break;

		if (vic_load_txb(sc, txb, m) != 0) {
			m_freem(m);
			ifp->if_oerrors++;
			continue;
		}

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, txb->txb_m, BPF_DIRECTION_OUT);
#endif

		dmap = txb->txb_dmamap;
		txd->tx_flags = VIC_TX_FLAGS_KEEP;
		txd->tx_owner = VIC_OWNER_NIC;
		txd->tx_sa.sa_addr_type = VIC_SG_ADDR_PHYS;
		txd->tx_sa.sa_length = dmap->dm_nsegs;
		for (i = 0; i < dmap->dm_nsegs; i++) {
			sge = &txd->tx_sa.sa_sg[i];
			sge->sg_length = dmap->dm_segs[i].ds_len;
			sge->sg_addr_low = dmap->dm_segs[i].ds_addr;
		}

		if (VIC_TXURN_WARN(sc)) {
			txd->tx_flags |= VIC_TX_FLAGS_TXURN;
		}

		bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		sc->sc_txpending++;

		VIC_INC(sc->sc_data->vd_tx_nextidx, sc->sc_data->vd_tx_length);

		tx = 1;
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_map, 0, sc->sc_dma_size,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

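	/* Reading VIC_Tx_ADDR acts as the doorbell for queued frames. */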
	if (tx)
		vic_read(sc, VIC_Tx_ADDR);
}

int
vic_load_txb(struct vic_softc *sc, struct vic_txbuf *txb, struct mbuf *m)
{
	bus_dmamap_t			dmap = txb->txb_dmamap;
	int				error;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, dmap, m, BUS_DMA_NOWAIT);
	switch (error) {
	case 0:
		txb->txb_m = m;
		break;

	case EFBIG:
		if (m_defrag(m, M_DONTWAIT) == 0 &&
		    bus_dmamap_load_mbuf(sc->sc_dmat, dmap, m,
		    BUS_DMA_NOWAIT) == 0) {
			txb->txb_m = m;
			break;
		}

		/* FALLTHROUGH */
	default:
		return (ENOBUFS);
	}

	return (0);
}

void
vic_watchdog(struct ifnet *ifp)
{
#if 0
	struct vic_softc *sc = (struct vic_softc *)ifp->if_softc;

	if (sc->sc_txpending && sc->sc_txtimeout > 0) {
		if (--sc->sc_txtimeout == 0) {
			printf("%s: device timeout\n", sc->sc_dev.dv_xname);
			ifp->if_flags &= ~IFF_RUNNING;
			vic_init(ifp);
			ifp->if_oerrors++;
			return;
		}
	}

	if (!ifq_empty(&ifp->if_snd))
		vic_start(ifp);
#endif
}

int
vic_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct vic_softc *sc = (struct vic_softc *)ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		/* FALLTHROUGH */
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				vic_init(ifp);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				vic_stop(ifp);
		}
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
		break;

	case SIOCGIFRXR:
		error = vic_rxrinfo(sc, (struct if_rxrinfo *)ifr->ifr_data);
		break;

	default:
		error = ether_ioctl(ifp, &sc->sc_ac, cmd, data);
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			vic_iff(sc);
		error = 0;
	}

	splx(s);
	return (error);
}

int
vic_rxrinfo(struct vic_softc *sc, struct if_rxrinfo *ifri)
{
	struct if_rxring_info ifr[2];

	memset(ifr, 0, sizeof(ifr));

	ifr[0].ifr_size = MCLBYTES;
	ifr[0].ifr_info = sc->sc_rxq[0].ring;

	ifr[1].ifr_size = 4096;
	ifr[1].ifr_info = sc->sc_rxq[1].ring;

	return (if_rxr_info_ioctl(ifri, nitems(ifr), ifr));
}

void
vic_init(struct ifnet *ifp)
{
	struct vic_softc	*sc = (struct vic_softc *)ifp->if_softc;
	int			q;
	int			s;

	sc->sc_data->vd_tx_curidx = 0;
	sc->sc_data->vd_tx_nextidx = 0;
	sc->sc_data->vd_tx_stopped = sc->sc_data->vd_tx_queued = 0;
	sc->sc_data->vd_tx_saved_nextidx = 0;

	for (q = 0; q < VIC_NRXRINGS; q++) {
		sc->sc_data->vd_rx[q].nextidx = 0;
		sc->sc_data->vd_rx_saved_nextidx[q] = 0;
	}

	if (vic_init_data(sc) != 0)
		return;

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_map, 0, sc->sc_dma_size,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	s = splnet();

	vic_write(sc, VIC_DATA_ADDR, VIC_DMA_DVA(sc));
	vic_write(sc, VIC_DATA_LENGTH, sc->sc_dma_size);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	vic_iff(sc);
	vic_write(sc, VIC_CMD, VIC_CMD_INTR_ENABLE);

	splx(s);

	timeout_add_sec(&sc->sc_tick, 1);
}

void
vic_stop(struct ifnet *ifp)
{
	struct vic_softc *sc = (struct vic_softc *)ifp->if_softc;
	int s;

	s = splnet();

	timeout_del(&sc->sc_tick);

	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_map, 0, sc->sc_dma_size,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	/* XXX wait for tx to complete */
	while (sc->sc_txpending > 0) {
		splx(s);
		delay(1000);
		s = splnet();
	}

	sc->sc_data->vd_tx_stopped = 1;

	vic_write(sc, VIC_CMD, VIC_CMD_INTR_DISABLE);

	sc->sc_data->vd_iff = 0;
	vic_write(sc, VIC_CMD, VIC_CMD_IFF);

	vic_write(sc, VIC_DATA_ADDR, 0);

	vic_uninit_data(sc);

	splx(s);
}

struct mbuf *
vic_alloc_mbuf(struct vic_softc *sc, bus_dmamap_t map, u_int pktlen)
{
	struct mbuf *m = NULL;

	m = MCLGETL(NULL, M_DONTWAIT, pktlen);
	if (!m)
		return (NULL);
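	/* Shift the payload so the IP header lands 4-byte aligned. */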
	m->m_data += ETHER_ALIGN;
	m->m_len = m->m_pkthdr.len = pktlen - ETHER_ALIGN;

	if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m, BUS_DMA_NOWAIT) != 0) {
		printf("%s: could not load mbuf DMA map\n", DEVNAME(sc));
		m_freem(m);
		return (NULL);
	}

	return (m);
}

void
vic_tick(void *arg)
{
	struct vic_softc		*sc = (struct vic_softc *)arg;

	vic_link_state(sc);

	timeout_add_sec(&sc->sc_tick, 1);
}

u_int32_t
vic_read(struct vic_softc *sc, bus_size_t r)
{
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
	    BUS_SPACE_BARRIER_READ);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, r));
}

void
vic_write(struct vic_softc *sc, bus_size_t r, u_int32_t v)
{
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, r, v);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
	    BUS_SPACE_BARRIER_WRITE);
}

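/*
 * Command registers are request/response: write a VIC_CMD_* code to
 * VIC_CMD, then read the result back from the same register.
 */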
u_int32_t
vic_read_cmd(struct vic_softc *sc, u_int32_t cmd)
{
	vic_write(sc, VIC_CMD, cmd);
	return (vic_read(sc, VIC_CMD));
}

int
vic_alloc_dmamem(struct vic_softc *sc)
{
	int nsegs;

	if (bus_dmamap_create(sc->sc_dmat, sc->sc_dma_size, 1,
	    sc->sc_dma_size, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
	    &sc->sc_dma_map) != 0)
		goto err;

	if (bus_dmamem_alloc(sc->sc_dmat, sc->sc_dma_size, 16, 0,
	    &sc->sc_dma_seg, 1, &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO) != 0)
		goto destroy;

	if (bus_dmamem_map(sc->sc_dmat, &sc->sc_dma_seg, nsegs,
	    sc->sc_dma_size, &sc->sc_dma_kva, BUS_DMA_NOWAIT) != 0)
		goto free;

	if (bus_dmamap_load(sc->sc_dmat, sc->sc_dma_map, sc->sc_dma_kva,
	    sc->sc_dma_size, NULL, BUS_DMA_NOWAIT) != 0)
		goto unmap;

	return (0);

unmap:
	bus_dmamem_unmap(sc->sc_dmat, sc->sc_dma_kva, sc->sc_dma_size);
free:
	bus_dmamem_free(sc->sc_dmat, &sc->sc_dma_seg, 1);
destroy:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_dma_map);
err:
	return (1);
}

void
vic_free_dmamem(struct vic_softc *sc)
{
	bus_dmamap_unload(sc->sc_dmat, sc->sc_dma_map);
	bus_dmamem_unmap(sc->sc_dmat, sc->sc_dma_kva, sc->sc_dma_size);
	bus_dmamem_free(sc->sc_dmat, &sc->sc_dma_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_dma_map);
}
1404