/*
 * Copyright (c) 2004
 *	Bill Paul <wpaul@windriver.com>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/dev/vge/if_vge.c,v 1.24 2006/02/14 12:44:56 glebius Exp $
 */

/*
 * VIA Networking Technologies VT612x PCI gigabit ethernet NIC driver.
 *
 * Written by Bill Paul <wpaul@windriver.com>
 * Senior Networking Software Engineer
 * Wind River Systems
 */

/*
 * The VIA Networking VT6122 is a 32bit, 33/66MHz PCI device that
 * combines a tri-speed ethernet MAC and PHY, with the following
 * features:
 *
 *	o Jumbo frame support up to 16K
 *	o Transmit and receive flow control
 *	o IPv4 checksum offload
 *	o VLAN tag insertion and stripping
 *	o TCP large send
 *	o 64-bit multicast hash table filter
 *	o 64 entry CAM filter
 *	o 16K RX FIFO and 48K TX FIFO memory
 *	o Interrupt moderation
 *
 * The VT6122 supports up to four transmit DMA queues. The descriptors
 * in the transmit ring can address up to 7 data fragments; frames which
 * span more than 7 data buffers must be coalesced, but in general the
 * BSD TCP/IP stack rarely generates frames more than 2 or 3 fragments
 * long. The receive descriptors address only a single buffer.
 *
 * There are two peculiar design issues with the VT6122. One is that
 * receive data buffers must be aligned on a 32-bit boundary. This is
 * not a problem where the VT6122 is used as a LOM device in x86-based
 * systems, but on architectures that generate unaligned access traps, we
 * have to do some copying.
 *
 * The other issue has to do with the way 64-bit addresses are handled.
 * The DMA descriptors only allow you to specify 48 bits of addressing
 * information. The remaining 16 bits are specified using one of the
 * I/O registers. If you only have a 32-bit system, then this isn't
 * an issue, but if you have a 64-bit system and more than 4GB of
 * memory, you have to make sure your network data buffers reside
 * in the same 48-bit 'segment.'
 *
 * Special thanks to Ryan Fu at VIA Networking for providing documentation
 * and sample NICs for testing.
 */

#include "opt_ifpoll.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/serialize.h>
#include <sys/proc.h>
#include <sys/bus.h>
#include <sys/rman.h>
#include <sys/interrupt.h>

#include <net/if.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_poll.h>
#include <net/ifq_var.h>
#include <net/if_types.h>
#include <net/vlan/if_vlan_var.h>
#include <net/vlan/if_vlan_ether.h>

#include <net/bpf.h>

#include <dev/netif/mii_layer/mii.h>
#include <dev/netif/mii_layer/miivar.h>

#include <bus/pci/pcireg.h>
#include <bus/pci/pcivar.h>
#include "pcidevs.h"

#include "miibus_if.h"

#include <dev/netif/vge/if_vgereg.h>
#include <dev/netif/vge/if_vgevar.h>

#define VGE_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)

/*
 * Various supported device vendors/types and their names.
 */
static const struct vge_type vge_devs[] = {
	{ PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_VT612X,
	  "VIA Networking Gigabit Ethernet" },
	{ 0, 0, NULL }
};

static int	vge_probe(device_t);
static int	vge_attach(device_t);
static int	vge_detach(device_t);

static int	vge_encap(struct vge_softc *, struct mbuf *, int);

static void	vge_dma_map_addr(void *, bus_dma_segment_t *, int, int);
static void	vge_dma_map_rx_desc(void *, bus_dma_segment_t *, int,
				    bus_size_t, int);
static void	vge_dma_map_tx_desc(void *, bus_dma_segment_t *, int,
				    bus_size_t, int);
static int	vge_dma_alloc(device_t);
static void	vge_dma_free(struct vge_softc *);
static int	vge_newbuf(struct vge_softc *, int, struct mbuf *);
static int	vge_rx_list_init(struct vge_softc *);
static int	vge_tx_list_init(struct vge_softc *);
#ifdef VGE_FIXUP_RX
static __inline void vge_fixup_rx(struct mbuf *);
#endif
static void	vge_rxeof(struct vge_softc *, int);
static void	vge_txeof(struct vge_softc *);
static void	vge_intr(void *);
static void	vge_tick(struct vge_softc *);
static void	vge_start(struct ifnet *, struct ifaltq_subque *);
static int	vge_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
static void	vge_init(void *);
static void	vge_stop(struct vge_softc *);
static void	vge_watchdog(struct ifnet *);
static int	vge_suspend(device_t);
static int	vge_resume(device_t);
static void	vge_shutdown(device_t);
static int	vge_ifmedia_upd(struct ifnet *);
static void	vge_ifmedia_sts(struct ifnet *, struct ifmediareq *);

#ifdef VGE_EEPROM
static void	vge_eeprom_getword(struct vge_softc *, int, uint16_t *);
#endif
static void	vge_read_eeprom(struct vge_softc *, uint8_t *, int, int, int);

static void	vge_miipoll_start(struct vge_softc *);
static void	vge_miipoll_stop(struct vge_softc *);
static int	vge_miibus_readreg(device_t, int, int);
static int	vge_miibus_writereg(device_t, int, int, int);
static void	vge_miibus_statchg(device_t);

static void	vge_cam_clear(struct vge_softc *);
static int	vge_cam_set(struct vge_softc *, uint8_t *);
static void	vge_setmulti(struct vge_softc *);
static void	vge_reset(struct vge_softc *);

#ifdef IFPOLL_ENABLE
static void	vge_npoll(struct ifnet *, struct ifpoll_info *);
static void	vge_npoll_compat(struct ifnet *, void *, int);
static void	vge_disable_intr(struct vge_softc *);
#endif
static void	vge_enable_intr(struct vge_softc *, uint32_t);

#define VGE_PCI_LOIO	0x10
#define VGE_PCI_LOMEM	0x14

static device_method_t vge_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		vge_probe),
	DEVMETHOD(device_attach,	vge_attach),
	DEVMETHOD(device_detach,	vge_detach),
	DEVMETHOD(device_suspend,	vge_suspend),
	DEVMETHOD(device_resume,	vge_resume),
	DEVMETHOD(device_shutdown,	vge_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	vge_miibus_readreg),
	DEVMETHOD(miibus_writereg,	vge_miibus_writereg),
	DEVMETHOD(miibus_statchg,	vge_miibus_statchg),

	DEVMETHOD_END
};

static driver_t vge_driver = {
	"vge",
	vge_methods,
	sizeof(struct vge_softc)
};

static devclass_t vge_devclass;

DECLARE_DUMMY_MODULE(if_vge);
MODULE_DEPEND(if_vge, miibus, 1, 1, 1);
DRIVER_MODULE(if_vge, pci, vge_driver, vge_devclass, NULL, NULL);
DRIVER_MODULE(if_vge, cardbus, vge_driver, vge_devclass, NULL, NULL);
DRIVER_MODULE(miibus, vge, miibus_driver, miibus_devclass, NULL, NULL);

#ifdef VGE_EEPROM
/*
 * Read a word of data stored in the EEPROM at address 'addr.'
 */
static void
vge_eeprom_getword(struct vge_softc *sc, int addr, uint16_t *dest)
{
	uint16_t word = 0;
	int i;

	/*
	 * Enter EEPROM embedded programming mode. In order to
	 * access the EEPROM at all, we first have to set the
	 * EELOAD bit in the CHIPCFG2 register.
	 */
	CSR_SETBIT_1(sc, VGE_CHIPCFG2, VGE_CHIPCFG2_EELOAD);
	CSR_SETBIT_1(sc, VGE_EECSR, VGE_EECSR_EMBP/*|VGE_EECSR_ECS*/);

	/* Select the address of the word we want to read */
	CSR_WRITE_1(sc, VGE_EEADDR, addr);

	/* Issue read command */
	CSR_SETBIT_1(sc, VGE_EECMD, VGE_EECMD_ERD);

	/* Wait for the done bit to be set. */
	for (i = 0; i < VGE_TIMEOUT; i++) {
		if (CSR_READ_1(sc, VGE_EECMD) & VGE_EECMD_EDONE)
			break;
	}
	if (i == VGE_TIMEOUT) {
		device_printf(sc->vge_dev, "EEPROM read timed out\n");
		*dest = 0;
		return;
	}

	/* Read the result */
	word = CSR_READ_2(sc, VGE_EERDDAT);

	/* Turn off EEPROM access mode. */
	CSR_CLRBIT_1(sc, VGE_EECSR, VGE_EECSR_EMBP/*|VGE_EECSR_ECS*/);
	CSR_CLRBIT_1(sc, VGE_CHIPCFG2, VGE_CHIPCFG2_EELOAD);

	*dest = word;
}
#endif

/*
 * Read a sequence of words from the EEPROM.
 */
static void
vge_read_eeprom(struct vge_softc *sc, uint8_t *dest, int off, int cnt, int swap)
{
	int i;
#ifdef VGE_EEPROM
	uint16_t word = 0, *ptr;

	for (i = 0; i < cnt; i++) {
		vge_eeprom_getword(sc, off + i, &word);
		ptr = (uint16_t *)(dest + (i * 2));
		if (swap)
			*ptr = ntohs(word);
		else
			*ptr = word;
	}
#else
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		dest[i] = CSR_READ_1(sc, VGE_PAR0 + i);
#endif
}

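/*
 * Turn off MII autopolling and wait for the poll engine to go
 * idle, so that the MII management registers can be accessed
 * directly.
 */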
static void
vge_miipoll_stop(struct vge_softc *sc)
{
	int i;

	CSR_WRITE_1(sc, VGE_MIICMD, 0);

	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(1);
		if (CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL)
			break;
	}
	if (i == VGE_TIMEOUT)
		if_printf(&sc->arpcom.ac_if, "failed to idle MII autopoll\n");
}

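/*
 * Re-enable MII autopolling: idle the poll engine first, then
 * turn automatic polling back on and verify that it started.
 */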
static void
vge_miipoll_start(struct vge_softc *sc)
{
	int i;

	/* First, make sure we're idle. */
	CSR_WRITE_1(sc, VGE_MIICMD, 0);
	CSR_WRITE_1(sc, VGE_MIIADDR, VGE_MIIADDR_SWMPL);

	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(1);
		if (CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL)
			break;
	}
	if (i == VGE_TIMEOUT) {
		if_printf(&sc->arpcom.ac_if, "failed to idle MII autopoll\n");
		return;
	}

	/* Now enable auto poll mode. */
	CSR_WRITE_1(sc, VGE_MIICMD, VGE_MIICMD_MAUTO);

	/* And make sure it started. */
	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(1);
		if ((CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL) == 0)
			break;
	}
	if (i == VGE_TIMEOUT)
		if_printf(&sc->arpcom.ac_if, "failed to start MII autopoll\n");
}

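/*
 * Read a PHY register through the MII management interface.
 * Autopolling must be suspended for the duration of the access.
 */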
static int
vge_miibus_readreg(device_t dev, int phy, int reg)
{
	struct vge_softc *sc;
	int i;
	uint16_t rval = 0;

	sc = device_get_softc(dev);

	if (phy != (CSR_READ_1(sc, VGE_MIICFG) & 0x1F))
		return(0);

	vge_miipoll_stop(sc);

	/* Specify the register we want to read. */
	CSR_WRITE_1(sc, VGE_MIIADDR, reg);

	/* Issue read command. */
	CSR_SETBIT_1(sc, VGE_MIICMD, VGE_MIICMD_RCMD);

	/* Wait for the read command bit to self-clear. */
	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(1);
		if ((CSR_READ_1(sc, VGE_MIICMD) & VGE_MIICMD_RCMD) == 0)
			break;
	}
	if (i == VGE_TIMEOUT)
		if_printf(&sc->arpcom.ac_if, "MII read timed out\n");
	else
		rval = CSR_READ_2(sc, VGE_MIIDATA);

	vge_miipoll_start(sc);

	return (rval);
}

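/*
 * Write a PHY register through the MII management interface,
 * again with autopolling suspended around the access.
 */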
static int
vge_miibus_writereg(device_t dev, int phy, int reg, int data)
{
	struct vge_softc *sc;
	int i, rval = 0;

	sc = device_get_softc(dev);

	if (phy != (CSR_READ_1(sc, VGE_MIICFG) & 0x1F))
		return(0);

	vge_miipoll_stop(sc);

	/* Specify the register we want to write. */
	CSR_WRITE_1(sc, VGE_MIIADDR, reg);

	/* Specify the data we want to write. */
	CSR_WRITE_2(sc, VGE_MIIDATA, data);

	/* Issue write command. */
	CSR_SETBIT_1(sc, VGE_MIICMD, VGE_MIICMD_WCMD);

	/* Wait for the write command bit to self-clear. */
	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(1);
		if ((CSR_READ_1(sc, VGE_MIICMD) & VGE_MIICMD_WCMD) == 0)
			break;
	}
	if (i == VGE_TIMEOUT) {
		if_printf(&sc->arpcom.ac_if, "MII write timed out\n");
		rval = EIO;
	}

	vge_miipoll_start(sc);

	return (rval);
}

static void
vge_cam_clear(struct vge_softc *sc)
{
	int i;

	/*
	 * Turn off all the mask bits. This tells the chip
	 * that none of the entries in the CAM filter are valid.
	 * Desired entries will be enabled as we fill the filter in.
	 */
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMMASK);
	CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE);
	for (i = 0; i < 8; i++)
		CSR_WRITE_1(sc, VGE_CAM0 + i, 0);

	/* Clear the VLAN filter too. */
	CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE|VGE_CAMADDR_AVSEL|0);
	for (i = 0; i < 8; i++)
		CSR_WRITE_1(sc, VGE_CAM0 + i, 0);

	CSR_WRITE_1(sc, VGE_CAMADDR, 0);
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR);

	sc->vge_camidx = 0;
}

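/*
 * Program one perfect-filter entry in the CAM and set the mask
 * bit that marks the entry valid.
 */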
static int
vge_cam_set(struct vge_softc *sc, uint8_t *addr)
{
	int i, error = 0;

	if (sc->vge_camidx == VGE_CAM_MAXADDRS)
		return(ENOSPC);

	/* Select the CAM data page. */
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMDATA);

	/* Set the filter entry we want to update and enable writing. */
	CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE|sc->vge_camidx);

	/* Write the address to the CAM registers */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		CSR_WRITE_1(sc, VGE_CAM0 + i, addr[i]);

	/* Issue a write command. */
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_WRITE);

	/* Wait for it to clear. */
	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(1);
		if ((CSR_READ_1(sc, VGE_CAMCTL) & VGE_CAMCTL_WRITE) == 0)
			break;
	}
	if (i == VGE_TIMEOUT) {
		if_printf(&sc->arpcom.ac_if, "setting CAM filter failed\n");
		error = EIO;
		goto fail;
	}

	/* Select the CAM mask page. */
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMMASK);

	/* Set the mask bit that enables this filter. */
	CSR_SETBIT_1(sc, VGE_CAM0 + (sc->vge_camidx/8),
	    1<<(sc->vge_camidx & 7));

	sc->vge_camidx++;

fail:
	/* Turn off access to CAM. */
	CSR_WRITE_1(sc, VGE_CAMADDR, 0);
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR);

	return (error);
}

/*
 * Program the multicast filter. We use the 64-entry CAM filter
 * for perfect filtering. If there are more than 64 multicast
 * addresses, we use the hash filter instead.
 */
static void
vge_setmulti(struct vge_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int error = 0;
	struct ifmultiaddr *ifma;
	uint32_t h, hashes[2] = { 0, 0 };

	/* First, zot all the multicast entries. */
	vge_cam_clear(sc);
	CSR_WRITE_4(sc, VGE_MAR0, 0);
	CSR_WRITE_4(sc, VGE_MAR1, 0);

	/*
	 * If the user wants allmulti or promisc mode, enable reception
	 * of all multicast frames.
	 */
	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
		CSR_WRITE_4(sc, VGE_MAR0, 0xFFFFFFFF);
		CSR_WRITE_4(sc, VGE_MAR1, 0xFFFFFFFF);
		return;
	}

	/* Now program new ones */
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		error = vge_cam_set(sc,
		    LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
		if (error)
			break;
	}

	/* If there were too many addresses, use the hash filter. */
	if (error) {
		vge_cam_clear(sc);

		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;
			h = ether_crc32_be(LLADDR((struct sockaddr_dl *)
			    ifma->ifma_addr), ETHER_ADDR_LEN) >> 26;
			if (h < 32)
				hashes[0] |= (1 << h);
			else
				hashes[1] |= (1 << (h - 32));
		}

		CSR_WRITE_4(sc, VGE_MAR0, hashes[0]);
		CSR_WRITE_4(sc, VGE_MAR1, hashes[1]);
	}
}

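/*
 * Soft-reset the chip. If the reset bit fails to self-clear,
 * force the chip to stop, then reload the station configuration
 * from the EEPROM.
 */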
static void
vge_reset(struct vge_softc *sc)
{
	int i;

	CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_SOFTRESET);

	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(5);
		if ((CSR_READ_1(sc, VGE_CRS1) & VGE_CR1_SOFTRESET) == 0)
			break;
	}

	if (i == VGE_TIMEOUT) {
		if_printf(&sc->arpcom.ac_if, "soft reset timed out\n");
		CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_STOP_FORCE);
		DELAY(2000);
	}

	DELAY(5000);

	CSR_SETBIT_1(sc, VGE_EECSR, VGE_EECSR_RELOAD);

	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(5);
		if ((CSR_READ_1(sc, VGE_EECSR) & VGE_EECSR_RELOAD) == 0)
			break;
	}
	if (i == VGE_TIMEOUT) {
		if_printf(&sc->arpcom.ac_if, "EEPROM reload timed out\n");
		return;
	}

	CSR_CLRBIT_1(sc, VGE_CHIPCFG0, VGE_CHIPCFG0_PACPI);
}

/*
 * Probe for a VIA gigabit chip. Check the PCI vendor and device
 * IDs against our list and return a device name if we find a match.
 */
static int
vge_probe(device_t dev)
{
	const struct vge_type *t;
	uint16_t did, vid;

	did = pci_get_device(dev);
	vid = pci_get_vendor(dev);
	for (t = vge_devs; t->vge_name != NULL; ++t) {
		if (vid == t->vge_vid && did == t->vge_did) {
			device_set_desc(dev, t->vge_name);
			return 0;
		}
	}
	return (ENXIO);
}

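/*
 * bus_dmamap_load callback for RX buffers: fill in a single RX
 * descriptor from the mapped segment. RX descriptors address
 * only one buffer each.
 */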
static void
vge_dma_map_rx_desc(void *arg, bus_dma_segment_t *segs, int nseg,
		    bus_size_t mapsize, int error)
{
	struct vge_dmaload_arg *ctx;
	struct vge_rx_desc *d = NULL;

	if (error)
		return;

	ctx = arg;

	/* Signal error to caller if there are too many segments */
	if (nseg > ctx->vge_maxsegs) {
		ctx->vge_maxsegs = 0;
		return;
	}

	/*
	 * Map the segment array into descriptors.
	 */
	d = &ctx->sc->vge_ldata.vge_rx_list[ctx->vge_idx];

	/* If this descriptor is still owned by the chip, bail. */
	if (le32toh(d->vge_sts) & VGE_RDSTS_OWN) {
		if_printf(&ctx->sc->arpcom.ac_if,
			  "tried to map busy descriptor\n");
		ctx->vge_maxsegs = 0;
		return;
	}

	d->vge_buflen = htole16(VGE_BUFLEN(segs[0].ds_len) | VGE_RXDESC_I);
	d->vge_addrlo = htole32(VGE_ADDR_LO(segs[0].ds_addr));
	d->vge_addrhi = htole16(VGE_ADDR_HI(segs[0].ds_addr) & 0xFFFF);
	d->vge_sts = 0;
	d->vge_ctl = 0;

	ctx->vge_maxsegs = 1;
}

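/*
 * bus_dmamap_load_mbuf callback for TX frames: scatter the mapped
 * segments across the descriptor's fragment slots, manually padding
 * runt frames since the chip won't autopad them.
 */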
static void
vge_dma_map_tx_desc(void *arg, bus_dma_segment_t *segs, int nseg,
		    bus_size_t mapsize, int error)
{
	struct vge_dmaload_arg *ctx;
	struct vge_tx_desc *d = NULL;
	struct vge_tx_frag *f;
	int i = 0;

	if (error)
		return;

	ctx = arg;

	/* Signal error to caller if there are too many segments */
	if (nseg > ctx->vge_maxsegs) {
		ctx->vge_maxsegs = 0;
		return;
	}

	/* Map the segment array into descriptors. */
	d = &ctx->sc->vge_ldata.vge_tx_list[ctx->vge_idx];

	/* If this descriptor is still owned by the chip, bail. */
	if (le32toh(d->vge_sts) & VGE_TDSTS_OWN) {
		ctx->vge_maxsegs = 0;
		return;
	}

	for (i = 0; i < nseg; i++) {
		f = &d->vge_frag[i];
		f->vge_buflen = htole16(VGE_BUFLEN(segs[i].ds_len));
		f->vge_addrlo = htole32(VGE_ADDR_LO(segs[i].ds_addr));
		f->vge_addrhi = htole16(VGE_ADDR_HI(segs[i].ds_addr) & 0xFFFF);
	}

	/* Argh. This chip does not autopad short frames */
	if (ctx->vge_m0->m_pkthdr.len < VGE_MIN_FRAMELEN) {
		f = &d->vge_frag[i];
		f->vge_buflen = htole16(VGE_BUFLEN(VGE_MIN_FRAMELEN -
		    ctx->vge_m0->m_pkthdr.len));
		f->vge_addrlo = htole32(VGE_ADDR_LO(segs[0].ds_addr));
		f->vge_addrhi = htole16(VGE_ADDR_HI(segs[0].ds_addr) & 0xFFFF);
		ctx->vge_m0->m_pkthdr.len = VGE_MIN_FRAMELEN;
		i++;
	}

	/*
	 * When telling the chip how many segments there are, we
	 * must use nsegs + 1 instead of just nsegs. Darned if I
	 * know why.
	 */
	i++;

	d->vge_sts = ctx->vge_m0->m_pkthdr.len << 16;
	d->vge_ctl = ctx->vge_flags|(i << 28)|VGE_TD_LS_NORM;

	if (ctx->vge_m0->m_pkthdr.len > ETHERMTU + ETHER_HDR_LEN)
		d->vge_ctl |= VGE_TDCTL_JUMBO;

	ctx->vge_maxsegs = nseg;
}

/*
 * Map a single buffer address.
 */
static void
vge_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	if (error)
		return;

	KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg));
	*((bus_addr_t *)arg) = segs->ds_addr;
}

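/*
 * Allocate the DMA tags, descriptor rings and per-buffer DMA maps
 * used by the driver.
 */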
static int
vge_dma_alloc(device_t dev)
{
	struct vge_softc *sc = device_get_softc(dev);
	int error, nseg, i, tx_pos = 0, rx_pos = 0;

	/*
	 * Allocate the parent bus DMA tag appropriate for PCI.
	 */
#define VGE_NSEG_NEW 32
	error = bus_dma_tag_create(NULL,	/* parent */
			1, 0,			/* alignment, boundary */
			BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
			BUS_SPACE_MAXADDR,	/* highaddr */
			MAXBSIZE, VGE_NSEG_NEW,	/* maxsize, nsegments */
			BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
			BUS_DMA_ALLOCNOW,	/* flags */
			&sc->vge_parent_tag);
	if (error) {
		device_printf(dev, "can't create parent dma tag\n");
		return error;
	}

	/*
	 * Allocate map for RX mbufs.
	 */
	nseg = 32;
	error = bus_dma_tag_create(sc->vge_parent_tag, ETHER_ALIGN, 0,
				   BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
				   MCLBYTES * nseg, nseg, MCLBYTES,
				   BUS_DMA_ALLOCNOW, &sc->vge_ldata.vge_mtag);
	if (error) {
		device_printf(dev, "could not allocate mbuf dma tag\n");
		return error;
	}

	/*
	 * Allocate map for TX descriptor list.
	 */
	error = bus_dma_tag_create(sc->vge_parent_tag, VGE_RING_ALIGN, 0,
				   BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
				   VGE_TX_LIST_SZ, 1, VGE_TX_LIST_SZ,
				   BUS_DMA_ALLOCNOW,
				   &sc->vge_ldata.vge_tx_list_tag);
	if (error) {
		device_printf(dev, "could not allocate tx list dma tag\n");
		return error;
	}

	/* Allocate DMA'able memory for the TX ring */
	error = bus_dmamem_alloc(sc->vge_ldata.vge_tx_list_tag,
				 (void **)&sc->vge_ldata.vge_tx_list,
				 BUS_DMA_WAITOK | BUS_DMA_ZERO,
				 &sc->vge_ldata.vge_tx_list_map);
	if (error) {
		device_printf(dev, "could not allocate tx list dma memory\n");
		return error;
	}

	/* Load the map for the TX ring. */
	error = bus_dmamap_load(sc->vge_ldata.vge_tx_list_tag,
				sc->vge_ldata.vge_tx_list_map,
				sc->vge_ldata.vge_tx_list, VGE_TX_LIST_SZ,
				vge_dma_map_addr,
				&sc->vge_ldata.vge_tx_list_addr,
				BUS_DMA_WAITOK);
	if (error) {
		device_printf(dev, "could not load tx list\n");
		bus_dmamem_free(sc->vge_ldata.vge_tx_list_tag,
				sc->vge_ldata.vge_tx_list,
				sc->vge_ldata.vge_tx_list_map);
		sc->vge_ldata.vge_tx_list = NULL;
		return error;
	}

	/* Create DMA maps for TX buffers */
	for (i = 0; i < VGE_TX_DESC_CNT; i++) {
		error = bus_dmamap_create(sc->vge_ldata.vge_mtag, 0,
					  &sc->vge_ldata.vge_tx_dmamap[i]);
		if (error) {
			device_printf(dev, "can't create DMA map for TX\n");
			tx_pos = i;
			goto map_fail;
		}
	}
	tx_pos = VGE_TX_DESC_CNT;

	/*
	 * Allocate map for RX descriptor list.
	 */
	error = bus_dma_tag_create(sc->vge_parent_tag, VGE_RING_ALIGN, 0,
				   BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
				   VGE_RX_LIST_SZ, 1, VGE_RX_LIST_SZ,
				   BUS_DMA_ALLOCNOW,
				   &sc->vge_ldata.vge_rx_list_tag);
	if (error) {
		device_printf(dev, "could not allocate rx list dma tag\n");
		return error;
	}

	/* Allocate DMA'able memory for the RX ring */
	error = bus_dmamem_alloc(sc->vge_ldata.vge_rx_list_tag,
				 (void **)&sc->vge_ldata.vge_rx_list,
				 BUS_DMA_WAITOK | BUS_DMA_ZERO,
				 &sc->vge_ldata.vge_rx_list_map);
	if (error) {
		device_printf(dev, "could not allocate rx list dma memory\n");
		return error;
	}

	/* Load the map for the RX ring. */
	error = bus_dmamap_load(sc->vge_ldata.vge_rx_list_tag,
				sc->vge_ldata.vge_rx_list_map,
				sc->vge_ldata.vge_rx_list, VGE_RX_LIST_SZ,
				vge_dma_map_addr,
				&sc->vge_ldata.vge_rx_list_addr,
				BUS_DMA_WAITOK);
	if (error) {
		device_printf(dev, "could not load rx list\n");
		bus_dmamem_free(sc->vge_ldata.vge_rx_list_tag,
				sc->vge_ldata.vge_rx_list,
				sc->vge_ldata.vge_rx_list_map);
		sc->vge_ldata.vge_rx_list = NULL;
		return error;
	}

	/* Create DMA maps for RX buffers */
	for (i = 0; i < VGE_RX_DESC_CNT; i++) {
		error = bus_dmamap_create(sc->vge_ldata.vge_mtag, 0,
					  &sc->vge_ldata.vge_rx_dmamap[i]);
		if (error) {
			device_printf(dev, "can't create DMA map for RX\n");
			rx_pos = i;
			goto map_fail;
		}
	}
	return (0);

map_fail:
	for (i = 0; i < tx_pos; ++i) {
		error = bus_dmamap_destroy(sc->vge_ldata.vge_mtag,
					   sc->vge_ldata.vge_tx_dmamap[i]);
	}
	for (i = 0; i < rx_pos; ++i) {
		error = bus_dmamap_destroy(sc->vge_ldata.vge_mtag,
					   sc->vge_ldata.vge_rx_dmamap[i]);
	}
	bus_dma_tag_destroy(sc->vge_ldata.vge_mtag);
	sc->vge_ldata.vge_mtag = NULL;

	return error;
}

static void
vge_dma_free(struct vge_softc *sc)
{
	/* Unload and free the RX DMA ring memory and map */
	if (sc->vge_ldata.vge_rx_list_tag) {
		bus_dmamap_unload(sc->vge_ldata.vge_rx_list_tag,
				  sc->vge_ldata.vge_rx_list_map);
		bus_dmamem_free(sc->vge_ldata.vge_rx_list_tag,
				sc->vge_ldata.vge_rx_list,
				sc->vge_ldata.vge_rx_list_map);
	}

	if (sc->vge_ldata.vge_rx_list_tag)
		bus_dma_tag_destroy(sc->vge_ldata.vge_rx_list_tag);

	/* Unload and free the TX DMA ring memory and map */
	if (sc->vge_ldata.vge_tx_list_tag) {
		bus_dmamap_unload(sc->vge_ldata.vge_tx_list_tag,
				  sc->vge_ldata.vge_tx_list_map);
		bus_dmamem_free(sc->vge_ldata.vge_tx_list_tag,
				sc->vge_ldata.vge_tx_list,
				sc->vge_ldata.vge_tx_list_map);
	}

	if (sc->vge_ldata.vge_tx_list_tag)
		bus_dma_tag_destroy(sc->vge_ldata.vge_tx_list_tag);

	/* Destroy all the RX and TX buffer maps */
	if (sc->vge_ldata.vge_mtag) {
		int i;

		for (i = 0; i < VGE_TX_DESC_CNT; i++) {
			bus_dmamap_destroy(sc->vge_ldata.vge_mtag,
					   sc->vge_ldata.vge_tx_dmamap[i]);
		}
		for (i = 0; i < VGE_RX_DESC_CNT; i++) {
			bus_dmamap_destroy(sc->vge_ldata.vge_mtag,
					   sc->vge_ldata.vge_rx_dmamap[i]);
		}
		bus_dma_tag_destroy(sc->vge_ldata.vge_mtag);
	}

	if (sc->vge_parent_tag)
		bus_dma_tag_destroy(sc->vge_parent_tag);
}

/*
 * Attach the interface. Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
static int
vge_attach(device_t dev)
{
	uint8_t eaddr[ETHER_ADDR_LEN];
	struct vge_softc *sc;
	struct ifnet *ifp;
	int error = 0;

	sc = device_get_softc(dev);
	ifp = &sc->arpcom.ac_if;

	/* Initialize if_xname early, so if_printf() can be used */
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));

	/*
	 * Map control/status registers.
	 */
	pci_enable_busmaster(dev);

	sc->vge_res_rid = VGE_PCI_LOMEM;
	sc->vge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
					     &sc->vge_res_rid, RF_ACTIVE);
	if (sc->vge_res == NULL) {
		device_printf(dev, "couldn't map ports/memory\n");
		return ENXIO;
	}

	sc->vge_btag = rman_get_bustag(sc->vge_res);
	sc->vge_bhandle = rman_get_bushandle(sc->vge_res);

	/* Allocate interrupt */
	sc->vge_irq_rid = 0;
	sc->vge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->vge_irq_rid,
					     RF_SHAREABLE | RF_ACTIVE);
	if (sc->vge_irq == NULL) {
		device_printf(dev, "couldn't map interrupt\n");
		error = ENXIO;
		goto fail;
	}

	/* Reset the adapter. */
	vge_reset(sc);

	/*
	 * Get station address from the EEPROM.
	 */
	vge_read_eeprom(sc, eaddr, VGE_EE_EADDR, 3, 0);

	/* Allocate DMA related stuffs */
	error = vge_dma_alloc(dev);
	if (error)
		goto fail;

	/* Do MII setup */
	error = mii_phy_probe(dev, &sc->vge_miibus, vge_ifmedia_upd,
			      vge_ifmedia_sts);
	if (error) {
		device_printf(dev, "MII without any phy!\n");
		goto fail;
	}

	ifp->if_softc = sc;
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = vge_init;
	ifp->if_start = vge_start;
	ifp->if_watchdog = vge_watchdog;
	ifp->if_ioctl = vge_ioctl;
#ifdef IFPOLL_ENABLE
	ifp->if_npoll = vge_npoll;
#endif
	ifp->if_hwassist = VGE_CSUM_FEATURES;
	ifp->if_capabilities = IFCAP_VLAN_MTU |
			       IFCAP_HWCSUM |
			       IFCAP_VLAN_HWTAGGING;
	ifp->if_capenable = ifp->if_capabilities;
	ifq_set_maxlen(&ifp->if_snd, VGE_IFQ_MAXLEN);
	ifq_set_ready(&ifp->if_snd);

	/*
	 * Call MI attach routine.
	 */
	ether_ifattach(ifp, eaddr, NULL);

	ifq_set_cpuid(&ifp->if_snd, rman_get_cpuid(sc->vge_irq));

#ifdef IFPOLL_ENABLE
	ifpoll_compat_setup(&sc->vge_npoll, NULL, NULL, device_get_unit(dev),
			    ifp->if_serializer);
#endif

	/* Hook interrupt last to avoid having to lock softc */
	error = bus_setup_intr(dev, sc->vge_irq, INTR_MPSAFE, vge_intr, sc,
			       &sc->vge_intrhand, ifp->if_serializer);
	if (error) {
		device_printf(dev, "couldn't set up irq\n");
		ether_ifdetach(ifp);
		goto fail;
	}

	return 0;
fail:
	vge_detach(dev);
	return error;
}

/*
 * Shutdown hardware and free up resources. This can be called any
 * time after the mutex has been initialized. It is called in both
 * the error case in attach and the normal detach case so it needs
 * to be careful about only freeing resources that have actually been
 * allocated.
 */
static int
vge_detach(device_t dev)
{
	struct vge_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	/* These should only be active if attach succeeded */
	if (device_is_attached(dev)) {
		lwkt_serialize_enter(ifp->if_serializer);

		vge_stop(sc);
		bus_teardown_intr(dev, sc->vge_irq, sc->vge_intrhand);
		/*
		 * Force off the IFF_UP flag here, in case someone
		 * still had a BPF descriptor attached to this
		 * interface. If they do, ether_ifdetach() will cause
		 * the BPF code to try and clear the promisc mode
		 * flag, which will bubble down to vge_ioctl(),
		 * which will try to call vge_init() again. This will
		 * turn the NIC back on and restart the MII ticker,
		 * which will panic the system when the kernel tries
		 * to invoke the vge_tick() function that isn't there
		 * anymore.
		 */
		ifp->if_flags &= ~IFF_UP;

		lwkt_serialize_exit(ifp->if_serializer);

		ether_ifdetach(ifp);
	}

	if (sc->vge_miibus)
		device_delete_child(dev, sc->vge_miibus);
	bus_generic_detach(dev);

	if (sc->vge_irq) {
		bus_release_resource(dev, SYS_RES_IRQ, sc->vge_irq_rid,
				     sc->vge_irq);
	}

	if (sc->vge_res) {
		bus_release_resource(dev, SYS_RES_MEMORY, sc->vge_res_rid,
				     sc->vge_res);
	}

	vge_dma_free(sc);
	return (0);
}

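/*
 * Attach an mbuf cluster to the RX descriptor at 'idx'. If 'm' is
 * NULL a fresh cluster is allocated, otherwise the caller's mbuf
 * is recycled.
 */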
static int
vge_newbuf(struct vge_softc *sc, int idx, struct mbuf *m)
{
	struct vge_dmaload_arg arg;
	struct mbuf *n = NULL;
	int i, error;

	if (m == NULL) {
		n = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
		if (n == NULL)
			return (ENOBUFS);
		m = n;
	} else {
		m->m_data = m->m_ext.ext_buf;
	}

#ifdef VGE_FIXUP_RX
	/*
	 * This is part of an evil trick to deal with non-x86 platforms.
	 * The VIA chip requires RX buffers to be aligned on 32-bit
	 * boundaries, but that will hose non-x86 machines. To get around
	 * this, we leave some empty space at the start of each buffer
	 * and for non-x86 hosts, we copy the buffer back two bytes
	 * to achieve word alignment. This is slightly more efficient
	 * than allocating a new buffer, copying the contents, and
	 * discarding the old buffer.
	 */
	m->m_len = m->m_pkthdr.len = MCLBYTES - VGE_ETHER_ALIGN;
	m_adj(m, VGE_ETHER_ALIGN);
#else
	m->m_len = m->m_pkthdr.len = MCLBYTES;
#endif

	arg.sc = sc;
	arg.vge_idx = idx;
	arg.vge_maxsegs = 1;
	arg.vge_flags = 0;

	error = bus_dmamap_load_mbuf(sc->vge_ldata.vge_mtag,
				     sc->vge_ldata.vge_rx_dmamap[idx], m,
				     vge_dma_map_rx_desc, &arg, BUS_DMA_NOWAIT);
	if (error || arg.vge_maxsegs != 1) {
		if (n != NULL)
			m_freem(n);
		return (ENOMEM);
	}

	/*
	 * Note: the manual fails to document the fact that for
	 * proper operation, the driver needs to replenish the RX
	 * DMA ring 4 descriptors at a time (rather than one at a
	 * time, like most chips). We can allocate the new buffers
	 * but we should not set the OWN bits until we're ready
	 * to hand back 4 of them in one shot.
	 */
#define VGE_RXCHUNK 4
	sc->vge_rx_consumed++;
	if (sc->vge_rx_consumed == VGE_RXCHUNK) {
		for (i = idx; i != idx - sc->vge_rx_consumed; i--) {
			sc->vge_ldata.vge_rx_list[i].vge_sts |=
			    htole32(VGE_RDSTS_OWN);
		}
		sc->vge_rx_consumed = 0;
	}

	sc->vge_ldata.vge_rx_mbuf[idx] = m;

	bus_dmamap_sync(sc->vge_ldata.vge_mtag,
			sc->vge_ldata.vge_rx_dmamap[idx], BUS_DMASYNC_PREREAD);

	return (0);
}

static int
vge_tx_list_init(struct vge_softc *sc)
{
	bzero((char *)sc->vge_ldata.vge_tx_list, VGE_TX_LIST_SZ);
	bzero((char *)&sc->vge_ldata.vge_tx_mbuf,
	    (VGE_TX_DESC_CNT * sizeof(struct mbuf *)));

	bus_dmamap_sync(sc->vge_ldata.vge_tx_list_tag,
			sc->vge_ldata.vge_tx_list_map, BUS_DMASYNC_PREWRITE);
	sc->vge_ldata.vge_tx_prodidx = 0;
	sc->vge_ldata.vge_tx_considx = 0;
	sc->vge_ldata.vge_tx_free = VGE_TX_DESC_CNT;

	return (0);
}

static int
vge_rx_list_init(struct vge_softc *sc)
{
	int i;

	bzero(sc->vge_ldata.vge_rx_list, VGE_RX_LIST_SZ);
	bzero(&sc->vge_ldata.vge_rx_mbuf,
	    VGE_RX_DESC_CNT * sizeof(struct mbuf *));

	sc->vge_rx_consumed = 0;

	for (i = 0; i < VGE_RX_DESC_CNT; i++) {
		if (vge_newbuf(sc, i, NULL) == ENOBUFS)
			return (ENOBUFS);
	}

	/* Flush the RX descriptors */
	bus_dmamap_sync(sc->vge_ldata.vge_rx_list_tag,
			sc->vge_ldata.vge_rx_list_map,
			BUS_DMASYNC_PREWRITE);

	sc->vge_ldata.vge_rx_prodidx = 0;
	sc->vge_rx_consumed = 0;
	sc->vge_head = sc->vge_tail = NULL;
	return (0);
}

#ifdef VGE_FIXUP_RX
static __inline void
vge_fixup_rx(struct mbuf *m)
{
	uint16_t *src, *dst;
	int i;

	src = mtod(m, uint16_t *);
	dst = src - 1;

	for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
		*dst++ = *src++;

	m->m_data -= ETHER_ALIGN;
}
#endif

/*
 * RX handler. We support the reception of jumbo frames that have
 * been fragmented across multiple 2K mbuf cluster buffers.
 */
static void
vge_rxeof(struct vge_softc *sc, int count)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mbuf *m;
	int i, total_len, lim = 0;
	struct vge_rx_desc *cur_rx;
	uint32_t rxstat, rxctl;

	ASSERT_SERIALIZED(ifp->if_serializer);

	i = sc->vge_ldata.vge_rx_prodidx;

	/* Invalidate the descriptor memory */
	bus_dmamap_sync(sc->vge_ldata.vge_rx_list_tag,
			sc->vge_ldata.vge_rx_list_map, BUS_DMASYNC_POSTREAD);

	while (!VGE_OWN(&sc->vge_ldata.vge_rx_list[i])) {
#ifdef IFPOLL_ENABLE
		if (count >= 0 && count-- == 0)
			break;
#endif

		cur_rx = &sc->vge_ldata.vge_rx_list[i];
		m = sc->vge_ldata.vge_rx_mbuf[i];
		total_len = VGE_RXBYTES(cur_rx);
		rxstat = le32toh(cur_rx->vge_sts);
		rxctl = le32toh(cur_rx->vge_ctl);

		/* Invalidate the RX mbuf and unload its map */
		bus_dmamap_sync(sc->vge_ldata.vge_mtag,
				sc->vge_ldata.vge_rx_dmamap[i],
				BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->vge_ldata.vge_mtag,
				  sc->vge_ldata.vge_rx_dmamap[i]);

		/*
		 * If the 'start of frame' bit is set, this indicates
		 * either the first fragment in a multi-fragment receive,
		 * or an intermediate fragment. Either way, we want to
		 * accumulate the buffers.
		 */
		if (rxstat & VGE_RXPKT_SOF) {
			m->m_len = MCLBYTES - VGE_ETHER_ALIGN;
			if (sc->vge_head == NULL) {
				sc->vge_head = sc->vge_tail = m;
			} else {
				m->m_flags &= ~M_PKTHDR;
				sc->vge_tail->m_next = m;
				sc->vge_tail = m;
			}
			vge_newbuf(sc, i, NULL);
			VGE_RX_DESC_INC(i);
			continue;
		}

		/*
		 * Bad/error frames will have the RXOK bit cleared.
		 * However, there's one error case we want to allow:
		 * if a VLAN tagged frame arrives and the chip can't
		 * match it against the CAM filter, it considers this
		 * a 'VLAN CAM filter miss' and clears the 'RXOK' bit.
		 * We don't want to drop the frame though: our VLAN
		 * filtering is done in software.
		 */
		if (!(rxstat & VGE_RDSTS_RXOK) && !(rxstat & VGE_RDSTS_VIDM) &&
		    !(rxstat & VGE_RDSTS_CSUMERR)) {
			IFNET_STAT_INC(ifp, ierrors, 1);
			/*
			 * If this is part of a multi-fragment packet,
			 * discard all the pieces.
			 */
			if (sc->vge_head != NULL) {
				m_freem(sc->vge_head);
				sc->vge_head = sc->vge_tail = NULL;
			}
			vge_newbuf(sc, i, m);
			VGE_RX_DESC_INC(i);
			continue;
		}

		/*
		 * If allocating a replacement mbuf fails,
		 * reload the current one.
		 */
		if (vge_newbuf(sc, i, NULL)) {
			IFNET_STAT_INC(ifp, ierrors, 1);
			if (sc->vge_head != NULL) {
				m_freem(sc->vge_head);
				sc->vge_head = sc->vge_tail = NULL;
			}
			vge_newbuf(sc, i, m);
			VGE_RX_DESC_INC(i);
			continue;
		}

		VGE_RX_DESC_INC(i);

		if (sc->vge_head != NULL) {
			m->m_len = total_len % (MCLBYTES - VGE_ETHER_ALIGN);
			/*
			 * Special case: if there's 4 bytes or less
			 * in this buffer, the mbuf can be discarded:
			 * the last 4 bytes is the CRC, which we don't
			 * care about anyway.
			 */
			if (m->m_len <= ETHER_CRC_LEN) {
				sc->vge_tail->m_len -=
				    (ETHER_CRC_LEN - m->m_len);
				m_freem(m);
			} else {
				m->m_len -= ETHER_CRC_LEN;
				m->m_flags &= ~M_PKTHDR;
				sc->vge_tail->m_next = m;
			}
			m = sc->vge_head;
			sc->vge_head = sc->vge_tail = NULL;
			m->m_pkthdr.len = total_len - ETHER_CRC_LEN;
		} else {
			m->m_pkthdr.len = m->m_len =
			    (total_len - ETHER_CRC_LEN);
		}

#ifdef VGE_FIXUP_RX
		vge_fixup_rx(m);
#endif
		IFNET_STAT_INC(ifp, ipackets, 1);
		m->m_pkthdr.rcvif = ifp;

		/* Do RX checksumming if enabled */
		if (ifp->if_capenable & IFCAP_RXCSUM) {
			/* Check IP header checksum */
			if (rxctl & VGE_RDCTL_IPPKT)
				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
			if (rxctl & VGE_RDCTL_IPCSUMOK)
				m->m_pkthdr.csum_flags |= CSUM_IP_VALID;

			/* Check TCP/UDP checksum */
			if (rxctl & (VGE_RDCTL_TCPPKT|VGE_RDCTL_UDPPKT) &&
			    rxctl & VGE_RDCTL_PROTOCSUMOK) {
				m->m_pkthdr.csum_flags |=
				    CSUM_DATA_VALID|CSUM_PSEUDO_HDR|
				    CSUM_FRAG_NOT_CHECKED;
				m->m_pkthdr.csum_data = 0xffff;
			}
		}

		if (rxstat & VGE_RDSTS_VTAG) {
			m->m_flags |= M_VLANTAG;
			m->m_pkthdr.ether_vlantag =
			    ntohs((rxctl & VGE_RDCTL_VLANID));
		}
		ifp->if_input(ifp, m, NULL, -1);

		lim++;
		if (lim == VGE_RX_DESC_CNT)
			break;
	}

	/* Flush the RX DMA ring */
	bus_dmamap_sync(sc->vge_ldata.vge_rx_list_tag,
			sc->vge_ldata.vge_rx_list_map,
			BUS_DMASYNC_PREWRITE);

	sc->vge_ldata.vge_rx_prodidx = i;
	CSR_WRITE_2(sc, VGE_RXDESC_RESIDUECNT, lim);
}

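/*
 * TX completion handler: reclaim descriptors the chip has finished
 * with and free the associated mbufs.
 */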
static void
vge_txeof(struct vge_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t txstat;
	int idx;

	idx = sc->vge_ldata.vge_tx_considx;

	/* Invalidate the TX descriptor list */
	bus_dmamap_sync(sc->vge_ldata.vge_tx_list_tag,
			sc->vge_ldata.vge_tx_list_map, BUS_DMASYNC_POSTREAD);

	while (idx != sc->vge_ldata.vge_tx_prodidx) {
		txstat = le32toh(sc->vge_ldata.vge_tx_list[idx].vge_sts);
		if (txstat & VGE_TDSTS_OWN)
			break;

		m_freem(sc->vge_ldata.vge_tx_mbuf[idx]);
		sc->vge_ldata.vge_tx_mbuf[idx] = NULL;
		bus_dmamap_unload(sc->vge_ldata.vge_mtag,
				  sc->vge_ldata.vge_tx_dmamap[idx]);
		if (txstat & (VGE_TDSTS_EXCESSCOLL|VGE_TDSTS_COLL))
			IFNET_STAT_INC(ifp, collisions, 1);
		if (txstat & VGE_TDSTS_TXERR)
			IFNET_STAT_INC(ifp, oerrors, 1);
		else
			IFNET_STAT_INC(ifp, opackets, 1);

		sc->vge_ldata.vge_tx_free++;
		VGE_TX_DESC_INC(idx);
	}

	/* No changes made to the TX ring, so no flush needed */
	if (idx != sc->vge_ldata.vge_tx_considx) {
		sc->vge_ldata.vge_tx_considx = idx;
		ifq_clr_oactive(&ifp->if_snd);
		ifp->if_timer = 0;
	}

	/*
	 * If not all descriptors have been reaped yet,
	 * reload the timer so that we will eventually get another
	 * interrupt that will cause us to re-enter this routine.
	 * This is done in case the transmitter has gone idle.
	 */
	if (sc->vge_ldata.vge_tx_free != VGE_TX_DESC_CNT)
		CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_TIMER0_ENABLE);
}

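/*
 * Track link state. This is driven from the link status change
 * interrupt rather than from a periodic callout.
 */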
static void
vge_tick(struct vge_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mii_data *mii;

	mii = device_get_softc(sc->vge_miibus);

	mii_tick(mii);
	if (sc->vge_link) {
		if (!(mii->mii_media_status & IFM_ACTIVE))
			sc->vge_link = 0;
	} else {
		if (mii->mii_media_status & IFM_ACTIVE &&
		    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
			sc->vge_link = 1;
			if (!ifq_is_empty(&ifp->if_snd))
				if_devstart(ifp);
		}
	}
}

#ifdef IFPOLL_ENABLE

static void
vge_npoll_compat(struct ifnet *ifp, void *arg __unused, int count)
{
	struct vge_softc *sc = ifp->if_softc;

	ASSERT_SERIALIZED(ifp->if_serializer);

	vge_rxeof(sc, count);
	vge_txeof(sc);

	if (!ifq_is_empty(&ifp->if_snd))
		if_devstart(ifp);

	/* XXX copy & paste from vge_intr */
	if (sc->vge_npoll.ifpc_stcount-- == 0) {
		uint32_t status;

		sc->vge_npoll.ifpc_stcount = sc->vge_npoll.ifpc_stfrac;

		status = CSR_READ_4(sc, VGE_ISR);
		if (status == 0xffffffff)
			return;

		if (status)
			CSR_WRITE_4(sc, VGE_ISR, status);

		if (status & (VGE_ISR_TXDMA_STALL |
			      VGE_ISR_RXDMA_STALL))
			vge_init(sc);

		if (status & (VGE_ISR_RXOFLOW | VGE_ISR_RXNODESC)) {
			IFNET_STAT_INC(ifp, ierrors, 1);
			CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN);
			CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK);
		}
	}
}

static void
vge_npoll(struct ifnet *ifp, struct ifpoll_info *info)
{
	struct vge_softc *sc = ifp->if_softc;

	ASSERT_SERIALIZED(ifp->if_serializer);

	if (info != NULL) {
		int cpuid = sc->vge_npoll.ifpc_cpuid;

		info->ifpi_rx[cpuid].poll_func = vge_npoll_compat;
		info->ifpi_rx[cpuid].arg = NULL;
		info->ifpi_rx[cpuid].serializer = ifp->if_serializer;

		if (ifp->if_flags & IFF_RUNNING)
			vge_disable_intr(sc);
		ifq_set_cpuid(&ifp->if_snd, cpuid);
	} else {
		if (ifp->if_flags & IFF_RUNNING)
			vge_enable_intr(sc, 0xffffffff);
		ifq_set_cpuid(&ifp->if_snd, rman_get_cpuid(sc->vge_irq));
	}
}

#endif	/* IFPOLL_ENABLE */

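/*
 * Interrupt handler: with interrupts masked, ack and service all
 * pending interrupt sources until none remain.
 */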
static void
vge_intr(void *arg)
{
	struct vge_softc *sc = arg;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t status;

	if (sc->suspended || !(ifp->if_flags & IFF_UP))
		return;

	/* Disable interrupts */
	CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);

	for (;;) {
		status = CSR_READ_4(sc, VGE_ISR);
		/* If the card has gone away the read returns 0xffffffff. */
		if (status == 0xFFFFFFFF)
			break;

		if (status)
			CSR_WRITE_4(sc, VGE_ISR, status);

		if ((status & VGE_INTRS) == 0)
			break;

		if (status & (VGE_ISR_RXOK|VGE_ISR_RXOK_HIPRIO))
			vge_rxeof(sc, -1);

		if (status & (VGE_ISR_RXOFLOW|VGE_ISR_RXNODESC)) {
			vge_rxeof(sc, -1);
			IFNET_STAT_INC(ifp, ierrors, 1);
			CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN);
			CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK);
		}

		if (status & (VGE_ISR_TXOK0|VGE_ISR_TIMER0))
			vge_txeof(sc);

		if (status & (VGE_ISR_TXDMA_STALL|VGE_ISR_RXDMA_STALL))
			vge_init(sc);

		if (status & VGE_ISR_LINKSTS)
			vge_tick(sc);
	}

	/* Re-enable interrupts */
	CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK);

	if (!ifq_is_empty(&ifp->if_snd))
		if_devstart(ifp);
}

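/*
 * Encapsulate a frame in the TX ring at 'idx', defragmenting the
 * mbuf chain if it has too many segments to map directly.
 */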
static int
vge_encap(struct vge_softc *sc, struct mbuf *m_head, int idx)
{
	struct vge_dmaload_arg arg;
	bus_dmamap_t map;
	int error;

	arg.vge_flags = 0;

	if (m_head->m_pkthdr.csum_flags & CSUM_IP)
		arg.vge_flags |= VGE_TDCTL_IPCSUM;
	if (m_head->m_pkthdr.csum_flags & CSUM_TCP)
		arg.vge_flags |= VGE_TDCTL_TCPCSUM;
	if (m_head->m_pkthdr.csum_flags & CSUM_UDP)
		arg.vge_flags |= VGE_TDCTL_UDPCSUM;

	arg.sc = sc;
	arg.vge_idx = idx;
	arg.vge_m0 = m_head;
	arg.vge_maxsegs = VGE_TX_FRAGS;

	map = sc->vge_ldata.vge_tx_dmamap[idx];
	error = bus_dmamap_load_mbuf(sc->vge_ldata.vge_mtag, map, m_head,
				     vge_dma_map_tx_desc, &arg, BUS_DMA_NOWAIT);
	if (error && error != EFBIG) {
		if_printf(&sc->arpcom.ac_if, "can't map mbuf (error %d)\n",
			  error);
		goto fail;
	}

	/* Too many segments to map, coalesce into a single mbuf */
	if (error || arg.vge_maxsegs == 0) {
		struct mbuf *m_new;

		m_new = m_defrag(m_head, M_NOWAIT);
		if (m_new == NULL) {
			error = ENOBUFS;
			goto fail;
		} else {
			m_head = m_new;
		}

		arg.sc = sc;
		arg.vge_m0 = m_head;
		arg.vge_idx = idx;
		arg.vge_maxsegs = 1;

		error = bus_dmamap_load_mbuf(sc->vge_ldata.vge_mtag, map,
					     m_head, vge_dma_map_tx_desc, &arg,
					     BUS_DMA_NOWAIT);
		if (error) {
			if_printf(&sc->arpcom.ac_if,
				  "can't map mbuf (error %d)\n", error);
			goto fail;
		}
	}

	sc->vge_ldata.vge_tx_mbuf[idx] = m_head;
	sc->vge_ldata.vge_tx_free--;

	/*
	 * Set up hardware VLAN tagging.
	 */
	if (m_head->m_flags & M_VLANTAG) {
		sc->vge_ldata.vge_tx_list[idx].vge_ctl |=
		    htole32(htons(m_head->m_pkthdr.ether_vlantag) |
		    VGE_TDCTL_VTAG);
	}

	sc->vge_ldata.vge_tx_list[idx].vge_sts |= htole32(VGE_TDSTS_OWN);
	return (0);

fail:
	m_freem(m_head);
	return error;
}

/*
 * Main transmit routine.
 */
static void
vge_start(struct ifnet *ifp, struct ifaltq_subque *ifsq)
{
	struct vge_softc *sc = ifp->if_softc;
	struct mbuf *m_head = NULL;
	int idx, pidx = 0;

	ASSERT_ALTQ_SQ_DEFAULT(ifp, ifsq);
	ASSERT_SERIALIZED(ifp->if_serializer);

	if (!sc->vge_link) {
		ifq_purge(&ifp->if_snd);
		return;
	}

	if ((ifp->if_flags & IFF_RUNNING) == 0 || ifq_is_oactive(&ifp->if_snd))
		return;

	idx = sc->vge_ldata.vge_tx_prodidx;

	pidx = idx - 1;
	if (pidx < 0)
		pidx = VGE_TX_DESC_CNT - 1;

	while (sc->vge_ldata.vge_tx_mbuf[idx] == NULL) {
		if (sc->vge_ldata.vge_tx_free <= 2) {
			ifq_set_oactive(&ifp->if_snd);
			break;
		}

		m_head = ifq_dequeue(&ifp->if_snd);
		if (m_head == NULL)
			break;

		if (vge_encap(sc, m_head, idx)) {
			/* If vge_encap() failed, it will free m_head for us */
			ifq_set_oactive(&ifp->if_snd);
			break;
		}

		sc->vge_ldata.vge_tx_list[pidx].vge_frag[0].vge_buflen |=
		    htole16(VGE_TXDESC_Q);

		pidx = idx;
		VGE_TX_DESC_INC(idx);

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		ETHER_BPF_MTAP(ifp, m_head);
	}

	if (idx == sc->vge_ldata.vge_tx_prodidx)
		return;

	/* Flush the TX descriptors */
	bus_dmamap_sync(sc->vge_ldata.vge_tx_list_tag,
			sc->vge_ldata.vge_tx_list_map,
			BUS_DMASYNC_PREWRITE);

	/* Issue a transmit command. */
	CSR_WRITE_2(sc, VGE_TXQCSRS, VGE_TXQCSR_WAK0);

	sc->vge_ldata.vge_tx_prodidx = idx;

	/*
	 * Use the countdown timer for interrupt moderation.
	 * 'TX done' interrupts are disabled. Instead, we reset the
	 * countdown timer, which will begin counting until it hits
	 * the value in the SSTIMER register, and then trigger an
	 * interrupt. Each time we set the TIMER0_ENABLE bit, the
	 * timer count is reloaded. Only when the transmitter
	 * is idle will the timer hit 0 and an interrupt fire.
	 */
	CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_TIMER0_ENABLE);

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;
}

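/*
 * Initialize the hardware: reset the chip, (re)load the DMA rings,
 * program the station address and receive filter, and start the MAC.
 */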
1767 static void
vge_init(void * xsc)1768 vge_init(void *xsc)
1769 {
1770 struct vge_softc *sc = xsc;
1771 struct ifnet *ifp = &sc->arpcom.ac_if;
1772 struct mii_data *mii;
1773 int i;
1774
1775 ASSERT_SERIALIZED(ifp->if_serializer);
1776
1777 mii = device_get_softc(sc->vge_miibus);
1778
1779 /*
1780 * Cancel pending I/O and free all RX/TX buffers.
1781 */
1782 vge_stop(sc);
1783 vge_reset(sc);
1784
1785 /*
1786 * Initialize the RX and TX descriptors and mbufs.
1787 */
1788 vge_rx_list_init(sc);
1789 vge_tx_list_init(sc);
1790
1791 /* Set our station address */
1792 for (i = 0; i < ETHER_ADDR_LEN; i++)
1793 CSR_WRITE_1(sc, VGE_PAR0 + i, IF_LLADDR(ifp)[i]);
1794
1795 /*
1796 * Set receive FIFO threshold. Also allow transmission and
1797 * reception of VLAN tagged frames.
1798 */
1799 CSR_CLRBIT_1(sc, VGE_RXCFG, VGE_RXCFG_FIFO_THR|VGE_RXCFG_VTAGOPT);
1800 CSR_SETBIT_1(sc, VGE_RXCFG, VGE_RXFIFOTHR_128BYTES|VGE_VTAG_OPT2);
1801
1802 /* Set DMA burst length */
1803 CSR_CLRBIT_1(sc, VGE_DMACFG0, VGE_DMACFG0_BURSTLEN);
1804 CSR_SETBIT_1(sc, VGE_DMACFG0, VGE_DMABURST_128);
1805
1806 CSR_SETBIT_1(sc, VGE_TXCFG, VGE_TXCFG_ARB_PRIO|VGE_TXCFG_NONBLK);
1807
1808 /* Set collision backoff algorithm */
1809 CSR_CLRBIT_1(sc, VGE_CHIPCFG1, VGE_CHIPCFG1_CRANDOM|
1810 VGE_CHIPCFG1_CAP|VGE_CHIPCFG1_MBA|VGE_CHIPCFG1_BAKOPT);
1811 CSR_SETBIT_1(sc, VGE_CHIPCFG1, VGE_CHIPCFG1_OFSET);
1812
1813 /* Disable LPSEL field in priority resolution */
1814 CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_LPSEL_DIS);
1815
1816 /*
1817 * Load the addresses of the DMA queues into the chip.
1818 * Note that we only use one transmit queue.
1819 */
1820 CSR_WRITE_4(sc, VGE_TXDESC_ADDR_LO0,
1821 VGE_ADDR_LO(sc->vge_ldata.vge_tx_list_addr));
1822 CSR_WRITE_2(sc, VGE_TXDESCNUM, VGE_TX_DESC_CNT - 1);
1823
1824 CSR_WRITE_4(sc, VGE_RXDESC_ADDR_LO,
1825 VGE_ADDR_LO(sc->vge_ldata.vge_rx_list_addr));
1826 CSR_WRITE_2(sc, VGE_RXDESCNUM, VGE_RX_DESC_CNT - 1);
1827 CSR_WRITE_2(sc, VGE_RXDESC_RESIDUECNT, VGE_RX_DESC_CNT);
1828
1829 /* Enable and wake up the RX descriptor queue */
1830 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN);
1831 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK);
1832
1833 /* Enable the TX descriptor queue */
1834 CSR_WRITE_2(sc, VGE_TXQCSRS, VGE_TXQCSR_RUN0);
1835
1836 /* Set up the receive filter -- allow large frames for VLANs. */
1837 CSR_WRITE_1(sc, VGE_RXCTL, VGE_RXCTL_RX_UCAST|VGE_RXCTL_RX_GIANT);
1838
1839 /* If we want promiscuous mode, set the allframes bit. */
1840 if (ifp->if_flags & IFF_PROMISC)
1841 CSR_SETBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_PROMISC);
1842
1843 /* Set capture broadcast bit to capture broadcast frames. */
1844 if (ifp->if_flags & IFF_BROADCAST)
1845 CSR_SETBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_BCAST);
1846
1847 /* Set multicast bit to capture multicast frames. */
1848 if (ifp->if_flags & IFF_MULTICAST)
1849 CSR_SETBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_MCAST);
1850
1851 /* Init the cam filter. */
1852 vge_cam_clear(sc);
1853
1854 /* Init the multicast filter. */
1855 vge_setmulti(sc);
1856
1857 /* Enable flow control */
1858
1859 CSR_WRITE_1(sc, VGE_CRS2, 0x8B);
1860
1861 /* Enable jumbo frame reception (if desired) */
1862
1863 /* Start the MAC. */
1864 CSR_WRITE_1(sc, VGE_CRC0, VGE_CR0_STOP);
1865 CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_NOPOLL);
1866 CSR_WRITE_1(sc, VGE_CRS0,
1867 VGE_CR0_TX_ENABLE|VGE_CR0_RX_ENABLE|VGE_CR0_START);
1868
1869 /*
1870 * Configure one-shot timer for microsecond
1871 * resulution and load it for 500 usecs.
1872 */
1873 CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_TIMER0_RES);
1874 CSR_WRITE_2(sc, VGE_SSTIMER, 400);
1875
	/*
	 * Configure interrupt moderation for receive. Enable
	 * the holdoff counter and load it, and set the RX
	 * suppression count to the number of descriptors we
	 * want to allow before triggering an interrupt.
	 * The holdoff timer is in units of 20 usecs.
	 */

#ifdef notyet
	CSR_WRITE_1(sc, VGE_INTCTL1, VGE_INTCTL_TXINTSUP_DISABLE);
	/* Select the interrupt holdoff timer page. */
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_INTHLDOFF);
	CSR_WRITE_1(sc, VGE_INTHOLDOFF, 10); /* ~200 usecs */

	/* Enable use of the holdoff timer. */
	CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_HOLDOFF);
	CSR_WRITE_1(sc, VGE_INTCTL1, VGE_INTCTL_SC_RELOAD);

	/* Select the RX suppression threshold page. */
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_RXSUPPTHR);
	CSR_WRITE_1(sc, VGE_RXSUPPTHR, 64); /* interrupt after 64 packets */

	/* Restore the page select bits. */
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR);
#endif
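	/*
	 * For reference, the arithmetic behind the disabled block above:
	 * the holdoff timer ticks every 20 usecs, so the value 10 gives
	 * 10 * 20 = ~200 usecs of holdoff, and the suppression threshold
	 * of 64 delays the interrupt until 64 packets have arrived.
	 * Presumably whichever condition is met first raises the
	 * interrupt, though that is our reading rather than a statement
	 * from the datasheet.
	 */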

#ifdef IFPOLL_ENABLE
	/* Disable intr if polling(4) is enabled */
	if (ifp->if_flags & IFF_NPOLLING)
		vge_disable_intr(sc);
	else
#endif
	vge_enable_intr(sc, 0);

	mii_mediachg(mii);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	sc->vge_if_flags = 0;
	sc->vge_link = 0;
}

/*
 * Set media options.
 */
static int
vge_ifmedia_upd(struct ifnet *ifp)
{
	struct vge_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->vge_miibus);

	mii_mediachg(mii);

	return (0);
}

/*
 * Report current media status.
 */
static void
vge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct vge_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->vge_miibus);

	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
}

static void
vge_miibus_statchg(device_t dev)
{
	struct vge_softc *sc;
	struct mii_data *mii;
	struct ifmedia_entry *ife;

	sc = device_get_softc(dev);
	mii = device_get_softc(sc->vge_miibus);
	ife = mii->mii_media.ifm_cur;

	/*
	 * If the user manually selects a media mode, we need to turn
	 * on the forced MAC mode bit in the DIAGCTL register. If the
	 * user happens to choose a full duplex mode, we also need to
	 * set the 'force full duplex' bit. This applies only to
	 * 10Mbps and 100Mbps speeds. In autoselect mode, forced MAC
	 * mode is disabled, and in 1000baseT mode, full duplex is
	 * always implied, so we turn on the forced mode bit but leave
	 * the FDX bit cleared.
	 */

	switch (IFM_SUBTYPE(ife->ifm_media)) {
	case IFM_AUTO:
		CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
		CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
		break;
	case IFM_1000_T:
		CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
		CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
		break;
	case IFM_100_TX:
	case IFM_10_T:
		CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
		if ((ife->ifm_media & IFM_GMASK) == IFM_FDX)
			CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
		else
			CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
		break;
	default:
		device_printf(dev, "unknown media type: %x\n",
		    IFM_SUBTYPE(ife->ifm_media));
		break;
	}
}

static int
vge_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
{
	struct vge_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct mii_data *mii;
	int error = 0;

	switch (command) {
	case SIOCSIFMTU:
		/*
		 * Reject MTUs beyond the jumbo limit instead of
		 * flagging the error but programming them anyway.
		 */
		if (ifr->ifr_mtu > VGE_JUMBO_MTU)
			error = EINVAL;
		else
			ifp->if_mtu = ifr->ifr_mtu;
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if ((ifp->if_flags & IFF_RUNNING) &&
			    (ifp->if_flags & IFF_PROMISC) &&
			    !(sc->vge_if_flags & IFF_PROMISC)) {
				CSR_SETBIT_1(sc, VGE_RXCTL,
				    VGE_RXCTL_RX_PROMISC);
				vge_setmulti(sc);
			} else if ((ifp->if_flags & IFF_RUNNING) &&
			    !(ifp->if_flags & IFF_PROMISC) &&
			    (sc->vge_if_flags & IFF_PROMISC)) {
				CSR_CLRBIT_1(sc, VGE_RXCTL,
				    VGE_RXCTL_RX_PROMISC);
				vge_setmulti(sc);
			} else {
				vge_init(sc);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				vge_stop(sc);
		}
		sc->vge_if_flags = ifp->if_flags;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		vge_setmulti(sc);
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		mii = device_get_softc(sc->vge_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;
	case SIOCSIFCAP:
	    {
		uint32_t mask = ifr->ifr_reqcap ^ ifp->if_capenable;

		if (mask & IFCAP_HWCSUM) {
			/*
			 * Toggle only the bits that changed so checksum
			 * offload can be disabled as well as enabled.
			 */
			ifp->if_capenable ^= (mask & IFCAP_HWCSUM);
			if (ifp->if_capenable & IFCAP_TXCSUM)
				ifp->if_hwassist = VGE_CSUM_FEATURES;
			else
				ifp->if_hwassist = 0;
			if (ifp->if_flags & IFF_RUNNING)
				vge_init(sc);
		}
	    }
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}
	return (error);
}
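
/*
 * Illustration only: a minimal userland sketch that exercises the
 * SIOCSIFMTU case above.  The interface name "vge0" and the MTU value
 * are hypothetical, and error handling is reduced to warn():
 *
 *	int s = socket(AF_INET, SOCK_DGRAM, 0);
 *	struct ifreq ifr;
 *
 *	memset(&ifr, 0, sizeof(ifr));
 *	strlcpy(ifr.ifr_name, "vge0", sizeof(ifr.ifr_name));
 *	ifr.ifr_mtu = 9000;
 *	if (ioctl(s, SIOCSIFMTU, &ifr) == -1)
 *		warn("SIOCSIFMTU");
 *
 * A request larger than VGE_JUMBO_MTU fails with EINVAL in the
 * handler above.
 */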

static void
vge_watchdog(struct ifnet *ifp)
{
	struct vge_softc *sc = ifp->if_softc;

	if_printf(ifp, "watchdog timeout\n");
	IFNET_STAT_INC(ifp, oerrors, 1);

	vge_txeof(sc);
	vge_rxeof(sc, -1);

	vge_init(sc);
}

/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 */
static void
vge_stop(struct vge_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int i;

	ASSERT_SERIALIZED(ifp->if_serializer);

	ifp->if_timer = 0;

	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);
	CSR_WRITE_1(sc, VGE_CRS0, VGE_CR0_STOP);
	CSR_WRITE_4(sc, VGE_ISR, 0xFFFFFFFF);
	CSR_WRITE_2(sc, VGE_TXQCSRC, 0xFFFF);
	CSR_WRITE_1(sc, VGE_RXQCSRC, 0xFF);
	CSR_WRITE_4(sc, VGE_RXDESC_ADDR_LO, 0);

	if (sc->vge_head != NULL) {
		m_freem(sc->vge_head);
		sc->vge_head = sc->vge_tail = NULL;
	}

	/* Free the TX list buffers. */
	for (i = 0; i < VGE_TX_DESC_CNT; i++) {
		if (sc->vge_ldata.vge_tx_mbuf[i] != NULL) {
			bus_dmamap_unload(sc->vge_ldata.vge_mtag,
			    sc->vge_ldata.vge_tx_dmamap[i]);
			m_freem(sc->vge_ldata.vge_tx_mbuf[i]);
			sc->vge_ldata.vge_tx_mbuf[i] = NULL;
		}
	}

	/* Free the RX list buffers. */
	for (i = 0; i < VGE_RX_DESC_CNT; i++) {
		if (sc->vge_ldata.vge_rx_mbuf[i] != NULL) {
			bus_dmamap_unload(sc->vge_ldata.vge_mtag,
			    sc->vge_ldata.vge_rx_dmamap[i]);
			m_freem(sc->vge_ldata.vge_rx_mbuf[i]);
			sc->vge_ldata.vge_rx_mbuf[i] = NULL;
		}
	}
}

/*
 * Device suspend routine. Stop the interface and save some PCI
 * settings in case the BIOS doesn't restore them properly on
 * resume.
 */
static int
vge_suspend(device_t dev)
{
	struct vge_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	lwkt_serialize_enter(ifp->if_serializer);
	vge_stop(sc);
	sc->suspended = 1;
	lwkt_serialize_exit(ifp->if_serializer);

	return (0);
}

/*
 * Device resume routine. Restore some PCI settings in case the BIOS
 * doesn't, re-enable busmastering, and restart the interface if
 * appropriate.
 */
static int
vge_resume(device_t dev)
{
	struct vge_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	/* reenable busmastering */
	pci_enable_busmaster(dev);
	pci_enable_io(dev, SYS_RES_MEMORY);

	lwkt_serialize_enter(ifp->if_serializer);
	/* reinitialize interface if necessary */
	if (ifp->if_flags & IFF_UP)
		vge_init(sc);

	sc->suspended = 0;
	lwkt_serialize_exit(ifp->if_serializer);

	return (0);
}

/*
 * Stop all chip I/O so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
static void
vge_shutdown(device_t dev)
{
	struct vge_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	lwkt_serialize_enter(ifp->if_serializer);
	vge_stop(sc);
	lwkt_serialize_exit(ifp->if_serializer);
}

static void
vge_enable_intr(struct vge_softc *sc, uint32_t isr)
{
	/* Unmask the interrupt sources we care about. */
	CSR_WRITE_4(sc, VGE_IMR, VGE_INTRS);
	/* Ack any stale status bits the caller wants cleared. */
	CSR_WRITE_4(sc, VGE_ISR, isr);
	/* Set the global interrupt mask bit to let interrupts through. */
	CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK);
}

#ifdef IFPOLL_ENABLE

static void
vge_disable_intr(struct vge_softc *sc)
{
	/* Mask all sources and clear the global interrupt mask bit. */
	CSR_WRITE_4(sc, VGE_IMR, 0);
	CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);
	sc->vge_npoll.ifpc_stcount = 0;
}

#endif	/* IFPOLL_ENABLE */