xref: /dragonfly/sys/dev/netif/sis/if_sis.c (revision 95893fe4)
1 /*
2  * Copyright (c) 1997, 1998, 1999
3  *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  * 3. All advertising materials mentioning features or use of this software
14  *    must display the following acknowledgement:
15  *	This product includes software developed by Bill Paul.
16  * 4. Neither the name of the author nor the names of any co-contributors
17  *    may be used to endorse or promote products derived from this software
18  *    without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
24  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30  * THE POSSIBILITY OF SUCH DAMAGE.
31  *
32  * $FreeBSD: src/sys/pci/if_sis.c,v 1.13.4.24 2003/03/05 18:42:33 njl Exp $
33  * $DragonFly: src/sys/dev/netif/sis/if_sis.c,v 1.38 2008/08/17 04:32:34 sephe Exp $
34  */
35 
36 /*
37  * SiS 900/SiS 7016 fast ethernet PCI NIC driver. Datasheets are
38  * available from http://www.sis.com.tw.
39  *
40  * This driver also supports the NatSemi DP83815. Datasheets are
41  * available from http://www.national.com.
42  *
43  * Written by Bill Paul <wpaul@ee.columbia.edu>
44  * Electrical Engineering Department
45  * Columbia University, New York City
46  */
47 
48 /*
49  * The SiS 900 is a fairly simple chip. It uses bus master DMA with
50  * simple TX and RX descriptors of 3 longwords in size. The receiver
51  * has a single perfect filter entry for the station address and a
52  * 128-bit multicast hash table. The SiS 900 has a built-in MII-based
53  * transceiver while the 7016 requires an external transceiver chip.
54  * Both chips offer the standard bit-bang MII interface as well as
55  * an enhanced PHY interface which simplifies accessing MII registers.
56  *
57  * The only downside to this chipset is that RX descriptors must be
58  * longword aligned.
59  */
60 
61 #include "opt_polling.h"
62 
63 #include <sys/param.h>
64 #include <sys/systm.h>
65 #include <sys/sockio.h>
66 #include <sys/mbuf.h>
67 #include <sys/malloc.h>
68 #include <sys/kernel.h>
69 #include <sys/socket.h>
70 #include <sys/sysctl.h>
71 #include <sys/serialize.h>
72 #include <sys/thread2.h>
73 #include <sys/bus.h>
74 #include <sys/rman.h>
75 #include <sys/interrupt.h>
76 
77 #include <net/if.h>
78 #include <net/ifq_var.h>
79 #include <net/if_arp.h>
80 #include <net/ethernet.h>
81 #include <net/if_dl.h>
82 #include <net/if_media.h>
83 #include <net/if_types.h>
84 #include <net/vlan/if_vlan_var.h>
85 
86 #include <net/bpf.h>
87 
88 #include <dev/netif/mii_layer/mii.h>
89 #include <dev/netif/mii_layer/miivar.h>
90 
91 #include <bus/pci/pcidevs.h>
92 #include <bus/pci/pcireg.h>
93 #include <bus/pci/pcivar.h>
94 
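/*
 * Use I/O space register mapping by default. Undefining SIS_USEIOSPACE
 * switches the driver to memory-mapped access (see the SIS_RES/SIS_RID
 * definitions below).
 */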
95 #define SIS_USEIOSPACE
96 
97 #include "if_sisreg.h"
98 
99 /* "controller miibus0" required.  See GENERIC if you get errors here. */
100 #include "miibus_if.h"
101 
102 /*
103  * Various supported device vendors/types and their names.
104  */
105 static struct sis_type sis_devs[] = {
106 	{ PCI_VENDOR_SIS, PCI_PRODUCT_SIS_900, "SiS 900 10/100BaseTX" },
107 	{ PCI_VENDOR_SIS, PCI_PRODUCT_SIS_7016, "SiS 7016 10/100BaseTX" },
108 	{ PCI_VENDOR_NS, PCI_PRODUCT_NS_DP83815, "NatSemi DP8381[56] 10/100BaseTX" },
109 	{ 0, 0, NULL }
110 };
111 
112 static int	sis_probe(device_t);
113 static int	sis_attach(device_t);
114 static int	sis_detach(device_t);
115 
116 static int	sis_newbuf(struct sis_softc *, struct sis_desc *,
117 			   struct mbuf *);
118 static int	sis_encap(struct sis_softc *, struct mbuf *, uint32_t *);
119 static void	sis_rxeof(struct sis_softc *);
120 static void	sis_rxeoc(struct sis_softc *);
121 static void	sis_txeof(struct sis_softc *);
122 static void	sis_intr(void *);
123 static void	sis_tick(void *);
124 static void	sis_start(struct ifnet *);
125 static int	sis_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
126 static void	sis_init(void *);
127 static void	sis_stop(struct sis_softc *);
128 static void	sis_watchdog(struct ifnet *);
129 static void	sis_shutdown(device_t);
130 static int	sis_ifmedia_upd(struct ifnet *);
131 static void	sis_ifmedia_sts(struct ifnet *, struct ifmediareq *);
132 
133 static uint16_t	sis_reverse(uint16_t);
134 static void	sis_delay(struct sis_softc *);
135 static void	sis_eeprom_idle(struct sis_softc *);
136 static void	sis_eeprom_putbyte(struct sis_softc *, int);
137 static void	sis_eeprom_getword(struct sis_softc *, int, uint16_t *);
138 static void	sis_read_eeprom(struct sis_softc *, caddr_t, int, int, int);
139 #ifdef __i386__
140 static void	sis_read_cmos(struct sis_softc *, device_t, caddr_t, int, int);
141 static void	sis_read_mac(struct sis_softc *, device_t, caddr_t);
142 static device_t	sis_find_bridge(device_t);
143 #endif
144 
145 static void	sis_mii_sync(struct sis_softc *);
146 static void	sis_mii_send(struct sis_softc *, uint32_t, int);
147 static int	sis_mii_readreg(struct sis_softc *, struct sis_mii_frame *);
148 static int	sis_mii_writereg(struct sis_softc *, struct sis_mii_frame *);
149 static int	sis_miibus_readreg(device_t, int, int);
150 static int	sis_miibus_writereg(device_t, int, int, int);
151 static void	sis_miibus_statchg(device_t);
152 
153 static void	sis_setmulti_sis(struct sis_softc *);
154 static void	sis_setmulti_ns(struct sis_softc *);
155 static uint32_t	sis_mchash(struct sis_softc *, const uint8_t *);
156 static void	sis_reset(struct sis_softc *);
157 static int	sis_list_rx_init(struct sis_softc *);
158 static int	sis_list_tx_init(struct sis_softc *);
159 
160 static void	sis_dma_map_desc_ptr(void *, bus_dma_segment_t *, int, int);
161 static void	sis_dma_map_desc_next(void *, bus_dma_segment_t *, int, int);
162 static void	sis_dma_map_ring(void *, bus_dma_segment_t *, int, int);
163 #ifdef DEVICE_POLLING
164 static poll_handler_t sis_poll;
165 #endif
166 #ifdef SIS_USEIOSPACE
167 #define SIS_RES			SYS_RES_IOPORT
168 #define SIS_RID			SIS_PCI_LOIO
169 #else
170 #define SIS_RES			SYS_RES_MEMORY
171 #define SIS_RID			SIS_PCI_LOMEM
172 #endif
173 
174 static device_method_t sis_methods[] = {
175 	/* Device interface */
176 	DEVMETHOD(device_probe,		sis_probe),
177 	DEVMETHOD(device_attach,	sis_attach),
178 	DEVMETHOD(device_detach,	sis_detach),
179 	DEVMETHOD(device_shutdown,	sis_shutdown),
180 
181 	/* bus interface */
182 	DEVMETHOD(bus_print_child,	bus_generic_print_child),
183 	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
184 
185 	/* MII interface */
186 	DEVMETHOD(miibus_readreg,	sis_miibus_readreg),
187 	DEVMETHOD(miibus_writereg,	sis_miibus_writereg),
188 	DEVMETHOD(miibus_statchg,	sis_miibus_statchg),
189 
190 	{ 0, 0 }
191 };
192 
193 static driver_t sis_driver = {
194 	"sis",
195 	sis_methods,
196 	sizeof(struct sis_softc)
197 };
198 
199 static devclass_t sis_devclass;
200 
201 DECLARE_DUMMY_MODULE(if_sis);
202 DRIVER_MODULE(if_sis, pci, sis_driver, sis_devclass, 0, 0);
203 DRIVER_MODULE(miibus, sis, miibus_driver, miibus_devclass, 0, 0);
204 
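/*
 * Register access helpers: SIS_SETBIT/SIS_CLRBIT read-modify-write an
 * arbitrary CSR, while SIO_SET/SIO_CLR do the same for the EEPROM/MII
 * control register (SIS_EECTL).
 */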
205 #define SIS_SETBIT(sc, reg, x)				\
206 	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) | (x))
207 
208 #define SIS_CLRBIT(sc, reg, x)				\
209 	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) & ~(x))
210 
211 #define SIO_SET(x)					\
212 	CSR_WRITE_4(sc, SIS_EECTL, CSR_READ_4(sc, SIS_EECTL) | x)
213 
214 #define SIO_CLR(x)					\
215 	CSR_WRITE_4(sc, SIS_EECTL, CSR_READ_4(sc, SIS_EECTL) & ~x)
216 
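/*
 * bus_dmamap_load() callbacks: record the bus address of the loaded region
 * in a descriptor's link pointer (sis_next), its buffer pointer (sis_ptr),
 * or a plain uint32_t for a whole ring.
 */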
217 static void
218 sis_dma_map_desc_next(void *arg, bus_dma_segment_t *segs, int nseg, int error)
219 {
220 	struct sis_desc	*r;
221 
222 	r = arg;
223 	r->sis_next = segs->ds_addr;
224 }
225 
226 static void
227 sis_dma_map_desc_ptr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
228 {
229 	struct sis_desc	*r;
230 
231 	r = arg;
232 	r->sis_ptr = segs->ds_addr;
233 }
234 
235 static void
236 sis_dma_map_ring(void *arg, bus_dma_segment_t *segs, int nseg, int error)
237 {
238 	uint32_t *p;
239 
240 	p = arg;
241 	*p = segs->ds_addr;
242 }
243 
244 /*
245  * Routine to reverse the bits in a word. Stolen almost
246  * verbatim from /usr/games/fortune.
247  */
248 static uint16_t
249 sis_reverse(uint16_t n)
250 {
251 	n = ((n >>  1) & 0x5555) | ((n <<  1) & 0xaaaa);
252 	n = ((n >>  2) & 0x3333) | ((n <<  2) & 0xcccc);
253 	n = ((n >>  4) & 0x0f0f) | ((n <<  4) & 0xf0f0);
254 	n = ((n >>  8) & 0x00ff) | ((n <<  8) & 0xff00);
255 
256 	return(n);
257 }
258 
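/*
 * Delay by issuing dummy reads of the CSR. The (300 / 33) + 1 loop count
 * suggests the intent is a delay of roughly 300ns, on the assumption that
 * each register read takes on the order of 33ns.
 */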
259 static void
260 sis_delay(struct sis_softc *sc)
261 {
262 	int idx;
263 
264 	for (idx = (300 / 33) + 1; idx > 0; idx--)
265 		CSR_READ_4(sc, SIS_CSR);
266 }
267 
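/*
 * Force the EEPROM into an idle state: select it, toggle the clock a
 * couple of dozen times, then deselect it and clear the control register.
 */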
268 static void
269 sis_eeprom_idle(struct sis_softc *sc)
270 {
271 	int i;
272 
273 	SIO_SET(SIS_EECTL_CSEL);
274 	sis_delay(sc);
275 	SIO_SET(SIS_EECTL_CLK);
276 	sis_delay(sc);
277 
278 	for (i = 0; i < 25; i++) {
279 		SIO_CLR(SIS_EECTL_CLK);
280 		sis_delay(sc);
281 		SIO_SET(SIS_EECTL_CLK);
282 		sis_delay(sc);
283 	}
284 
285 	SIO_CLR(SIS_EECTL_CLK);
286 	sis_delay(sc);
287 	SIO_CLR(SIS_EECTL_CSEL);
288 	sis_delay(sc);
289 	CSR_WRITE_4(sc, SIS_EECTL, 0x00000000);
290 }
291 
292 /*
293  * Send a read command and address to the EEPROM, check for ACK.
294  */
295 static void
296 sis_eeprom_putbyte(struct sis_softc *sc, int addr)
297 {
298 	int d, i;
299 
300 	d = addr | SIS_EECMD_READ;
301 
302 	/*
303 	 * Feed in each bit and strobe the clock.
304 	 */
305 	for (i = 0x400; i; i >>= 1) {
306 		if (d & i)
307 			SIO_SET(SIS_EECTL_DIN);
308 		else
309 			SIO_CLR(SIS_EECTL_DIN);
310 		sis_delay(sc);
311 		SIO_SET(SIS_EECTL_CLK);
312 		sis_delay(sc);
313 		SIO_CLR(SIS_EECTL_CLK);
314 		sis_delay(sc);
315 	}
316 }
317 
318 /*
319  * Read a word of data stored in the EEPROM at address 'addr.'
320  */
321 static void
322 sis_eeprom_getword(struct sis_softc *sc, int addr, uint16_t *dest)
323 {
324 	int i;
325 	uint16_t word = 0;
326 
327 	/* Force EEPROM to idle state. */
328 	sis_eeprom_idle(sc);
329 
330 	/* Enter EEPROM access mode. */
331 	sis_delay(sc);
332 	SIO_CLR(SIS_EECTL_CLK);
333 	sis_delay(sc);
334 	SIO_SET(SIS_EECTL_CSEL);
335 	sis_delay(sc);
336 
337 	/*
338 	 * Send address of word we want to read.
339 	 */
340 	sis_eeprom_putbyte(sc, addr);
341 
342 	/*
343 	 * Start reading bits from EEPROM.
344 	 */
345 	for (i = 0x8000; i; i >>= 1) {
346 		SIO_SET(SIS_EECTL_CLK);
347 		sis_delay(sc);
348 		if (CSR_READ_4(sc, SIS_EECTL) & SIS_EECTL_DOUT)
349 			word |= i;
350 		sis_delay(sc);
351 		SIO_CLR(SIS_EECTL_CLK);
352 		sis_delay(sc);
353 	}
354 
355 	/* Turn off EEPROM access mode. */
356 	sis_eeprom_idle(sc);
357 
358 	*dest = word;
359 }
360 
361 /*
362  * Read a sequence of words from the EEPROM.
363  */
364 static void
365 sis_read_eeprom(struct sis_softc *sc, caddr_t dest, int off, int cnt, int swap)
366 {
367 	int i;
368 	uint16_t word = 0, *ptr;
369 
370 	for (i = 0; i < cnt; i++) {
371 		sis_eeprom_getword(sc, off + i, &word);
372 		ptr = (uint16_t *)(dest + (i * 2));
373 		if (swap)
374 			*ptr = ntohs(word);
375 		else
376 			*ptr = word;
377 	}
378 }
379 
380 #ifdef __i386__
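/*
 * Walk all PCI buses looking for the SiS bridge device (vendor SIS, device
 * 0x0008); sis_read_cmos() pokes its config space to enable access to the
 * APC CMOS RAM where some SiS 630 boards store the MAC address.
 */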
381 static device_t
382 sis_find_bridge(device_t dev)
383 {
384 	devclass_t pci_devclass;
385 	device_t *pci_devices;
386 	int pci_count = 0;
387 	device_t *pci_children;
388 	int pci_childcount = 0;
389 	device_t *busp, *childp;
390 	device_t child = NULL;
391 	int i, j;
392 
393 	if ((pci_devclass = devclass_find("pci")) == NULL)
394 		return(NULL);
395 
396 	devclass_get_devices(pci_devclass, &pci_devices, &pci_count);
397 
398 	for (i = 0, busp = pci_devices; i < pci_count; i++, busp++) {
399 		pci_childcount = 0;
400 		device_get_children(*busp, &pci_children, &pci_childcount);
401 		for (j = 0, childp = pci_children; j < pci_childcount;
402 		     j++, childp++) {
403 			if (pci_get_vendor(*childp) == PCI_VENDOR_SIS &&
404 			    pci_get_device(*childp) == 0x0008) {
405 				child = *childp;
406 				goto done;
407 			}
408 		}
409 	}
410 
411 done:
412 	kfree(pci_devices, M_TEMP);
413 	kfree(pci_children, M_TEMP);
414 	return(child);
415 }
416 
417 static void
418 sis_read_cmos(struct sis_softc *sc, device_t dev, caddr_t dest, int off,
419 	      int cnt)
420 {
421 	device_t bridge;
422 	uint8_t reg;
423 	int i;
424 	bus_space_tag_t	btag;
425 
426 	bridge = sis_find_bridge(dev);
427 	if (bridge == NULL)
428 		return;
429 	reg = pci_read_config(bridge, 0x48, 1);
430 	pci_write_config(bridge, 0x48, reg|0x40, 1);
431 
432 	/* XXX */
433 	btag = I386_BUS_SPACE_IO;
434 
435 	for (i = 0; i < cnt; i++) {
436 		bus_space_write_1(btag, 0x0, 0x70, i + off);
437 		*(dest + i) = bus_space_read_1(btag, 0x0, 0x71);
438 	}
439 
440 	pci_write_config(bridge, 0x48, reg & ~0x40, 1);
441 }
442 
443 static void
444 sis_read_mac(struct sis_softc *sc, device_t dev, caddr_t dest)
445 {
446 	uint32_t filtsave, csrsave;
447 
448 	filtsave = CSR_READ_4(sc, SIS_RXFILT_CTL);
449 	csrsave = CSR_READ_4(sc, SIS_CSR);
450 
451 	CSR_WRITE_4(sc, SIS_CSR, SIS_CSR_RELOAD | filtsave);
452 	CSR_WRITE_4(sc, SIS_CSR, 0);
453 
454 	CSR_WRITE_4(sc, SIS_RXFILT_CTL, filtsave & ~SIS_RXFILTCTL_ENABLE);
455 
456 	CSR_WRITE_4(sc, SIS_RXFILT_CTL, SIS_FILTADDR_PAR0);
457 	((uint16_t *)dest)[0] = CSR_READ_2(sc, SIS_RXFILT_DATA);
458 	CSR_WRITE_4(sc, SIS_RXFILT_CTL,SIS_FILTADDR_PAR1);
459 	((uint16_t *)dest)[1] = CSR_READ_2(sc, SIS_RXFILT_DATA);
460 	CSR_WRITE_4(sc, SIS_RXFILT_CTL, SIS_FILTADDR_PAR2);
461 	((uint16_t *)dest)[2] = CSR_READ_2(sc, SIS_RXFILT_DATA);
462 
463 	CSR_WRITE_4(sc, SIS_RXFILT_CTL, filtsave);
464 	CSR_WRITE_4(sc, SIS_CSR, csrsave);
465 }
466 #endif
467 
468 /*
469  * Sync the PHYs by setting the data bit and strobing the clock 32 times.
470  */
471 static void
472 sis_mii_sync(struct sis_softc *sc)
473 {
474 	int i;
475 
476 	SIO_SET(SIS_MII_DIR|SIS_MII_DATA);
477 
478 	for (i = 0; i < 32; i++) {
479 		SIO_SET(SIS_MII_CLK);
480 		DELAY(1);
481 		SIO_CLR(SIS_MII_CLK);
482 		DELAY(1);
483 	}
484 }
485 
486 /*
487  * Clock a series of bits through the MII.
488  */
489 static void
490 sis_mii_send(struct sis_softc *sc, uint32_t bits, int cnt)
491 {
492 	int i;
493 
494 	SIO_CLR(SIS_MII_CLK);
495 
496 	for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
497 		if (bits & i)
498 			SIO_SET(SIS_MII_DATA);
499 		else
500 			SIO_CLR(SIS_MII_DATA);
501 		DELAY(1);
502 		SIO_CLR(SIS_MII_CLK);
503 		DELAY(1);
504 		SIO_SET(SIS_MII_CLK);
505 	}
506 }
507 
508 /*
509  * Read a PHY register through the MII.
510  */
511 static int
512 sis_mii_readreg(struct sis_softc *sc, struct sis_mii_frame *frame)
513 {
514 	int i, ack;
515 
516 	/*
517 	 * Set up frame for RX.
518 	 */
519 	frame->mii_stdelim = SIS_MII_STARTDELIM;
520 	frame->mii_opcode = SIS_MII_READOP;
521 	frame->mii_turnaround = 0;
522 	frame->mii_data = 0;
523 
524 	/*
525  	 * Turn on data xmit.
526 	 */
527 	SIO_SET(SIS_MII_DIR);
528 
529 	sis_mii_sync(sc);
530 
531 	/*
532 	 * Send command/address info.
533 	 */
534 	sis_mii_send(sc, frame->mii_stdelim, 2);
535 	sis_mii_send(sc, frame->mii_opcode, 2);
536 	sis_mii_send(sc, frame->mii_phyaddr, 5);
537 	sis_mii_send(sc, frame->mii_regaddr, 5);
538 
539 	/* Idle bit */
540 	SIO_CLR((SIS_MII_CLK|SIS_MII_DATA));
541 	DELAY(1);
542 	SIO_SET(SIS_MII_CLK);
543 	DELAY(1);
544 
545 	/* Turn off xmit. */
546 	SIO_CLR(SIS_MII_DIR);
547 
548 	/* Check for ack */
549 	SIO_CLR(SIS_MII_CLK);
550 	DELAY(1);
551 	ack = CSR_READ_4(sc, SIS_EECTL) & SIS_MII_DATA;
552 	SIO_SET(SIS_MII_CLK);
553 	DELAY(1);
554 
555 	/*
556 	 * Now try reading data bits. If the ack failed, we still
557 	 * need to clock through 16 cycles to keep the PHY(s) in sync.
558 	 */
559 	if (ack) {
560 		for(i = 0; i < 16; i++) {
561 			SIO_CLR(SIS_MII_CLK);
562 			DELAY(1);
563 			SIO_SET(SIS_MII_CLK);
564 			DELAY(1);
565 		}
566 		goto fail;
567 	}
568 
569 	for (i = 0x8000; i; i >>= 1) {
570 		SIO_CLR(SIS_MII_CLK);
571 		DELAY(1);
572 		if (!ack) {
573 			if (CSR_READ_4(sc, SIS_EECTL) & SIS_MII_DATA)
574 				frame->mii_data |= i;
575 			DELAY(1);
576 		}
577 		SIO_SET(SIS_MII_CLK);
578 		DELAY(1);
579 	}
580 
581 fail:
582 
583 	SIO_CLR(SIS_MII_CLK);
584 	DELAY(1);
585 	SIO_SET(SIS_MII_CLK);
586 	DELAY(1);
587 
588 	if (ack)
589 		return(1);
590 	return(0);
591 }
592 
593 /*
594  * Write to a PHY register through the MII.
595  */
596 static int
597 sis_mii_writereg(struct sis_softc *sc, struct sis_mii_frame *frame)
598 {
599 	/*
600 	 * Set up frame for TX.
601 	 */
602 
603 	frame->mii_stdelim = SIS_MII_STARTDELIM;
604 	frame->mii_opcode = SIS_MII_WRITEOP;
605 	frame->mii_turnaround = SIS_MII_TURNAROUND;
606 
607 	/*
608 	 * Turn on data output.
609 	 */
610 	SIO_SET(SIS_MII_DIR);
611 
612 	sis_mii_sync(sc);
613 
614 	sis_mii_send(sc, frame->mii_stdelim, 2);
615 	sis_mii_send(sc, frame->mii_opcode, 2);
616 	sis_mii_send(sc, frame->mii_phyaddr, 5);
617 	sis_mii_send(sc, frame->mii_regaddr, 5);
618 	sis_mii_send(sc, frame->mii_turnaround, 2);
619 	sis_mii_send(sc, frame->mii_data, 16);
620 
621 	/* Idle bit. */
622 	SIO_SET(SIS_MII_CLK);
623 	DELAY(1);
624 	SIO_CLR(SIS_MII_CLK);
625 	DELAY(1);
626 
627 	/*
628 	 * Turn off xmit.
629 	 */
630 	SIO_CLR(SIS_MII_DIR);
631 
632 	return(0);
633 }
634 
635 static int
636 sis_miibus_readreg(device_t dev, int phy, int reg)
637 {
638 	struct sis_softc *sc;
639 	struct sis_mii_frame frame;
640 
641 	sc = device_get_softc(dev);
642 
643 	if (sc->sis_type == SIS_TYPE_83815) {
644 		if (phy != 0)
645 			return(0);
646 		/*
647 		 * The NatSemi chip can take a while after
648 		 * a reset to come ready, during which the BMSR
649 		 * returns a value of 0. This is *never* supposed
650 		 * to happen: some of the BMSR bits are meant to
651 		 * be hardwired in the on position, and this can
652 		 * confuse the miibus code a bit during the probe
653 		 * and attach phase. So we make an effort to check
654 		 * for this condition and wait for it to clear.
655 		 */
656 		if (!CSR_READ_4(sc, NS_BMSR))
657 			DELAY(1000);
658 		return CSR_READ_4(sc, NS_BMCR + (reg * 4));
659 	}
660 	/*
661 	 * Chipsets < SIS_635 seem not to be able to read/write
662 	 * through mdio. Use the enhanced PHY access register
663 	 * for them instead.
664 	 */
665 	if (sc->sis_type == SIS_TYPE_900 &&
666 	    sc->sis_rev < SIS_REV_635) {
667 		int i, val = 0;
668 
669 		if (phy != 0)
670 			return(0);
671 
672 		CSR_WRITE_4(sc, SIS_PHYCTL,
673 		    (phy << 11) | (reg << 6) | SIS_PHYOP_READ);
674 		SIS_SETBIT(sc, SIS_PHYCTL, SIS_PHYCTL_ACCESS);
675 
676 		for (i = 0; i < SIS_TIMEOUT; i++) {
677 			if (!(CSR_READ_4(sc, SIS_PHYCTL) & SIS_PHYCTL_ACCESS))
678 				break;
679 		}
680 
681 		if (i == SIS_TIMEOUT) {
682 			device_printf(dev, "PHY failed to come ready\n");
683 			return(0);
684 		}
685 
686 		val = (CSR_READ_4(sc, SIS_PHYCTL) >> 16) & 0xFFFF;
687 
688 		if (val == 0xFFFF)
689 			return(0);
690 
691 		return(val);
692 	} else {
693 		bzero((char *)&frame, sizeof(frame));
694 
695 		frame.mii_phyaddr = phy;
696 		frame.mii_regaddr = reg;
697 		sis_mii_readreg(sc, &frame);
698 
699 		return(frame.mii_data);
700 	}
701 }
702 
703 static int
704 sis_miibus_writereg(device_t dev, int phy, int reg, int data)
705 {
706 	struct sis_softc *sc;
707 	struct sis_mii_frame frame;
708 
709 	sc = device_get_softc(dev);
710 
711 	if (sc->sis_type == SIS_TYPE_83815) {
712 		if (phy != 0)
713 			return(0);
714 		CSR_WRITE_4(sc, NS_BMCR + (reg * 4), data);
715 		return(0);
716 	}
717 
718 	if (sc->sis_type == SIS_TYPE_900 &&
719 	    sc->sis_rev < SIS_REV_635) {
720 		int i;
721 
722 		if (phy != 0)
723 			return(0);
724 
725 		CSR_WRITE_4(sc, SIS_PHYCTL, (data << 16) | (phy << 11) |
726 		    (reg << 6) | SIS_PHYOP_WRITE);
727 		SIS_SETBIT(sc, SIS_PHYCTL, SIS_PHYCTL_ACCESS);
728 
729 		for (i = 0; i < SIS_TIMEOUT; i++) {
730 			if (!(CSR_READ_4(sc, SIS_PHYCTL) & SIS_PHYCTL_ACCESS))
731 				break;
732 		}
733 
734 		if (i == SIS_TIMEOUT)
735 			device_printf(dev, "PHY failed to come ready\n");
736 	} else {
737 		bzero((char *)&frame, sizeof(frame));
738 
739 		frame.mii_phyaddr = phy;
740 		frame.mii_regaddr = reg;
741 		frame.mii_data = data;
742 		sis_mii_writereg(sc, &frame);
743 	}
744 	return(0);
745 }
746 
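/*
 * miibus link/media change callback: reinitialize the chip so that
 * sis_init() reprograms the TX/RX configuration for the new speed and
 * duplex settings.
 */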
747 static void
748 sis_miibus_statchg(device_t dev)
749 {
750 	struct sis_softc *sc;
751 
752 	sc = device_get_softc(dev);
753 	sis_init(sc);
754 }
755 
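/*
 * Compute the multicast filter hash for an Ethernet address: a bit-serial
 * CRC over the 6 address bytes, of which the chip uses only the top bits
 * (how many depends on the filter size, see below).
 */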
756 static uint32_t
757 sis_mchash(struct sis_softc *sc, const uint8_t *addr)
758 {
759 	uint32_t crc, carry;
760 	int i, j;
761 	uint8_t c;
762 
763 	/* Compute CRC for the address value. */
764 	crc = 0xFFFFFFFF; /* initial value */
765 
766 	for (i = 0; i < 6; i++) {
767 		c = *(addr + i);
768 		for (j = 0; j < 8; j++) {
769 			carry = ((crc & 0x80000000) ? 1 : 0) ^ (c & 0x01);
770 			crc <<= 1;
771 			c >>= 1;
772 			if (carry)
773 				crc = (crc ^ 0x04c11db6) | carry;
774 		}
775 	}
776 
777 	/*
778 	 * return the filter bit position
779 	 *
780 	 * The NatSemi chip has a 512-bit filter, which is
781 	 * different from the SiS filter, so we special-case it.
782 	 */
783 	if (sc->sis_type == SIS_TYPE_83815)
784 		return (crc >> 23);
785 	else if (sc->sis_rev >= SIS_REV_635 || sc->sis_rev == SIS_REV_900B)
786 		return (crc >> 24);
787 	else
788 		return (crc >> 25);
789 }
790 
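/*
 * Program the NatSemi multicast hash filter: 512 bits of filter memory,
 * written 16 bits at a time through the RX filter control/data registers.
 */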
791 static void
792 sis_setmulti_ns(struct sis_softc *sc)
793 {
794 	struct ifnet *ifp;
795 	struct ifmultiaddr *ifma;
796 	uint32_t h = 0, i, filtsave;
797 	int bit, index;
798 
799 	ifp = &sc->arpcom.ac_if;
800 
801 	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
802 		SIS_CLRBIT(sc, SIS_RXFILT_CTL, NS_RXFILTCTL_MCHASH);
803 		SIS_SETBIT(sc, SIS_RXFILT_CTL, SIS_RXFILTCTL_ALLMULTI);
804 		return;
805 	}
806 
807 	/*
808 	 * We have to explicitly enable the multicast hash table
809 	 * on the NatSemi chip if we want to use it, which we do.
810 	 */
811 	SIS_SETBIT(sc, SIS_RXFILT_CTL, NS_RXFILTCTL_MCHASH);
812 	SIS_CLRBIT(sc, SIS_RXFILT_CTL, SIS_RXFILTCTL_ALLMULTI);
813 
814 	filtsave = CSR_READ_4(sc, SIS_RXFILT_CTL);
815 
816 	/* first, zot all the existing hash bits */
817 	for (i = 0; i < 32; i++) {
818 		CSR_WRITE_4(sc, SIS_RXFILT_CTL, NS_FILTADDR_FMEM_LO + (i*2));
819 		CSR_WRITE_4(sc, SIS_RXFILT_DATA, 0);
820 	}
821 
822 	LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
823 		if (ifma->ifma_addr->sa_family != AF_LINK)
824 			continue;
825 		h = sis_mchash(sc,
826 			       LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
827 		index = h >> 3;
828 		bit = h & 0x1F;
829 		CSR_WRITE_4(sc, SIS_RXFILT_CTL, NS_FILTADDR_FMEM_LO + index);
830 		if (bit > 0xF)
831 			bit -= 0x10;
832 		SIS_SETBIT(sc, SIS_RXFILT_DATA, (1 << bit));
833 	}
834 
835 	CSR_WRITE_4(sc, SIS_RXFILT_CTL, filtsave);
836 }
837 
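/*
 * Program the SiS multicast hash filter (8 or 16 16-bit words, depending
 * on the chip revision). If there are more multicast addresses than hash
 * words, fall back to receiving all multicast frames.
 */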
838 static void
839 sis_setmulti_sis(struct sis_softc *sc)
840 {
841 	struct ifnet *ifp;
842 	struct ifmultiaddr *ifma;
843 	uint32_t h, i, n, ctl;
844 	uint16_t hashes[16];
845 
846 	ifp = &sc->arpcom.ac_if;
847 
848 	/* hash table size */
849 	if (sc->sis_rev >= SIS_REV_635 || sc->sis_rev == SIS_REV_900B)
850 		n = 16;
851 	else
852 		n = 8;
853 
854 	ctl = CSR_READ_4(sc, SIS_RXFILT_CTL) & SIS_RXFILTCTL_ENABLE;
855 
856 	if (ifp->if_flags & IFF_BROADCAST)
857 		ctl |= SIS_RXFILTCTL_BROAD;
858 
859 	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
860 		ctl |= SIS_RXFILTCTL_ALLMULTI;
861 		if (ifp->if_flags & IFF_PROMISC)
862 			ctl |= SIS_RXFILTCTL_BROAD|SIS_RXFILTCTL_ALLPHYS;
863 		for (i = 0; i < n; i++)
864 			hashes[i] = ~0;
865 	} else {
866 		for (i = 0; i < n; i++)
867 			hashes[i] = 0;
868 		i = 0;
869 		LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
870 			if (ifma->ifma_addr->sa_family != AF_LINK)
871 				continue;
872 			h = sis_mchash(sc,
873 			    LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
874 			hashes[h >> 4] |= 1 << (h & 0xf);
875 			i++;
876 		}
877 		if (i > n) {
878 			ctl |= SIS_RXFILTCTL_ALLMULTI;
879 			for (i = 0; i < n; i++)
880 				hashes[i] = ~0;
881 		}
882 	}
883 
884 	for (i = 0; i < n; i++) {
885 		CSR_WRITE_4(sc, SIS_RXFILT_CTL, (4 + i) << 16);
886 		CSR_WRITE_4(sc, SIS_RXFILT_DATA, hashes[i]);
887 	}
888 
889 	CSR_WRITE_4(sc, SIS_RXFILT_CTL, ctl);
890 }
891 
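/*
 * Issue a software reset and wait for the chip to clear the reset bit.
 * On the NatSemi chip, also clear any pending PME status afterwards.
 */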
892 static void
893 sis_reset(struct sis_softc *sc)
894 {
895 	struct ifnet *ifp = &sc->arpcom.ac_if;
896 	int i;
897 
898 	SIS_SETBIT(sc, SIS_CSR, SIS_CSR_RESET);
899 
900 	for (i = 0; i < SIS_TIMEOUT; i++) {
901 		if (!(CSR_READ_4(sc, SIS_CSR) & SIS_CSR_RESET))
902 			break;
903 	}
904 
905 	if (i == SIS_TIMEOUT)
906 		if_printf(ifp, "reset never completed\n");
907 
908 	/* Wait a little while for the chip to get its brains in order. */
909 	DELAY(1000);
910 
911 	/*
912 	 * If this is a NatSemi chip, make sure to clear
913 	 * PME mode.
914 	 */
915 	if (sc->sis_type == SIS_TYPE_83815) {
916 		CSR_WRITE_4(sc, NS_CLKRUN, NS_CLKRUN_PMESTS);
917 		CSR_WRITE_4(sc, NS_CLKRUN, 0);
918 	}
919 }
920 
921 /*
922  * Probe for an SiS chip. Check the PCI vendor and device
923  * IDs against our list and return a device name if we find a match.
924  */
925 static int
926 sis_probe(device_t dev)
927 {
928 	struct sis_type *t;
929 
930 	t = sis_devs;
931 
932 	while(t->sis_name != NULL) {
933 		if ((pci_get_vendor(dev) == t->sis_vid) &&
934 		    (pci_get_device(dev) == t->sis_did)) {
935 			device_set_desc(dev, t->sis_name);
936 			return(0);
937 		}
938 		t++;
939 	}
940 
941 	return(ENXIO);
942 }
943 
944 /*
945  * Attach the interface. Allocate softc structures, do ifmedia
946  * setup and ethernet/BPF attach.
947  */
948 static int
949 sis_attach(device_t dev)
950 {
951 	uint8_t eaddr[ETHER_ADDR_LEN];
952 	uint32_t command;
953 	struct sis_softc *sc;
954 	struct ifnet *ifp;
955 	int error, rid, waittime;
956 
957 	error = waittime = 0;
958 	sc = device_get_softc(dev);
959 
960 	if (pci_get_device(dev) == PCI_PRODUCT_SIS_900)
961 		sc->sis_type = SIS_TYPE_900;
962 	if (pci_get_device(dev) == PCI_PRODUCT_SIS_7016)
963 		sc->sis_type = SIS_TYPE_7016;
964 	if (pci_get_vendor(dev) == PCI_VENDOR_NS)
965 		sc->sis_type = SIS_TYPE_83815;
966 
967 	sc->sis_rev = pci_read_config(dev, PCIR_REVID, 1);
968 
969 	/*
970 	 * Handle power management nonsense.
971 	 */
972 
973 	command = pci_read_config(dev, SIS_PCI_CAPID, 4) & 0x000000FF;
974 	if (command == 0x01) {
975 
976 		command = pci_read_config(dev, SIS_PCI_PWRMGMTCTRL, 4);
977 		if (command & SIS_PSTATE_MASK) {
978 			uint32_t		iobase, membase, irq;
979 
980 			/* Save important PCI config data. */
981 			iobase = pci_read_config(dev, SIS_PCI_LOIO, 4);
982 			membase = pci_read_config(dev, SIS_PCI_LOMEM, 4);
983 			irq = pci_read_config(dev, SIS_PCI_INTLINE, 4);
984 
985 			/* Reset the power state. */
986 			device_printf(dev, "chip is in D%d power mode "
987 			    "-- setting to D0\n", command & SIS_PSTATE_MASK);
988 			command &= 0xFFFFFFFC;
989 			pci_write_config(dev, SIS_PCI_PWRMGMTCTRL, command, 4);
990 
991 			/* Restore PCI config data. */
992 			pci_write_config(dev, SIS_PCI_LOIO, iobase, 4);
993 			pci_write_config(dev, SIS_PCI_LOMEM, membase, 4);
994 			pci_write_config(dev, SIS_PCI_INTLINE, irq, 4);
995 		}
996 	}
997 
998 	/*
999 	 * Map control/status registers.
1000 	 */
1001 	command = pci_read_config(dev, PCIR_COMMAND, 4);
1002 	command |= (PCIM_CMD_PORTEN|PCIM_CMD_MEMEN|PCIM_CMD_BUSMASTEREN);
1003 	pci_write_config(dev, PCIR_COMMAND, command, 4);
1004 	command = pci_read_config(dev, PCIR_COMMAND, 4);
1005 
1006 #ifdef SIS_USEIOSPACE
1007 	if (!(command & PCIM_CMD_PORTEN)) {
1008 		device_printf(dev, "failed to enable I/O ports!\n");
1009 		error = ENXIO;
1010 		goto fail;
1011 	}
1012 #else
1013 	if (!(command & PCIM_CMD_MEMEN)) {
1014 		device_printf(dev, "failed to enable memory mapping!\n");
1015 		error = ENXIO;
1016 		goto fail;
1017 	}
1018 #endif
1019 
1020 	rid = SIS_RID;
1021 	sc->sis_res = bus_alloc_resource_any(dev, SIS_RES, &rid, RF_ACTIVE);
1022 
1023 	if (sc->sis_res == NULL) {
1024 		device_printf(dev, "couldn't map ports/memory\n");
1025 		error = ENXIO;
1026 		goto fail;
1027 	}
1028 
1029 	sc->sis_btag = rman_get_bustag(sc->sis_res);
1030 	sc->sis_bhandle = rman_get_bushandle(sc->sis_res);
1031 
1032 	/* Allocate interrupt */
1033 	rid = 0;
1034 	sc->sis_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
1035 	    RF_SHAREABLE | RF_ACTIVE);
1036 
1037 	if (sc->sis_irq == NULL) {
1038 		device_printf(dev, "couldn't map interrupt\n");
1039 		error = ENXIO;
1040 		goto fail;
1041 	}
1042 
1043 	/* Reset the adapter. */
1044 	sis_reset(sc);
1045 
1046 	if (sc->sis_type == SIS_TYPE_900 &&
1047             (sc->sis_rev == SIS_REV_635 ||
1048              sc->sis_rev == SIS_REV_900B)) {
1049 		SIO_SET(SIS_CFG_RND_CNT);
1050 		SIO_SET(SIS_CFG_PERR_DETECT);
1051 	}
1052 
1053 	/*
1054 	 * Get station address from the EEPROM.
1055 	 */
1056 	switch (pci_get_vendor(dev)) {
1057 	case PCI_VENDOR_NS:
1058 		/*
1059 		 * Reading the MAC address out of the EEPROM on
1060 		 * the NatSemi chip takes a bit more work than
1061 		 * you'd expect. The address spans 4 16-bit words,
1062 		 * with the first word containing only a single bit.
1063 		 * You have to shift everything over one bit to
1064 		 * get it aligned properly. Also, the bits are
1065 		 * stored backwards (the LSB is really the MSB,
1066 		 * and so on) so you have to reverse them in order
1067 		 * to get the MAC address into the form we want.
1068 		 * Why? Who the hell knows.
1069 		 */
1070 		{
1071 			uint16_t		tmp[4];
1072 
1073 			sis_read_eeprom(sc, (caddr_t)&tmp,
1074 			    NS_EE_NODEADDR, 4, 0);
1075 
1076 			/* Shift everything over one bit. */
1077 			tmp[3] = tmp[3] >> 1;
1078 			tmp[3] |= tmp[2] << 15;
1079 			tmp[2] = tmp[2] >> 1;
1080 			tmp[2] |= tmp[1] << 15;
1081 			tmp[1] = tmp[1] >> 1;
1082 			tmp[1] |= tmp[0] << 15;
1083 
1084 			/* Now reverse all the bits. */
1085 			tmp[3] = sis_reverse(tmp[3]);
1086 			tmp[2] = sis_reverse(tmp[2]);
1087 			tmp[1] = sis_reverse(tmp[1]);
1088 
1089 			bcopy((char *)&tmp[1], eaddr, ETHER_ADDR_LEN);
1090 		}
1091 		break;
1092 	case PCI_VENDOR_SIS:
1093 	default:
1094 #ifdef __i386__
1095 		/*
1096 		 * If this is a SiS 630E chipset with an embedded
1097 		 * SiS 900 controller, we have to read the MAC address
1098 		 * from the APC CMOS RAM. Our method for doing this
1099 		 * is very ugly since we have to reach out and grab
1100 		 * ahold of hardware for which we cannot properly
1101 		 * allocate resources. This code is only compiled on
1102 		 * the i386 architecture since the SiS 630E chipset
1103 		 * is for x86 motherboards only. Note that there are
1104 		 * a lot of magic numbers in this hack. These are
1105 		 * taken from SiS's Linux driver. I'd like to replace
1106 		 * them with proper symbolic definitions, but that
1107 		 * requires some datasheets that I don't have access
1108 		 * to at the moment.
1109 		 */
1110 		if (sc->sis_rev == SIS_REV_630S ||
1111 		    sc->sis_rev == SIS_REV_630E ||
1112 		    sc->sis_rev == SIS_REV_630EA1)
1113 			sis_read_cmos(sc, dev, (caddr_t)&eaddr, 0x9, 6);
1114 
1115 		else if (sc->sis_rev == SIS_REV_635 ||
1116 			 sc->sis_rev == SIS_REV_630ET)
1117 			sis_read_mac(sc, dev, (caddr_t)&eaddr);
1118 		else if (sc->sis_rev == SIS_REV_96x) {
1119 			/*
1120 			 * Allow the LAN part to read the EEPROM; it is shared
1121 			 * between a 1394 controller and the NIC, and each
1122 			 * time we access it we need to set SIS_EECMD_REQ.
1123 			 */
1124 			SIO_SET(SIS_EECMD_REQ);
1125 			for (waittime = 0; waittime < SIS_TIMEOUT;
1126 			    waittime++) {
1127 				/* Force EEPROM to idle state. */
1128 				sis_eeprom_idle(sc);
1129 				if (CSR_READ_4(sc, SIS_EECTL) & SIS_EECMD_GNT) {
1130 					sis_read_eeprom(sc, (caddr_t)&eaddr,
1131 					    SIS_EE_NODEADDR, 3, 0);
1132 					break;
1133 				}
1134 				DELAY(1);
1135 			}
1136 			/*
1137 			 * Set SIS_EECTL_CLK to high, so another master
1138 			 * can operate on the i2c bus.
1139 			 */
1140 			SIO_SET(SIS_EECTL_CLK);
1141 			/* Give up EEPROM access by the LAN part */
1142 			SIO_SET(SIS_EECMD_DONE);
1143 		} else
1144 #endif
1145 			sis_read_eeprom(sc, (caddr_t)&eaddr,
1146 			    SIS_EE_NODEADDR, 3, 0);
1147 		break;
1148 	}
1149 
1150 	callout_init(&sc->sis_timer);
1151 
1152 	/*
1153 	 * Allocate the parent bus DMA tag appropriate for PCI.
1154 	 */
1155 #define SIS_NSEG_NEW 32
1156 	error = bus_dma_tag_create(NULL,	/* parent */
1157 			1, 0,			/* alignment, boundary */
1158 			BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
1159 			BUS_SPACE_MAXADDR,	/* highaddr */
1160 			NULL, NULL,		/* filter, filterarg */
1161 			MAXBSIZE, SIS_NSEG_NEW,	/* maxsize, nsegments */
1162 			BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
1163 			BUS_DMA_ALLOCNOW,	/* flags */
1164 			&sc->sis_parent_tag);
1165 	if (error)
1166 		goto fail;
1167 
1168 	/*
1169 	 * Now allocate a tag for the DMA descriptor lists and a chunk
1170 	 * of DMA-able memory based on the tag. Also obtain the physical
1171 	 * addresses of the RX and TX ring, which we'll need later.
1172 	 * All of our lists are allocated as a contiguous block of memory.
1173 	 */
1174 	error = bus_dma_tag_create(sc->sis_parent_tag,	/* parent */
1175 			1, 0,			/* alignment, boundary */
1176 			BUS_SPACE_MAXADDR,	/* lowaddr */
1177 			BUS_SPACE_MAXADDR,	/* highaddr */
1178 			NULL, NULL,		/* filter, filterarg */
1179 			SIS_RX_LIST_SZ, 1,	/* maxsize, nsegments */
1180 			BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
1181 			0,			/* flags */
1182 			&sc->sis_ldata.sis_rx_tag);
1183 	if (error)
1184 		goto fail;
1185 
1186 	error = bus_dmamem_alloc(sc->sis_ldata.sis_rx_tag,
1187 				 (void **)&sc->sis_ldata.sis_rx_list,
1188 				 BUS_DMA_WAITOK | BUS_DMA_ZERO,
1189 				 &sc->sis_ldata.sis_rx_dmamap);
1190 
1191 	if (error) {
1192 		device_printf(dev, "no memory for rx list buffers!\n");
1193 		bus_dma_tag_destroy(sc->sis_ldata.sis_rx_tag);
1194 		sc->sis_ldata.sis_rx_tag = NULL;
1195 		goto fail;
1196 	}
1197 
1198 	error = bus_dmamap_load(sc->sis_ldata.sis_rx_tag,
1199 				sc->sis_ldata.sis_rx_dmamap,
1200 				sc->sis_ldata.sis_rx_list,
1201 				sizeof(struct sis_desc), sis_dma_map_ring,
1202 				&sc->sis_cdata.sis_rx_paddr, 0);
1203 
1204 	if (error) {
1205 		device_printf(dev, "cannot get address of the rx ring!\n");
1206 		bus_dmamem_free(sc->sis_ldata.sis_rx_tag,
1207 				sc->sis_ldata.sis_rx_list,
1208 				sc->sis_ldata.sis_rx_dmamap);
1209 		bus_dma_tag_destroy(sc->sis_ldata.sis_rx_tag);
1210 		sc->sis_ldata.sis_rx_tag = NULL;
1211 		goto fail;
1212 	}
1213 
1214 	error = bus_dma_tag_create(sc->sis_parent_tag,	/* parent */
1215 			1, 0,			/* alignment, boundary */
1216 			BUS_SPACE_MAXADDR,	/* lowaddr */
1217 			BUS_SPACE_MAXADDR,	/* highaddr */
1218 			NULL, NULL,		/* filter, filterarg */
1219 			SIS_TX_LIST_SZ, 1,	/* maxsize, nsegments */
1220 			BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
1221 			0,			/* flags */
1222 			&sc->sis_ldata.sis_tx_tag);
1223 	if (error)
1224 		goto fail;
1225 
1226 	error = bus_dmamem_alloc(sc->sis_ldata.sis_tx_tag,
1227 				 (void **)&sc->sis_ldata.sis_tx_list,
1228 				 BUS_DMA_WAITOK | BUS_DMA_ZERO,
1229 				 &sc->sis_ldata.sis_tx_dmamap);
1230 
1231 	if (error) {
1232 		device_printf(dev, "no memory for tx list buffers!\n");
1233 		bus_dma_tag_destroy(sc->sis_ldata.sis_tx_tag);
1234 		sc->sis_ldata.sis_tx_tag = NULL;
1235 		goto fail;
1236 	}
1237 
1238 	error = bus_dmamap_load(sc->sis_ldata.sis_tx_tag,
1239 				sc->sis_ldata.sis_tx_dmamap,
1240 				sc->sis_ldata.sis_tx_list,
1241 				sizeof(struct sis_desc), sis_dma_map_ring,
1242 				&sc->sis_cdata.sis_tx_paddr, 0);
1243 
1244 	if (error) {
1245 		device_printf(dev, "cannot get address of the tx ring!\n");
1246 		bus_dmamem_free(sc->sis_ldata.sis_tx_tag,
1247 				sc->sis_ldata.sis_tx_list,
1248 				sc->sis_ldata.sis_tx_dmamap);
1249 		bus_dma_tag_destroy(sc->sis_ldata.sis_tx_tag);
1250 		sc->sis_ldata.sis_tx_tag = NULL;
1251 		goto fail;
1252 	}
1253 
1254 	error = bus_dma_tag_create(sc->sis_parent_tag,	/* parent */
1255 			1, 0,			/* alignment, boundary */
1256 			BUS_SPACE_MAXADDR,	/* lowaddr */
1257 			BUS_SPACE_MAXADDR,	/* highaddr */
1258 			NULL, NULL,		/* filter, filterarg */
1259 			MCLBYTES, 1,		/* maxsize, nsegments */
1260 			BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
1261 			0,			/* flags */
1262 			&sc->sis_tag);
1263 	if (error)
1264 		goto fail;
1265 
1266 	ifp = &sc->arpcom.ac_if;
1267 	ifp->if_softc = sc;
1268 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1269 	ifp->if_mtu = ETHERMTU;
1270 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1271 	ifp->if_ioctl = sis_ioctl;
1272 	ifp->if_start = sis_start;
1273 	ifp->if_watchdog = sis_watchdog;
1274 	ifp->if_init = sis_init;
1275 	ifp->if_baudrate = 10000000;
1276 	ifq_set_maxlen(&ifp->if_snd, SIS_TX_LIST_CNT - 1);
1277 	ifq_set_ready(&ifp->if_snd);
1278 #ifdef DEVICE_POLLING
1279 	ifp->if_poll = sis_poll;
1280 #endif
1281 	ifp->if_capenable = ifp->if_capabilities;
1282 
1283 	/*
1284 	 * Do MII setup.
1285 	 */
1286 	if (mii_phy_probe(dev, &sc->sis_miibus,
1287 	    sis_ifmedia_upd, sis_ifmedia_sts)) {
1288 		device_printf(dev, "MII without any PHY!\n");
1289 		error = ENXIO;
1290 		goto fail;
1291 	}
1292 
1293 	/*
1294 	 * Call MI attach routine.
1295 	 */
1296 	ether_ifattach(ifp, eaddr, NULL);
1297 
1298 	/*
1299 	 * Tell the upper layer(s) we support long frames.
1300 	 */
1301 	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
1302 
1303 	error = bus_setup_intr(dev, sc->sis_irq, INTR_MPSAFE,
1304 			       sis_intr, sc,
1305 			       &sc->sis_intrhand,
1306 			       ifp->if_serializer);
1307 
1308 	if (error) {
1309 		device_printf(dev, "couldn't set up irq\n");
1310 		ether_ifdetach(ifp);
1311 		goto fail;
1312 	}
1313 
1314 	ifp->if_cpuid = ithread_cpuid(rman_get_start(sc->sis_irq));
1315 	KKASSERT(ifp->if_cpuid >= 0 && ifp->if_cpuid < ncpus);
1316 
1317 fail:
1318 	if (error)
1319 		sis_detach(dev);
1320 
1321 	return(error);
1322 }
1323 
1324 /*
1325  * Shut down the hardware and free up resources. This routine is called in both
1326  * the error case and the normal detach case, so it needs to be careful to free
1327  * only resources that have actually been allocated.
1328  */
1329 static int
1330 sis_detach(device_t dev)
1331 {
1332 	struct sis_softc *sc = device_get_softc(dev);
1333 	struct ifnet *ifp = &sc->arpcom.ac_if;
1334 
1335 
1336 	if (device_is_attached(dev)) {
1337 		lwkt_serialize_enter(ifp->if_serializer);
1338 		sis_reset(sc);
1339 		sis_stop(sc);
1340 		bus_teardown_intr(dev, sc->sis_irq, sc->sis_intrhand);
1341 		lwkt_serialize_exit(ifp->if_serializer);
1342 
1343 		ether_ifdetach(ifp);
1344 	}
1345 	if (sc->sis_miibus)
1346 		device_delete_child(dev, sc->sis_miibus);
1347 	bus_generic_detach(dev);
1348 
1349 	if (sc->sis_irq)
1350 		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sis_irq);
1351 	if (sc->sis_res)
1352 		bus_release_resource(dev, SIS_RES, SIS_RID, sc->sis_res);
1353 
1354 	if (sc->sis_ldata.sis_rx_tag) {
1355 		bus_dmamap_unload(sc->sis_ldata.sis_rx_tag,
1356 				  sc->sis_ldata.sis_rx_dmamap);
1357 		bus_dmamem_free(sc->sis_ldata.sis_rx_tag,
1358 				sc->sis_ldata.sis_rx_list,
1359 				sc->sis_ldata.sis_rx_dmamap);
1360 		bus_dma_tag_destroy(sc->sis_ldata.sis_rx_tag);
1361 	}
1362 
1363 	if (sc->sis_ldata.sis_tx_tag) {
1364 		bus_dmamap_unload(sc->sis_ldata.sis_tx_tag,
1365 				  sc->sis_ldata.sis_tx_dmamap);
1366 		bus_dmamem_free(sc->sis_ldata.sis_tx_tag,
1367 				sc->sis_ldata.sis_tx_list,
1368 				sc->sis_ldata.sis_tx_dmamap);
1369 		bus_dma_tag_destroy(sc->sis_ldata.sis_tx_tag);
1370 	}
1371 	if (sc->sis_tag)
1372 		bus_dma_tag_destroy(sc->sis_tag);
1373 	if (sc->sis_parent_tag)
1374 		bus_dma_tag_destroy(sc->sis_parent_tag);
1375 
1376 	return(0);
1377 }
1378 
1379 /*
1380  * Initialize the transmit descriptors.
1381  */
1382 static int
1383 sis_list_tx_init(struct sis_softc *sc)
1384 {
1385 	struct sis_list_data *ld;
1386 	struct sis_ring_data *cd;
1387 	int i, nexti;
1388 
1389 	cd = &sc->sis_cdata;
1390 	ld = &sc->sis_ldata;
1391 
1392 	for (i = 0; i < SIS_TX_LIST_CNT; i++) {
1393 		nexti = (i == (SIS_TX_LIST_CNT - 1)) ? 0 : i+1;
1394 		ld->sis_tx_list[i].sis_nextdesc =
1395 			    &ld->sis_tx_list[nexti];
1396 		bus_dmamap_load(sc->sis_ldata.sis_tx_tag,
1397 				sc->sis_ldata.sis_tx_dmamap,
1398 				&ld->sis_tx_list[nexti],
1399 				sizeof(struct sis_desc), sis_dma_map_desc_next,
1400 				&ld->sis_tx_list[i], 0);
1401 		ld->sis_tx_list[i].sis_mbuf = NULL;
1402 		ld->sis_tx_list[i].sis_ptr = 0;
1403 		ld->sis_tx_list[i].sis_ctl = 0;
1404 	}
1405 
1406 	cd->sis_tx_prod = cd->sis_tx_cons = cd->sis_tx_cnt = 0;
1407 
1408 	bus_dmamap_sync(sc->sis_ldata.sis_tx_tag, sc->sis_ldata.sis_tx_dmamap,
1409 			BUS_DMASYNC_PREWRITE);
1410 
1411 	return(0);
1412 }
1413 
1414 /*
1415  * Initialize the RX descriptors and allocate mbufs for them. Note that
1416  * we arrange the descriptors in a closed ring, so that the last descriptor
1417  * points back to the first.
1418  */
1419 static int
1420 sis_list_rx_init(struct sis_softc *sc)
1421 {
1422 	struct sis_list_data *ld;
1423 	struct sis_ring_data *cd;
1424 	int i, nexti;
1425 
1426 	ld = &sc->sis_ldata;
1427 	cd = &sc->sis_cdata;
1428 
1429 	for (i = 0; i < SIS_RX_LIST_CNT; i++) {
1430 		if (sis_newbuf(sc, &ld->sis_rx_list[i], NULL) == ENOBUFS)
1431 			return(ENOBUFS);
1432 		nexti = (i == (SIS_RX_LIST_CNT - 1)) ? 0 : i+1;
1433 		ld->sis_rx_list[i].sis_nextdesc =
1434 			    &ld->sis_rx_list[nexti];
1435 		bus_dmamap_load(sc->sis_ldata.sis_rx_tag,
1436 				sc->sis_ldata.sis_rx_dmamap,
1437 				&ld->sis_rx_list[nexti],
1438 				sizeof(struct sis_desc), sis_dma_map_desc_next,
1439 				&ld->sis_rx_list[i], 0);
1440 	}
1441 
1442 	bus_dmamap_sync(sc->sis_ldata.sis_rx_tag, sc->sis_ldata.sis_rx_dmamap,
1443 			BUS_DMASYNC_PREWRITE);
1444 
1445 	cd->sis_rx_prod = 0;
1446 
1447 	return(0);
1448 }
1449 
1450 /*
1451  * Initialize an RX descriptor and attach an MBUF cluster.
1452  */
1453 static int
1454 sis_newbuf(struct sis_softc *sc, struct sis_desc *c, struct mbuf *m)
1455 {
1456 	if (m == NULL) {
1457 		m = m_getcl(MB_DONTWAIT, MT_DATA, M_PKTHDR);
1458 		if (m == NULL)
1459 			return(ENOBUFS);
1460 	} else {
1461 		m->m_data = m->m_ext.ext_buf;
1462 	}
1463 
1464 	c->sis_mbuf = m;
1465 	c->sis_ctl = SIS_RXLEN;
1466 
1467 	bus_dmamap_create(sc->sis_tag, 0, &c->sis_map);
1468 	bus_dmamap_load(sc->sis_tag, c->sis_map, mtod(m, void *), MCLBYTES,
1469 			sis_dma_map_desc_ptr, c, 0);
1470 	bus_dmamap_sync(sc->sis_tag, c->sis_map, BUS_DMASYNC_PREWRITE);
1471 
1472 	return(0);
1473 }
1474 
1475 /*
1476  * A frame has been uploaded: pass the resulting mbuf chain up to
1477  * the higher level protocols.
1478  */
1479 static void
1480 sis_rxeof(struct sis_softc *sc)
1481 {
1482 	struct mbuf *m;
1483 	struct ifnet *ifp;
1484 	struct sis_desc	*cur_rx;
1485 	int i, total_len = 0;
1486 	uint32_t rxstat;
1487 
1488 	ifp = &sc->arpcom.ac_if;
1489 	i = sc->sis_cdata.sis_rx_prod;
1490 
1491 	while(SIS_OWNDESC(&sc->sis_ldata.sis_rx_list[i])) {
1492 
1493 #ifdef DEVICE_POLLING
1494 		if (ifp->if_flags & IFF_POLLING) {
1495 			if (sc->rxcycles <= 0)
1496 				break;
1497 			sc->rxcycles--;
1498 		}
1499 #endif /* DEVICE_POLLING */
1500 		cur_rx = &sc->sis_ldata.sis_rx_list[i];
1501 		rxstat = cur_rx->sis_rxstat;
1502 		bus_dmamap_sync(sc->sis_tag, cur_rx->sis_map,
1503 				BUS_DMASYNC_POSTWRITE);
1504 		bus_dmamap_unload(sc->sis_tag, cur_rx->sis_map);
1505 		bus_dmamap_destroy(sc->sis_tag, cur_rx->sis_map);
1506 		m = cur_rx->sis_mbuf;
1507 		cur_rx->sis_mbuf = NULL;
1508 		total_len = SIS_RXBYTES(cur_rx);
1509 		SIS_INC(i, SIS_RX_LIST_CNT);
1510 
1511 		/*
1512 		 * If an error occurs, update stats, clear the
1513 		 * status word and leave the mbuf cluster in place:
1514 		 * it should simply get re-used next time this descriptor
1515 	 	 * comes up in the ring.
1516 		 */
1517 		if (!(rxstat & SIS_CMDSTS_PKT_OK)) {
1518 			ifp->if_ierrors++;
1519 			if (rxstat & SIS_RXSTAT_COLL)
1520 				ifp->if_collisions++;
1521 			sis_newbuf(sc, cur_rx, m);
1522 			continue;
1523 		}
1524 
1525 		/* No errors; receive the packet. */
1526 #ifdef __i386__
1527 		/*
1528 		 * On the x86 we do not have alignment problems, so try to
1529 		 * allocate a new buffer for the receive ring, and pass up
1530 		 * the one where the packet is already, saving the expensive
1531 		 * copy done in m_devget().
1532 		 * If we are on an architecture with alignment problems, or
1533 		 * if the allocation fails, then use m_devget and leave the
1534 		 * existing buffer in the receive ring.
1535 		 */
1536 		if (sis_newbuf(sc, cur_rx, NULL) == 0)
1537 			m->m_pkthdr.len = m->m_len = total_len;
1538 		else
1539 #endif
1540 		{
1541 			struct mbuf *m0;
1542 			m0 = m_devget(mtod(m, char *) - ETHER_ALIGN,
1543 				total_len + ETHER_ALIGN, 0, ifp, NULL);
1544 			sis_newbuf(sc, cur_rx, m);
1545 			if (m0 == NULL) {
1546 				ifp->if_ierrors++;
1547 				continue;
1548 			}
1549 			m_adj(m0, ETHER_ALIGN);
1550 			m = m0;
1551 		}
1552 
1553 		ifp->if_ipackets++;
1554 		ifp->if_input(ifp, m);
1555 	}
1556 
1557 	sc->sis_cdata.sis_rx_prod = i;
1558 }
1559 
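/*
 * RX error/overflow recovery: reap whatever completed RX descriptors
 * remain, then reinitialize the chip to get the receiver going again.
 */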
1560 static void
1561 sis_rxeoc(struct sis_softc *sc)
1562 {
1563 	sis_rxeof(sc);
1564 	sis_init(sc);
1565 }
1566 
1567 /*
1568  * A frame was downloaded to the chip. It's safe for us to clean up
1569  * the list buffers.
1570  */
1571 
1572 static void
1573 sis_txeof(struct sis_softc *sc)
1574 {
1575 	struct sis_desc *cur_tx;
1576 	struct ifnet *ifp;
1577 	uint32_t idx;
1578 
1579 	ifp = &sc->arpcom.ac_if;
1580 
1581 	/*
1582 	 * Go through our tx list and free mbufs for those
1583 	 * frames that have been transmitted.
1584 	 */
1585 	for (idx = sc->sis_cdata.sis_tx_cons; sc->sis_cdata.sis_tx_cnt > 0;
1586 	     sc->sis_cdata.sis_tx_cnt--, SIS_INC(idx, SIS_TX_LIST_CNT) ) {
1587 		cur_tx = &sc->sis_ldata.sis_tx_list[idx];
1588 
1589 		if (SIS_OWNDESC(cur_tx))
1590 			break;
1591 
1592 		if (cur_tx->sis_ctl & SIS_CMDSTS_MORE)
1593 			continue;
1594 
1595 		if (!(cur_tx->sis_ctl & SIS_CMDSTS_PKT_OK)) {
1596 			ifp->if_oerrors++;
1597 			if (cur_tx->sis_txstat & SIS_TXSTAT_EXCESSCOLLS)
1598 				ifp->if_collisions++;
1599 			if (cur_tx->sis_txstat & SIS_TXSTAT_OUTOFWINCOLL)
1600 				ifp->if_collisions++;
1601 		}
1602 
1603 		ifp->if_collisions +=
1604 		    (cur_tx->sis_txstat & SIS_TXSTAT_COLLCNT) >> 16;
1605 
1606 		ifp->if_opackets++;
1607 		if (cur_tx->sis_mbuf != NULL) {
1608 			m_freem(cur_tx->sis_mbuf);
1609 			cur_tx->sis_mbuf = NULL;
1610 			bus_dmamap_unload(sc->sis_tag, cur_tx->sis_map);
1611 			bus_dmamap_destroy(sc->sis_tag, cur_tx->sis_map);
1612 		}
1613 	}
1614 
1615 	if (idx != sc->sis_cdata.sis_tx_cons) {
1616 		/* we freed up some buffers */
1617 		sc->sis_cdata.sis_tx_cons = idx;
1618 		ifp->if_flags &= ~IFF_OACTIVE;
1619 	}
1620 
1621 	ifp->if_timer = (sc->sis_cdata.sis_tx_cnt == 0) ? 0 : 5;
1622 }
1623 
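/*
 * Once-a-second timer: drive the MII state machine, watch for the link
 * coming up, and kick the transmit queue when it does.
 */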
1624 static void
1625 sis_tick(void *xsc)
1626 {
1627 	struct sis_softc *sc = xsc;
1628 	struct mii_data *mii;
1629 	struct ifnet *ifp = &sc->arpcom.ac_if;
1630 
1631 	lwkt_serialize_enter(ifp->if_serializer);
1632 
1633 	mii = device_get_softc(sc->sis_miibus);
1634 	mii_tick(mii);
1635 
1636 	if (!sc->sis_link) {
1637 		mii_pollstat(mii);
1638 		if (mii->mii_media_status & IFM_ACTIVE &&
1639 		    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE)
1640 			sc->sis_link++;
1641 		if (!ifq_is_empty(&ifp->if_snd))
1642 			if_devstart(ifp);
1643 	}
1644 
1645 	callout_reset(&sc->sis_timer, hz, sis_tick, sc);
1646 	lwkt_serialize_exit(ifp->if_serializer);
1647 }
1648 
1649 #ifdef DEVICE_POLLING
1650 
1651 static void
1652 sis_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
1653 {
1654 	struct  sis_softc *sc = ifp->if_softc;
1655 
1656 	switch(cmd) {
1657 	case POLL_REGISTER:
1658 		/* disable interrupts */
1659 		CSR_WRITE_4(sc, SIS_IER, 0);
1660 		break;
1661 	case POLL_DEREGISTER:
1662 		/* enable interrupts */
1663 		CSR_WRITE_4(sc, SIS_IER, 1);
1664 		break;
1665 	default:
1666 		/*
1667 		 * On the sis, reading the status register also clears it.
1668 		 * So before returning to intr mode we must make sure that all
1669 		 * possible pending sources of interrupts have been served.
1670 		 * In practice this means running the *eof routines to
1671 		 * completion and then calling the interrupt routine.
1672 		 */
1673 		sc->rxcycles = count;
1674 		sis_rxeof(sc);
1675 		sis_txeof(sc);
1676 		if (!ifq_is_empty(&ifp->if_snd))
1677 			if_devstart(ifp);
1678 
1679 		if (sc->rxcycles > 0 || cmd == POLL_AND_CHECK_STATUS) {
1680 			uint32_t status;
1681 
1682 			/* Reading the ISR register clears all interrupts. */
1683 			status = CSR_READ_4(sc, SIS_ISR);
1684 
1685 			if (status & (SIS_ISR_RX_ERR|SIS_ISR_RX_OFLOW))
1686 				sis_rxeoc(sc);
1687 
1688 			if (status & (SIS_ISR_RX_IDLE))
1689 				SIS_SETBIT(sc, SIS_CSR, SIS_CSR_RX_ENABLE);
1690 
1691 			if (status & SIS_ISR_SYSERR) {
1692 				sis_reset(sc);
1693 				sis_init(sc);
1694 			}
1695 		}
1696 		break;
1697 	}
1698 }
1699 #endif /* DEVICE_POLLING */
1700 
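/*
 * Interrupt handler. Reading SIS_ISR acknowledges all pending interrupts,
 * so keep looping until no interesting status bits remain, servicing TX
 * and RX completions and recovering from error conditions along the way.
 */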
1701 static void
1702 sis_intr(void *arg)
1703 {
1704 	struct sis_softc *sc;
1705 	struct ifnet *ifp;
1706 	uint32_t status;
1707 
1708 	sc = arg;
1709 	ifp = &sc->arpcom.ac_if;
1710 
1711 	/* Suppress unwanted interrupts */
1712 	if (!(ifp->if_flags & IFF_UP)) {
1713 		sis_stop(sc);
1714 		return;
1715 	}
1716 
1717 	/* Disable interrupts. */
1718 	CSR_WRITE_4(sc, SIS_IER, 0);
1719 
1720 	for (;;) {
1721 		/* Reading the ISR register clears all interrupts. */
1722 		status = CSR_READ_4(sc, SIS_ISR);
1723 
1724 		if ((status & SIS_INTRS) == 0)
1725 			break;
1726 
1727 		if (status &
1728 		    (SIS_ISR_TX_DESC_OK | SIS_ISR_TX_ERR | SIS_ISR_TX_OK |
1729 		     SIS_ISR_TX_IDLE) )
1730 			sis_txeof(sc);
1731 
1732 		if (status &
1733 		    (SIS_ISR_RX_DESC_OK | SIS_ISR_RX_OK | SIS_ISR_RX_IDLE))
1734 			sis_rxeof(sc);
1735 
1736 		if (status & (SIS_ISR_RX_ERR | SIS_ISR_RX_OFLOW))
1737 			sis_rxeoc(sc);
1738 
1739 		if (status & (SIS_ISR_RX_IDLE))
1740 			SIS_SETBIT(sc, SIS_CSR, SIS_CSR_RX_ENABLE);
1741 
1742 		if (status & SIS_ISR_SYSERR) {
1743 			sis_reset(sc);
1744 			sis_init(sc);
1745 		}
1746 	}
1747 
1748 	/* Re-enable interrupts. */
1749 	CSR_WRITE_4(sc, SIS_IER, 1);
1750 
1751 	if (!ifq_is_empty(&ifp->if_snd))
1752 		if_devstart(ifp);
1753 }
1754 
1755 /*
1756  * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
1757  * pointers to the fragment pointers.
1758  */
1759 static int
1760 sis_encap(struct sis_softc *sc, struct mbuf *m_head, uint32_t *txidx)
1761 {
1762 	struct sis_desc *f = NULL;
1763 	struct mbuf *m;
1764 	int frag, cur, cnt = 0;
1765 
1766 	/*
1767  	 * Start packing the mbufs in this chain into
1768 	 * the fragment pointers. Stop when we run out
1769  	 * of fragments or hit the end of the mbuf chain.
1770 	 */
1771 	cur = frag = *txidx;
1772 
1773 	for (m = m_head; m != NULL; m = m->m_next) {
1774 		if (m->m_len != 0) {
1775 			if ((SIS_TX_LIST_CNT -
1776 			    (sc->sis_cdata.sis_tx_cnt + cnt)) < 2)
1777 				break;
1778 			f = &sc->sis_ldata.sis_tx_list[frag];
1779 			f->sis_ctl = SIS_CMDSTS_MORE | m->m_len;
1780 			bus_dmamap_create(sc->sis_tag, 0, &f->sis_map);
1781 			bus_dmamap_load(sc->sis_tag, f->sis_map,
1782 					mtod(m, void *), m->m_len,
1783 					sis_dma_map_desc_ptr, f, 0);
1784 			bus_dmamap_sync(sc->sis_tag, f->sis_map,
1785 					BUS_DMASYNC_PREREAD);
1786 			if (cnt != 0)
1787 				f->sis_ctl |= SIS_CMDSTS_OWN;
1788 			cur = frag;
1789 			SIS_INC(frag, SIS_TX_LIST_CNT);
1790 			cnt++;
1791 		}
1792 	}
1793 	/* Caller should make sure that 'm_head' is not excessively fragmented */
1794 	KASSERT(m == NULL, ("too many fragments\n"));
1795 
1796 	sc->sis_ldata.sis_tx_list[cur].sis_mbuf = m_head;
1797 	sc->sis_ldata.sis_tx_list[cur].sis_ctl &= ~SIS_CMDSTS_MORE;
1798 	sc->sis_ldata.sis_tx_list[*txidx].sis_ctl |= SIS_CMDSTS_OWN;
1799 	sc->sis_cdata.sis_tx_cnt += cnt;
1800 	*txidx = frag;
1801 
1802 	return(0);
1803 }
1804 
1805 /*
1806  * Main transmit routine. To avoid having to do mbuf copies, we put pointers
1807  * to the mbuf data regions directly in the transmit lists. We also save a
1808  * copy of the pointers since the transmit list fragment pointers are
1809  * physical addresses.
1810  */
1811 
1812 static void
1813 sis_start(struct ifnet *ifp)
1814 {
1815 	struct sis_softc *sc;
1816 	struct mbuf *m_head = NULL, *m_defragged;
1817 	uint32_t idx;
1818 	int need_trans;
1819 
1820 	sc = ifp->if_softc;
1821 
1822 	if (!sc->sis_link) {
1823 		ifq_purge(&ifp->if_snd);
1824 		return;
1825 	}
1826 
1827 	idx = sc->sis_cdata.sis_tx_prod;
1828 
1829 	if ((ifp->if_flags & (IFF_OACTIVE | IFF_RUNNING)) != IFF_RUNNING)
1830 		return;
1831 
1832 	need_trans = 0;
1833 	while (sc->sis_ldata.sis_tx_list[idx].sis_mbuf == NULL) {
1834 		struct mbuf *m;
1835 		int cnt;
1836 
1837 		/*
1838 		 * If there's no way we can send any packets, return now.
1839 		 */
1840 		if (SIS_TX_LIST_CNT - sc->sis_cdata.sis_tx_cnt < 2) {
1841 			ifp->if_flags |= IFF_OACTIVE;
1842 			break;
1843 		}
1844 
1845 		m_defragged = NULL;
1846 		m_head = ifq_dequeue(&ifp->if_snd, NULL);
1847 		if (m_head == NULL)
1848 			break;
1849 
1850 again:
1851 		cnt = 0;
1852 		for (m = m_head; m != NULL; m = m->m_next)
1853 			++cnt;
1854 		if ((SIS_TX_LIST_CNT -
1855 		    (sc->sis_cdata.sis_tx_cnt + cnt)) < 2) {
1856 			if (m_defragged != NULL) {
1857 				/*
1858 				 * Even after defragmentation, there
1859 				 * are still too many fragments, so
1860 				 * drop this packet.
1861 				 */
1862 				m_freem(m_head);
1863 				ifp->if_flags |= IFF_OACTIVE;
1864 				break;
1865 			}
1866 
1867 			m_defragged = m_defrag(m_head, MB_DONTWAIT);
1868 			if (m_defragged == NULL) {
1869 				m_freem(m_head);
1870 				continue;
1871 			}
1872 			m_head = m_defragged;
1873 
1874 			/* Recount # of fragments */
1875 			goto again;
1876 		}
1877 
1878 		sis_encap(sc, m_head, &idx);
1879 		need_trans = 1;
1880 
1881 		/*
1882 		 * If there's a BPF listener, bounce a copy of this frame
1883 		 * to him.
1884 		 */
1885 		BPF_MTAP(ifp, m_head);
1886 	}
1887 
1888 	if (!need_trans)
1889 		return;
1890 
1891 	/* Transmit */
1892 	sc->sis_cdata.sis_tx_prod = idx;
1893 	SIS_SETBIT(sc, SIS_CSR, SIS_CSR_TX_ENABLE);
1894 
1895 	/*
1896 	 * Set a timeout in case the chip goes out to lunch.
1897 	 */
1898 	ifp->if_timer = 5;
1899 }
1900 
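/*
 * (Re)initialize the hardware: program the station address and receive
 * filter, set up the RX/TX descriptor rings, configure the MAC for the
 * current media, and enable interrupts (unless polling is active).
 */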
1901 static void
1902 sis_init(void *xsc)
1903 {
1904 	struct sis_softc *sc = xsc;
1905 	struct ifnet *ifp = &sc->arpcom.ac_if;
1906 	struct mii_data *mii;
1907 
1908 	/*
1909 	 * Cancel pending I/O and free all RX/TX buffers.
1910 	 */
1911 	sis_stop(sc);
1912 
1913 	mii = device_get_softc(sc->sis_miibus);
1914 
1915 	/* Set MAC address */
1916 	if (sc->sis_type == SIS_TYPE_83815) {
1917 		CSR_WRITE_4(sc, SIS_RXFILT_CTL, NS_FILTADDR_PAR0);
1918 		CSR_WRITE_4(sc, SIS_RXFILT_DATA,
1919 		    ((uint16_t *)sc->arpcom.ac_enaddr)[0]);
1920 		CSR_WRITE_4(sc, SIS_RXFILT_CTL, NS_FILTADDR_PAR1);
1921 		CSR_WRITE_4(sc, SIS_RXFILT_DATA,
1922 		    ((uint16_t *)sc->arpcom.ac_enaddr)[1]);
1923 		CSR_WRITE_4(sc, SIS_RXFILT_CTL, NS_FILTADDR_PAR2);
1924 		CSR_WRITE_4(sc, SIS_RXFILT_DATA,
1925 		    ((uint16_t *)sc->arpcom.ac_enaddr)[2]);
1926 	} else {
1927 		CSR_WRITE_4(sc, SIS_RXFILT_CTL, SIS_FILTADDR_PAR0);
1928 		CSR_WRITE_4(sc, SIS_RXFILT_DATA,
1929 		    ((uint16_t *)sc->arpcom.ac_enaddr)[0]);
1930 		CSR_WRITE_4(sc, SIS_RXFILT_CTL, SIS_FILTADDR_PAR1);
1931 		CSR_WRITE_4(sc, SIS_RXFILT_DATA,
1932 		    ((uint16_t *)sc->arpcom.ac_enaddr)[1]);
1933 		CSR_WRITE_4(sc, SIS_RXFILT_CTL, SIS_FILTADDR_PAR2);
1934 		CSR_WRITE_4(sc, SIS_RXFILT_DATA,
1935 		    ((uint16_t *)sc->arpcom.ac_enaddr)[2]);
1936 	}
1937 
1938 	/* Init circular RX list. */
1939 	if (sis_list_rx_init(sc) == ENOBUFS) {
1940 		if_printf(ifp, "initialization failed: "
1941 			  "no memory for rx buffers\n");
1942 		sis_stop(sc);
1943 		return;
1944 	}
1945 
1946 	/*
1947 	 * Init tx descriptors.
1948 	 */
1949 	sis_list_tx_init(sc);
1950 
1951 	/*
1952 	 * For the NatSemi chip, we have to explicitly enable the
1953 	 * reception of ARP frames, as well as turn on the 'perfect
1954 	 * match' filter where we store the station address, otherwise
1955 	 * we won't receive unicasts meant for this host.
1956 	 */
1957 	if (sc->sis_type == SIS_TYPE_83815) {
1958 		SIS_SETBIT(sc, SIS_RXFILT_CTL, NS_RXFILTCTL_ARP);
1959 		SIS_SETBIT(sc, SIS_RXFILT_CTL, NS_RXFILTCTL_PERFECT);
1960 	}
1961 
1962 	/* If we want promiscuous mode, set the allframes bit. */
1963 	if (ifp->if_flags & IFF_PROMISC)
1964 		SIS_SETBIT(sc, SIS_RXFILT_CTL, SIS_RXFILTCTL_ALLPHYS);
1965 	else
1966 		SIS_CLRBIT(sc, SIS_RXFILT_CTL, SIS_RXFILTCTL_ALLPHYS);
1967 
1968 	/*
1969 	 * Set the capture broadcast bit to capture broadcast frames.
1970 	 */
1971 	if (ifp->if_flags & IFF_BROADCAST)
1972 		SIS_SETBIT(sc, SIS_RXFILT_CTL, SIS_RXFILTCTL_BROAD);
1973 	else
1974 		SIS_CLRBIT(sc, SIS_RXFILT_CTL, SIS_RXFILTCTL_BROAD);
1975 
1976 	/*
1977 	 * Load the multicast filter.
1978 	 */
1979 	if (sc->sis_type == SIS_TYPE_83815)
1980 		sis_setmulti_ns(sc);
1981 	else
1982 		sis_setmulti_sis(sc);
1983 
1984 	/* Turn the receive filter on */
1985 	SIS_SETBIT(sc, SIS_RXFILT_CTL, SIS_RXFILTCTL_ENABLE);
1986 
1987 	/*
1988 	 * Load the address of the RX and TX lists.
1989 	 */
1990 	CSR_WRITE_4(sc, SIS_RX_LISTPTR, sc->sis_cdata.sis_rx_paddr);
1991 	CSR_WRITE_4(sc, SIS_TX_LISTPTR, sc->sis_cdata.sis_tx_paddr);
1992 
1993 	/* SIS_CFG_EDB_MASTER_EN indicates the EDB bus is used instead of
1994 	 * the PCI bus. When this bit is set, the Max DMA Burst Size
1995 	 * for TX/RX DMA should be no larger than 16 double words.
1996 	 */
1997 	if (CSR_READ_4(sc, SIS_CFG) & SIS_CFG_EDB_MASTER_EN)
1998 		CSR_WRITE_4(sc, SIS_RX_CFG, SIS_RXCFG64);
1999 	else
2000 		CSR_WRITE_4(sc, SIS_RX_CFG, SIS_RXCFG256);
2001 
2002 	/* Accept Long Packets for VLAN support */
2003 	SIS_SETBIT(sc, SIS_RX_CFG, SIS_RXCFG_RX_JABBER);
2004 
2005 	/* Set TX configuration */
2006 	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_10_T)
2007 		CSR_WRITE_4(sc, SIS_TX_CFG, SIS_TXCFG_10);
2008 	else
2009 		CSR_WRITE_4(sc, SIS_TX_CFG, SIS_TXCFG_100);
2010 
2011 	/* Set full/half duplex mode. */
2012 	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
2013 		SIS_SETBIT(sc, SIS_TX_CFG,
2014 		    (SIS_TXCFG_IGN_HBEAT|SIS_TXCFG_IGN_CARR));
2015 		SIS_SETBIT(sc, SIS_RX_CFG, SIS_RXCFG_RX_TXPKTS);
2016 	} else {
2017 		SIS_CLRBIT(sc, SIS_TX_CFG,
2018 		    (SIS_TXCFG_IGN_HBEAT|SIS_TXCFG_IGN_CARR));
2019 		SIS_CLRBIT(sc, SIS_RX_CFG, SIS_RXCFG_RX_TXPKTS);
2020 	}
2021 
2022 	/*
2023 	 * Enable interrupts.
2024 	 */
2025 	CSR_WRITE_4(sc, SIS_IMR, SIS_INTRS);
2026 #ifdef DEVICE_POLLING
2027 	/*
2028 	 * ... only enable interrupts if we are not polling; make sure
2029 	 * they are off otherwise.
2030 	 */
2031 	if (ifp->if_flags & IFF_POLLING)
2032 		CSR_WRITE_4(sc, SIS_IER, 0);
2033 	else
2034 #endif /* DEVICE_POLLING */
2035 	CSR_WRITE_4(sc, SIS_IER, 1);
2036 
2037 	/* Enable receiver and transmitter. */
2038 	SIS_CLRBIT(sc, SIS_CSR, SIS_CSR_TX_DISABLE|SIS_CSR_RX_DISABLE);
2039 	SIS_SETBIT(sc, SIS_CSR, SIS_CSR_RX_ENABLE);
2040 
2041 #ifdef notdef
2042 	mii_mediachg(mii);
2043 #endif
2044 
2045 	/*
2046 	 * Page 75 of the DP83815 manual recommends the
2047 	 * following register settings "for optimum
2048 	 * performance." Note however that at least three
2049 	 * of the registers are listed as "reserved" in
2050 	 * the register map, so who knows what they do.
2051 	 */
2052 	if (sc->sis_type == SIS_TYPE_83815) {
2053 		CSR_WRITE_4(sc, NS_PHY_PAGE, 0x0001);
2054 		CSR_WRITE_4(sc, NS_PHY_CR, 0x189C);
2055 		CSR_WRITE_4(sc, NS_PHY_TDATA, 0x0000);
2056 		CSR_WRITE_4(sc, NS_PHY_DSPCFG, 0x5040);
2057 		CSR_WRITE_4(sc, NS_PHY_SDCFG, 0x008C);
2058 	}
2059 
2060 	ifp->if_flags |= IFF_RUNNING;
2061 	ifp->if_flags &= ~IFF_OACTIVE;
2062 
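	/* Kick off the periodic (once a second) tick callout. */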
2063 	callout_reset(&sc->sis_timer, hz, sis_tick, sc);
2064 }
2065 
2066 /*
2067  * Set media options.
2068  */
2069 static int
2070 sis_ifmedia_upd(struct ifnet *ifp)
2071 {
2072 	struct sis_softc *sc;
2073 	struct mii_data *mii;
2074 
2075 	sc = ifp->if_softc;
2076 
2077 	mii = device_get_softc(sc->sis_miibus);
2078 	sc->sis_link = 0;
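	/* Reset any attached PHYs before switching media. */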
2079 	if (mii->mii_instance) {
2080 		struct mii_softc	*miisc;
2081 		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
2082 			mii_phy_reset(miisc);
2083 	}
2084 	mii_mediachg(mii);
2085 
2086 	return(0);
2087 }
2088 
2089 /*
2090  * Report current media status.
2091  */
2092 static void
2093 sis_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
2094 {
2095 	struct sis_softc *sc;
2096 	struct mii_data *mii;
2097 
2098 	sc = ifp->if_softc;
2099 
2100 	mii = device_get_softc(sc->sis_miibus);
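	/* Poll the PHY for fresh link and media status before reporting. */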
2101 	mii_pollstat(mii);
2102 	ifmr->ifm_active = mii->mii_media_active;
2103 	ifmr->ifm_status = mii->mii_media_status;
2104 }
2105 
2106 static int
2107 sis_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
2108 {
2109 	struct sis_softc *sc = ifp->if_softc;
2110 	struct ifreq *ifr = (struct ifreq *) data;
2111 	struct mii_data *mii;
2112 	int error = 0;
2113 
2114 	switch(command) {
2115 	case SIOCSIFFLAGS:
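		/*
		 * Bringing the interface up (re)initializes the chip;
		 * marking it down while running stops it.
		 */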
2116 		if (ifp->if_flags & IFF_UP) {
2117 			sis_init(sc);
2118 		} else {
2119 			if (ifp->if_flags & IFF_RUNNING)
2120 				sis_stop(sc);
2121 		}
2122 		error = 0;
2123 		break;
2124 	case SIOCADDMULTI:
2125 	case SIOCDELMULTI:
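		/* Reprogram the multicast filter for this chip variant. */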
2126 		if (sc->sis_type == SIS_TYPE_83815)
2127 			sis_setmulti_ns(sc);
2128 		else
2129 			sis_setmulti_sis(sc);
2130 		error = 0;
2131 		break;
2132 	case SIOCGIFMEDIA:
2133 	case SIOCSIFMEDIA:
2134 		mii = device_get_softc(sc->sis_miibus);
2135 		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
2136 		break;
2137 	default:
2138 		error = ether_ioctl(ifp, command, data);
2139 		break;
2140 	}
2141 	return(error);
2142 }
2143 
2144 static void
2145 sis_watchdog(struct ifnet *ifp)
2146 {
2147 	struct sis_softc *sc;
2148 
2149 	sc = ifp->if_softc;
2150 
2151 	ifp->if_oerrors++;
2152 	if_printf(ifp, "watchdog timeout\n");
2153 
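	/*
	 * Reset and reinitialize the chip, then restart transmission
	 * if packets are still queued.
	 */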
2154 	sis_stop(sc);
2155 	sis_reset(sc);
2156 	sis_init(sc);
2157 
2158 	if (!ifq_is_empty(&ifp->if_snd))
2159 		if_devstart(ifp);
2160 }
2161 
2162 /*
2163  * Stop the adapter and free any mbufs allocated to the
2164  * RX and TX lists.
2165  */
2166 static void
2167 sis_stop(struct sis_softc *sc)
2168 {
2169 	int i;
2170 	struct ifnet *ifp;
2171 
2172 	ifp = &sc->arpcom.ac_if;
2173 	ifp->if_timer = 0;
2174 
2175 	callout_stop(&sc->sis_timer);
2176 
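	/*
	 * Mark the interface stopped, mask and disable interrupts,
	 * halt the TX/RX engines and clear the descriptor list
	 * pointers.
	 */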
2177 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2178 	CSR_WRITE_4(sc, SIS_IER, 0);
2179 	CSR_WRITE_4(sc, SIS_IMR, 0);
2180 	SIS_SETBIT(sc, SIS_CSR, SIS_CSR_TX_DISABLE|SIS_CSR_RX_DISABLE);
2181 	DELAY(1000);
2182 	CSR_WRITE_4(sc, SIS_TX_LISTPTR, 0);
2183 	CSR_WRITE_4(sc, SIS_RX_LISTPTR, 0);
2184 
2185 	sc->sis_link = 0;
2186 
2187 	/*
2188 	 * Free data in the RX lists.
2189 	 */
2190 	for (i = 0; i < SIS_RX_LIST_CNT; i++) {
2191 		if (sc->sis_ldata.sis_rx_list[i].sis_mbuf != NULL) {
2192 			bus_dmamap_unload(sc->sis_tag,
2193 					  sc->sis_ldata.sis_rx_list[i].sis_map);
2194 			bus_dmamap_destroy(sc->sis_tag,
2195 					  sc->sis_ldata.sis_rx_list[i].sis_map);
2196 			m_freem(sc->sis_ldata.sis_rx_list[i].sis_mbuf);
2197 			sc->sis_ldata.sis_rx_list[i].sis_mbuf = NULL;
2198 		}
2199 	}
2200 	bzero(sc->sis_ldata.sis_rx_list, sizeof(sc->sis_ldata.sis_rx_list));
2201 
2202 	/*
2203 	 * Free the TX list buffers.
2204 	 */
2205 	for (i = 0; i < SIS_TX_LIST_CNT; i++) {
2206 		if (sc->sis_ldata.sis_tx_list[i].sis_mbuf != NULL) {
2207 			bus_dmamap_unload(sc->sis_tag,
2208 					  sc->sis_ldata.sis_tx_list[i].sis_map);
2209 			bus_dmamap_destroy(sc->sis_tag,
2210 					  sc->sis_ldata.sis_tx_list[i].sis_map);
2211 			m_freem(sc->sis_ldata.sis_tx_list[i].sis_mbuf);
2212 			sc->sis_ldata.sis_tx_list[i].sis_mbuf = NULL;
2213 		}
2214 	}
2215 
2216 	bzero(sc->sis_ldata.sis_tx_list, sizeof(sc->sis_ldata.sis_tx_list));
2217 }
2218 
2219 /*
2220  * Stop all chip I/O so that the kernel's probe routines don't
2221  * get confused by errant DMAs when rebooting.
2222  */
2223 static void
2224 sis_shutdown(device_t dev)
2225 {
2226 	struct sis_softc	*sc;
2227 	struct ifnet *ifp;
2228 
2229 	sc = device_get_softc(dev);
2230 	ifp = &sc->arpcom.ac_if;
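	/* Hold the interface serializer while quiescing the chip. */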
2231 	lwkt_serialize_enter(ifp->if_serializer);
2232 	sis_reset(sc);
2233 	sis_stop(sc);
2234 	lwkt_serialize_exit(ifp->if_serializer);
2235 }
2236 
2237