1 /*	$OpenBSD: if_sis.c,v 1.97 2009/08/13 14:24:47 jasper Exp $ */
2 /*
3  * Copyright (c) 1997, 1998, 1999
4  *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  * 3. All advertising materials mentioning features or use of this software
15  *    must display the following acknowledgement:
16  *	This product includes software developed by Bill Paul.
17  * 4. Neither the name of the author nor the names of any co-contributors
18  *    may be used to endorse or promote products derived from this software
19  *    without specific prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
22  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
25  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
31  * THE POSSIBILITY OF SUCH DAMAGE.
32  *
33  * $FreeBSD: src/sys/pci/if_sis.c,v 1.30 2001/02/06 10:11:47 phk Exp $
34  */
35 
36 /*
37  * SiS 900/SiS 7016 fast ethernet PCI NIC driver. Datasheets are
38  * available from http://www.sis.com.tw.
39  *
40  * This driver also supports the NatSemi DP83815. Datasheets are
41  * available from http://www.national.com.
42  *
43  * Written by Bill Paul <wpaul@ee.columbia.edu>
44  * Electrical Engineering Department
45  * Columbia University, New York City
46  */
47 
48 /*
49  * The SiS 900 is a fairly simple chip. It uses bus master DMA with
50  * simple TX and RX descriptors of 3 longwords in size. The receiver
51  * has a single perfect filter entry for the station address and a
52  * 128-bit multicast hash table. The SiS 900 has a built-in MII-based
53  * transceiver while the 7016 requires an external transceiver chip.
54  * Both chips offer the standard bit-bang MII interface as well as
55  * an enhanced PHY interface which simplifies accessing MII registers.
56  *
57  * The only downside to this chipset is that RX descriptors must be
58  * longword aligned.
59  */
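
/*
 * As used by this driver, the three hardware longwords of each descriptor
 * are a link to the next descriptor (sis_next), a command/status word
 * (written as sis_ctl, read back as sis_rxstat/sis_txstat) and a buffer
 * pointer (sis_ptr).  The remaining fields of struct sis_desc (mbuf
 * pointer, DMA map, software next pointer) are driver bookkeeping that
 * the chip never sees.
 */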
60 
61 #include "bpfilter.h"
62 
63 #include <sys/param.h>
64 #include <sys/systm.h>
65 #include <sys/mbuf.h>
66 #include <sys/protosw.h>
67 #include <sys/socket.h>
68 #include <sys/ioctl.h>
69 #include <sys/errno.h>
70 #include <sys/malloc.h>
71 #include <sys/kernel.h>
72 #include <sys/timeout.h>
73 
74 #include <net/if.h>
75 #include <net/if_dl.h>
76 #include <net/if_types.h>
77 
78 #ifdef INET
79 #include <netinet/in.h>
80 #include <netinet/in_systm.h>
81 #include <netinet/in_var.h>
82 #include <netinet/ip.h>
83 #include <netinet/if_ether.h>
84 #endif
85 
86 #include <net/if_media.h>
87 
88 #if NBPFILTER > 0
89 #include <net/bpf.h>
90 #endif
91 
92 #include <sys/device.h>
93 
94 #include <dev/mii/mii.h>
95 #include <dev/mii/miivar.h>
96 
97 #include <dev/pci/pcireg.h>
98 #include <dev/pci/pcivar.h>
99 #include <dev/pci/pcidevs.h>
100 
101 #define SIS_USEIOSPACE
102 
103 #include <dev/pci/if_sisreg.h>
104 
105 int sis_probe(struct device *, void *, void *);
106 void sis_attach(struct device *, struct device *, void *);
107 
108 struct cfattach sis_ca = {
109 	sizeof(struct sis_softc), sis_probe, sis_attach
110 };
111 
112 struct cfdriver sis_cd = {
113 	NULL, "sis", DV_IFNET
114 };
115 
116 int sis_intr(void *);
117 void sis_fill_rx_ring(struct sis_softc *);
118 int sis_newbuf(struct sis_softc *, struct sis_desc *);
119 int sis_encap(struct sis_softc *, struct mbuf *, u_int32_t *);
120 void sis_rxeof(struct sis_softc *);
121 void sis_txeof(struct sis_softc *);
122 void sis_tick(void *);
123 void sis_start(struct ifnet *);
124 int sis_ioctl(struct ifnet *, u_long, caddr_t);
125 void sis_init(void *);
126 void sis_stop(struct sis_softc *);
127 void sis_watchdog(struct ifnet *);
128 int sis_ifmedia_upd(struct ifnet *);
129 void sis_ifmedia_sts(struct ifnet *, struct ifmediareq *);
130 
131 u_int16_t sis_reverse(u_int16_t);
132 void sis_delay(struct sis_softc *);
133 void sis_eeprom_idle(struct sis_softc *);
134 void sis_eeprom_putbyte(struct sis_softc *, int);
135 void sis_eeprom_getword(struct sis_softc *, int, u_int16_t *);
136 #if defined(__amd64__) || defined(__i386__)
137 void sis_read_cmos(struct sis_softc *, struct pci_attach_args *, caddr_t, int, int);
138 #endif
139 void sis_read_mac(struct sis_softc *, struct pci_attach_args *);
140 void sis_read_eeprom(struct sis_softc *, caddr_t, int, int, int);
141 void sis_read96x_mac(struct sis_softc *);
142 
143 void sis_mii_sync(struct sis_softc *);
144 void sis_mii_send(struct sis_softc *, u_int32_t, int);
145 int sis_mii_readreg(struct sis_softc *, struct sis_mii_frame *);
146 int sis_mii_writereg(struct sis_softc *, struct sis_mii_frame *);
147 int sis_miibus_readreg(struct device *, int, int);
148 void sis_miibus_writereg(struct device *, int, int, int);
149 void sis_miibus_statchg(struct device *);
150 
151 u_int32_t sis_mchash(struct sis_softc *, const uint8_t *);
152 void sis_iff(struct sis_softc *);
153 void sis_iff_ns(struct sis_softc *);
154 void sis_iff_sis(struct sis_softc *);
155 void sis_reset(struct sis_softc *);
156 int sis_ring_init(struct sis_softc *);
157 
158 #define SIS_SETBIT(sc, reg, x)				\
159 	CSR_WRITE_4(sc, reg,				\
160 		CSR_READ_4(sc, reg) | (x))
161 
162 #define SIS_CLRBIT(sc, reg, x)				\
163 	CSR_WRITE_4(sc, reg,				\
164 		CSR_READ_4(sc, reg) & ~(x))
165 
166 #define SIO_SET(x)					\
167 	CSR_WRITE_4(sc, SIS_EECTL, CSR_READ_4(sc, SIS_EECTL) | x)
168 
169 #define SIO_CLR(x)					\
170 	CSR_WRITE_4(sc, SIS_EECTL, CSR_READ_4(sc, SIS_EECTL) & ~x)
171 
172 const struct pci_matchid sis_devices[] = {
173 	{ PCI_VENDOR_SIS, PCI_PRODUCT_SIS_900 },
174 	{ PCI_VENDOR_SIS, PCI_PRODUCT_SIS_7016 },
175 	{ PCI_VENDOR_NS, PCI_PRODUCT_NS_DP83815 }
176 };
177 
178 /*
179  * Routine to reverse the bits in a word. Stolen almost
180  * verbatim from /usr/games/fortune.
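 * Each step swaps progressively larger bit groups, so e.g. 0x0001
 * comes back as 0x8000.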
181  */
182 u_int16_t
183 sis_reverse(u_int16_t n)
184 {
185 	n = ((n >>  1) & 0x5555) | ((n <<  1) & 0xaaaa);
186 	n = ((n >>  2) & 0x3333) | ((n <<  2) & 0xcccc);
187 	n = ((n >>  4) & 0x0f0f) | ((n <<  4) & 0xf0f0);
188 	n = ((n >>  8) & 0x00ff) | ((n <<  8) & 0xff00);
189 
190 	return (n);
191 }
192 
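/*
 * Spin for roughly 300 ns by issuing dummy CSR reads; each read across
 * the 33 MHz PCI bus is assumed to take on the order of one bus cycle.
 */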
193 void
194 sis_delay(struct sis_softc *sc)
195 {
196 	int			idx;
197 
198 	for (idx = (300 / 33) + 1; idx > 0; idx--)
199 		CSR_READ_4(sc, SIS_CSR);
200 }
201 
202 void
203 sis_eeprom_idle(struct sis_softc *sc)
204 {
205 	int			i;
206 
207 	SIO_SET(SIS_EECTL_CSEL);
208 	sis_delay(sc);
209 	SIO_SET(SIS_EECTL_CLK);
210 	sis_delay(sc);
211 
212 	for (i = 0; i < 25; i++) {
213 		SIO_CLR(SIS_EECTL_CLK);
214 		sis_delay(sc);
215 		SIO_SET(SIS_EECTL_CLK);
216 		sis_delay(sc);
217 	}
218 
219 	SIO_CLR(SIS_EECTL_CLK);
220 	sis_delay(sc);
221 	SIO_CLR(SIS_EECTL_CSEL);
222 	sis_delay(sc);
223 	CSR_WRITE_4(sc, SIS_EECTL, 0x00000000);
224 }
225 
226 /*
227  * Send a read command and address to the EEPROM.
228  */
229 void
230 sis_eeprom_putbyte(struct sis_softc *sc, int addr)
231 {
232 	int			d, i;
233 
234 	d = addr | SIS_EECMD_READ;
235 
236 	/*
237 	 * Feed in each bit and strobe the clock.
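	 * The combined read-opcode/address word is clocked out MSB first,
	 * 11 bits wide (hence the 0x400 starting mask).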
238 	 */
239 	for (i = 0x400; i; i >>= 1) {
240 		if (d & i)
241 			SIO_SET(SIS_EECTL_DIN);
242 		else
243 			SIO_CLR(SIS_EECTL_DIN);
244 		sis_delay(sc);
245 		SIO_SET(SIS_EECTL_CLK);
246 		sis_delay(sc);
247 		SIO_CLR(SIS_EECTL_CLK);
248 		sis_delay(sc);
249 	}
250 }
251 
252 /*
253  * Read a word of data stored in the EEPROM at address 'addr.'
254  */
255 void
256 sis_eeprom_getword(struct sis_softc *sc, int addr, u_int16_t *dest)
257 {
258 	int			i;
259 	u_int16_t		word = 0;
260 
261 	/* Force EEPROM to idle state. */
262 	sis_eeprom_idle(sc);
263 
264 	/* Enter EEPROM access mode. */
265 	sis_delay(sc);
266 	SIO_CLR(SIS_EECTL_CLK);
267 	sis_delay(sc);
268 	SIO_SET(SIS_EECTL_CSEL);
269 	sis_delay(sc);
270 
271 	/*
272 	 * Send address of word we want to read.
273 	 */
274 	sis_eeprom_putbyte(sc, addr);
275 
276 	/*
277 	 * Start reading bits from EEPROM.
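	 * The 16 data bits come back MSB first (the 0x8000 mask walks down).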
278 	 */
279 	for (i = 0x8000; i; i >>= 1) {
280 		SIO_SET(SIS_EECTL_CLK);
281 		sis_delay(sc);
282 		if (CSR_READ_4(sc, SIS_EECTL) & SIS_EECTL_DOUT)
283 			word |= i;
284 		sis_delay(sc);
285 		SIO_CLR(SIS_EECTL_CLK);
286 		sis_delay(sc);
287 	}
288 
289 	/* Turn off EEPROM access mode. */
290 	sis_eeprom_idle(sc);
291 
292 	*dest = word;
293 }
294 
295 /*
296  * Read a sequence of words from the EEPROM.
297  */
298 void
299 sis_read_eeprom(struct sis_softc *sc, caddr_t dest,
300     int off, int cnt, int swap)
301 {
302 	int			i;
303 	u_int16_t		word = 0, *ptr;
304 
305 	for (i = 0; i < cnt; i++) {
306 		sis_eeprom_getword(sc, off + i, &word);
307 		ptr = (u_int16_t *)(dest + (i * 2));
308 		if (swap)
309 			*ptr = letoh16(word);
310 		else
311 			*ptr = word;
312 	}
313 }
314 
315 #if defined(__amd64__) || defined(__i386__)
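/*
 * Fetch bytes from the APC CMOS RAM through the ISA RTC index/data ports
 * (0x70/0x71).  Bit 0x40 in PCI config register 0x48 appears to gate this
 * access; see the SiS 630E comment in sis_attach().
 */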
316 void
317 sis_read_cmos(struct sis_softc *sc, struct pci_attach_args *pa,
318     caddr_t dest, int off, int cnt)
319 {
320 	u_int32_t reg;
321 	int i;
322 
323 	reg = pci_conf_read(pa->pa_pc, pa->pa_tag, 0x48);
324 	pci_conf_write(pa->pa_pc, pa->pa_tag, 0x48, reg | 0x40);
325 
326 	for (i = 0; i < cnt; i++) {
327 		bus_space_write_1(pa->pa_iot, 0x0, 0x70, i + off);
328 		*(dest + i) = bus_space_read_1(pa->pa_iot, 0x0, 0x71);
329 	}
330 
331 	pci_conf_write(pa->pa_pc, pa->pa_tag, 0x48, reg & ~0x40);
332 }
333 #endif
334 
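/*
 * Read the station address on the 635/630ET/630EA1 revisions (see
 * sis_attach()): pulse SIS_CSR_RELOAD so the chip reloads the station
 * address into its receive filter, then read the three perfect-filter
 * address words back through the filter data port with the filter
 * temporarily disabled.
 */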
335 void
336 sis_read_mac(struct sis_softc *sc, struct pci_attach_args *pa)
337 {
338 	u_int16_t *enaddr = (u_int16_t *) &sc->arpcom.ac_enaddr;
339 
340 	SIS_SETBIT(sc, SIS_CSR, SIS_CSR_RELOAD);
341 	SIS_CLRBIT(sc, SIS_CSR, SIS_CSR_RELOAD);
342 
343 	SIS_CLRBIT(sc, SIS_RXFILT_CTL, SIS_RXFILTCTL_ENABLE);
344 
345 	CSR_WRITE_4(sc, SIS_RXFILT_CTL, SIS_FILTADDR_PAR0);
346 	enaddr[0] = letoh16(CSR_READ_4(sc, SIS_RXFILT_DATA) & 0xffff);
347 	CSR_WRITE_4(sc, SIS_RXFILT_CTL, SIS_FILTADDR_PAR1);
348 	enaddr[1] = letoh16(CSR_READ_4(sc, SIS_RXFILT_DATA) & 0xffff);
349 	CSR_WRITE_4(sc, SIS_RXFILT_CTL, SIS_FILTADDR_PAR2);
350 	enaddr[2] = letoh16(CSR_READ_4(sc, SIS_RXFILT_DATA) & 0xffff);
351 
352 	SIS_SETBIT(sc, SIS_RXFILT_CTL, SIS_RXFILTCTL_ENABLE);
353 }
354 
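/*
 * EEPROM access on the SiS 96x is arbitrated: assert SIS96x_EECTL_REQ,
 * wait for the grant bit, read the station address and then signal
 * completion with SIS96x_EECTL_DONE.
 */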
355 void
356 sis_read96x_mac(struct sis_softc *sc)
357 {
358 	int i;
359 
360 	SIO_SET(SIS96x_EECTL_REQ);
361 
362 	for (i = 0; i < 2000; i++) {
363 		if ((CSR_READ_4(sc, SIS_EECTL) & SIS96x_EECTL_GNT)) {
364 			sis_read_eeprom(sc, (caddr_t)&sc->arpcom.ac_enaddr,
365 			    SIS_EE_NODEADDR, 3, 1);
366 			break;
367 		} else
368 			DELAY(1);
369 	}
370 
371 	SIO_SET(SIS96x_EECTL_DONE);
372 }
373 
374 /*
375  * Sync the PHYs by setting data bit and strobing the clock 32 times.
376  */
377 void
378 sis_mii_sync(struct sis_softc *sc)
379 {
380 	int			i;
381 
382  	SIO_SET(SIS_MII_DIR|SIS_MII_DATA);
383 
384  	for (i = 0; i < 32; i++) {
385  		SIO_SET(SIS_MII_CLK);
386  		DELAY(1);
387  		SIO_CLR(SIS_MII_CLK);
388  		DELAY(1);
389  	}
390 }
391 
392 /*
393  * Clock a series of bits through the MII.
394  */
395 void
396 sis_mii_send(struct sis_softc *sc, u_int32_t bits, int cnt)
397 {
398 	int			i;
399 
400 	SIO_CLR(SIS_MII_CLK);
401 
402 	for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
403 		if (bits & i)
404 			SIO_SET(SIS_MII_DATA);
405 		else
406 			SIO_CLR(SIS_MII_DATA);
407 		DELAY(1);
408 		SIO_CLR(SIS_MII_CLK);
409 		DELAY(1);
410 		SIO_SET(SIS_MII_CLK);
411 	}
412 }
413 
414 /*
415  * Read a PHY register through the MII.
416  */
417 int
418 sis_mii_readreg(struct sis_softc *sc, struct sis_mii_frame *frame)
419 {
420 	int			i, ack, s;
421 
422 	s = splnet();
423 
424 	/*
425 	 * Set up frame for RX.
426 	 */
427 	frame->mii_stdelim = SIS_MII_STARTDELIM;
428 	frame->mii_opcode = SIS_MII_READOP;
429 	frame->mii_turnaround = 0;
430 	frame->mii_data = 0;
431 
432 	/*
433  	 * Turn on data xmit.
434 	 */
435 	SIO_SET(SIS_MII_DIR);
436 
437 	sis_mii_sync(sc);
438 
439 	/*
440 	 * Send command/address info.
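	 * (standard IEEE 802.3 clause 22 frame: 2-bit start delimiter,
	 * 2-bit opcode, 5-bit PHY address, 5-bit register address, then
	 * turnaround and 16 data bits)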
441 	 */
442 	sis_mii_send(sc, frame->mii_stdelim, 2);
443 	sis_mii_send(sc, frame->mii_opcode, 2);
444 	sis_mii_send(sc, frame->mii_phyaddr, 5);
445 	sis_mii_send(sc, frame->mii_regaddr, 5);
446 
447 	/* Idle bit */
448 	SIO_CLR((SIS_MII_CLK|SIS_MII_DATA));
449 	DELAY(1);
450 	SIO_SET(SIS_MII_CLK);
451 	DELAY(1);
452 
453 	/* Turn off xmit. */
454 	SIO_CLR(SIS_MII_DIR);
455 
456 	/* Check for ack */
457 	SIO_CLR(SIS_MII_CLK);
458 	DELAY(1);
459 	ack = CSR_READ_4(sc, SIS_EECTL) & SIS_MII_DATA;
460 	SIO_SET(SIS_MII_CLK);
461 	DELAY(1);
462 
463 	/*
464 	 * Now try reading data bits. If the ack failed, we still
465 	 * need to clock through 16 cycles to keep the PHY(s) in sync.
466 	 */
467 	if (ack) {
468 		for(i = 0; i < 16; i++) {
469 			SIO_CLR(SIS_MII_CLK);
470 			DELAY(1);
471 			SIO_SET(SIS_MII_CLK);
472 			DELAY(1);
473 		}
474 		goto fail;
475 	}
476 
477 	for (i = 0x8000; i; i >>= 1) {
478 		SIO_CLR(SIS_MII_CLK);
479 		DELAY(1);
480 		if (!ack) {
481 			if (CSR_READ_4(sc, SIS_EECTL) & SIS_MII_DATA)
482 				frame->mii_data |= i;
483 			DELAY(1);
484 		}
485 		SIO_SET(SIS_MII_CLK);
486 		DELAY(1);
487 	}
488 
489 fail:
490 
491 	SIO_CLR(SIS_MII_CLK);
492 	DELAY(1);
493 	SIO_SET(SIS_MII_CLK);
494 	DELAY(1);
495 
496 	splx(s);
497 
498 	if (ack)
499 		return (1);
500 	return (0);
501 }
502 
503 /*
504  * Write to a PHY register through the MII.
505  */
506 int
507 sis_mii_writereg(struct sis_softc *sc, struct sis_mii_frame *frame)
508 {
509 	int			s;
510 
511 	s = splnet();
512  	/*
513  	 * Set up frame for TX.
514  	 */
515 
516  	frame->mii_stdelim = SIS_MII_STARTDELIM;
517  	frame->mii_opcode = SIS_MII_WRITEOP;
518  	frame->mii_turnaround = SIS_MII_TURNAROUND;
519 
520  	/*
521   	 * Turn on data output.
522  	 */
523  	SIO_SET(SIS_MII_DIR);
524 
525  	sis_mii_sync(sc);
526 
527  	sis_mii_send(sc, frame->mii_stdelim, 2);
528  	sis_mii_send(sc, frame->mii_opcode, 2);
529  	sis_mii_send(sc, frame->mii_phyaddr, 5);
530  	sis_mii_send(sc, frame->mii_regaddr, 5);
531  	sis_mii_send(sc, frame->mii_turnaround, 2);
532  	sis_mii_send(sc, frame->mii_data, 16);
533 
534  	/* Idle bit. */
535  	SIO_SET(SIS_MII_CLK);
536  	DELAY(1);
537  	SIO_CLR(SIS_MII_CLK);
538  	DELAY(1);
539 
540  	/*
541  	 * Turn off xmit.
542  	 */
543  	SIO_CLR(SIS_MII_DIR);
544 
545  	splx(s);
546 
547  	return (0);
548 }
549 
550 int
551 sis_miibus_readreg(struct device *self, int phy, int reg)
552 {
553 	struct sis_softc	*sc = (struct sis_softc *)self;
554 	struct sis_mii_frame    frame;
555 
556 	if (sc->sis_type == SIS_TYPE_83815) {
557 		if (phy != 0)
558 			return (0);
559 		/*
560 		 * The NatSemi chip can take a while after
561 		 * a reset to come ready, during which the BMSR
562 		 * returns a value of 0. This is *never* supposed
563 		 * to happen: some of the BMSR bits are meant to
564 		 * be hardwired in the on position, and this can
565 		 * confuse the miibus code a bit during the probe
566 		 * and attach phase. So we make an effort to check
567 		 * for this condition and wait for it to clear.
568 		 */
569 		if (!CSR_READ_4(sc, NS_BMSR))
570 			DELAY(1000);
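		/*
		 * The DP83815 exposes its internal PHY registers directly
		 * in chip register space, at 4-byte strides from NS_BMCR.
		 */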
571 		return CSR_READ_4(sc, NS_BMCR + (reg * 4));
572 	}
573 
574 	/*
575 	 * Chipsets < SIS_635 seem not to be able to read/write
576 	 * through mdio. Use the enhanced PHY access register
577 	 * again for them.
578 	 */
579 	if (sc->sis_type == SIS_TYPE_900 &&
580 	    sc->sis_rev < SIS_REV_635) {
581 		int i, val = 0;
582 
583 		if (phy != 0)
584 			return (0);
585 
586 		CSR_WRITE_4(sc, SIS_PHYCTL,
587 		    (phy << 11) | (reg << 6) | SIS_PHYOP_READ);
588 		SIS_SETBIT(sc, SIS_PHYCTL, SIS_PHYCTL_ACCESS);
589 
590 		for (i = 0; i < SIS_TIMEOUT; i++) {
591 			if (!(CSR_READ_4(sc, SIS_PHYCTL) & SIS_PHYCTL_ACCESS))
592 				break;
593 		}
594 
595 		if (i == SIS_TIMEOUT) {
596 			printf("%s: PHY failed to come ready\n",
597 			    sc->sc_dev.dv_xname);
598 			return (0);
599 		}
600 
601 		val = (CSR_READ_4(sc, SIS_PHYCTL) >> 16) & 0xFFFF;
602 
603 		if (val == 0xFFFF)
604 			return (0);
605 
606 		return (val);
607 	} else {
608 		bzero((char *)&frame, sizeof(frame));
609 
610 		frame.mii_phyaddr = phy;
611 		frame.mii_regaddr = reg;
612 		sis_mii_readreg(sc, &frame);
613 
614 		return (frame.mii_data);
615 	}
616 }
617 
618 void
619 sis_miibus_writereg(struct device *self, int phy, int reg, int data)
620 {
621 	struct sis_softc	*sc = (struct sis_softc *)self;
622 	struct sis_mii_frame	frame;
623 
624 	if (sc->sis_type == SIS_TYPE_83815) {
625 		if (phy != 0)
626 			return;
627 		CSR_WRITE_4(sc, NS_BMCR + (reg * 4), data);
628 		return;
629 	}
630 
631 	/*
632 	 * Chipsets < SIS_635 seem not to be able to read/write
633 	 * through mdio. Use the enhanced PHY access register
634 	 * again for them.
635 	 */
636 	if (sc->sis_type == SIS_TYPE_900 &&
637 	    sc->sis_rev < SIS_REV_635) {
638 		int i;
639 
640 		if (phy != 0)
641 			return;
642 
643 		CSR_WRITE_4(sc, SIS_PHYCTL, (data << 16) | (phy << 11) |
644 		    (reg << 6) | SIS_PHYOP_WRITE);
645 		SIS_SETBIT(sc, SIS_PHYCTL, SIS_PHYCTL_ACCESS);
646 
647 		for (i = 0; i < SIS_TIMEOUT; i++) {
648 			if (!(CSR_READ_4(sc, SIS_PHYCTL) & SIS_PHYCTL_ACCESS))
649 				break;
650 		}
651 
652 		if (i == SIS_TIMEOUT)
653 			printf("%s: PHY failed to come ready\n",
654 			    sc->sc_dev.dv_xname);
655 	} else {
656 		bzero((char *)&frame, sizeof(frame));
657 
658 		frame.mii_phyaddr = phy;
659 		frame.mii_regaddr = reg;
660 		frame.mii_data = data;
661 		sis_mii_writereg(sc, &frame);
662 	}
663 }
664 
665 void
666 sis_miibus_statchg(struct device *self)
667 {
668 	struct sis_softc	*sc = (struct sis_softc *)self;
669 
670 	sis_init(sc);
671 }
672 
673 u_int32_t
674 sis_mchash(struct sis_softc *sc, const uint8_t *addr)
675 {
676 	uint32_t		crc;
677 
678 	/* Compute CRC for the address value. */
679 	crc = ether_crc32_be(addr, ETHER_ADDR_LEN);
680 
681 	/*
682 	 * return the filter bit position
683 	 *
684 	 * The NatSemi chip has a 512-bit filter, which is
685 	 * different from the SiS, so we special-case it.
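	 * The shift keeps the top 9, 8 or 7 bits of the CRC, selecting one
	 * of 512, 256 or 128 hash-table bins respectively.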
686 	 */
687 	if (sc->sis_type == SIS_TYPE_83815)
688 		return (crc >> 23);
689 	else if (sc->sis_rev >= SIS_REV_635 ||
690 	    sc->sis_rev == SIS_REV_900B)
691 		return (crc >> 24);
692 	else
693 		return (crc >> 25);
694 }
695 
696 void
697 sis_iff(struct sis_softc *sc)
698 {
699 	if (sc->sis_type == SIS_TYPE_83815)
700 		sis_iff_ns(sc);
701 	else
702 		sis_iff_sis(sc);
703 }
704 
705 void
706 sis_iff_ns(struct sis_softc *sc)
707 {
708 	struct ifnet		*ifp = &sc->arpcom.ac_if;
709 	struct arpcom		*ac = &sc->arpcom;
710 	struct ether_multi	*enm;
711 	struct ether_multistep  step;
712 	u_int32_t		h = 0, i, rxfilt;
713 	int			bit, index;
714 
715 	rxfilt = CSR_READ_4(sc, SIS_RXFILT_CTL);
716 	rxfilt &= ~(SIS_RXFILTCTL_ALLMULTI | SIS_RXFILTCTL_ALLPHYS |
717 	    NS_RXFILTCTL_ARP | SIS_RXFILTCTL_BROAD | NS_RXFILTCTL_MCHASH |
718 	    NS_RXFILTCTL_PERFECT);
719 	ifp->if_flags &= ~IFF_ALLMULTI;
720 
721 	/*
722 	 * Always accept ARP frames.
723 	 * Always accept broadcast frames.
724 	 * Always accept frames destined to our station address.
725 	 */
726 	rxfilt |= NS_RXFILTCTL_ARP | SIS_RXFILTCTL_BROAD |
727 	    NS_RXFILTCTL_PERFECT;
728 
729 	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
730 		ifp->if_flags |= IFF_ALLMULTI;
731 		rxfilt |= SIS_RXFILTCTL_ALLMULTI;
732 		if (ifp->if_flags & IFF_PROMISC)
733 			rxfilt |= SIS_RXFILTCTL_ALLPHYS;
734 	} else {
735 		/*
736 		 * We have to explicitly enable the multicast hash table
737 		 * on the NatSemi chip if we want to use it, which we do.
738 		 */
739 		rxfilt |= NS_RXFILTCTL_MCHASH;
740 
741 		/* first, zot all the existing hash bits */
742 		for (i = 0; i < 32; i++) {
743 			CSR_WRITE_4(sc, SIS_RXFILT_CTL, NS_FILTADDR_FMEM_LO + (i*2));
744 			CSR_WRITE_4(sc, SIS_RXFILT_DATA, 0);
745 		}
746 
747 		ETHER_FIRST_MULTI(step, ac, enm);
748 		while (enm != NULL) {
749 			h = sis_mchash(sc, enm->enm_addrlo);
750 
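			/*
			 * h picks one bit in the 512-bit hash filter memory:
			 * the filter-control write below selects a 16-bit
			 * word of that memory and "bit" the position in it.
			 */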
751 			index = h >> 3;
752 			bit = h & 0x1F;
753 
754 			CSR_WRITE_4(sc, SIS_RXFILT_CTL, NS_FILTADDR_FMEM_LO + index);
755 
756 			if (bit > 0xF)
757 				bit -= 0x10;
758 
759 			SIS_SETBIT(sc, SIS_RXFILT_DATA, (1 << bit));
760 
761 			ETHER_NEXT_MULTI(step, enm);
762 		}
763 	}
764 
765 	CSR_WRITE_4(sc, SIS_RXFILT_CTL, rxfilt);
766 }
767 
768 void
769 sis_iff_sis(struct sis_softc *sc)
770 {
771 	struct ifnet		*ifp = &sc->arpcom.ac_if;
772 	struct arpcom		*ac = &sc->arpcom;
773 	struct ether_multi	*enm;
774 	struct ether_multistep	step;
775 	u_int32_t		h, i, maxmulti, rxfilt;
776 	u_int16_t		hashes[16];
777 
778 	/* hash table size */
779 	if (sc->sis_rev >= SIS_REV_635 ||
780 	    sc->sis_rev == SIS_REV_900B)
781 		maxmulti = 16;
782 	else
783 		maxmulti = 8;
784 
785 	rxfilt = CSR_READ_4(sc, SIS_RXFILT_CTL);
786 	rxfilt &= ~(SIS_RXFILTCTL_ALLMULTI | SIS_RXFILTCTL_ALLPHYS |
787 	    SIS_RXFILTCTL_BROAD);
788 	ifp->if_flags &= ~IFF_ALLMULTI;
789 
790 	/*
791 	 * Always accept broadcast frames.
792 	 */
793 	rxfilt |= SIS_RXFILTCTL_BROAD;
794 
795 	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0 ||
796 	    ac->ac_multicnt > maxmulti) {
797 		ifp->if_flags |= IFF_ALLMULTI;
798 		rxfilt |= SIS_RXFILTCTL_ALLMULTI;
799 		if (ifp->if_flags & IFF_PROMISC)
800 			rxfilt |= SIS_RXFILTCTL_ALLPHYS;
801 
802 		for (i = 0; i < maxmulti; i++)
803 			hashes[i] = ~0;
804 	} else {
805 		for (i = 0; i < maxmulti; i++)
806 			hashes[i] = 0;
807 
808 		ETHER_FIRST_MULTI(step, ac, enm);
809 		while (enm != NULL) {
810 			h = sis_mchash(sc, enm->enm_addrlo);
811 
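			/* The upper bits of h select a 16-bit filter word,
			 * the low four bits the position within it. */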
812 			hashes[h >> 4] |= 1 << (h & 0xf);
813 
814 			ETHER_NEXT_MULTI(step, enm);
815 		}
816 	}
817 
818 	for (i = 0; i < maxmulti; i++) {
819 		CSR_WRITE_4(sc, SIS_RXFILT_CTL, (4 + i) << 16);
820 		CSR_WRITE_4(sc, SIS_RXFILT_DATA, hashes[i]);
821 	}
822 
823 	CSR_WRITE_4(sc, SIS_RXFILT_CTL, rxfilt);
824 }
825 
826 void
827 sis_reset(struct sis_softc *sc)
828 {
829 	int			i;
830 
831 	SIS_SETBIT(sc, SIS_CSR, SIS_CSR_RESET);
832 
833 	for (i = 0; i < SIS_TIMEOUT; i++) {
834 		if (!(CSR_READ_4(sc, SIS_CSR) & SIS_CSR_RESET))
835 			break;
836 	}
837 
838 	if (i == SIS_TIMEOUT)
839 		printf("%s: reset never completed\n", sc->sc_dev.dv_xname);
840 
841 	/* Wait a little while for the chip to get its brains in order. */
842 	DELAY(1000);
843 
844 	/*
845 	 * If this is a NatSemi chip, make sure to clear
846 	 * PME mode.
847 	 */
848 	if (sc->sis_type == SIS_TYPE_83815) {
849 		CSR_WRITE_4(sc, NS_CLKRUN, NS_CLKRUN_PMESTS);
850 		CSR_WRITE_4(sc, NS_CLKRUN, 0);
851 	}
852 }
853 
854 /*
855  * Probe for an SiS chip. Check the PCI vendor and device
856  * IDs against our list and return a device name if we find a match.
857  */
858 int
859 sis_probe(struct device *parent, void *match, void *aux)
860 {
861 	return (pci_matchbyid((struct pci_attach_args *)aux, sis_devices,
862 	    sizeof(sis_devices)/sizeof(sis_devices[0])));
863 }
864 
865 /*
866  * Attach the interface. Allocate softc structures, do ifmedia
867  * setup and ethernet/BPF attach.
868  */
869 void
870 sis_attach(struct device *parent, struct device *self, void *aux)
871 {
872 	int			i;
873 	const char		*intrstr = NULL;
874 	pcireg_t		command;
875 	struct sis_softc	*sc = (struct sis_softc *)self;
876 	struct pci_attach_args	*pa = aux;
877 	pci_chipset_tag_t	pc = pa->pa_pc;
878 	pci_intr_handle_t	ih;
879 	struct ifnet		*ifp;
880 	bus_size_t		size;
881 
882 	sc->sis_stopped = 1;
883 
884 	/*
885 	 * Handle power management nonsense.
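	 * (a capability ID of 0x01 at SIS_PCI_CAPID means the device
	 * implements PCI power management)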
886 	 */
887 	command = pci_conf_read(pc, pa->pa_tag, SIS_PCI_CAPID) & 0x000000FF;
888 	if (command == 0x01) {
889 
890 		command = pci_conf_read(pc, pa->pa_tag, SIS_PCI_PWRMGMTCTRL);
891 		if (command & SIS_PSTATE_MASK) {
892 			u_int32_t		iobase, membase, irq;
893 
894 			/* Save important PCI config data. */
895 			iobase = pci_conf_read(pc, pa->pa_tag, SIS_PCI_LOIO);
896 			membase = pci_conf_read(pc, pa->pa_tag, SIS_PCI_LOMEM);
897 			irq = pci_conf_read(pc, pa->pa_tag, SIS_PCI_INTLINE);
898 
899 			/* Reset the power state. */
900 			printf("%s: chip is in D%d power mode -- setting to D0\n",
901 			    sc->sc_dev.dv_xname, command & SIS_PSTATE_MASK);
902 			command &= 0xFFFFFFFC;
903 			pci_conf_write(pc, pa->pa_tag, SIS_PCI_PWRMGMTCTRL, command);
904 
905 			/* Restore PCI config data. */
906 			pci_conf_write(pc, pa->pa_tag, SIS_PCI_LOIO, iobase);
907 			pci_conf_write(pc, pa->pa_tag, SIS_PCI_LOMEM, membase);
908 			pci_conf_write(pc, pa->pa_tag, SIS_PCI_INTLINE, irq);
909 		}
910 	}
911 
912 	/*
913 	 * Map control/status registers.
914 	 */
915 
916 #ifdef SIS_USEIOSPACE
917 	if (pci_mapreg_map(pa, SIS_PCI_LOIO, PCI_MAPREG_TYPE_IO, 0,
918 	    &sc->sis_btag, &sc->sis_bhandle, NULL, &size, 0)) {
919 		printf(": can't map i/o space\n");
920 		return;
921  	}
922 #else
923 	if (pci_mapreg_map(pa, SIS_PCI_LOMEM, PCI_MAPREG_TYPE_MEM, 0,
924 	    &sc->sis_btag, &sc->sis_bhandle, NULL, &size, 0)) {
925  		printf(": can't map mem space\n");
926 		return;
927  	}
928 #endif
929 
930 	/* Allocate interrupt */
931 	if (pci_intr_map(pa, &ih)) {
932 		printf(": couldn't map interrupt\n");
933 		goto fail_1;
934 	}
935 	intrstr = pci_intr_string(pc, ih);
936 	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, sis_intr, sc,
937 	    self->dv_xname);
938 	if (sc->sc_ih == NULL) {
939 		printf(": couldn't establish interrupt");
940 		if (intrstr != NULL)
941 			printf(" at %s", intrstr);
942 		printf("\n");
943 		goto fail_1;
944 	}
945 
946 	switch (PCI_PRODUCT(pa->pa_id)) {
947 	case PCI_PRODUCT_SIS_900:
948 		sc->sis_type = SIS_TYPE_900;
949 		break;
950 	case PCI_PRODUCT_SIS_7016:
951 		sc->sis_type = SIS_TYPE_7016;
952 		break;
953 	case PCI_PRODUCT_NS_DP83815:
954 		sc->sis_type = SIS_TYPE_83815;
955 		break;
956 	default:
957 		break;
958 	}
959 	sc->sis_rev = PCI_REVISION(pa->pa_class);
960 
961 	/* Reset the adapter. */
962 	sis_reset(sc);
963 
964 	if (sc->sis_type == SIS_TYPE_900 &&
965 	   (sc->sis_rev == SIS_REV_635 ||
966 	    sc->sis_rev == SIS_REV_900B)) {
967 		SIO_SET(SIS_CFG_RND_CNT);
968 		SIO_SET(SIS_CFG_PERR_DETECT);
969 	}
970 
971 	/*
972 	 * Get station address from the EEPROM.
973 	 */
974 	switch (PCI_VENDOR(pa->pa_id)) {
975 	case PCI_VENDOR_NS:
976 		sc->sis_srr = CSR_READ_4(sc, NS_SRR);
977 
978 		if (sc->sis_srr == NS_SRR_15C)
979 			printf(", DP83815C");
980 		else if (sc->sis_srr == NS_SRR_15D)
981 			printf(", DP83815D");
982 		else if (sc->sis_srr == NS_SRR_16A)
983 			printf(", DP83816A");
984 		else
985 			printf(", srr %x", sc->sis_srr);
986 
987 		/*
988 		 * Reading the MAC address out of the EEPROM on
989 		 * the NatSemi chip takes a bit more work than
990 		 * you'd expect. The address spans 4 16-bit words,
991 		 * with the first word containing only a single bit.
992 		 * You have to shift everything over one bit to
993 		 * get it aligned properly. Also, the bits are
994 		 * stored backwards (the LSB is really the MSB,
995 		 * and so on) so you have to reverse them in order
996 		 * to get the MAC address into the form we want.
997 		 * Why? Who the hell knows.
998 		 */
999 		{
1000 			u_int16_t		tmp[4];
1001 
1002 			sis_read_eeprom(sc, (caddr_t)&tmp, NS_EE_NODEADDR,
1003 			    4, 0);
1004 
1005 			/* Shift everything over one bit. */
1006 			tmp[3] = tmp[3] >> 1;
1007 			tmp[3] |= tmp[2] << 15;
1008 			tmp[2] = tmp[2] >> 1;
1009 			tmp[2] |= tmp[1] << 15;
1010 			tmp[1] = tmp[1] >> 1;
1011 			tmp[1] |= tmp[0] << 15;
1012 
1013 			/* Now reverse all the bits. */
1014 			tmp[3] = letoh16(sis_reverse(tmp[3]));
1015 			tmp[2] = letoh16(sis_reverse(tmp[2]));
1016 			tmp[1] = letoh16(sis_reverse(tmp[1]));
1017 
1018 			bcopy((char *)&tmp[1], sc->arpcom.ac_enaddr,
1019 			    ETHER_ADDR_LEN);
1020 		}
1021 		break;
1022 	case PCI_VENDOR_SIS:
1023 	default:
1024 #if defined(__amd64__) || defined(__i386__)
1025 		/*
1026 		 * If this is a SiS 630E chipset with an embedded
1027 		 * SiS 900 controller, we have to read the MAC address
1028 		 * from the APC CMOS RAM. Our method for doing this
1029 		 * is very ugly since we have to reach out and grab
1030 		 * ahold of hardware for which we cannot properly
1031 		 * allocate resources. This code is only compiled on
1032 		 * the i386 architecture since the SiS 630E chipset
1033 		 * is for x86 motherboards only. Note that there are
1034 		 * a lot of magic numbers in this hack. These are
1035 		 * taken from SiS's Linux driver. I'd like to replace
1036 		 * them with proper symbolic definitions, but that
1037 		 * requires some datasheets that I don't have access
1038 		 * to at the moment.
1039 		 */
1040 		if (sc->sis_rev == SIS_REV_630S ||
1041 		    sc->sis_rev == SIS_REV_630E)
1042 			sis_read_cmos(sc, pa, (caddr_t)&sc->arpcom.ac_enaddr,
1043 			    0x9, 6);
1044 		else
1045 #endif
1046 		if (sc->sis_rev == SIS_REV_96x)
1047 			sis_read96x_mac(sc);
1048 		else if (sc->sis_rev == SIS_REV_635 ||
1049 		    sc->sis_rev == SIS_REV_630ET ||
1050 		    sc->sis_rev == SIS_REV_630EA1)
1051 			sis_read_mac(sc, pa);
1052 		else
1053 			sis_read_eeprom(sc, (caddr_t)&sc->arpcom.ac_enaddr,
1054 			    SIS_EE_NODEADDR, 3, 1);
1055 		break;
1056 	}
1057 
1058 	printf(": %s, address %s\n", intrstr,
1059 	    ether_sprintf(sc->arpcom.ac_enaddr));
1060 
1061 	sc->sc_dmat = pa->pa_dmat;
1062 
1063 	if (bus_dmamem_alloc(sc->sc_dmat, sizeof(struct sis_list_data),
1064 	    PAGE_SIZE, 0, sc->sc_listseg, 1, &sc->sc_listnseg,
1065 	    BUS_DMA_NOWAIT) != 0) {
1066 		printf(": can't alloc list mem\n");
1067 		goto fail_2;
1068 	}
1069 	if (bus_dmamem_map(sc->sc_dmat, sc->sc_listseg, sc->sc_listnseg,
1070 	    sizeof(struct sis_list_data), &sc->sc_listkva,
1071 	    BUS_DMA_NOWAIT) != 0) {
1072 		printf(": can't map list mem\n");
1073 		goto fail_2;
1074 	}
1075 	if (bus_dmamap_create(sc->sc_dmat, sizeof(struct sis_list_data), 1,
1076 	    sizeof(struct sis_list_data), 0, BUS_DMA_NOWAIT,
1077 	    &sc->sc_listmap) != 0) {
1078 		printf(": can't alloc list map\n");
1079 		goto fail_2;
1080 	}
1081 	if (bus_dmamap_load(sc->sc_dmat, sc->sc_listmap, sc->sc_listkva,
1082 	    sizeof(struct sis_list_data), NULL, BUS_DMA_NOWAIT) != 0) {
1083 		printf(": can't load list map\n");
1084 		goto fail_2;
1085 	}
1086 	sc->sis_ldata = (struct sis_list_data *)sc->sc_listkva;
1087 	bzero(sc->sis_ldata, sizeof(struct sis_list_data));
1088 
1089 	for (i = 0; i < SIS_RX_LIST_CNT; i++) {
1090 		if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0,
1091 		    BUS_DMA_NOWAIT, &sc->sis_ldata->sis_rx_list[i].map) != 0) {
1092 			printf(": can't create rx map\n");
1093 			goto fail_2;
1094 		}
1095 	}
1096 
1097 	for (i = 0; i < SIS_TX_LIST_CNT; i++) {
1098 		if (bus_dmamap_create(sc->sc_dmat, MCLBYTES,
1099 		    SIS_TX_LIST_CNT - 3, MCLBYTES, 0, BUS_DMA_NOWAIT,
1100 		    &sc->sis_ldata->sis_tx_list[i].map) != 0) {
1101 			printf(": can't create tx map\n");
1102 			goto fail_2;
1103 		}
1104 	}
1105 	if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, SIS_TX_LIST_CNT - 3,
1106 	    MCLBYTES, 0, BUS_DMA_NOWAIT, &sc->sc_tx_sparemap) != 0) {
1107 		printf(": can't create tx spare map\n");
1108 		goto fail_2;
1109 	}
1110 
1111 	timeout_set(&sc->sis_timeout, sis_tick, sc);
1112 
1113 	ifp = &sc->arpcom.ac_if;
1114 	ifp->if_softc = sc;
1115 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1116 	ifp->if_ioctl = sis_ioctl;
1117 	ifp->if_start = sis_start;
1118 	ifp->if_watchdog = sis_watchdog;
1119 	ifp->if_baudrate = 10000000;
1120 	IFQ_SET_MAXLEN(&ifp->if_snd, SIS_TX_LIST_CNT - 1);
1121 	IFQ_SET_READY(&ifp->if_snd);
1122 	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
1123 
1124 	ifp->if_capabilities = IFCAP_VLAN_MTU;
1125 
1126 	m_clsetwms(ifp, MCLBYTES, 2, SIS_RX_LIST_CNT - 1);
1127 
1128 	sc->sc_mii.mii_ifp = ifp;
1129 	sc->sc_mii.mii_readreg = sis_miibus_readreg;
1130 	sc->sc_mii.mii_writereg = sis_miibus_writereg;
1131 	sc->sc_mii.mii_statchg = sis_miibus_statchg;
1132 	ifmedia_init(&sc->sc_mii.mii_media, 0, sis_ifmedia_upd,sis_ifmedia_sts);
1133 	mii_attach(self, &sc->sc_mii, 0xffffffff, MII_PHY_ANY, MII_OFFSET_ANY,
1134 	    0);
1135 	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
1136 		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
1137 		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
1138 	} else
1139 		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
1140 
1141 	/*
1142 	 * Call MI attach routines.
1143 	 */
1144 	if_attach(ifp);
1145 	ether_ifattach(ifp);
1146 	return;
1147 
1148 fail_2:
1149 	pci_intr_disestablish(pc, sc->sc_ih);
1150 
1151 fail_1:
1152 	bus_space_unmap(sc->sis_btag, sc->sis_bhandle, size);
1153 }
1154 
1155 /*
1156  * Initialize the TX and RX descriptors and allocate mbufs for them. Note that
1157  * we arrange the descriptors in a closed ring, so that the last descriptor
1158  * points back to the first.
1159  */
1160 int
1161 sis_ring_init(struct sis_softc *sc)
1162 {
1163 	struct sis_list_data	*ld;
1164 	struct sis_ring_data	*cd;
1165 	int			i, nexti;
1166 
1167 	cd = &sc->sis_cdata;
1168 	ld = sc->sis_ldata;
1169 
1170 	for (i = 0; i < SIS_TX_LIST_CNT; i++) {
1171 		if (i == (SIS_TX_LIST_CNT - 1))
1172 			nexti = 0;
1173 		else
1174 			nexti = i + 1;
1175 		ld->sis_tx_list[i].sis_nextdesc = &ld->sis_tx_list[nexti];
1176 		ld->sis_tx_list[i].sis_next =
1177 		    htole32(sc->sc_listmap->dm_segs[0].ds_addr +
1178 		      offsetof(struct sis_list_data, sis_tx_list[nexti]));
1179 		ld->sis_tx_list[i].sis_mbuf = NULL;
1180 		ld->sis_tx_list[i].sis_ptr = 0;
1181 		ld->sis_tx_list[i].sis_ctl = 0;
1182 	}
1183 
1184 	cd->sis_tx_prod = cd->sis_tx_cons = cd->sis_tx_cnt = 0;
1185 
1186 	for (i = 0; i < SIS_RX_LIST_CNT; i++) {
1187 		if (i == SIS_RX_LIST_CNT - 1)
1188 			nexti = 0;
1189 		else
1190 			nexti = i + 1;
1191 		ld->sis_rx_list[i].sis_nextdesc = &ld->sis_rx_list[nexti];
1192 		ld->sis_rx_list[i].sis_next =
1193 		    htole32(sc->sc_listmap->dm_segs[0].ds_addr +
1194 		      offsetof(struct sis_list_data, sis_rx_list[nexti]));
1195 		ld->sis_rx_list[i].sis_ctl = 0;
1196 	}
1197 
1198 	cd->sis_rx_prod = cd->sis_rx_cons = cd->sis_rx_cnt = 0;
1199 	sis_fill_rx_ring(sc);
1200 
1201 	return (0);
1202 }
1203 
1204 void
1205 sis_fill_rx_ring(struct sis_softc *sc)
1206 {
1207 	struct sis_list_data    *ld;
1208 	struct sis_ring_data    *cd;
1209 
1210 	cd = &sc->sis_cdata;
1211 	ld = sc->sis_ldata;
1212 
1213 	while (cd->sis_rx_cnt < SIS_RX_LIST_CNT) {
1214 		if (sis_newbuf(sc, &ld->sis_rx_list[cd->sis_rx_prod]))
1215 			break;
1216 		SIS_INC(cd->sis_rx_prod, SIS_RX_LIST_CNT);
1217 		cd->sis_rx_cnt++;
1218 	}
1219 }
1220 
1221 /*
1222  * Initialize an RX descriptor and attach an MBUF cluster.
1223  */
1224 int
1225 sis_newbuf(struct sis_softc *sc, struct sis_desc *c)
1226 {
1227 	struct mbuf		*m_new = NULL;
1228 
1229 	if (c == NULL)
1230 		return (EINVAL);
1231 
1232 	m_new = MCLGETI(NULL, M_DONTWAIT, &sc->arpcom.ac_if, MCLBYTES);
1233 	if (!m_new)
1234 		return (ENOBUFS);
1235 
1236 	m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
1237 
1238 	if (bus_dmamap_load_mbuf(sc->sc_dmat, c->map, m_new,
1239 	    BUS_DMA_NOWAIT)) {
1240 		m_free(m_new);
1241 		return (ENOBUFS);
1242 	}
1243 
1244 	bus_dmamap_sync(sc->sc_dmat, c->map, 0, c->map->dm_mapsize,
1245 	    BUS_DMASYNC_PREREAD);
1246 
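	/*
	 * Hand the cluster to the chip: sis_ctl holds the buffer size with
	 * the OWN bit clear; the chip sets OWN once it has stored a frame
	 * (see the SIS_OWNDESC() test in sis_rxeof()).
	 */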
1247 	c->sis_mbuf = m_new;
1248 	c->sis_ptr = htole32(c->map->dm_segs[0].ds_addr);
1249 	c->sis_ctl = htole32(ETHER_MAX_DIX_LEN);
1250 
1251 	bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
1252 	    ((caddr_t)c - sc->sc_listkva), sizeof(struct sis_desc),
1253 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1254 
1255 	return (0);
1256 }
1257 
1258 /*
1259  * A frame has been uploaded: pass the resulting mbuf chain up to
1260  * the higher level protocols.
1261  */
1262 void
1263 sis_rxeof(struct sis_softc *sc)
1264 {
1265 	struct mbuf		*m;
1266 	struct ifnet		*ifp;
1267 	struct sis_desc		*cur_rx;
1268 	int			total_len = 0;
1269 	u_int32_t		rxstat;
1270 
1271 	ifp = &sc->arpcom.ac_if;
1272 
1273 	while(sc->sis_cdata.sis_rx_cnt > 0) {
1274 		cur_rx = &sc->sis_ldata->sis_rx_list[sc->sis_cdata.sis_rx_cons];
1275 		bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
1276 		    ((caddr_t)cur_rx - sc->sc_listkva),
1277 		    sizeof(struct sis_desc),
1278 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1279 		if (!SIS_OWNDESC(cur_rx))
1280 			break;
1281 
1282 		rxstat = letoh32(cur_rx->sis_rxstat);
1283 		m = cur_rx->sis_mbuf;
1284 		cur_rx->sis_mbuf = NULL;
1285 		total_len = SIS_RXBYTES(cur_rx);
1286 		/* from here on the buffer is consumed */
1287 		SIS_INC(sc->sis_cdata.sis_rx_cons, SIS_RX_LIST_CNT);
1288 		sc->sis_cdata.sis_rx_cnt--;
1289 
1290 		/*
1291 		 * If an error occurs, update stats, clear the
1292 		 * status word and leave the mbuf cluster in place:
1293 		 * it should simply get re-used next time this descriptor
1294 	 	 * comes up in the ring. However, don't report long
1295 		 * frames as errors since they could be VLANs.
1296 		 */
1297 		if (rxstat & SIS_RXSTAT_GIANT &&
1298 		    total_len <= (ETHER_MAX_DIX_LEN - ETHER_CRC_LEN))
1299 			rxstat &= ~SIS_RXSTAT_GIANT;
1300 		if (SIS_RXSTAT_ERROR(rxstat)) {
1301 			ifp->if_ierrors++;
1302 			if (rxstat & SIS_RXSTAT_COLL)
1303 				ifp->if_collisions++;
1304 			m_freem(m);
1305 			continue;
1306 		}
1307 
1308 		/* No errors; receive the packet. */
1309 		bus_dmamap_sync(sc->sc_dmat, cur_rx->map, 0,
1310 		    cur_rx->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1311 #ifdef __STRICT_ALIGNMENT
1312 		/*
1313 		 * On architectures with strict alignment requirements the
1314 		 * payload must be copied into a fresh, properly aligned
1315 		 * mbuf chain with m_devget() (which offsets the data by
1316 		 * ETHER_ALIGN) and the original cluster is freed; on other
1317 		 * architectures (the #else case below) the cluster holding
1318 		 * the packet is passed up directly, saving the copy.  Either
1319 		 * way the ring slot is refilled later by sis_fill_rx_ring().
1320 		 */
1321 		{
1322 			struct mbuf *m0;
1323 			m0 = m_devget(mtod(m, char *), total_len, ETHER_ALIGN,
1324 			    ifp, NULL);
1325 			m_freem(m);
1326 			if (m0 == NULL) {
1327 				ifp->if_ierrors++;
1328 				continue;
1329 			}
1330 			m = m0;
1331 		}
1332 #else
1333 		m->m_pkthdr.rcvif = ifp;
1334 		m->m_pkthdr.len = m->m_len = total_len;
1335 #endif
1336 		ifp->if_ipackets++;
1337 
1338 #if NBPFILTER > 0
1339 		if (ifp->if_bpf)
1340 			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_IN);
1341 #endif
1342 
1343 		/* pass it on. */
1344 		ether_input_mbuf(ifp, m);
1345 	}
1346 
1347 	sis_fill_rx_ring(sc);
1348 }
1349 
1350 /*
1351  * A frame was downloaded to the chip. It's safe for us to clean up
1352  * the list buffers.
1353  */
1354 
1355 void
1356 sis_txeof(struct sis_softc *sc)
1357 {
1358 	struct ifnet		*ifp;
1359 	u_int32_t		idx, ctl, txstat;
1360 
1361 	ifp = &sc->arpcom.ac_if;
1362 
1363 	/*
1364 	 * Go through our tx list and free mbufs for those
1365 	 * frames that have been transmitted.
1366 	 */
1367 	for (idx = sc->sis_cdata.sis_tx_cons; sc->sis_cdata.sis_tx_cnt > 0;
1368 	    sc->sis_cdata.sis_tx_cnt--, SIS_INC(idx, SIS_TX_LIST_CNT)) {
1369 		struct sis_desc *cur_tx = &sc->sis_ldata->sis_tx_list[idx];
1370 
1371 		bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
1372 		    ((caddr_t)cur_tx - sc->sc_listkva),
1373 		    sizeof(struct sis_desc),
1374 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1375 
1376 		if (SIS_OWNDESC(cur_tx))
1377 			break;
1378 
1379 		ctl = letoh32(cur_tx->sis_ctl);
1380 
1381 		if (ctl & SIS_CMDSTS_MORE)
1382 			continue;
1383 
1384 		txstat = letoh32(cur_tx->sis_txstat);
1385 
1386 		if (!(ctl & SIS_CMDSTS_PKT_OK)) {
1387 			ifp->if_oerrors++;
1388 			if (txstat & SIS_TXSTAT_EXCESSCOLLS)
1389 				ifp->if_collisions++;
1390 			if (txstat & SIS_TXSTAT_OUTOFWINCOLL)
1391 				ifp->if_collisions++;
1392 		}
1393 
1394 		ifp->if_collisions += (txstat & SIS_TXSTAT_COLLCNT) >> 16;
1395 
1396 		ifp->if_opackets++;
1397 		if (cur_tx->map->dm_nsegs != 0) {
1398 			bus_dmamap_t map = cur_tx->map;
1399 
1400 			bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
1401 			    BUS_DMASYNC_POSTWRITE);
1402 			bus_dmamap_unload(sc->sc_dmat, map);
1403 		}
1404 		if (cur_tx->sis_mbuf != NULL) {
1405 			m_freem(cur_tx->sis_mbuf);
1406 			cur_tx->sis_mbuf = NULL;
1407 		}
1408 	}
1409 
1410 	if (idx != sc->sis_cdata.sis_tx_cons) {
1411 		/* we freed up some buffers */
1412 		sc->sis_cdata.sis_tx_cons = idx;
1413 		ifp->if_flags &= ~IFF_OACTIVE;
1414 	}
1415 
1416 	ifp->if_timer = (sc->sis_cdata.sis_tx_cnt == 0) ? 0 : 5;
1417 }
1418 
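/*
 * Once-a-second timer: poll the PHY via mii_tick() and, when the link
 * first comes up, kick the transmit queue (sis_start() is a no-op while
 * sis_link is zero).
 */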
1419 void
1420 sis_tick(void *xsc)
1421 {
1422 	struct sis_softc	*sc = (struct sis_softc *)xsc;
1423 	struct mii_data		*mii;
1424 	struct ifnet		*ifp;
1425 	int			s;
1426 
1427 	s = splnet();
1428 
1429 	ifp = &sc->arpcom.ac_if;
1430 
1431 	mii = &sc->sc_mii;
1432 	mii_tick(mii);
1433 
1434 	if (!sc->sis_link && mii->mii_media_status & IFM_ACTIVE &&
1435 	    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
1436 		sc->sis_link++;
1437 		if (!IFQ_IS_EMPTY(&ifp->if_snd))
1438 			sis_start(ifp);
1439 	}
1440 	timeout_add_sec(&sc->sis_timeout, 1);
1441 
1442 	splx(s);
1443 }
1444 
1445 int
1446 sis_intr(void *arg)
1447 {
1448 	struct sis_softc	*sc;
1449 	struct ifnet		*ifp;
1450 	u_int32_t		status;
1451 	int			claimed = 0;
1452 
1453 	sc = arg;
1454 	ifp = &sc->arpcom.ac_if;
1455 
1456 	if (sc->sis_stopped)	/* Most likely shared interrupt */
1457 		return (claimed);
1458 
1459 	/* Disable interrupts. */
1460 	CSR_WRITE_4(sc, SIS_IER, 0);
1461 
1462 	for (;;) {
1463 		/* Reading the ISR register clears all interrupts. */
1464 		status = CSR_READ_4(sc, SIS_ISR);
1465 
1466 		if ((status & SIS_INTRS) == 0)
1467 			break;
1468 
1469 		claimed = 1;
1470 
1471 		if (status &
1472 		    (SIS_ISR_TX_DESC_OK | SIS_ISR_TX_ERR |
1473 		     SIS_ISR_TX_OK | SIS_ISR_TX_IDLE))
1474 			sis_txeof(sc);
1475 
1476 		if (status &
1477 		    (SIS_ISR_RX_DESC_OK | SIS_ISR_RX_OK |
1478 		     SIS_ISR_RX_ERR | SIS_ISR_RX_IDLE))
1479 			sis_rxeof(sc);
1480 
1481 		if (status & (SIS_ISR_RX_IDLE)) {
1482 			/* consume what's there so that sis_rx_cons points
1483 			 * to the first HW owned descriptor. */
1484 			sis_rxeof(sc);
1485 			/* reprogram the RX listptr */
1486 			CSR_WRITE_4(sc, SIS_RX_LISTPTR,
1487 			    sc->sc_listmap->dm_segs[0].ds_addr +
1488 			    offsetof(struct sis_list_data,
1489 			    sis_rx_list[sc->sis_cdata.sis_rx_cons]));
1490 		}
1491 
1492 		if (status & SIS_ISR_SYSERR) {
1493 			sis_reset(sc);
1494 			sis_init(sc);
1495 		}
1496 	}
1497 
1498 	/* Re-enable interrupts. */
1499 	CSR_WRITE_4(sc, SIS_IER, 1);
1500 
1501 	/*
1502 	 * XXX: Re-enable RX engine every time otherwise it occasionally
1503 	 * stops under unknown circumstances.
1504 	 */
1505 	SIS_SETBIT(sc, SIS_CSR, SIS_CSR_RX_ENABLE);
1506 
1507 	if (!IFQ_IS_EMPTY(&ifp->if_snd))
1508 		sis_start(ifp);
1509 
1510 	return (claimed);
1511 }
1512 
1513 /*
1514  * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
1515  * pointers to the fragment pointers.
1516  */
1517 int
1518 sis_encap(struct sis_softc *sc, struct mbuf *m_head, u_int32_t *txidx)
1519 {
1520 	struct sis_desc		*f = NULL;
1521 	int			frag, cur, i;
1522 	bus_dmamap_t		map;
1523 
1524 	map = sc->sc_tx_sparemap;
1525 	if (bus_dmamap_load_mbuf(sc->sc_dmat, map,
1526 	    m_head, BUS_DMA_NOWAIT) != 0)
1527 		return (ENOBUFS);
1528 
1529 	/*
1530  	 * Start packing the mbufs in this chain into
1531 	 * the fragment pointers. Stop when we run out
1532  	 * of fragments or hit the end of the mbuf chain.
1533 	 */
1534 	cur = frag = *txidx;
1535 
1536 	for (i = 0; i < map->dm_nsegs; i++) {
1537 		if ((SIS_TX_LIST_CNT - (sc->sis_cdata.sis_tx_cnt + i)) < 2)
1538 			return(ENOBUFS);
1539 		f = &sc->sis_ldata->sis_tx_list[frag];
1540 		f->sis_ctl = htole32(SIS_CMDSTS_MORE | map->dm_segs[i].ds_len);
1541 		f->sis_ptr = htole32(map->dm_segs[i].ds_addr);
1542 		if (i != 0)
1543 			f->sis_ctl |= htole32(SIS_CMDSTS_OWN);
1544 		cur = frag;
1545 		SIS_INC(frag, SIS_TX_LIST_CNT);
1546 	}
1547 
1548 	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
1549 	    BUS_DMASYNC_PREWRITE);
1550 
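	/*
	 * Clear MORE on the last fragment to terminate the chain, then set
	 * OWN on the first descriptor last, so the chip does not start on a
	 * half-built chain.
	 */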
1551 	sc->sis_ldata->sis_tx_list[cur].sis_mbuf = m_head;
1552 	sc->sis_ldata->sis_tx_list[cur].sis_ctl &= ~htole32(SIS_CMDSTS_MORE);
1553 	sc->sis_ldata->sis_tx_list[*txidx].sis_ctl |= htole32(SIS_CMDSTS_OWN);
1554 	sc->sis_cdata.sis_tx_cnt += i;
1555 	*txidx = frag;
1556 
1557 	bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
1558 	    offsetof(struct sis_list_data, sis_tx_list[0]),
1559 	    sizeof(struct sis_desc) * SIS_TX_LIST_CNT,
1560 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1561 
1562 	return (0);
1563 }
1564 
1565 /*
1566  * Main transmit routine. To avoid having to do mbuf copies, we put pointers
1567  * to the mbuf data regions directly in the transmit lists. We also save a
1568  * copy of the pointers since the transmit list fragment pointers are
1569  * physical addresses.
1570  */
1571 
1572 void
1573 sis_start(struct ifnet *ifp)
1574 {
1575 	struct sis_softc	*sc;
1576 	struct mbuf		*m_head = NULL;
1577 	u_int32_t		idx, queued = 0;
1578 
1579 	sc = ifp->if_softc;
1580 
1581 	if (!sc->sis_link)
1582 		return;
1583 
1584 	idx = sc->sis_cdata.sis_tx_prod;
1585 
1586 	if (ifp->if_flags & IFF_OACTIVE)
1587 		return;
1588 
1589 	while(sc->sis_ldata->sis_tx_list[idx].sis_mbuf == NULL) {
1590 		IFQ_POLL(&ifp->if_snd, m_head);
1591 		if (m_head == NULL)
1592 			break;
1593 
1594 		if (sis_encap(sc, m_head, &idx)) {
1595 			ifp->if_flags |= IFF_OACTIVE;
1596 			break;
1597 		}
1598 
1599 		/* now we are committed to transmit the packet */
1600 		IFQ_DEQUEUE(&ifp->if_snd, m_head);
1601 
1602 		queued++;
1603 
1604 		/*
1605 		 * If there's a BPF listener, bounce a copy of this frame
1606 		 * to him.
1607 		 */
1608 #if NBPFILTER > 0
1609 		if (ifp->if_bpf)
1610 			bpf_mtap(ifp->if_bpf, m_head, BPF_DIRECTION_OUT);
1611 #endif
1612 	}
1613 
1614 	if (queued) {
1615 		/* Transmit */
1616 		sc->sis_cdata.sis_tx_prod = idx;
1617 		SIS_SETBIT(sc, SIS_CSR, SIS_CSR_TX_ENABLE);
1618 
1619 		/*
1620 		 * Set a timeout in case the chip goes out to lunch.
1621 		 */
1622 		ifp->if_timer = 5;
1623 	}
1624 }
1625 
1626 void
1627 sis_init(void *xsc)
1628 {
1629 	struct sis_softc	*sc = (struct sis_softc *)xsc;
1630 	struct ifnet		*ifp = &sc->arpcom.ac_if;
1631 	struct mii_data		*mii;
1632 	int			s;
1633 
1634 	s = splnet();
1635 
1636 	/*
1637 	 * Cancel pending I/O and free all RX/TX buffers.
1638 	 */
1639 	sis_stop(sc);
1640 
1641 #if NS_IHR_DELAY > 0
1642 	/* Configure interrupt holdoff register. */
1643 	if (sc->sis_type == SIS_TYPE_83815 && sc->sis_srr == NS_SRR_16A)
1644 		CSR_WRITE_4(sc, NS_IHR, NS_IHR_VALUE);
1645 #endif
1646 
1647 	mii = &sc->sc_mii;
1648 
1649 	/* Set MAC address */
1650 	if (sc->sis_type == SIS_TYPE_83815) {
1651 		CSR_WRITE_4(sc, SIS_RXFILT_CTL, NS_FILTADDR_PAR0);
1652 		CSR_WRITE_4(sc, SIS_RXFILT_DATA,
1653 		    htole16(((u_int16_t *)sc->arpcom.ac_enaddr)[0]));
1654 		CSR_WRITE_4(sc, SIS_RXFILT_CTL, NS_FILTADDR_PAR1);
1655 		CSR_WRITE_4(sc, SIS_RXFILT_DATA,
1656 		    htole16(((u_int16_t *)sc->arpcom.ac_enaddr)[1]));
1657 		CSR_WRITE_4(sc, SIS_RXFILT_CTL, NS_FILTADDR_PAR2);
1658 		CSR_WRITE_4(sc, SIS_RXFILT_DATA,
1659 		    htole16(((u_int16_t *)sc->arpcom.ac_enaddr)[2]));
1660 	} else {
1661 		CSR_WRITE_4(sc, SIS_RXFILT_CTL, SIS_FILTADDR_PAR0);
1662 		CSR_WRITE_4(sc, SIS_RXFILT_DATA,
1663 		    htole16(((u_int16_t *)sc->arpcom.ac_enaddr)[0]));
1664 		CSR_WRITE_4(sc, SIS_RXFILT_CTL, SIS_FILTADDR_PAR1);
1665 		CSR_WRITE_4(sc, SIS_RXFILT_DATA,
1666 		    htole16(((u_int16_t *)sc->arpcom.ac_enaddr)[1]));
1667 		CSR_WRITE_4(sc, SIS_RXFILT_CTL, SIS_FILTADDR_PAR2);
1668 		CSR_WRITE_4(sc, SIS_RXFILT_DATA,
1669 		    htole16(((u_int16_t *)sc->arpcom.ac_enaddr)[2]));
1670 	}
1671 
1672 	/* Init circular TX/RX lists. */
1673 	if (sis_ring_init(sc) != 0) {
1674 		printf("%s: initialization failed: no memory for rx buffers\n",
1675 		    sc->sc_dev.dv_xname);
1676 		sis_stop(sc);
1677 		splx(s);
1678 		return;
1679 	}
1680 
1681 	/*
1682 	 * Short Cable Receive Errors (MP21.E).
1683 	 * Page 78 of the DP83815 data sheet (September 2002 version) also
1684 	 * recommends the following register settings "for optimum
1685 	 * performance" for rev 15C.  The driver from NS also sets
1686 	 * the PHY_CR register for later versions.
1687 	 */
1688 	if (sc->sis_type == SIS_TYPE_83815 && sc->sis_srr <= NS_SRR_15D) {
1689 		CSR_WRITE_4(sc, NS_PHY_PAGE, 0x0001);
1690 		CSR_WRITE_4(sc, NS_PHY_CR, 0x189C);
1691 		if (sc->sis_srr == NS_SRR_15C) {
1692 			/* set val for c2 */
1693 			CSR_WRITE_4(sc, NS_PHY_TDATA, 0x0000);
1694 			/* load/kill c2 */
1695 			CSR_WRITE_4(sc, NS_PHY_DSPCFG, 0x5040);
1696 			/* raise SD off, from 4 to c */
1697 			CSR_WRITE_4(sc, NS_PHY_SDCFG, 0x008C);
1698 		}
1699 		CSR_WRITE_4(sc, NS_PHY_PAGE, 0);
1700 	}
1701 
1702 	/*
1703 	 * Program promiscuous mode and multicast filters.
1704 	 */
1705 	sis_iff(sc);
1706 
1707 	/* Turn the receive filter on */
1708 	SIS_SETBIT(sc, SIS_RXFILT_CTL, SIS_RXFILTCTL_ENABLE);
1709 
1710 	/*
1711 	 * Load the address of the RX and TX lists.
1712 	 */
1713 	CSR_WRITE_4(sc, SIS_RX_LISTPTR, sc->sc_listmap->dm_segs[0].ds_addr +
1714 	    offsetof(struct sis_list_data, sis_rx_list[0]));
1715 	CSR_WRITE_4(sc, SIS_TX_LISTPTR, sc->sc_listmap->dm_segs[0].ds_addr +
1716 	    offsetof(struct sis_list_data, sis_tx_list[0]));
1717 
1718 	/* SIS_CFG_EDB_MASTER_EN indicates the EDB bus is used instead of
1719 	 * the PCI bus. When this bit is set, the Max DMA Burst Size
1720 	 * for TX/RX DMA should be no larger than 16 double words.
1721 	 */
1722 	if (CSR_READ_4(sc, SIS_CFG) & SIS_CFG_EDB_MASTER_EN)
1723 		CSR_WRITE_4(sc, SIS_RX_CFG, SIS_RXCFG64);
1724 	else
1725 		CSR_WRITE_4(sc, SIS_RX_CFG, SIS_RXCFG256);
1726 
1727 	/* Accept Long Packets for VLAN support */
1728 	SIS_SETBIT(sc, SIS_RX_CFG, SIS_RXCFG_RX_JABBER);
1729 
1730 	/* Set TX configuration */
1731 	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_10_T)
1732 		CSR_WRITE_4(sc, SIS_TX_CFG, SIS_TXCFG_10);
1733 	else
1734 		CSR_WRITE_4(sc, SIS_TX_CFG, SIS_TXCFG_100);
1735 
1736 	/* Set full/half duplex mode. */
1737 	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
1738 		SIS_SETBIT(sc, SIS_TX_CFG,
1739 		    (SIS_TXCFG_IGN_HBEAT|SIS_TXCFG_IGN_CARR));
1740 		SIS_SETBIT(sc, SIS_RX_CFG, SIS_RXCFG_RX_TXPKTS);
1741 	} else {
1742 		SIS_CLRBIT(sc, SIS_TX_CFG,
1743 		    (SIS_TXCFG_IGN_HBEAT|SIS_TXCFG_IGN_CARR));
1744 		SIS_CLRBIT(sc, SIS_RX_CFG, SIS_RXCFG_RX_TXPKTS);
1745 	}
1746 
1747 	if (sc->sis_type == SIS_TYPE_83815 && sc->sis_srr >= NS_SRR_16A) {
1748 		/*
1749 		 * MPII03.D: Half Duplex Excessive Collisions.
1750 		 * Also page 49 in 83816 manual
1751 		 */
1752 		SIS_SETBIT(sc, SIS_TX_CFG, SIS_TXCFG_MPII03D);
1753  	}
1754 
1755 	if (sc->sis_type == SIS_TYPE_83815 && sc->sis_srr < NS_SRR_16A &&
1756 	     IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX) {
1757 		uint32_t reg;
1758 
1759 		/*
1760 		 * Short Cable Receive Errors (MP21.E)
1761 		 */
1762 		CSR_WRITE_4(sc, NS_PHY_PAGE, 0x0001);
1763 		reg = CSR_READ_4(sc, NS_PHY_DSPCFG) & 0xfff;
1764 		CSR_WRITE_4(sc, NS_PHY_DSPCFG, reg | 0x1000);
1765 		DELAY(100000);
1766 		reg = CSR_READ_4(sc, NS_PHY_TDATA) & 0xff;
1767 		if ((reg & 0x0080) == 0 || (reg > 0xd8 && reg <= 0xff)) {
1768 #ifdef DEBUG
1769 			printf("%s: Applying short cable fix (reg=%x)\n",
1770 			    sc->sc_dev.dv_xname, reg);
1771 #endif
1772 			CSR_WRITE_4(sc, NS_PHY_TDATA, 0x00e8);
1773 			reg = CSR_READ_4(sc, NS_PHY_DSPCFG);
1774 			SIS_SETBIT(sc, NS_PHY_DSPCFG, reg | 0x20);
1775 		}
1776 		CSR_WRITE_4(sc, NS_PHY_PAGE, 0);
1777 	}
1778 
1779 	/*
1780 	 * Enable interrupts.
1781 	 */
1782 	CSR_WRITE_4(sc, SIS_IMR, SIS_INTRS);
1783 	CSR_WRITE_4(sc, SIS_IER, 1);
1784 
1785 	/* Enable receiver and transmitter. */
1786 	SIS_CLRBIT(sc, SIS_CSR, SIS_CSR_TX_DISABLE|SIS_CSR_RX_DISABLE);
1787 	SIS_SETBIT(sc, SIS_CSR, SIS_CSR_RX_ENABLE);
1788 
1789 #ifdef notdef
1790 	mii_mediachg(mii);
1791 #endif
1792 
1793 	sc->sis_stopped = 0;
1794 	ifp->if_flags |= IFF_RUNNING;
1795 	ifp->if_flags &= ~IFF_OACTIVE;
1796 
1797 	splx(s);
1798 
1799 	timeout_add_sec(&sc->sis_timeout, 1);
1800 }
1801 
1802 /*
1803  * Set media options.
1804  */
1805 int
1806 sis_ifmedia_upd(struct ifnet *ifp)
1807 {
1808 	struct sis_softc	*sc;
1809 	struct mii_data		*mii;
1810 
1811 	sc = ifp->if_softc;
1812 
1813 	mii = &sc->sc_mii;
1814 	sc->sis_link = 0;
1815 	if (mii->mii_instance) {
1816 		struct mii_softc	*miisc;
1817 		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
1818 			mii_phy_reset(miisc);
1819 	}
1820 	mii_mediachg(mii);
1821 
1822 	return (0);
1823 }
1824 
1825 /*
1826  * Report current media status.
1827  */
1828 void
1829 sis_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
1830 {
1831 	struct sis_softc	*sc;
1832 	struct mii_data		*mii;
1833 
1834 	sc = ifp->if_softc;
1835 
1836 	mii = &sc->sc_mii;
1837 	mii_pollstat(mii);
1838 	ifmr->ifm_active = mii->mii_media_active;
1839 	ifmr->ifm_status = mii->mii_media_status;
1840 }
1841 
1842 int
1843 sis_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
1844 {
1845 	struct sis_softc	*sc = ifp->if_softc;
1846 	struct ifaddr		*ifa = (struct ifaddr *) data;
1847 	struct ifreq		*ifr = (struct ifreq *) data;
1848 	struct mii_data		*mii;
1849 	int			s, error = 0;
1850 
1851 	s = splnet();
1852 
1853 	switch(command) {
1854 	case SIOCSIFADDR:
1855 		ifp->if_flags |= IFF_UP;
1856 		if (!(ifp->if_flags & IFF_RUNNING))
1857 			sis_init(sc);
1858 #ifdef INET
1859 		if (ifa->ifa_addr->sa_family == AF_INET)
1860 			arp_ifinit(&sc->arpcom, ifa);
1861 #endif
1862 		break;
1863 
1864 	case SIOCSIFFLAGS:
1865 		if (ifp->if_flags & IFF_UP) {
1866 			if (ifp->if_flags & IFF_RUNNING)
1867 				error = ENETRESET;
1868 			else
1869 				sis_init(sc);
1870 		} else {
1871 			if (ifp->if_flags & IFF_RUNNING)
1872 				sis_stop(sc);
1873 		}
1874 		break;
1875 
1876 	case SIOCGIFMEDIA:
1877 	case SIOCSIFMEDIA:
1878 		mii = &sc->sc_mii;
1879 		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
1880 		break;
1881 
1882 	default:
1883 		error = ether_ioctl(ifp, &sc->arpcom, command, data);
1884 	}
1885 
1886 	if (error == ENETRESET) {
1887 		if (ifp->if_flags & IFF_RUNNING)
1888 			sis_iff(sc);
1889 		error = 0;
1890 	}
1891 
1892 	splx(s);
1893 	return(error);
1894 }
1895 
1896 void
1897 sis_watchdog(struct ifnet *ifp)
1898 {
1899 	struct sis_softc	*sc;
1900 	int			s;
1901 
1902 	sc = ifp->if_softc;
1903 
1904 	if (sc->sis_stopped)
1905 		return;
1906 
1907 	ifp->if_oerrors++;
1908 	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);
1909 
1910 	s = splnet();
1911 	sis_stop(sc);
1912 	sis_reset(sc);
1913 	sis_init(sc);
1914 
1915 	if (!IFQ_IS_EMPTY(&ifp->if_snd))
1916 		sis_start(ifp);
1917 
1918 	splx(s);
1919 }
1920 
1921 /*
1922  * Stop the adapter and free any mbufs allocated to the
1923  * RX and TX lists.
1924  */
1925 void
1926 sis_stop(struct sis_softc *sc)
1927 {
1928 	int			i;
1929 	struct ifnet		*ifp;
1930 
1931 	if (sc->sis_stopped)
1932 		return;
1933 
1934 	ifp = &sc->arpcom.ac_if;
1935 	ifp->if_timer = 0;
1936 
1937 	timeout_del(&sc->sis_timeout);
1938 
1939 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1940 	sc->sis_stopped = 1;
1941 
1942 	CSR_WRITE_4(sc, SIS_IER, 0);
1943 	CSR_WRITE_4(sc, SIS_IMR, 0);
1944 	CSR_READ_4(sc, SIS_ISR); /* clear any interrupts already pending */
1945 	SIS_SETBIT(sc, SIS_CSR, SIS_CSR_TX_DISABLE|SIS_CSR_RX_DISABLE);
1946 	DELAY(1000);
1947 	CSR_WRITE_4(sc, SIS_TX_LISTPTR, 0);
1948 	CSR_WRITE_4(sc, SIS_RX_LISTPTR, 0);
1949 
1950 	sc->sis_link = 0;
1951 
1952 	/*
1953 	 * Free data in the RX lists.
1954 	 */
1955 	for (i = 0; i < SIS_RX_LIST_CNT; i++) {
1956 		if (sc->sis_ldata->sis_rx_list[i].map->dm_nsegs != 0) {
1957 			bus_dmamap_t map = sc->sis_ldata->sis_rx_list[i].map;
1958 
1959 			bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
1960 			    BUS_DMASYNC_POSTREAD);
1961 			bus_dmamap_unload(sc->sc_dmat, map);
1962 		}
1963 		if (sc->sis_ldata->sis_rx_list[i].sis_mbuf != NULL) {
1964 			m_freem(sc->sis_ldata->sis_rx_list[i].sis_mbuf);
1965 			sc->sis_ldata->sis_rx_list[i].sis_mbuf = NULL;
1966 		}
1967 		bzero((char *)&sc->sis_ldata->sis_rx_list[i],
1968 		    sizeof(struct sis_desc) - sizeof(bus_dmamap_t));
1969 	}
1970 
1971 	/*
1972 	 * Free the TX list buffers.
1973 	 */
1974 	for (i = 0; i < SIS_TX_LIST_CNT; i++) {
1975 		if (sc->sis_ldata->sis_tx_list[i].map->dm_nsegs != 0) {
1976 			bus_dmamap_t map = sc->sis_ldata->sis_tx_list[i].map;
1977 
1978 			bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
1979 			    BUS_DMASYNC_POSTWRITE);
1980 			bus_dmamap_unload(sc->sc_dmat, map);
1981 		}
1982 		if (sc->sis_ldata->sis_tx_list[i].sis_mbuf != NULL) {
1983 			m_freem(sc->sis_ldata->sis_tx_list[i].sis_mbuf);
1984 			sc->sis_ldata->sis_tx_list[i].sis_mbuf = NULL;
1985 		}
1986 		bzero((char *)&sc->sis_ldata->sis_tx_list[i],
1987 		    sizeof(struct sis_desc) - sizeof(bus_dmamap_t));
1988 	}
1989 }
1990