xref: /openbsd/sys/dev/pci/if_sis.c (revision 8932bfb7)
1 /*	$OpenBSD: if_sis.c,v 1.105 2011/06/22 16:44:27 tedu Exp $ */
2 /*
3  * Copyright (c) 1997, 1998, 1999
4  *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  * 3. All advertising materials mentioning features or use of this software
15  *    must display the following acknowledgement:
16  *	This product includes software developed by Bill Paul.
17  * 4. Neither the name of the author nor the names of any co-contributors
18  *    may be used to endorse or promote products derived from this software
19  *    without specific prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
22  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
25  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
31  * THE POSSIBILITY OF SUCH DAMAGE.
32  *
33  * $FreeBSD: src/sys/pci/if_sis.c,v 1.30 2001/02/06 10:11:47 phk Exp $
34  */
35 
36 /*
37  * SiS 900/SiS 7016 fast ethernet PCI NIC driver. Datasheets are
38  * available from http://www.sis.com.tw.
39  *
40  * This driver also supports the NatSemi DP83815. Datasheets are
41  * available from http://www.national.com.
42  *
43  * Written by Bill Paul <wpaul@ee.columbia.edu>
44  * Electrical Engineering Department
45  * Columbia University, New York City
46  */
47 
48 /*
49  * The SiS 900 is a fairly simple chip. It uses bus master DMA with
50  * simple TX and RX descriptors of 3 longwords in size. The receiver
51  * has a single perfect filter entry for the station address and a
52  * 128-bit multicast hash table. The SiS 900 has a built-in MII-based
53  * transceiver while the 7016 requires an external transceiver chip.
54  * Both chips offer the standard bit-bang MII interface as well as
55  * an enchanced PHY interface which simplifies accessing MII registers.
56  *
57  * The only downside to this chipset is that RX descriptors must be
58  * longword aligned.
59  */
60 
61 #include "bpfilter.h"
62 
63 #include <sys/param.h>
64 #include <sys/systm.h>
65 #include <sys/mbuf.h>
66 #include <sys/protosw.h>
67 #include <sys/socket.h>
68 #include <sys/ioctl.h>
69 #include <sys/errno.h>
70 #include <sys/malloc.h>
71 #include <sys/kernel.h>
72 #include <sys/timeout.h>
73 
74 #include <net/if.h>
75 #include <net/if_dl.h>
76 #include <net/if_types.h>
77 
78 #ifdef INET
79 #include <netinet/in.h>
80 #include <netinet/in_systm.h>
81 #include <netinet/in_var.h>
82 #include <netinet/ip.h>
83 #include <netinet/if_ether.h>
84 #endif
85 
86 #include <net/if_media.h>
87 
88 #if NBPFILTER > 0
89 #include <net/bpf.h>
90 #endif
91 
92 #include <sys/device.h>
93 
94 #include <dev/mii/mii.h>
95 #include <dev/mii/miivar.h>
96 
97 #include <dev/pci/pcireg.h>
98 #include <dev/pci/pcivar.h>
99 #include <dev/pci/pcidevs.h>
100 
101 #define SIS_USEIOSPACE
102 
103 #include <dev/pci/if_sisreg.h>
104 
105 int sis_probe(struct device *, void *, void *);
106 void sis_attach(struct device *, struct device *, void *);
107 int sis_activate(struct device *, int);
108 
/* Autoconf attachment: softc size plus match/attach hooks (no detach). */
struct cfattach sis_ca = {
	sizeof(struct sis_softc), sis_probe, sis_attach, NULL,
	sis_activate
};

/* Driver class: "sis" network interface devices. */
struct cfdriver sis_cd = {
	NULL, "sis", DV_IFNET
};
117 
118 int sis_intr(void *);
119 void sis_fill_rx_ring(struct sis_softc *);
120 int sis_newbuf(struct sis_softc *, struct sis_desc *);
121 int sis_encap(struct sis_softc *, struct mbuf *, u_int32_t *);
122 void sis_rxeof(struct sis_softc *);
123 void sis_txeof(struct sis_softc *);
124 void sis_tick(void *);
125 void sis_start(struct ifnet *);
126 int sis_ioctl(struct ifnet *, u_long, caddr_t);
127 void sis_init(void *);
128 void sis_stop(struct sis_softc *);
129 void sis_watchdog(struct ifnet *);
130 int sis_ifmedia_upd(struct ifnet *);
131 void sis_ifmedia_sts(struct ifnet *, struct ifmediareq *);
132 
133 u_int16_t sis_reverse(u_int16_t);
134 void sis_delay(struct sis_softc *);
135 void sis_eeprom_idle(struct sis_softc *);
136 void sis_eeprom_putbyte(struct sis_softc *, int);
137 void sis_eeprom_getword(struct sis_softc *, int, u_int16_t *);
138 #if defined(__amd64__) || defined(__i386__)
139 void sis_read_cmos(struct sis_softc *, struct pci_attach_args *, caddr_t, int, int);
140 #endif
141 void sis_read_mac(struct sis_softc *, struct pci_attach_args *);
142 void sis_read_eeprom(struct sis_softc *, caddr_t, int, int, int);
143 void sis_read96x_mac(struct sis_softc *);
144 
145 void sis_mii_sync(struct sis_softc *);
146 void sis_mii_send(struct sis_softc *, u_int32_t, int);
147 int sis_mii_readreg(struct sis_softc *, struct sis_mii_frame *);
148 int sis_mii_writereg(struct sis_softc *, struct sis_mii_frame *);
149 int sis_miibus_readreg(struct device *, int, int);
150 void sis_miibus_writereg(struct device *, int, int, int);
151 void sis_miibus_statchg(struct device *);
152 
153 u_int32_t sis_mchash(struct sis_softc *, const uint8_t *);
154 void sis_iff(struct sis_softc *);
155 void sis_iff_ns(struct sis_softc *);
156 void sis_iff_sis(struct sis_softc *);
157 void sis_reset(struct sis_softc *);
158 int sis_ring_init(struct sis_softc *);
159 
/* Read-modify-write helpers: set or clear bits in a 32-bit CSR. */
#define SIS_SETBIT(sc, reg, x)				\
	CSR_WRITE_4(sc, reg,				\
		CSR_READ_4(sc, reg) | (x))

#define SIS_CLRBIT(sc, reg, x)				\
	CSR_WRITE_4(sc, reg,				\
		CSR_READ_4(sc, reg) & ~(x))

/*
 * Shorthands for bit-banging the serial EEPROM/MII lines through the
 * SIS_EECTL register; both expect a local `sc' in scope.
 */
#define SIO_SET(x)					\
	CSR_WRITE_4(sc, SIS_EECTL, CSR_READ_4(sc, SIS_EECTL) | x)

#define SIO_CLR(x)					\
	CSR_WRITE_4(sc, SIS_EECTL, CSR_READ_4(sc, SIS_EECTL) & ~x)
173 
/* PCI vendor/product IDs this driver attaches to. */
const struct pci_matchid sis_devices[] = {
	{ PCI_VENDOR_SIS, PCI_PRODUCT_SIS_900 },
	{ PCI_VENDOR_SIS, PCI_PRODUCT_SIS_7016 },
	{ PCI_VENDOR_NS, PCI_PRODUCT_NS_DP83815 }
};
179 
180 /*
181  * Routine to reverse the bits in a word. Stolen almost
182  * verbatim from /usr/games/fortune.
183  */
u_int16_t
sis_reverse(u_int16_t n)
{
	u_int16_t		r = 0;
	int			bit;

	/*
	 * Reverse the bit order of a 16-bit word: bit 0 of the input
	 * becomes bit 15 of the result, and so on.
	 */
	for (bit = 0; bit < 16; bit++) {
		r <<= 1;
		r |= (n >> bit) & 1;
	}

	return (r);
}
194 
195 void
196 sis_delay(struct sis_softc *sc)
197 {
198 	int			idx;
199 
200 	for (idx = (300 / 33) + 1; idx > 0; idx--)
201 		CSR_READ_4(sc, SIS_CSR);
202 }
203 
/*
 * Force the serial EEPROM into its idle state: select it, clock out
 * 25 dummy cycles to flush any partial command, then drop the clock
 * and chip select and zero the control register.
 */
void
sis_eeprom_idle(struct sis_softc *sc)
{
	int			i;

	/* Assert chip select and raise the clock. */
	SIO_SET(SIS_EECTL_CSEL);
	sis_delay(sc);
	SIO_SET(SIS_EECTL_CLK);
	sis_delay(sc);

	/* 25 idle clock cycles. */
	for (i = 0; i < 25; i++) {
		SIO_CLR(SIS_EECTL_CLK);
		sis_delay(sc);
		SIO_SET(SIS_EECTL_CLK);
		sis_delay(sc);
	}

	/* Drop clock and chip select, then clear the control register. */
	SIO_CLR(SIS_EECTL_CLK);
	sis_delay(sc);
	SIO_CLR(SIS_EECTL_CSEL);
	sis_delay(sc);
	CSR_WRITE_4(sc, SIS_EECTL, 0x00000000);
}
227 
/*
 * Send a read command and address to the EEPROM, check for ACK.
 */
void
sis_eeprom_putbyte(struct sis_softc *sc, int addr)
{
	int			d, i;

	/* Combine the read opcode with the word address. */
	d = addr | SIS_EECMD_READ;

	/*
	 * Feed in each bit and strobe the clock.  The command word is
	 * shifted out MSB first, starting from bit 10 (mask 0x400).
	 */
	for (i = 0x400; i; i >>= 1) {
		if (d & i)
			SIO_SET(SIS_EECTL_DIN);
		else
			SIO_CLR(SIS_EECTL_DIN);
		sis_delay(sc);
		SIO_SET(SIS_EECTL_CLK);
		sis_delay(sc);
		SIO_CLR(SIS_EECTL_CLK);
		sis_delay(sc);
	}
}
253 
/*
 * Read a word of data stored in the EEPROM at address 'addr.'
 * The 16 data bits are clocked in MSB first; the result is stored
 * through 'dest'.
 */
void
sis_eeprom_getword(struct sis_softc *sc, int addr, u_int16_t *dest)
{
	int			i;
	u_int16_t		word = 0;

	/* Force EEPROM to idle state. */
	sis_eeprom_idle(sc);

	/* Enter EEPROM access mode. */
	sis_delay(sc);
	SIO_CLR(SIS_EECTL_CLK);
	sis_delay(sc);
	SIO_SET(SIS_EECTL_CSEL);
	sis_delay(sc);

	/*
	 * Send address of word we want to read.
	 */
	sis_eeprom_putbyte(sc, addr);

	/*
	 * Start reading bits from EEPROM.  Sample DOUT on each rising
	 * clock edge, MSB (mask 0x8000) first.
	 */
	for (i = 0x8000; i; i >>= 1) {
		SIO_SET(SIS_EECTL_CLK);
		sis_delay(sc);
		if (CSR_READ_4(sc, SIS_EECTL) & SIS_EECTL_DOUT)
			word |= i;
		sis_delay(sc);
		SIO_CLR(SIS_EECTL_CLK);
		sis_delay(sc);
	}

	/* Turn off EEPROM access mode. */
	sis_eeprom_idle(sc);

	*dest = word;
}
296 
297 /*
298  * Read a sequence of words from the EEPROM.
299  */
300 void
301 sis_read_eeprom(struct sis_softc *sc, caddr_t dest,
302     int off, int cnt, int swap)
303 {
304 	int			i;
305 	u_int16_t		word = 0, *ptr;
306 
307 	for (i = 0; i < cnt; i++) {
308 		sis_eeprom_getword(sc, off + i, &word);
309 		ptr = (u_int16_t *)(dest + (i * 2));
310 		if (swap)
311 			*ptr = letoh16(word);
312 		else
313 			*ptr = word;
314 	}
315 }
316 
#if defined(__amd64__) || defined(__i386__)
/*
 * Read 'cnt' bytes of the station address out of the APC CMOS RAM
 * (SiS 630E-class chipsets).  Bit 0x40 in PCI config register 0x48
 * is set to expose the CMOS, which is then indexed byte-by-byte
 * through I/O ports 0x70 (index) and 0x71 (data); the bit is cleared
 * again afterwards.  Magic numbers come from SiS's Linux driver.
 */
void
sis_read_cmos(struct sis_softc *sc, struct pci_attach_args *pa,
    caddr_t dest, int off, int cnt)
{
	u_int32_t reg;
	int i;

	reg = pci_conf_read(pa->pa_pc, pa->pa_tag, 0x48);
	pci_conf_write(pa->pa_pc, pa->pa_tag, 0x48, reg | 0x40);

	for (i = 0; i < cnt; i++) {
		bus_space_write_1(pa->pa_iot, 0x0, 0x70, i + off);
		*(dest + i) = bus_space_read_1(pa->pa_iot, 0x0, 0x71);
	}

	pci_conf_write(pa->pa_pc, pa->pa_tag, 0x48, reg & ~0x40);
}
#endif
336 
/*
 * Read the station address from the receive filter PAR0..PAR2 slots.
 * The SIS_CSR_RELOAD pulse presumably makes the chip reload the
 * address into the filter registers (TODO confirm against datasheet);
 * the filter is disabled around the reads and re-enabled afterwards.
 */
void
sis_read_mac(struct sis_softc *sc, struct pci_attach_args *pa)
{
	u_int16_t *enaddr = (u_int16_t *) &sc->arpcom.ac_enaddr;

	/* Pulse RELOAD. */
	SIS_SETBIT(sc, SIS_CSR, SIS_CSR_RELOAD);
	SIS_CLRBIT(sc, SIS_CSR, SIS_CSR_RELOAD);

	SIS_CLRBIT(sc, SIS_RXFILT_CTL, SIS_RXFILTCTL_ENABLE);

	/* Read the address back 16 bits at a time through the data port. */
	CSR_WRITE_4(sc, SIS_RXFILT_CTL, SIS_FILTADDR_PAR0);
	enaddr[0] = letoh16(CSR_READ_4(sc, SIS_RXFILT_DATA) & 0xffff);
	CSR_WRITE_4(sc, SIS_RXFILT_CTL, SIS_FILTADDR_PAR1);
	enaddr[1] = letoh16(CSR_READ_4(sc, SIS_RXFILT_DATA) & 0xffff);
	CSR_WRITE_4(sc, SIS_RXFILT_CTL, SIS_FILTADDR_PAR2);
	enaddr[2] = letoh16(CSR_READ_4(sc, SIS_RXFILT_DATA) & 0xffff);

	SIS_SETBIT(sc, SIS_RXFILT_CTL, SIS_RXFILTCTL_ENABLE);
}
356 
/*
 * Read the station address on SiS 96x revisions.  The EEPROM is
 * arbitrated: assert the request bit, poll up to ~2000us for the
 * grant bit, read the address, then signal completion.  If the grant
 * never arrives, the address is simply left unread.
 */
void
sis_read96x_mac(struct sis_softc *sc)
{
	int i;

	SIO_SET(SIS96x_EECTL_REQ);

	for (i = 0; i < 2000; i++) {
		if ((CSR_READ_4(sc, SIS_EECTL) & SIS96x_EECTL_GNT)) {
			sis_read_eeprom(sc, (caddr_t)&sc->arpcom.ac_enaddr,
			    SIS_EE_NODEADDR, 3, 1);
			break;
		} else
			DELAY(1);
	}

	SIO_SET(SIS96x_EECTL_DONE);
}
375 
376 /*
377  * Sync the PHYs by setting data bit and strobing the clock 32 times.
378  */
379 void
380 sis_mii_sync(struct sis_softc *sc)
381 {
382 	int			i;
383 
384  	SIO_SET(SIS_MII_DIR|SIS_MII_DATA);
385 
386  	for (i = 0; i < 32; i++) {
387  		SIO_SET(SIS_MII_CLK);
388  		DELAY(1);
389  		SIO_CLR(SIS_MII_CLK);
390  		DELAY(1);
391  	}
392 }
393 
/*
 * Clock a series of bits through the MII.  The low 'cnt' bits of
 * 'bits' are shifted out MSB first; note the clock is pulsed
 * low-then-high here (the data line is assumed already driven by
 * the caller via SIS_MII_DIR).
 */
void
sis_mii_send(struct sis_softc *sc, u_int32_t bits, int cnt)
{
	int			i;

	SIO_CLR(SIS_MII_CLK);

	for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
		if (bits & i)
			SIO_SET(SIS_MII_DATA);
		else
			SIO_CLR(SIS_MII_DATA);
		DELAY(1);
		SIO_CLR(SIS_MII_CLK);
		DELAY(1);
		SIO_SET(SIS_MII_CLK);
	}
}
415 
/*
 * Read a PHY register through the MII (bit-bang).  Sends the start
 * delimiter, read opcode, PHY and register addresses, then samples
 * the PHY's ACK and 16 data bits.  Returns 0 on success, 1 if the
 * PHY failed to ACK (frame->mii_data is left zero in that case).
 */
int
sis_mii_readreg(struct sis_softc *sc, struct sis_mii_frame *frame)
{
	int			i, ack, s;

	s = splnet();

	/*
	 * Set up frame for RX.
	 */
	frame->mii_stdelim = SIS_MII_STARTDELIM;
	frame->mii_opcode = SIS_MII_READOP;
	frame->mii_turnaround = 0;
	frame->mii_data = 0;

	/*
 	 * Turn on data xmit.
	 */
	SIO_SET(SIS_MII_DIR);

	sis_mii_sync(sc);

	/*
	 * Send command/address info.
	 */
	sis_mii_send(sc, frame->mii_stdelim, 2);
	sis_mii_send(sc, frame->mii_opcode, 2);
	sis_mii_send(sc, frame->mii_phyaddr, 5);
	sis_mii_send(sc, frame->mii_regaddr, 5);

	/* Idle bit */
	SIO_CLR((SIS_MII_CLK|SIS_MII_DATA));
	DELAY(1);
	SIO_SET(SIS_MII_CLK);
	DELAY(1);

	/* Turn off xmit. */
	SIO_CLR(SIS_MII_DIR);

	/* Check for ack: the PHY pulls DATA low to acknowledge. */
	SIO_CLR(SIS_MII_CLK);
	DELAY(1);
	ack = CSR_READ_4(sc, SIS_EECTL) & SIS_MII_DATA;
	SIO_SET(SIS_MII_CLK);
	DELAY(1);

	/*
	 * Now try reading data bits. If the ack failed, we still
	 * need to clock through 16 cycles to keep the PHY(s) in sync.
	 */
	if (ack) {
		for(i = 0; i < 16; i++) {
			SIO_CLR(SIS_MII_CLK);
			DELAY(1);
			SIO_SET(SIS_MII_CLK);
			DELAY(1);
		}
		goto fail;
	}

	/* Sample 16 data bits, MSB first, on the low phase of the clock. */
	for (i = 0x8000; i; i >>= 1) {
		SIO_CLR(SIS_MII_CLK);
		DELAY(1);
		if (!ack) {
			if (CSR_READ_4(sc, SIS_EECTL) & SIS_MII_DATA)
				frame->mii_data |= i;
			DELAY(1);
		}
		SIO_SET(SIS_MII_CLK);
		DELAY(1);
	}

fail:

	/* One final idle clock cycle. */
	SIO_CLR(SIS_MII_CLK);
	DELAY(1);
	SIO_SET(SIS_MII_CLK);
	DELAY(1);

	splx(s);

	if (ack)
		return (1);
	return (0);
}
504 
/*
 * Write to a PHY register through the MII (bit-bang).  Sends the
 * start delimiter, write opcode, PHY and register addresses, the
 * turnaround bits and 16 data bits, then releases the bus.
 * Always returns 0 (writes are not acknowledged).
 */
int
sis_mii_writereg(struct sis_softc *sc, struct sis_mii_frame *frame)
{
	int			s;

	s = splnet();
 	/*
 	 * Set up frame for TX.
 	 */

 	frame->mii_stdelim = SIS_MII_STARTDELIM;
 	frame->mii_opcode = SIS_MII_WRITEOP;
 	frame->mii_turnaround = SIS_MII_TURNAROUND;

 	/*
  	 * Turn on data output.
 	 */
 	SIO_SET(SIS_MII_DIR);

 	sis_mii_sync(sc);

 	sis_mii_send(sc, frame->mii_stdelim, 2);
 	sis_mii_send(sc, frame->mii_opcode, 2);
 	sis_mii_send(sc, frame->mii_phyaddr, 5);
 	sis_mii_send(sc, frame->mii_regaddr, 5);
 	sis_mii_send(sc, frame->mii_turnaround, 2);
 	sis_mii_send(sc, frame->mii_data, 16);

 	/* Idle bit. */
 	SIO_SET(SIS_MII_CLK);
 	DELAY(1);
 	SIO_CLR(SIS_MII_CLK);
 	DELAY(1);

 	/*
 	 * Turn off xmit.
 	 */
 	SIO_CLR(SIS_MII_DIR);

 	splx(s);

 	return (0);
}
551 
/*
 * MII bus read entry point.  Three access methods depending on the
 * chip: memory-mapped PHY registers on the DP83815, the enhanced
 * SIS_PHYCTL access register on pre-635 SiS 900 revisions, and
 * bit-banged MDIO for everything else.
 */
int
sis_miibus_readreg(struct device *self, int phy, int reg)
{
	struct sis_softc	*sc = (struct sis_softc *)self;
	struct sis_mii_frame    frame;

	if (sc->sis_type == SIS_TYPE_83815) {
		/* Internal PHY only. */
		if (phy != 0)
			return (0);
		/*
		 * The NatSemi chip can take a while after
		 * a reset to come ready, during which the BMSR
		 * returns a value of 0. This is *never* supposed
		 * to happen: some of the BMSR bits are meant to
		 * be hardwired in the on position, and this can
		 * confuse the miibus code a bit during the probe
		 * and attach phase. So we make an effort to check
		 * for this condition and wait for it to clear.
		 */
		if (!CSR_READ_4(sc, NS_BMSR))
			DELAY(1000);
		/* PHY registers are memory-mapped, 4 bytes apart. */
		return CSR_READ_4(sc, NS_BMCR + (reg * 4));
	}

	/*
	 * Chipsets < SIS_635 seem not to be able to read/write
	 * through mdio. Use the enhanced PHY access register
	 * again for them.
	 */
	if (sc->sis_type == SIS_TYPE_900 &&
	    sc->sis_rev < SIS_REV_635) {
		int i, val = 0;

		if (phy != 0)
			return (0);

		/* Kick off the access and poll for completion. */
		CSR_WRITE_4(sc, SIS_PHYCTL,
		    (phy << 11) | (reg << 6) | SIS_PHYOP_READ);
		SIS_SETBIT(sc, SIS_PHYCTL, SIS_PHYCTL_ACCESS);

		for (i = 0; i < SIS_TIMEOUT; i++) {
			if (!(CSR_READ_4(sc, SIS_PHYCTL) & SIS_PHYCTL_ACCESS))
				break;
		}

		if (i == SIS_TIMEOUT) {
			printf("%s: PHY failed to come ready\n",
			    sc->sc_dev.dv_xname);
			return (0);
		}

		/* Result is delivered in the upper 16 bits. */
		val = (CSR_READ_4(sc, SIS_PHYCTL) >> 16) & 0xFFFF;

		/* All-ones reads as "no PHY". */
		if (val == 0xFFFF)
			return (0);

		return (val);
	} else {
		/* Fall back to bit-banged MDIO. */
		bzero(&frame, sizeof(frame));

		frame.mii_phyaddr = phy;
		frame.mii_regaddr = reg;
		sis_mii_readreg(sc, &frame);

		return (frame.mii_data);
	}
}
619 
/*
 * MII bus write entry point.  Mirrors sis_miibus_readreg(): direct
 * memory-mapped write on the DP83815, the SIS_PHYCTL access register
 * on pre-635 SiS 900 revisions, bit-banged MDIO otherwise.
 */
void
sis_miibus_writereg(struct device *self, int phy, int reg, int data)
{
	struct sis_softc	*sc = (struct sis_softc *)self;
	struct sis_mii_frame	frame;

	if (sc->sis_type == SIS_TYPE_83815) {
		/* Internal PHY only; registers are memory-mapped. */
		if (phy != 0)
			return;
		CSR_WRITE_4(sc, NS_BMCR + (reg * 4), data);
		return;
	}

	/*
	 * Chipsets < SIS_635 seem not to be able to read/write
	 * through mdio. Use the enhanced PHY access register
	 * again for them.
	 */
	if (sc->sis_type == SIS_TYPE_900 &&
	    sc->sis_rev < SIS_REV_635) {
		int i;

		if (phy != 0)
			return;

		/* Data goes in the upper 16 bits; poll for completion. */
		CSR_WRITE_4(sc, SIS_PHYCTL, (data << 16) | (phy << 11) |
		    (reg << 6) | SIS_PHYOP_WRITE);
		SIS_SETBIT(sc, SIS_PHYCTL, SIS_PHYCTL_ACCESS);

		for (i = 0; i < SIS_TIMEOUT; i++) {
			if (!(CSR_READ_4(sc, SIS_PHYCTL) & SIS_PHYCTL_ACCESS))
				break;
		}

		if (i == SIS_TIMEOUT)
			printf("%s: PHY failed to come ready\n",
			    sc->sc_dev.dv_xname);
	} else {
		/* Fall back to bit-banged MDIO. */
		bzero(&frame, sizeof(frame));

		frame.mii_phyaddr = phy;
		frame.mii_regaddr = reg;
		frame.mii_data = data;
		sis_mii_writereg(sc, &frame);
	}
}
666 
void
sis_miibus_statchg(struct device *self)
{
	/* PHY status changed: reinitialize the chip to pick it up. */
	sis_init((struct sis_softc *)self);
}
674 
675 u_int32_t
676 sis_mchash(struct sis_softc *sc, const uint8_t *addr)
677 {
678 	uint32_t		crc;
679 
680 	/* Compute CRC for the address value. */
681 	crc = ether_crc32_be(addr, ETHER_ADDR_LEN);
682 
683 	/*
684 	 * return the filter bit position
685 	 *
686 	 * The NatSemi chip has a 512-bit filter, which is
687 	 * different than the SiS, so we special-case it.
688 	 */
689 	if (sc->sis_type == SIS_TYPE_83815)
690 		return (crc >> 23);
691 	else if (sc->sis_rev >= SIS_REV_635 ||
692 	    sc->sis_rev == SIS_REV_900B)
693 		return (crc >> 24);
694 	else
695 		return (crc >> 25);
696 }
697 
698 void
699 sis_iff(struct sis_softc *sc)
700 {
701 	if (sc->sis_type == SIS_TYPE_83815)
702 		sis_iff_ns(sc);
703 	else
704 		sis_iff_sis(sc);
705 }
706 
/*
 * Program the NatSemi DP83815 receive filter from the interface
 * flags and multicast list.
 */
void
sis_iff_ns(struct sis_softc *sc)
{
	struct ifnet		*ifp = &sc->arpcom.ac_if;
	struct arpcom		*ac = &sc->arpcom;
	struct ether_multi	*enm;
	struct ether_multistep  step;
	u_int32_t		h = 0, i, rxfilt;
	int			bit, index;

	/* Start from the current setup with our control bits cleared. */
	rxfilt = CSR_READ_4(sc, SIS_RXFILT_CTL);
	rxfilt &= ~(SIS_RXFILTCTL_ALLMULTI | SIS_RXFILTCTL_ALLPHYS |
	    NS_RXFILTCTL_ARP | SIS_RXFILTCTL_BROAD | NS_RXFILTCTL_MCHASH |
	    NS_RXFILTCTL_PERFECT);
	ifp->if_flags &= ~IFF_ALLMULTI;

	/*
	 * Always accept ARP frames.
	 * Always accept broadcast frames.
	 * Always accept frames destined to our station address.
	 */
	rxfilt |= NS_RXFILTCTL_ARP | SIS_RXFILTCTL_BROAD |
	    NS_RXFILTCTL_PERFECT;

	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
		/* Multicast ranges can't be hashed: fall back to all-multi. */
		ifp->if_flags |= IFF_ALLMULTI;
		rxfilt |= SIS_RXFILTCTL_ALLMULTI;
		if (ifp->if_flags & IFF_PROMISC)
			rxfilt |= SIS_RXFILTCTL_ALLPHYS;
	} else {
		/*
		 * We have to explicitly enable the multicast hash table
		 * on the NatSemi chip if we want to use it, which we do.
		 */
		rxfilt |= NS_RXFILTCTL_MCHASH;

		/* first, zot all the existing hash bits */
		for (i = 0; i < 32; i++) {
			CSR_WRITE_4(sc, SIS_RXFILT_CTL, NS_FILTADDR_FMEM_LO + (i*2));
			CSR_WRITE_4(sc, SIS_RXFILT_DATA, 0);
		}

		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			h = sis_mchash(sc, enm->enm_addrlo);

			/*
			 * Turn the 9-bit hash into a filter-memory word
			 * offset and a bit position within that word.
			 */
			index = h >> 3;
			bit = h & 0x1F;

			CSR_WRITE_4(sc, SIS_RXFILT_CTL, NS_FILTADDR_FMEM_LO + index);

			/* Each filter data register holds only 16 bits. */
			if (bit > 0xF)
				bit -= 0x10;

			SIS_SETBIT(sc, SIS_RXFILT_DATA, (1 << bit));

			ETHER_NEXT_MULTI(step, enm);
		}
	}

	CSR_WRITE_4(sc, SIS_RXFILT_CTL, rxfilt);
}
769 
/*
 * Program the SiS 900/7016 receive filter from the interface flags
 * and multicast list.
 */
void
sis_iff_sis(struct sis_softc *sc)
{
	struct ifnet		*ifp = &sc->arpcom.ac_if;
	struct arpcom		*ac = &sc->arpcom;
	struct ether_multi	*enm;
	struct ether_multistep	step;
	u_int32_t		h, i, maxmulti, rxfilt;
	u_int16_t		hashes[16];

	/* hash table size: 16 or 8 16-bit words, depending on revision */
	if (sc->sis_rev >= SIS_REV_635 ||
	    sc->sis_rev == SIS_REV_900B)
		maxmulti = 16;
	else
		maxmulti = 8;

	/* Start from the current setup with our control bits cleared. */
	rxfilt = CSR_READ_4(sc, SIS_RXFILT_CTL);
	rxfilt &= ~(SIS_RXFILTCTL_ALLMULTI | SIS_RXFILTCTL_ALLPHYS |
	    SIS_RXFILTCTL_BROAD);
	ifp->if_flags &= ~IFF_ALLMULTI;

	/*
	 * Always accept broadcast frames.
	 */
	rxfilt |= SIS_RXFILTCTL_BROAD;

	/*
	 * Fall back to all-multi when the list overflows the hash
	 * table or contains address ranges.
	 */
	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0 ||
	    ac->ac_multicnt > maxmulti) {
		ifp->if_flags |= IFF_ALLMULTI;
		rxfilt |= SIS_RXFILTCTL_ALLMULTI;
		if (ifp->if_flags & IFF_PROMISC)
			rxfilt |= SIS_RXFILTCTL_ALLPHYS;

		for (i = 0; i < maxmulti; i++)
			hashes[i] = ~0;
	} else {
		for (i = 0; i < maxmulti; i++)
			hashes[i] = 0;

		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			h = sis_mchash(sc, enm->enm_addrlo);

			/* High bits pick the word, low 4 bits the bit. */
			hashes[h >> 4] |= 1 << (h & 0xf);

			ETHER_NEXT_MULTI(step, enm);
		}
	}

	/* Load the hash words into filter slots 4 and up. */
	for (i = 0; i < maxmulti; i++) {
		CSR_WRITE_4(sc, SIS_RXFILT_CTL, (4 + i) << 16);
		CSR_WRITE_4(sc, SIS_RXFILT_DATA, hashes[i]);
	}

	CSR_WRITE_4(sc, SIS_RXFILT_CTL, rxfilt);
}
827 
/*
 * Soft-reset the chip: set the reset bit and poll until the hardware
 * clears it (warn on timeout), then give it a moment to settle.
 */
void
sis_reset(struct sis_softc *sc)
{
	int			i;

	SIS_SETBIT(sc, SIS_CSR, SIS_CSR_RESET);

	for (i = 0; i < SIS_TIMEOUT; i++) {
		if (!(CSR_READ_4(sc, SIS_CSR) & SIS_CSR_RESET))
			break;
	}

	if (i == SIS_TIMEOUT)
		printf("%s: reset never completed\n", sc->sc_dev.dv_xname);

	/* Wait a little while for the chip to get its brains in order. */
	DELAY(1000);

	/*
	 * If this is a NetSemi chip, make sure to clear
	 * PME mode.
	 */
	if (sc->sis_type == SIS_TYPE_83815) {
		CSR_WRITE_4(sc, NS_CLKRUN, NS_CLKRUN_PMESTS);
		CSR_WRITE_4(sc, NS_CLKRUN, 0);
	}
}
855 
856 /*
857  * Probe for an SiS chip. Check the PCI vendor and device
858  * IDs against our list and return a device name if we find a match.
859  */
860 int
861 sis_probe(struct device *parent, void *match, void *aux)
862 {
863 	return (pci_matchbyid((struct pci_attach_args *)aux, sis_devices,
864 	    nitems(sis_devices)));
865 }
866 
867 /*
868  * Attach the interface. Allocate softc structures, do ifmedia
869  * setup and ethernet/BPF attach.
870  */
/*
 * Attach the interface: wake the chip out of any PCI power-saving
 * state, map its registers, hook the interrupt, identify the chip
 * flavor, recover the station address (method varies wildly by
 * chip/revision), set up DMA descriptor memory and maps, and finally
 * attach the MII and network stack glue.
 */
void
sis_attach(struct device *parent, struct device *self, void *aux)
{
	int			i;
	const char		*intrstr = NULL;
	pcireg_t		command;
	struct sis_softc	*sc = (struct sis_softc *)self;
	struct pci_attach_args	*pa = aux;
	pci_chipset_tag_t	pc = pa->pa_pc;
	pci_intr_handle_t	ih;
	struct ifnet		*ifp;
	bus_size_t		size;

	sc->sis_stopped = 1;

	/*
	 * Handle power management nonsense: if the chip reports a
	 * power-management capability and is not in D0, force it to
	 * D0 and restore the BARs/interrupt line clobbered by the
	 * power state change.
	 */
	command = pci_conf_read(pc, pa->pa_tag, SIS_PCI_CAPID) & 0x000000FF;
	if (command == 0x01) {

		command = pci_conf_read(pc, pa->pa_tag, SIS_PCI_PWRMGMTCTRL);
		if (command & SIS_PSTATE_MASK) {
			u_int32_t		iobase, membase, irq;

			/* Save important PCI config data. */
			iobase = pci_conf_read(pc, pa->pa_tag, SIS_PCI_LOIO);
			membase = pci_conf_read(pc, pa->pa_tag, SIS_PCI_LOMEM);
			irq = pci_conf_read(pc, pa->pa_tag, SIS_PCI_INTLINE);

			/* Reset the power state. */
			printf("%s: chip is in D%d power mode -- setting to D0\n",
			    sc->sc_dev.dv_xname, command & SIS_PSTATE_MASK);
			command &= 0xFFFFFFFC;
			pci_conf_write(pc, pa->pa_tag, SIS_PCI_PWRMGMTCTRL, command);

			/* Restore PCI config data. */
			pci_conf_write(pc, pa->pa_tag, SIS_PCI_LOIO, iobase);
			pci_conf_write(pc, pa->pa_tag, SIS_PCI_LOMEM, membase);
			pci_conf_write(pc, pa->pa_tag, SIS_PCI_INTLINE, irq);
		}
	}

	/*
	 * Map control/status registers.
	 */

#ifdef SIS_USEIOSPACE
	if (pci_mapreg_map(pa, SIS_PCI_LOIO, PCI_MAPREG_TYPE_IO, 0,
	    &sc->sis_btag, &sc->sis_bhandle, NULL, &size, 0)) {
		printf(": can't map i/o space\n");
		return;
 	}
#else
	if (pci_mapreg_map(pa, SIS_PCI_LOMEM, PCI_MAPREG_TYPE_MEM, 0,
	    &sc->sis_btag, &sc->sis_bhandle, NULL, &size, 0)) {
 		printf(": can't map mem space\n");
		return;
 	}
#endif

	/* Allocate interrupt */
	if (pci_intr_map(pa, &ih)) {
		printf(": couldn't map interrupt\n");
		goto fail_1;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, sis_intr, sc,
	    self->dv_xname);
	if (sc->sc_ih == NULL) {
		printf(": couldn't establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		goto fail_1;
	}

	/* Identify the chip flavor and revision. */
	switch (PCI_PRODUCT(pa->pa_id)) {
	case PCI_PRODUCT_SIS_900:
		sc->sis_type = SIS_TYPE_900;
		break;
	case PCI_PRODUCT_SIS_7016:
		sc->sis_type = SIS_TYPE_7016;
		break;
	case PCI_PRODUCT_NS_DP83815:
		sc->sis_type = SIS_TYPE_83815;
		break;
	default:
		break;
	}
	sc->sis_rev = PCI_REVISION(pa->pa_class);

	/* Reset the adapter. */
	sis_reset(sc);

	if (sc->sis_type == SIS_TYPE_900 &&
	   (sc->sis_rev == SIS_REV_635 ||
	    sc->sis_rev == SIS_REV_900B)) {
		SIO_SET(SIS_CFG_RND_CNT);
		SIO_SET(SIS_CFG_PERR_DETECT);
	}

	/*
	 * Get station address from the EEPROM.
	 */
	switch (PCI_VENDOR(pa->pa_id)) {
	case PCI_VENDOR_NS:
		/* Report the NatSemi silicon revision. */
		sc->sis_srr = CSR_READ_4(sc, NS_SRR);

		if (sc->sis_srr == NS_SRR_15C)
			printf(", DP83815C");
		else if (sc->sis_srr == NS_SRR_15D)
			printf(", DP83815D");
		else if (sc->sis_srr == NS_SRR_16A)
			printf(", DP83816A");
		else
			printf(", srr %x", sc->sis_srr);

		/*
		 * Reading the MAC address out of the EEPROM on
		 * the NatSemi chip takes a bit more work than
		 * you'd expect. The address spans 4 16-bit words,
		 * with the first word containing only a single bit.
		 * You have to shift everything over one bit to
		 * get it aligned properly. Also, the bits are
		 * stored backwards (the LSB is really the MSB,
		 * and so on) so you have to reverse them in order
		 * to get the MAC address into the form we want.
		 * Why? Who the hell knows.
		 */
		{
			u_int16_t		tmp[4];

			sis_read_eeprom(sc, (caddr_t)&tmp, NS_EE_NODEADDR,
			    4, 0);

			/* Shift everything over one bit. */
			tmp[3] = tmp[3] >> 1;
			tmp[3] |= tmp[2] << 15;
			tmp[2] = tmp[2] >> 1;
			tmp[2] |= tmp[1] << 15;
			tmp[1] = tmp[1] >> 1;
			tmp[1] |= tmp[0] << 15;

			/* Now reverse all the bits. */
			tmp[3] = letoh16(sis_reverse(tmp[3]));
			tmp[2] = letoh16(sis_reverse(tmp[2]));
			tmp[1] = letoh16(sis_reverse(tmp[1]));

			bcopy(&tmp[1], sc->arpcom.ac_enaddr,
			    ETHER_ADDR_LEN);
		}
		break;
	case PCI_VENDOR_SIS:
	default:
#if defined(__amd64__) || defined(__i386__)
		/*
		 * If this is a SiS 630E chipset with an embedded
		 * SiS 900 controller, we have to read the MAC address
		 * from the APC CMOS RAM. Our method for doing this
		 * is very ugly since we have to reach out and grab
		 * ahold of hardware for which we cannot properly
		 * allocate resources. This code is only compiled on
		 * the i386 architecture since the SiS 630E chipset
		 * is for x86 motherboards only. Note that there are
		 * a lot of magic numbers in this hack. These are
		 * taken from SiS's Linux driver. I'd like to replace
		 * them with proper symbolic definitions, but that
		 * requires some datasheets that I don't have access
		 * to at the moment.
		 */
		if (sc->sis_rev == SIS_REV_630S ||
		    sc->sis_rev == SIS_REV_630E)
			sis_read_cmos(sc, pa, (caddr_t)&sc->arpcom.ac_enaddr,
			    0x9, 6);
		else
#endif
		if (sc->sis_rev == SIS_REV_96x)
			sis_read96x_mac(sc);
		else if (sc->sis_rev == SIS_REV_635 ||
		    sc->sis_rev == SIS_REV_630ET ||
		    sc->sis_rev == SIS_REV_630EA1)
			sis_read_mac(sc, pa);
		else
			sis_read_eeprom(sc, (caddr_t)&sc->arpcom.ac_enaddr,
			    SIS_EE_NODEADDR, 3, 1);
		break;
	}

	printf(": %s, address %s\n", intrstr,
	    ether_sprintf(sc->arpcom.ac_enaddr));

	sc->sc_dmat = pa->pa_dmat;

	/*
	 * Allocate, map and load DMA-safe memory for the RX/TX
	 * descriptor lists, then create per-descriptor mbuf maps.
	 */
	if (bus_dmamem_alloc(sc->sc_dmat, sizeof(struct sis_list_data),
	    PAGE_SIZE, 0, sc->sc_listseg, 1, &sc->sc_listnseg,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO) != 0) {
		printf(": can't alloc list mem\n");
		goto fail_2;
	}
	if (bus_dmamem_map(sc->sc_dmat, sc->sc_listseg, sc->sc_listnseg,
	    sizeof(struct sis_list_data), &sc->sc_listkva,
	    BUS_DMA_NOWAIT) != 0) {
		printf(": can't map list mem\n");
		goto fail_2;
	}
	if (bus_dmamap_create(sc->sc_dmat, sizeof(struct sis_list_data), 1,
	    sizeof(struct sis_list_data), 0, BUS_DMA_NOWAIT,
	    &sc->sc_listmap) != 0) {
		printf(": can't alloc list map\n");
		goto fail_2;
	}
	if (bus_dmamap_load(sc->sc_dmat, sc->sc_listmap, sc->sc_listkva,
	    sizeof(struct sis_list_data), NULL, BUS_DMA_NOWAIT) != 0) {
		printf(": can't load list map\n");
		goto fail_2;
	}
	sc->sis_ldata = (struct sis_list_data *)sc->sc_listkva;

	for (i = 0; i < SIS_RX_LIST_CNT; i++) {
		if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0,
		    BUS_DMA_NOWAIT, &sc->sis_ldata->sis_rx_list[i].map) != 0) {
			printf(": can't create rx map\n");
			goto fail_2;
		}
	}

	for (i = 0; i < SIS_TX_LIST_CNT; i++) {
		if (bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    SIS_TX_LIST_CNT - 3, MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &sc->sis_ldata->sis_tx_list[i].map) != 0) {
			printf(": can't create tx map\n");
			goto fail_2;
		}
	}
	if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, SIS_TX_LIST_CNT - 3,
	    MCLBYTES, 0, BUS_DMA_NOWAIT, &sc->sc_tx_sparemap) != 0) {
		printf(": can't create tx spare map\n");
		goto fail_2;
	}

	timeout_set(&sc->sis_timeout, sis_tick, sc);

	/* Set up the network interface. */
	ifp = &sc->arpcom.ac_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = sis_ioctl;
	ifp->if_start = sis_start;
	ifp->if_watchdog = sis_watchdog;
	ifp->if_baudrate = 10000000;
	IFQ_SET_MAXLEN(&ifp->if_snd, SIS_TX_LIST_CNT - 1);
	IFQ_SET_READY(&ifp->if_snd);
	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

	m_clsetwms(ifp, MCLBYTES, 2, SIS_RX_LIST_CNT - 1);

	/* Attach the MII; fall back to a "none" medium if no PHY found. */
	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = sis_miibus_readreg;
	sc->sc_mii.mii_writereg = sis_miibus_writereg;
	sc->sc_mii.mii_statchg = sis_miibus_statchg;
	ifmedia_init(&sc->sc_mii.mii_media, 0, sis_ifmedia_upd,sis_ifmedia_sts);
	mii_attach(self, &sc->sc_mii, 0xffffffff, MII_PHY_ANY, MII_OFFSET_ANY,
	    0);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);

	/*
	 * Call MI attach routines.
	 */
	if_attach(ifp);
	ether_ifattach(ifp);
	return;

	/* Unwind in reverse order of acquisition. */
fail_2:
	pci_intr_disestablish(pc, sc->sc_ih);

fail_1:
	bus_space_unmap(sc->sis_btag, sc->sis_bhandle, size);
}
1155 
1156 int
1157 sis_activate(struct device *self, int act)
1158 {
1159 	struct sis_softc *sc = (struct sis_softc *)self;
1160 	struct ifnet *ifp = &sc->arpcom.ac_if;
1161 	int rv = 0;
1162 
1163 	switch (act) {
1164 	case DVACT_QUIESCE:
1165 		rv = config_activate_children(self, act);
1166 		break;
1167 	case DVACT_SUSPEND:
1168 		if (ifp->if_flags & IFF_RUNNING)
1169 			sis_stop(sc);
1170 		rv = config_activate_children(self, act);
1171 		break;
1172 	case DVACT_RESUME:
1173 		rv = config_activate_children(self, act);
1174 		if (ifp->if_flags & IFF_UP)
1175 			sis_init(sc);
1176 		break;
1177 	}
1178 	return (rv);
1179 }
1180 
1181 /*
1182  * Initialize the TX and RX descriptors and allocate mbufs for them. Note that
1183  * we arrange the descriptors in a closed ring, so that the last descriptor
1184  * points back to the first.
1185  */
1186 int
1187 sis_ring_init(struct sis_softc *sc)
1188 {
1189 	struct sis_list_data	*ld;
1190 	struct sis_ring_data	*cd;
1191 	int			i, nexti;
1192 
1193 	cd = &sc->sis_cdata;
1194 	ld = sc->sis_ldata;
1195 
1196 	for (i = 0; i < SIS_TX_LIST_CNT; i++) {
1197 		if (i == (SIS_TX_LIST_CNT - 1))
1198 			nexti = 0;
1199 		else
1200 			nexti = i + 1;
1201 		ld->sis_tx_list[i].sis_nextdesc = &ld->sis_tx_list[nexti];
1202 		ld->sis_tx_list[i].sis_next =
1203 		    htole32(sc->sc_listmap->dm_segs[0].ds_addr +
1204 		      offsetof(struct sis_list_data, sis_tx_list[nexti]));
1205 		ld->sis_tx_list[i].sis_mbuf = NULL;
1206 		ld->sis_tx_list[i].sis_ptr = 0;
1207 		ld->sis_tx_list[i].sis_ctl = 0;
1208 	}
1209 
1210 	cd->sis_tx_prod = cd->sis_tx_cons = cd->sis_tx_cnt = 0;
1211 
1212 	for (i = 0; i < SIS_RX_LIST_CNT; i++) {
1213 		if (i == SIS_RX_LIST_CNT - 1)
1214 			nexti = 0;
1215 		else
1216 			nexti = i + 1;
1217 		ld->sis_rx_list[i].sis_nextdesc = &ld->sis_rx_list[nexti];
1218 		ld->sis_rx_list[i].sis_next =
1219 		    htole32(sc->sc_listmap->dm_segs[0].ds_addr +
1220 		      offsetof(struct sis_list_data, sis_rx_list[nexti]));
1221 		ld->sis_rx_list[i].sis_ctl = 0;
1222 	}
1223 
1224 	cd->sis_rx_prod = cd->sis_rx_cons = cd->sis_rx_cnt = 0;
1225 	sis_fill_rx_ring(sc);
1226 
1227 	return (0);
1228 }
1229 
1230 void
1231 sis_fill_rx_ring(struct sis_softc *sc)
1232 {
1233 	struct sis_list_data    *ld;
1234 	struct sis_ring_data    *cd;
1235 
1236 	cd = &sc->sis_cdata;
1237 	ld = sc->sis_ldata;
1238 
1239 	while (cd->sis_rx_cnt < SIS_RX_LIST_CNT) {
1240 		if (sis_newbuf(sc, &ld->sis_rx_list[cd->sis_rx_prod]))
1241 			break;
1242 		SIS_INC(cd->sis_rx_prod, SIS_RX_LIST_CNT);
1243 		cd->sis_rx_cnt++;
1244 	}
1245 }
1246 
1247 /*
1248  * Initialize an RX descriptor and attach an MBUF cluster.
1249  */
1250 int
1251 sis_newbuf(struct sis_softc *sc, struct sis_desc *c)
1252 {
1253 	struct mbuf		*m_new = NULL;
1254 
1255 	if (c == NULL)
1256 		return (EINVAL);
1257 
1258 	m_new = MCLGETI(NULL, M_DONTWAIT, &sc->arpcom.ac_if, MCLBYTES);
1259 	if (!m_new)
1260 		return (ENOBUFS);
1261 
1262 	m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
1263 
1264 	if (bus_dmamap_load_mbuf(sc->sc_dmat, c->map, m_new,
1265 	    BUS_DMA_NOWAIT)) {
1266 		m_free(m_new);
1267 		return (ENOBUFS);
1268 	}
1269 
1270 	bus_dmamap_sync(sc->sc_dmat, c->map, 0, c->map->dm_mapsize,
1271 	    BUS_DMASYNC_PREREAD);
1272 
1273 	c->sis_mbuf = m_new;
1274 	c->sis_ptr = htole32(c->map->dm_segs[0].ds_addr);
1275 
1276 	bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
1277 	    ((caddr_t)c - sc->sc_listkva), sizeof(struct sis_desc),
1278 	    BUS_DMASYNC_PREWRITE);
1279 
1280 	c->sis_ctl = htole32(ETHER_MAX_DIX_LEN);
1281 
1282 	bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
1283 	    ((caddr_t)c - sc->sc_listkva), sizeof(struct sis_desc),
1284 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1285 
1286 	return (0);
1287 }
1288 
1289 /*
1290  * A frame has been uploaded: pass the resulting mbuf chain up to
1291  * the higher level protocols.
1292  */
1293 void
1294 sis_rxeof(struct sis_softc *sc)
1295 {
1296 	struct mbuf		*m;
1297 	struct ifnet		*ifp;
1298 	struct sis_desc		*cur_rx;
1299 	int			total_len = 0;
1300 	u_int32_t		rxstat;
1301 
1302 	ifp = &sc->arpcom.ac_if;
1303 
1304 	while(sc->sis_cdata.sis_rx_cnt > 0) {
1305 		cur_rx = &sc->sis_ldata->sis_rx_list[sc->sis_cdata.sis_rx_cons];
1306 		bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
1307 		    ((caddr_t)cur_rx - sc->sc_listkva),
1308 		    sizeof(struct sis_desc),
1309 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1310 		if (!SIS_OWNDESC(cur_rx))
1311 			break;
1312 
1313 		rxstat = letoh32(cur_rx->sis_rxstat);
1314 		m = cur_rx->sis_mbuf;
1315 		cur_rx->sis_mbuf = NULL;
1316 		total_len = SIS_RXBYTES(cur_rx);
1317 		/* from here on the buffer is consumed */
1318 		SIS_INC(sc->sis_cdata.sis_rx_cons, SIS_RX_LIST_CNT);
1319 		sc->sis_cdata.sis_rx_cnt--;
1320 
1321 		/*
1322 		 * If an error occurs, update stats, clear the
1323 		 * status word and leave the mbuf cluster in place:
1324 		 * it should simply get re-used next time this descriptor
1325 	 	 * comes up in the ring. However, don't report long
1326 		 * frames as errors since they could be VLANs.
1327 		 */
1328 		if (rxstat & SIS_RXSTAT_GIANT &&
1329 		    total_len <= (ETHER_MAX_DIX_LEN - ETHER_CRC_LEN))
1330 			rxstat &= ~SIS_RXSTAT_GIANT;
1331 		if (SIS_RXSTAT_ERROR(rxstat)) {
1332 			ifp->if_ierrors++;
1333 			if (rxstat & SIS_RXSTAT_COLL)
1334 				ifp->if_collisions++;
1335 			m_freem(m);
1336 			continue;
1337 		}
1338 
1339 		/* No errors; receive the packet. */
1340 		bus_dmamap_sync(sc->sc_dmat, cur_rx->map, 0,
1341 		    cur_rx->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1342 #ifdef __STRICT_ALIGNMENT
1343 		/*
1344 		 * On some architectures, we do not have alignment problems,
1345 		 * so try to allocate a new buffer for the receive ring, and
1346 		 * pass up the one where the packet is already, saving the
1347 		 * expensive copy done in m_devget().
1348 		 * If we are on an architecture with alignment problems, or
1349 		 * if the allocation fails, then use m_devget and leave the
1350 		 * existing buffer in the receive ring.
1351 		 */
1352 		{
1353 			struct mbuf *m0;
1354 			m0 = m_devget(mtod(m, char *), total_len, ETHER_ALIGN,
1355 			    ifp, NULL);
1356 			m_freem(m);
1357 			if (m0 == NULL) {
1358 				ifp->if_ierrors++;
1359 				continue;
1360 			}
1361 			m = m0;
1362 		}
1363 #else
1364 		m->m_pkthdr.rcvif = ifp;
1365 		m->m_pkthdr.len = m->m_len = total_len;
1366 #endif
1367 		ifp->if_ipackets++;
1368 
1369 #if NBPFILTER > 0
1370 		if (ifp->if_bpf)
1371 			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_IN);
1372 #endif
1373 
1374 		/* pass it on. */
1375 		ether_input_mbuf(ifp, m);
1376 	}
1377 
1378 	sis_fill_rx_ring(sc);
1379 }
1380 
1381 /*
1382  * A frame was downloaded to the chip. It's safe for us to clean up
1383  * the list buffers.
1384  */
1385 
1386 void
1387 sis_txeof(struct sis_softc *sc)
1388 {
1389 	struct ifnet		*ifp;
1390 	u_int32_t		idx, ctl, txstat;
1391 
1392 	ifp = &sc->arpcom.ac_if;
1393 
1394 	/*
1395 	 * Go through our tx list and free mbufs for those
1396 	 * frames that have been transmitted.
1397 	 */
1398 	for (idx = sc->sis_cdata.sis_tx_cons; sc->sis_cdata.sis_tx_cnt > 0;
1399 	    sc->sis_cdata.sis_tx_cnt--, SIS_INC(idx, SIS_TX_LIST_CNT)) {
1400 		struct sis_desc *cur_tx = &sc->sis_ldata->sis_tx_list[idx];
1401 
1402 		bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
1403 		    ((caddr_t)cur_tx - sc->sc_listkva),
1404 		    sizeof(struct sis_desc),
1405 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1406 
1407 		if (SIS_OWNDESC(cur_tx))
1408 			break;
1409 
1410 		ctl = letoh32(cur_tx->sis_ctl);
1411 
1412 		if (ctl & SIS_CMDSTS_MORE)
1413 			continue;
1414 
1415 		txstat = letoh32(cur_tx->sis_txstat);
1416 
1417 		if (!(ctl & SIS_CMDSTS_PKT_OK)) {
1418 			ifp->if_oerrors++;
1419 			if (txstat & SIS_TXSTAT_EXCESSCOLLS)
1420 				ifp->if_collisions++;
1421 			if (txstat & SIS_TXSTAT_OUTOFWINCOLL)
1422 				ifp->if_collisions++;
1423 		}
1424 
1425 		ifp->if_collisions += (txstat & SIS_TXSTAT_COLLCNT) >> 16;
1426 
1427 		ifp->if_opackets++;
1428 		if (cur_tx->map->dm_nsegs != 0) {
1429 			bus_dmamap_t map = cur_tx->map;
1430 
1431 			bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
1432 			    BUS_DMASYNC_POSTWRITE);
1433 			bus_dmamap_unload(sc->sc_dmat, map);
1434 		}
1435 		if (cur_tx->sis_mbuf != NULL) {
1436 			m_freem(cur_tx->sis_mbuf);
1437 			cur_tx->sis_mbuf = NULL;
1438 		}
1439 	}
1440 
1441 	if (idx != sc->sis_cdata.sis_tx_cons) {
1442 		/* we freed up some buffers */
1443 		sc->sis_cdata.sis_tx_cons = idx;
1444 		ifp->if_flags &= ~IFF_OACTIVE;
1445 	}
1446 
1447 	ifp->if_timer = (sc->sis_cdata.sis_tx_cnt == 0) ? 0 : 5;
1448 }
1449 
1450 void
1451 sis_tick(void *xsc)
1452 {
1453 	struct sis_softc	*sc = (struct sis_softc *)xsc;
1454 	struct mii_data		*mii;
1455 	struct ifnet		*ifp;
1456 	int			s;
1457 
1458 	s = splnet();
1459 
1460 	ifp = &sc->arpcom.ac_if;
1461 
1462 	mii = &sc->sc_mii;
1463 	mii_tick(mii);
1464 
1465 	if (!sc->sis_link && mii->mii_media_status & IFM_ACTIVE &&
1466 	    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
1467 		sc->sis_link++;
1468 		if (!IFQ_IS_EMPTY(&ifp->if_snd))
1469 			sis_start(ifp);
1470 	}
1471 	timeout_add_sec(&sc->sis_timeout, 1);
1472 
1473 	splx(s);
1474 }
1475 
/*
 * Interrupt handler.  Returns 0 when the interrupt was not ours (chip
 * stopped, or no interesting status bits set — likely a shared line),
 * 1 when it was handled.
 */
int
sis_intr(void *arg)
{
	struct sis_softc	*sc = arg;
	struct ifnet		*ifp = &sc->arpcom.ac_if;
	u_int32_t		status;

	if (sc->sis_stopped)	/* Most likely shared interrupt */
		return (0);

	/* Reading the ISR register clears all interrupts. */
	status = CSR_READ_4(sc, SIS_ISR);
	if ((status & SIS_INTRS) == 0)
		return (0);

	/* TX completions (including errors and idle transitions). */
	if (status &
	    (SIS_ISR_TX_DESC_OK | SIS_ISR_TX_ERR |
	     SIS_ISR_TX_OK | SIS_ISR_TX_IDLE))
		sis_txeof(sc);

	/* RX completions (including errors and idle transitions). */
	if (status &
	    (SIS_ISR_RX_DESC_OK | SIS_ISR_RX_OK |
	     SIS_ISR_RX_ERR | SIS_ISR_RX_IDLE))
		sis_rxeof(sc);

	if (status & (SIS_ISR_RX_IDLE)) {
		/* consume what's there so that sis_rx_cons points
		 * to the first HW owned descriptor. */
		sis_rxeof(sc);
		/* reprogram the RX listptr */
		CSR_WRITE_4(sc, SIS_RX_LISTPTR,
		    sc->sc_listmap->dm_segs[0].ds_addr +
		    offsetof(struct sis_list_data,
		    sis_rx_list[sc->sis_cdata.sis_rx_cons]));
	}

	/* Fatal system error: reset the chip and start over. */
	if (status & SIS_ISR_SYSERR) {
		sis_reset(sc);
		sis_init(sc);
	}

	/*
	 * XXX: Re-enable RX engine every time otherwise it occasionally
	 * stops under unknown circumstances.
	 */
	SIS_SETBIT(sc, SIS_CSR, SIS_CSR_RX_ENABLE);

	/* TX completions may have freed ring space; restart output. */
	if (!IFQ_IS_EMPTY(&ifp->if_snd))
		sis_start(ifp);

	return (1);
}
1528 
1529 /*
1530  * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
1531  * pointers to the fragment pointers.
1532  */
1533 int
1534 sis_encap(struct sis_softc *sc, struct mbuf *m_head, u_int32_t *txidx)
1535 {
1536 	struct sis_desc		*f = NULL;
1537 	int			frag, cur, i;
1538 	bus_dmamap_t		map;
1539 
1540 	map = sc->sc_tx_sparemap;
1541 	if (bus_dmamap_load_mbuf(sc->sc_dmat, map,
1542 	    m_head, BUS_DMA_NOWAIT) != 0)
1543 		return (ENOBUFS);
1544 
1545 	/*
1546  	 * Start packing the mbufs in this chain into
1547 	 * the fragment pointers. Stop when we run out
1548  	 * of fragments or hit the end of the mbuf chain.
1549 	 */
1550 	cur = frag = *txidx;
1551 
1552 	for (i = 0; i < map->dm_nsegs; i++) {
1553 		if ((SIS_TX_LIST_CNT - (sc->sis_cdata.sis_tx_cnt + i)) < 2)
1554 			return(ENOBUFS);
1555 		f = &sc->sis_ldata->sis_tx_list[frag];
1556 		f->sis_ctl = htole32(SIS_CMDSTS_MORE | map->dm_segs[i].ds_len);
1557 		f->sis_ptr = htole32(map->dm_segs[i].ds_addr);
1558 		if (i != 0)
1559 			f->sis_ctl |= htole32(SIS_CMDSTS_OWN);
1560 		cur = frag;
1561 		SIS_INC(frag, SIS_TX_LIST_CNT);
1562 	}
1563 
1564 	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
1565 	    BUS_DMASYNC_PREWRITE);
1566 
1567 	sc->sis_ldata->sis_tx_list[cur].sis_mbuf = m_head;
1568 	sc->sis_ldata->sis_tx_list[cur].sis_ctl &= ~htole32(SIS_CMDSTS_MORE);
1569 	sc->sis_ldata->sis_tx_list[*txidx].sis_ctl |= htole32(SIS_CMDSTS_OWN);
1570 	sc->sis_cdata.sis_tx_cnt += i;
1571 	*txidx = frag;
1572 
1573 	bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
1574 	    offsetof(struct sis_list_data, sis_tx_list[0]),
1575 	    sizeof(struct sis_desc) * SIS_TX_LIST_CNT,
1576 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1577 
1578 	return (0);
1579 }
1580 
1581 /*
1582  * Main transmit routine. To avoid having to do mbuf copies, we put pointers
1583  * to the mbuf data regions directly in the transmit lists. We also save a
1584  * copy of the pointers since the transmit list fragment pointers are
1585  * physical addresses.
1586  */
1587 
1588 void
1589 sis_start(struct ifnet *ifp)
1590 {
1591 	struct sis_softc	*sc;
1592 	struct mbuf		*m_head = NULL;
1593 	u_int32_t		idx, queued = 0;
1594 
1595 	sc = ifp->if_softc;
1596 
1597 	if (!sc->sis_link)
1598 		return;
1599 
1600 	idx = sc->sis_cdata.sis_tx_prod;
1601 
1602 	if (ifp->if_flags & IFF_OACTIVE)
1603 		return;
1604 
1605 	while(sc->sis_ldata->sis_tx_list[idx].sis_mbuf == NULL) {
1606 		IFQ_POLL(&ifp->if_snd, m_head);
1607 		if (m_head == NULL)
1608 			break;
1609 
1610 		if (sis_encap(sc, m_head, &idx)) {
1611 			ifp->if_flags |= IFF_OACTIVE;
1612 			break;
1613 		}
1614 
1615 		/* now we are committed to transmit the packet */
1616 		IFQ_DEQUEUE(&ifp->if_snd, m_head);
1617 
1618 		queued++;
1619 
1620 		/*
1621 		 * If there's a BPF listener, bounce a copy of this frame
1622 		 * to him.
1623 		 */
1624 #if NBPFILTER > 0
1625 		if (ifp->if_bpf)
1626 			bpf_mtap(ifp->if_bpf, m_head, BPF_DIRECTION_OUT);
1627 #endif
1628 	}
1629 
1630 	if (queued) {
1631 		/* Transmit */
1632 		sc->sis_cdata.sis_tx_prod = idx;
1633 		SIS_SETBIT(sc, SIS_CSR, SIS_CSR_TX_ENABLE);
1634 
1635 		/*
1636 		 * Set a timeout in case the chip goes out to lunch.
1637 		 */
1638 		ifp->if_timer = 5;
1639 	}
1640 }
1641 
/*
 * Stop, reconfigure and (re)start the chip: program the station address,
 * rebuild the descriptor rings, set up the receive filter, apply
 * media-dependent TX/RX configuration and chip-revision errata
 * workarounds, then enable interrupts and the receiver.
 */
void
sis_init(void *xsc)
{
	struct sis_softc	*sc = (struct sis_softc *)xsc;
	struct ifnet		*ifp = &sc->arpcom.ac_if;
	struct mii_data		*mii;
	int			s;

	s = splnet();

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	sis_stop(sc);

#if NS_IHR_DELAY > 0
	/* Configure interrupt holdoff register. */
	if (sc->sis_type == SIS_TYPE_83815 && sc->sis_srr == NS_SRR_16A)
		CSR_WRITE_4(sc, NS_IHR, NS_IHR_VALUE);
#endif

	mii = &sc->sc_mii;

	/*
	 * Set MAC address, written 16 bits at a time through the filter
	 * control/data register pair.  The NS and SiS parts use different
	 * filter address strobes.
	 */
	if (sc->sis_type == SIS_TYPE_83815) {
		CSR_WRITE_4(sc, SIS_RXFILT_CTL, NS_FILTADDR_PAR0);
		CSR_WRITE_4(sc, SIS_RXFILT_DATA,
		    htole16(((u_int16_t *)sc->arpcom.ac_enaddr)[0]));
		CSR_WRITE_4(sc, SIS_RXFILT_CTL, NS_FILTADDR_PAR1);
		CSR_WRITE_4(sc, SIS_RXFILT_DATA,
		    htole16(((u_int16_t *)sc->arpcom.ac_enaddr)[1]));
		CSR_WRITE_4(sc, SIS_RXFILT_CTL, NS_FILTADDR_PAR2);
		CSR_WRITE_4(sc, SIS_RXFILT_DATA,
		    htole16(((u_int16_t *)sc->arpcom.ac_enaddr)[2]));
	} else {
		CSR_WRITE_4(sc, SIS_RXFILT_CTL, SIS_FILTADDR_PAR0);
		CSR_WRITE_4(sc, SIS_RXFILT_DATA,
		    htole16(((u_int16_t *)sc->arpcom.ac_enaddr)[0]));
		CSR_WRITE_4(sc, SIS_RXFILT_CTL, SIS_FILTADDR_PAR1);
		CSR_WRITE_4(sc, SIS_RXFILT_DATA,
		    htole16(((u_int16_t *)sc->arpcom.ac_enaddr)[1]));
		CSR_WRITE_4(sc, SIS_RXFILT_CTL, SIS_FILTADDR_PAR2);
		CSR_WRITE_4(sc, SIS_RXFILT_DATA,
		    htole16(((u_int16_t *)sc->arpcom.ac_enaddr)[2]));
	}

	/* Init circular TX/RX lists. */
	if (sis_ring_init(sc) != 0) {
		printf("%s: initialization failed: no memory for rx buffers\n",
		    sc->sc_dev.dv_xname);
		sis_stop(sc);
		splx(s);
		return;
	}

	/*
	 * Short Cable Receive Errors (MP21.E)
	 * also: Page 78 of the DP83815 data sheet (september 2002 version)
	 * recommends the following register settings "for optimum
	 * performance." for rev 15C.  The driver from NS also sets
	 * the PHY_CR register for later versions.
	 */
	 if (sc->sis_type == SIS_TYPE_83815 && sc->sis_srr <= NS_SRR_15D) {
		CSR_WRITE_4(sc, NS_PHY_PAGE, 0x0001);
		CSR_WRITE_4(sc, NS_PHY_CR, 0x189C);
		if (sc->sis_srr == NS_SRR_15C) {
			/* set val for c2 */
			CSR_WRITE_4(sc, NS_PHY_TDATA, 0x0000);
			/* load/kill c2 */
			CSR_WRITE_4(sc, NS_PHY_DSPCFG, 0x5040);
			/* raise SD offset, from 4 to c */
			CSR_WRITE_4(sc, NS_PHY_SDCFG, 0x008C);
		}
		CSR_WRITE_4(sc, NS_PHY_PAGE, 0);
	}

	/*
	 * Program promiscuous mode and multicast filters.
	 */
	sis_iff(sc);

	/* Turn the receive filter on */
	SIS_SETBIT(sc, SIS_RXFILT_CTL, SIS_RXFILTCTL_ENABLE);

	/*
	 * Load the address of the RX and TX lists.
	 */
	CSR_WRITE_4(sc, SIS_RX_LISTPTR, sc->sc_listmap->dm_segs[0].ds_addr +
	    offsetof(struct sis_list_data, sis_rx_list[0]));
	CSR_WRITE_4(sc, SIS_TX_LISTPTR, sc->sc_listmap->dm_segs[0].ds_addr +
	    offsetof(struct sis_list_data, sis_tx_list[0]));

	/* SIS_CFG_EDB_MASTER_EN indicates the EDB bus is used instead of
	 * the PCI bus. When this bit is set, the Max DMA Burst Size
	 * for TX/RX DMA should be no larger than 16 double words.
	 */
	if (CSR_READ_4(sc, SIS_CFG) & SIS_CFG_EDB_MASTER_EN)
		CSR_WRITE_4(sc, SIS_RX_CFG, SIS_RXCFG64);
	else
		CSR_WRITE_4(sc, SIS_RX_CFG, SIS_RXCFG256);

	/* Accept Long Packets for VLAN support */
	SIS_SETBIT(sc, SIS_RX_CFG, SIS_RXCFG_RX_JABBER);

	/* Set TX configuration */
	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_10_T)
		CSR_WRITE_4(sc, SIS_TX_CFG, SIS_TXCFG_10);
	else
		CSR_WRITE_4(sc, SIS_TX_CFG, SIS_TXCFG_100);

	/* Set full/half duplex mode. */
	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
		SIS_SETBIT(sc, SIS_TX_CFG,
		    (SIS_TXCFG_IGN_HBEAT|SIS_TXCFG_IGN_CARR));
		SIS_SETBIT(sc, SIS_RX_CFG, SIS_RXCFG_RX_TXPKTS);
	} else {
		SIS_CLRBIT(sc, SIS_TX_CFG,
		    (SIS_TXCFG_IGN_HBEAT|SIS_TXCFG_IGN_CARR));
		SIS_CLRBIT(sc, SIS_RX_CFG, SIS_RXCFG_RX_TXPKTS);
	}

	if (sc->sis_type == SIS_TYPE_83815 && sc->sis_srr >= NS_SRR_16A) {
		/*
		 * MPII03.D: Half Duplex Excessive Collisions.
		 * Also page 49 in 83816 manual
		 */
		SIS_SETBIT(sc, SIS_TX_CFG, SIS_TXCFG_MPII03D);
 	}

	if (sc->sis_type == SIS_TYPE_83815 && sc->sis_srr < NS_SRR_16A &&
	     IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX) {
		uint32_t reg;

		/*
		 * Short Cable Receive Errors (MP21.E)
		 */
		CSR_WRITE_4(sc, NS_PHY_PAGE, 0x0001);
		reg = CSR_READ_4(sc, NS_PHY_DSPCFG) & 0xfff;
		CSR_WRITE_4(sc, NS_PHY_DSPCFG, reg | 0x1000);
		DELAY(100000);
		reg = CSR_READ_4(sc, NS_PHY_TDATA) & 0xff;
		if ((reg & 0x0080) == 0 || (reg > 0xd8 && reg <= 0xff)) {
#ifdef DEBUG
			printf("%s: Applying short cable fix (reg=%x)\n",
			    sc->sc_dev.dv_xname, reg);
#endif
			CSR_WRITE_4(sc, NS_PHY_TDATA, 0x00e8);
			reg = CSR_READ_4(sc, NS_PHY_DSPCFG);
			SIS_SETBIT(sc, NS_PHY_DSPCFG, reg | 0x20);
		}
		CSR_WRITE_4(sc, NS_PHY_PAGE, 0);
	}

	/*
	 * Enable interrupts.
	 */
	CSR_WRITE_4(sc, SIS_IMR, SIS_INTRS);
	CSR_WRITE_4(sc, SIS_IER, 1);

	/* Enable receiver and transmitter. */
	SIS_CLRBIT(sc, SIS_CSR, SIS_CSR_TX_DISABLE|SIS_CSR_RX_DISABLE);
	SIS_SETBIT(sc, SIS_CSR, SIS_CSR_RX_ENABLE);

#ifdef notdef
	mii_mediachg(mii);
#endif

	sc->sis_stopped = 0;
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	splx(s);

	/* Start the one-second MII tick. */
	timeout_add_sec(&sc->sis_timeout, 1);
}
1817 
1818 /*
1819  * Set media options.
1820  */
1821 int
1822 sis_ifmedia_upd(struct ifnet *ifp)
1823 {
1824 	struct sis_softc	*sc;
1825 	struct mii_data		*mii;
1826 
1827 	sc = ifp->if_softc;
1828 
1829 	mii = &sc->sc_mii;
1830 	sc->sis_link = 0;
1831 	if (mii->mii_instance) {
1832 		struct mii_softc	*miisc;
1833 		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
1834 			mii_phy_reset(miisc);
1835 	}
1836 	mii_mediachg(mii);
1837 
1838 	return (0);
1839 }
1840 
1841 /*
1842  * Report current media status.
1843  */
1844 void
1845 sis_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
1846 {
1847 	struct sis_softc	*sc;
1848 	struct mii_data		*mii;
1849 
1850 	sc = ifp->if_softc;
1851 
1852 	mii = &sc->sc_mii;
1853 	mii_pollstat(mii);
1854 	ifmr->ifm_active = mii->mii_media_active;
1855 	ifmr->ifm_status = mii->mii_media_status;
1856 }
1857 
1858 int
1859 sis_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
1860 {
1861 	struct sis_softc	*sc = ifp->if_softc;
1862 	struct ifaddr		*ifa = (struct ifaddr *) data;
1863 	struct ifreq		*ifr = (struct ifreq *) data;
1864 	struct mii_data		*mii;
1865 	int			s, error = 0;
1866 
1867 	s = splnet();
1868 
1869 	switch(command) {
1870 	case SIOCSIFADDR:
1871 		ifp->if_flags |= IFF_UP;
1872 		if (!(ifp->if_flags & IFF_RUNNING))
1873 			sis_init(sc);
1874 #ifdef INET
1875 		if (ifa->ifa_addr->sa_family == AF_INET)
1876 			arp_ifinit(&sc->arpcom, ifa);
1877 #endif
1878 		break;
1879 
1880 	case SIOCSIFFLAGS:
1881 		if (ifp->if_flags & IFF_UP) {
1882 			if (ifp->if_flags & IFF_RUNNING)
1883 				error = ENETRESET;
1884 			else
1885 				sis_init(sc);
1886 		} else {
1887 			if (ifp->if_flags & IFF_RUNNING)
1888 				sis_stop(sc);
1889 		}
1890 		break;
1891 
1892 	case SIOCGIFMEDIA:
1893 	case SIOCSIFMEDIA:
1894 		mii = &sc->sc_mii;
1895 		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
1896 		break;
1897 
1898 	default:
1899 		error = ether_ioctl(ifp, &sc->arpcom, command, data);
1900 	}
1901 
1902 	if (error == ENETRESET) {
1903 		if (ifp->if_flags & IFF_RUNNING)
1904 			sis_iff(sc);
1905 		error = 0;
1906 	}
1907 
1908 	splx(s);
1909 	return(error);
1910 }
1911 
1912 void
1913 sis_watchdog(struct ifnet *ifp)
1914 {
1915 	struct sis_softc	*sc;
1916 	int			s;
1917 
1918 	sc = ifp->if_softc;
1919 
1920 	if (sc->sis_stopped)
1921 		return;
1922 
1923 	ifp->if_oerrors++;
1924 	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);
1925 
1926 	s = splnet();
1927 	sis_stop(sc);
1928 	sis_reset(sc);
1929 	sis_init(sc);
1930 
1931 	if (!IFQ_IS_EMPTY(&ifp->if_snd))
1932 		sis_start(ifp);
1933 
1934 	splx(s);
1935 }
1936 
1937 /*
1938  * Stop the adapter and free any mbufs allocated to the
1939  * RX and TX lists.
1940  */
1941 void
1942 sis_stop(struct sis_softc *sc)
1943 {
1944 	int			i;
1945 	struct ifnet		*ifp;
1946 
1947 	if (sc->sis_stopped)
1948 		return;
1949 
1950 	ifp = &sc->arpcom.ac_if;
1951 	ifp->if_timer = 0;
1952 
1953 	timeout_del(&sc->sis_timeout);
1954 
1955 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1956 	sc->sis_stopped = 1;
1957 
1958 	CSR_WRITE_4(sc, SIS_IER, 0);
1959 	CSR_WRITE_4(sc, SIS_IMR, 0);
1960 	CSR_READ_4(sc, SIS_ISR); /* clear any interrupts already pending */
1961 	SIS_SETBIT(sc, SIS_CSR, SIS_CSR_TX_DISABLE|SIS_CSR_RX_DISABLE);
1962 	DELAY(1000);
1963 	CSR_WRITE_4(sc, SIS_TX_LISTPTR, 0);
1964 	CSR_WRITE_4(sc, SIS_RX_LISTPTR, 0);
1965 
1966 	sc->sis_link = 0;
1967 
1968 	/*
1969 	 * Free data in the RX lists.
1970 	 */
1971 	for (i = 0; i < SIS_RX_LIST_CNT; i++) {
1972 		if (sc->sis_ldata->sis_rx_list[i].map->dm_nsegs != 0) {
1973 			bus_dmamap_t map = sc->sis_ldata->sis_rx_list[i].map;
1974 
1975 			bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
1976 			    BUS_DMASYNC_POSTREAD);
1977 			bus_dmamap_unload(sc->sc_dmat, map);
1978 		}
1979 		if (sc->sis_ldata->sis_rx_list[i].sis_mbuf != NULL) {
1980 			m_freem(sc->sis_ldata->sis_rx_list[i].sis_mbuf);
1981 			sc->sis_ldata->sis_rx_list[i].sis_mbuf = NULL;
1982 		}
1983 		bzero(&sc->sis_ldata->sis_rx_list[i],
1984 		    sizeof(struct sis_desc) - sizeof(bus_dmamap_t));
1985 	}
1986 
1987 	/*
1988 	 * Free the TX list buffers.
1989 	 */
1990 	for (i = 0; i < SIS_TX_LIST_CNT; i++) {
1991 		if (sc->sis_ldata->sis_tx_list[i].map->dm_nsegs != 0) {
1992 			bus_dmamap_t map = sc->sis_ldata->sis_tx_list[i].map;
1993 
1994 			bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
1995 			    BUS_DMASYNC_POSTWRITE);
1996 			bus_dmamap_unload(sc->sc_dmat, map);
1997 		}
1998 		if (sc->sis_ldata->sis_tx_list[i].sis_mbuf != NULL) {
1999 			m_freem(sc->sis_ldata->sis_tx_list[i].sis_mbuf);
2000 			sc->sis_ldata->sis_tx_list[i].sis_mbuf = NULL;
2001 		}
2002 		bzero(&sc->sis_ldata->sis_tx_list[i],
2003 		    sizeof(struct sis_desc) - sizeof(bus_dmamap_t));
2004 	}
2005 }
2006