1 /*	$OpenBSD: if_sis.c,v 1.139 2020/12/12 11:48:53 jan Exp $ */
2 /*
3  * Copyright (c) 1997, 1998, 1999
4  *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  * 3. All advertising materials mentioning features or use of this software
15  *    must display the following acknowledgement:
16  *	This product includes software developed by Bill Paul.
17  * 4. Neither the name of the author nor the names of any co-contributors
18  *    may be used to endorse or promote products derived from this software
19  *    without specific prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
22  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
25  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
31  * THE POSSIBILITY OF SUCH DAMAGE.
32  *
33  * $FreeBSD: src/sys/pci/if_sis.c,v 1.30 2001/02/06 10:11:47 phk Exp $
34  */
35 
36 /*
37  * SiS 900/SiS 7016 fast ethernet PCI NIC driver. Datasheets are
38  * available from http://www.sis.com.tw.
39  *
40  * This driver also supports the NatSemi DP83815. Datasheets are
41  * available from http://www.national.com.
42  *
43  * Written by Bill Paul <wpaul@ee.columbia.edu>
44  * Electrical Engineering Department
45  * Columbia University, New York City
46  */
47 
48 /*
49  * The SiS 900 is a fairly simple chip. It uses bus master DMA with
50  * simple TX and RX descriptors of 3 longwords in size. The receiver
51  * has a single perfect filter entry for the station address and a
52  * 128-bit multicast hash table. The SiS 900 has a built-in MII-based
53  * transceiver while the 7016 requires an external transceiver chip.
54  * Both chips offer the standard bit-bang MII interface as well as
55  * an enhanced PHY interface which simplifies accessing MII registers.
56  *
57  * The only downside to this chipset is that RX descriptors must be
58  * longword aligned.
59  */
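/*
 * Illustrative sketch (not compiled in, and "sis_hw_desc" is a made-up name):
 * the hardware only looks at the first three little-endian longwords of each
 * descriptor -- a link to the next descriptor, a command/status word and a
 * buffer pointer.  The driver's struct sis_desc (if_sisreg.h) wraps these
 * with software-only bookkeeping such as the mbuf pointer and DMA map.
 */
#if 0
struct sis_hw_desc {
	u_int32_t	hw_next;	/* physical address of next descriptor */
	u_int32_t	hw_cmdsts;	/* OWN bit, length and status flags */
	u_int32_t	hw_ptr;		/* physical address of the data buffer */
};
#endif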
60 
61 #include "bpfilter.h"
62 
63 #include <sys/param.h>
64 #include <sys/systm.h>
65 #include <sys/mbuf.h>
66 #include <sys/protosw.h>
67 #include <sys/socket.h>
68 #include <sys/ioctl.h>
69 #include <sys/errno.h>
70 #include <sys/malloc.h>
71 #include <sys/kernel.h>
72 #include <sys/timeout.h>
73 
74 #include <net/if.h>
75 
76 #include <netinet/in.h>
77 #include <netinet/if_ether.h>
78 
79 #include <net/if_media.h>
80 
81 #if NBPFILTER > 0
82 #include <net/bpf.h>
83 #endif
84 
85 #include <sys/device.h>
86 
87 #include <dev/mii/miivar.h>
88 
89 #include <dev/pci/pcireg.h>
90 #include <dev/pci/pcivar.h>
91 #include <dev/pci/pcidevs.h>
92 
93 #define SIS_USEIOSPACE
94 
95 #include <dev/pci/if_sisreg.h>
96 
97 int sis_probe(struct device *, void *, void *);
98 void sis_attach(struct device *, struct device *, void *);
99 int sis_activate(struct device *, int);
100 
101 struct cfattach sis_ca = {
102 	sizeof(struct sis_softc), sis_probe, sis_attach, NULL,
103 	sis_activate
104 };
105 
106 struct cfdriver sis_cd = {
107 	NULL, "sis", DV_IFNET
108 };
109 
110 int sis_intr(void *);
111 void sis_fill_rx_ring(struct sis_softc *);
112 int sis_newbuf(struct sis_softc *, struct sis_desc *);
113 int sis_encap(struct sis_softc *, struct mbuf *, u_int32_t *);
114 void sis_rxeof(struct sis_softc *);
115 void sis_txeof(struct sis_softc *);
116 void sis_tick(void *);
117 void sis_start(struct ifnet *);
118 int sis_ioctl(struct ifnet *, u_long, caddr_t);
119 void sis_init(void *);
120 void sis_stop(struct sis_softc *);
121 void sis_watchdog(struct ifnet *);
122 int sis_ifmedia_upd(struct ifnet *);
123 void sis_ifmedia_sts(struct ifnet *, struct ifmediareq *);
124 
125 u_int16_t sis_reverse(u_int16_t);
126 void sis_delay(struct sis_softc *);
127 void sis_eeprom_idle(struct sis_softc *);
128 void sis_eeprom_putbyte(struct sis_softc *, int);
129 void sis_eeprom_getword(struct sis_softc *, int, u_int16_t *);
130 #if defined(__amd64__) || defined(__i386__)
131 void sis_read_cmos(struct sis_softc *, struct pci_attach_args *, caddr_t, int, int);
132 #endif
133 void sis_read_mac(struct sis_softc *, struct pci_attach_args *);
134 void sis_read_eeprom(struct sis_softc *, caddr_t, int, int, int);
135 void sis_read96x_mac(struct sis_softc *);
136 
137 void sis_mii_sync(struct sis_softc *);
138 void sis_mii_send(struct sis_softc *, u_int32_t, int);
139 int sis_mii_readreg(struct sis_softc *, struct sis_mii_frame *);
140 int sis_mii_writereg(struct sis_softc *, struct sis_mii_frame *);
141 int sis_miibus_readreg(struct device *, int, int);
142 void sis_miibus_writereg(struct device *, int, int, int);
143 void sis_miibus_statchg(struct device *);
144 
145 u_int32_t sis_mchash(struct sis_softc *, const uint8_t *);
146 void sis_iff(struct sis_softc *);
147 void sis_iff_ns(struct sis_softc *);
148 void sis_iff_sis(struct sis_softc *);
149 void sis_reset(struct sis_softc *);
150 int sis_ring_init(struct sis_softc *);
151 
152 #define SIS_SETBIT(sc, reg, x)				\
153 	CSR_WRITE_4(sc, reg,				\
154 		CSR_READ_4(sc, reg) | (x))
155 
156 #define SIS_CLRBIT(sc, reg, x)				\
157 	CSR_WRITE_4(sc, reg,				\
158 		CSR_READ_4(sc, reg) & ~(x))
159 
160 #define SIO_SET(x)					\
161 	CSR_WRITE_4(sc, SIS_EECTL, CSR_READ_4(sc, SIS_EECTL) | x)
162 
163 #define SIO_CLR(x)					\
164 	CSR_WRITE_4(sc, SIS_EECTL, CSR_READ_4(sc, SIS_EECTL) & ~x)
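/*
 * Usage sketch (not compiled in): the helpers above are plain
 * read-modify-write wrappers around the memory/I/O mapped registers.
 */
#if 0
	SIS_SETBIT(sc, SIS_CSR, SIS_CSR_RX_ENABLE);	/* CSR |= RX_ENABLE */
	SIS_CLRBIT(sc, SIS_CSR, SIS_CSR_RX_ENABLE);	/* CSR &= ~RX_ENABLE */
	SIO_SET(SIS_EECTL_CLK);		/* EECTL |= CLK (EEPROM/MII bit-banging) */
	SIO_CLR(SIS_EECTL_CLK);		/* EECTL &= ~CLK */
#endif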
165 
166 const struct pci_matchid sis_devices[] = {
167 	{ PCI_VENDOR_SIS, PCI_PRODUCT_SIS_900 },
168 	{ PCI_VENDOR_SIS, PCI_PRODUCT_SIS_7016 },
169 	{ PCI_VENDOR_NS, PCI_PRODUCT_NS_DP83815 }
170 };
171 
172 /*
173  * Routine to reverse the bits in a word. Stolen almost
174  * verbatim from /usr/games/fortune.
175  */
176 u_int16_t
177 sis_reverse(u_int16_t n)
178 {
179 	n = ((n >>  1) & 0x5555) | ((n <<  1) & 0xaaaa);
180 	n = ((n >>  2) & 0x3333) | ((n <<  2) & 0xcccc);
181 	n = ((n >>  4) & 0x0f0f) | ((n <<  4) & 0xf0f0);
182 	n = ((n >>  8) & 0x00ff) | ((n <<  8) & 0xff00);
183 
184 	return (n);
185 }
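/*
 * Worked example (not compiled in): each step swaps progressively larger
 * groups of bits, so a single set bit walks to the mirrored position, e.g.
 * 0x0001 -> 0x0002 -> 0x0008 -> 0x0080 -> 0x8000.  sis_attach() uses this
 * to undo the bit-reversed MAC address words read from the NatSemi EEPROM.
 */
#if 0
	u_int16_t r = sis_reverse(0x0001);	/* r == 0x8000 */
#endif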
186 
187 void
188 sis_delay(struct sis_softc *sc)
189 {
190 	int			idx;
191 
192 	for (idx = (300 / 33) + 1; idx > 0; idx--)
193 		CSR_READ_4(sc, SIS_CSR);
194 }
195 
196 void
197 sis_eeprom_idle(struct sis_softc *sc)
198 {
199 	int			i;
200 
201 	SIO_SET(SIS_EECTL_CSEL);
202 	sis_delay(sc);
203 	SIO_SET(SIS_EECTL_CLK);
204 	sis_delay(sc);
205 
206 	for (i = 0; i < 25; i++) {
207 		SIO_CLR(SIS_EECTL_CLK);
208 		sis_delay(sc);
209 		SIO_SET(SIS_EECTL_CLK);
210 		sis_delay(sc);
211 	}
212 
213 	SIO_CLR(SIS_EECTL_CLK);
214 	sis_delay(sc);
215 	SIO_CLR(SIS_EECTL_CSEL);
216 	sis_delay(sc);
217 	CSR_WRITE_4(sc, SIS_EECTL, 0x00000000);
218 }
219 
220 /*
221  * Send a read command and address to the EEPROM.
222  */
223 void
224 sis_eeprom_putbyte(struct sis_softc *sc, int addr)
225 {
226 	int			d, i;
227 
228 	d = addr | SIS_EECMD_READ;
229 
230 	/*
231 	 * Feed in each bit and strobe the clock.
232 	 */
233 	for (i = 0x400; i; i >>= 1) {
234 		if (d & i)
235 			SIO_SET(SIS_EECTL_DIN);
236 		else
237 			SIO_CLR(SIS_EECTL_DIN);
238 		sis_delay(sc);
239 		SIO_SET(SIS_EECTL_CLK);
240 		sis_delay(sc);
241 		SIO_CLR(SIS_EECTL_CLK);
242 		sis_delay(sc);
243 	}
244 }
245 
246 /*
247  * Read a word of data stored in the EEPROM at address 'addr.'
248  */
249 void
250 sis_eeprom_getword(struct sis_softc *sc, int addr, u_int16_t *dest)
251 {
252 	int			i;
253 	u_int16_t		word = 0;
254 
255 	/* Force EEPROM to idle state. */
256 	sis_eeprom_idle(sc);
257 
258 	/* Enter EEPROM access mode. */
259 	sis_delay(sc);
260 	SIO_CLR(SIS_EECTL_CLK);
261 	sis_delay(sc);
262 	SIO_SET(SIS_EECTL_CSEL);
263 	sis_delay(sc);
264 
265 	/*
266 	 * Send address of word we want to read.
267 	 */
268 	sis_eeprom_putbyte(sc, addr);
269 
270 	/*
271 	 * Start reading bits from EEPROM.
272 	 */
273 	for (i = 0x8000; i; i >>= 1) {
274 		SIO_SET(SIS_EECTL_CLK);
275 		sis_delay(sc);
276 		if (CSR_READ_4(sc, SIS_EECTL) & SIS_EECTL_DOUT)
277 			word |= i;
278 		sis_delay(sc);
279 		SIO_CLR(SIS_EECTL_CLK);
280 		sis_delay(sc);
281 	}
282 
283 	/* Turn off EEPROM access mode. */
284 	sis_eeprom_idle(sc);
285 
286 	*dest = word;
287 }
288 
289 /*
290  * Read a sequence of words from the EEPROM.
291  */
292 void
293 sis_read_eeprom(struct sis_softc *sc, caddr_t dest,
294     int off, int cnt, int swap)
295 {
296 	int			i;
297 	u_int16_t		word = 0, *ptr;
298 
299 	for (i = 0; i < cnt; i++) {
300 		sis_eeprom_getword(sc, off + i, &word);
301 		ptr = (u_int16_t *)(dest + (i * 2));
302 		if (swap)
303 			*ptr = letoh16(word);
304 		else
305 			*ptr = word;
306 	}
307 }
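/*
 * Usage sketch, as done in sis_attach() below: the station address is
 * stored as three consecutive EEPROM words starting at SIS_EE_NODEADDR;
 * a non-zero swap argument stores each word in little-endian byte order.
 */
#if 0
	sis_read_eeprom(sc, (caddr_t)&sc->arpcom.ac_enaddr,
	    SIS_EE_NODEADDR, 3, 1);
#endif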
308 
309 #if defined(__amd64__) || defined(__i386__)
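/*
 * The APC CMOS RAM is reached through the standard RTC index/data ports
 * (0x70/0x71) once access has been enabled by setting bit 0x40 in PCI
 * config register 0x48 of the device.
 */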
310 void
311 sis_read_cmos(struct sis_softc *sc, struct pci_attach_args *pa,
312     caddr_t dest, int off, int cnt)
313 {
314 	u_int32_t reg;
315 	int i;
316 
317 	reg = pci_conf_read(pa->pa_pc, pa->pa_tag, 0x48);
318 	pci_conf_write(pa->pa_pc, pa->pa_tag, 0x48, reg | 0x40);
319 
320 	for (i = 0; i < cnt; i++) {
321 		bus_space_write_1(pa->pa_iot, 0x0, 0x70, i + off);
322 		*(dest + i) = bus_space_read_1(pa->pa_iot, 0x0, 0x71);
323 	}
324 
325 	pci_conf_write(pa->pa_pc, pa->pa_tag, 0x48, reg & ~0x40);
326 }
327 #endif
328 
329 void
330 sis_read_mac(struct sis_softc *sc, struct pci_attach_args *pa)
331 {
332 	uint32_t rxfilt, csrsave;
333 	u_int16_t *enaddr = (u_int16_t *) &sc->arpcom.ac_enaddr;
334 
335 	rxfilt = CSR_READ_4(sc, SIS_RXFILT_CTL);
336 	csrsave = CSR_READ_4(sc, SIS_CSR);
337 
338 	CSR_WRITE_4(sc, SIS_CSR, SIS_CSR_RELOAD | csrsave);
339 	CSR_WRITE_4(sc, SIS_CSR, 0);
340 
341 	CSR_WRITE_4(sc, SIS_RXFILT_CTL, rxfilt & ~SIS_RXFILTCTL_ENABLE);
342 
343 	CSR_WRITE_4(sc, SIS_RXFILT_CTL, SIS_FILTADDR_PAR0);
344 	enaddr[0] = letoh16(CSR_READ_4(sc, SIS_RXFILT_DATA) & 0xffff);
345 	CSR_WRITE_4(sc, SIS_RXFILT_CTL, SIS_FILTADDR_PAR1);
346 	enaddr[1] = letoh16(CSR_READ_4(sc, SIS_RXFILT_DATA) & 0xffff);
347 	CSR_WRITE_4(sc, SIS_RXFILT_CTL, SIS_FILTADDR_PAR2);
348 	enaddr[2] = letoh16(CSR_READ_4(sc, SIS_RXFILT_DATA) & 0xffff);
349 
350 	CSR_WRITE_4(sc, SIS_RXFILT_CTL, rxfilt);
351 	CSR_WRITE_4(sc, SIS_CSR, csrsave);
352 }
353 
354 void
355 sis_read96x_mac(struct sis_softc *sc)
356 {
357 	int i;
358 
359 	SIO_SET(SIS96x_EECTL_REQ);
360 
361 	for (i = 0; i < 2000; i++) {
362 		if ((CSR_READ_4(sc, SIS_EECTL) & SIS96x_EECTL_GNT)) {
363 			sis_read_eeprom(sc, (caddr_t)&sc->arpcom.ac_enaddr,
364 			    SIS_EE_NODEADDR, 3, 1);
365 			break;
366 		} else
367 			DELAY(1);
368 	}
369 
370 	SIO_SET(SIS96x_EECTL_DONE);
371 }
372 
373 /*
374  * Sync the PHYs by setting data bit and strobing the clock 32 times.
375  */
376 void
377 sis_mii_sync(struct sis_softc *sc)
378 {
379 	int			i;
380 
381  	SIO_SET(SIS_MII_DIR|SIS_MII_DATA);
382 
383  	for (i = 0; i < 32; i++) {
384  		SIO_SET(SIS_MII_CLK);
385  		DELAY(1);
386  		SIO_CLR(SIS_MII_CLK);
387  		DELAY(1);
388  	}
389 }
390 
391 /*
392  * Clock a series of bits through the MII.
393  */
394 void
395 sis_mii_send(struct sis_softc *sc, u_int32_t bits, int cnt)
396 {
397 	int			i;
398 
399 	SIO_CLR(SIS_MII_CLK);
400 
401 	for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
402 		if (bits & i)
403 			SIO_SET(SIS_MII_DATA);
404 		else
405 			SIO_CLR(SIS_MII_DATA);
406 		DELAY(1);
407 		SIO_CLR(SIS_MII_CLK);
408 		DELAY(1);
409 		SIO_SET(SIS_MII_CLK);
410 	}
411 }
412 
413 /*
414  * Read a PHY register through the MII.
415  */
416 int
417 sis_mii_readreg(struct sis_softc *sc, struct sis_mii_frame *frame)
418 {
419 	int			i, ack, s;
420 
421 	s = splnet();
422 
423 	/*
424 	 * Set up frame for RX.
425 	 */
426 	frame->mii_stdelim = SIS_MII_STARTDELIM;
427 	frame->mii_opcode = SIS_MII_READOP;
428 	frame->mii_turnaround = 0;
429 	frame->mii_data = 0;
430 
431 	/*
432  	 * Turn on data xmit.
433 	 */
434 	SIO_SET(SIS_MII_DIR);
435 
436 	sis_mii_sync(sc);
437 
438 	/*
439 	 * Send command/address info.
440 	 */
441 	sis_mii_send(sc, frame->mii_stdelim, 2);
442 	sis_mii_send(sc, frame->mii_opcode, 2);
443 	sis_mii_send(sc, frame->mii_phyaddr, 5);
444 	sis_mii_send(sc, frame->mii_regaddr, 5);
445 
446 	/* Idle bit */
447 	SIO_CLR((SIS_MII_CLK|SIS_MII_DATA));
448 	DELAY(1);
449 	SIO_SET(SIS_MII_CLK);
450 	DELAY(1);
451 
452 	/* Turn off xmit. */
453 	SIO_CLR(SIS_MII_DIR);
454 
455 	/* Check for ack */
456 	SIO_CLR(SIS_MII_CLK);
457 	DELAY(1);
458 	ack = CSR_READ_4(sc, SIS_EECTL) & SIS_MII_DATA;
459 	SIO_SET(SIS_MII_CLK);
460 	DELAY(1);
461 
462 	/*
463 	 * Now try reading data bits. If the ack failed, we still
464 	 * need to clock through 16 cycles to keep the PHY(s) in sync.
465 	 */
466 	if (ack) {
467 		for(i = 0; i < 16; i++) {
468 			SIO_CLR(SIS_MII_CLK);
469 			DELAY(1);
470 			SIO_SET(SIS_MII_CLK);
471 			DELAY(1);
472 		}
473 		goto fail;
474 	}
475 
476 	for (i = 0x8000; i; i >>= 1) {
477 		SIO_CLR(SIS_MII_CLK);
478 		DELAY(1);
479 		if (!ack) {
480 			if (CSR_READ_4(sc, SIS_EECTL) & SIS_MII_DATA)
481 				frame->mii_data |= i;
482 			DELAY(1);
483 		}
484 		SIO_SET(SIS_MII_CLK);
485 		DELAY(1);
486 	}
487 
488 fail:
489 
490 	SIO_CLR(SIS_MII_CLK);
491 	DELAY(1);
492 	SIO_SET(SIS_MII_CLK);
493 	DELAY(1);
494 
495 	splx(s);
496 
497 	if (ack)
498 		return (1);
499 	return (0);
500 }
501 
502 /*
503  * Write to a PHY register through the MII.
504  */
505 int
506 sis_mii_writereg(struct sis_softc *sc, struct sis_mii_frame *frame)
507 {
508 	int			s;
509 
510 	s = splnet();
511  	/*
512  	 * Set up frame for TX.
513  	 */
514 
515  	frame->mii_stdelim = SIS_MII_STARTDELIM;
516  	frame->mii_opcode = SIS_MII_WRITEOP;
517  	frame->mii_turnaround = SIS_MII_TURNAROUND;
518 
519  	/*
520   	 * Turn on data output.
521  	 */
522  	SIO_SET(SIS_MII_DIR);
523 
524  	sis_mii_sync(sc);
525 
526  	sis_mii_send(sc, frame->mii_stdelim, 2);
527  	sis_mii_send(sc, frame->mii_opcode, 2);
528  	sis_mii_send(sc, frame->mii_phyaddr, 5);
529  	sis_mii_send(sc, frame->mii_regaddr, 5);
530  	sis_mii_send(sc, frame->mii_turnaround, 2);
531  	sis_mii_send(sc, frame->mii_data, 16);
532 
533  	/* Idle bit. */
534  	SIO_SET(SIS_MII_CLK);
535  	DELAY(1);
536  	SIO_CLR(SIS_MII_CLK);
537  	DELAY(1);
538 
539  	/*
540  	 * Turn off xmit.
541  	 */
542  	SIO_CLR(SIS_MII_DIR);
543 
544  	splx(s);
545 
546  	return (0);
547 }
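/*
 * For reference, sis_mii_readreg() and sis_mii_writereg() above bit-bang a
 * standard 32-bit IEEE 802.3 clause 22 management frame:
 *
 *   <2 start> <2 opcode> <5 phy addr> <5 reg addr> <2 turnaround> <16 data>
 *
 * On reads the data direction is released around the turnaround so the PHY
 * can drive the remaining bits back to the host.  The sketch below (not
 * compiled in; phy, reg and data are placeholders) just shows the layout
 * packed MSB-first, the way sis_mii_send() shifts it out serially.
 */
#if 0
	u_int32_t frame = (SIS_MII_STARTDELIM << 30) | (SIS_MII_WRITEOP << 28) |
	    (phy << 23) | (reg << 18) | (SIS_MII_TURNAROUND << 16) | (data & 0xffff);
#endif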
548 
549 int
550 sis_miibus_readreg(struct device *self, int phy, int reg)
551 {
552 	struct sis_softc	*sc = (struct sis_softc *)self;
553 	struct sis_mii_frame    frame;
554 
555 	if (sc->sis_type == SIS_TYPE_83815) {
556 		if (phy != 0)
557 			return (0);
558 		/*
559 		 * The NatSemi chip can take a while after
560 		 * a reset to come ready, during which the BMSR
561 		 * returns a value of 0. This is *never* supposed
562 		 * to happen: some of the BMSR bits are meant to
563 		 * be hardwired in the on position, and this can
564 		 * confuse the miibus code a bit during the probe
565 		 * and attach phase. So we make an effort to check
566 		 * for this condition and wait for it to clear.
567 		 */
568 		if (!CSR_READ_4(sc, NS_BMSR))
569 			DELAY(1000);
570 		return CSR_READ_4(sc, NS_BMCR + (reg * 4));
571 	}
572 
573 	/*
574 	 * Chipsets older than SIS_635 seem unable to read or
575 	 * write through MDIO.  Use the enhanced PHY access
576 	 * register for them instead.
577 	 */
578 	if (sc->sis_type == SIS_TYPE_900 &&
579 	    sc->sis_rev < SIS_REV_635) {
580 		int i, val = 0;
581 
582 		if (phy != 0)
583 			return (0);
584 
585 		CSR_WRITE_4(sc, SIS_PHYCTL,
586 		    (phy << 11) | (reg << 6) | SIS_PHYOP_READ);
587 		SIS_SETBIT(sc, SIS_PHYCTL, SIS_PHYCTL_ACCESS);
588 
589 		for (i = 0; i < SIS_TIMEOUT; i++) {
590 			if (!(CSR_READ_4(sc, SIS_PHYCTL) & SIS_PHYCTL_ACCESS))
591 				break;
592 		}
593 
594 		if (i == SIS_TIMEOUT) {
595 			printf("%s: PHY failed to come ready\n",
596 			    sc->sc_dev.dv_xname);
597 			return (0);
598 		}
599 
600 		val = (CSR_READ_4(sc, SIS_PHYCTL) >> 16) & 0xFFFF;
601 
602 		if (val == 0xFFFF)
603 			return (0);
604 
605 		return (val);
606 	} else {
607 		bzero(&frame, sizeof(frame));
608 
609 		frame.mii_phyaddr = phy;
610 		frame.mii_regaddr = reg;
611 		sis_mii_readreg(sc, &frame);
612 
613 		return (frame.mii_data);
614 	}
615 }
616 
617 void
618 sis_miibus_writereg(struct device *self, int phy, int reg, int data)
619 {
620 	struct sis_softc	*sc = (struct sis_softc *)self;
621 	struct sis_mii_frame	frame;
622 
623 	if (sc->sis_type == SIS_TYPE_83815) {
624 		if (phy != 0)
625 			return;
626 		CSR_WRITE_4(sc, NS_BMCR + (reg * 4), data);
627 		return;
628 	}
629 
630 	/*
631 	 * Chipsets older than SIS_635 seem unable to read or
632 	 * write through MDIO.  Use the enhanced PHY access
633 	 * register for them instead.
634 	 */
635 	if (sc->sis_type == SIS_TYPE_900 &&
636 	    sc->sis_rev < SIS_REV_635) {
637 		int i;
638 
639 		if (phy != 0)
640 			return;
641 
642 		CSR_WRITE_4(sc, SIS_PHYCTL, (data << 16) | (phy << 11) |
643 		    (reg << 6) | SIS_PHYOP_WRITE);
644 		SIS_SETBIT(sc, SIS_PHYCTL, SIS_PHYCTL_ACCESS);
645 
646 		for (i = 0; i < SIS_TIMEOUT; i++) {
647 			if (!(CSR_READ_4(sc, SIS_PHYCTL) & SIS_PHYCTL_ACCESS))
648 				break;
649 		}
650 
651 		if (i == SIS_TIMEOUT)
652 			printf("%s: PHY failed to come ready\n",
653 			    sc->sc_dev.dv_xname);
654 	} else {
655 		bzero(&frame, sizeof(frame));
656 
657 		frame.mii_phyaddr = phy;
658 		frame.mii_regaddr = reg;
659 		frame.mii_data = data;
660 		sis_mii_writereg(sc, &frame);
661 	}
662 }
663 
664 void
665 sis_miibus_statchg(struct device *self)
666 {
667 	struct sis_softc	*sc = (struct sis_softc *)self;
668 	struct ifnet		*ifp = &sc->arpcom.ac_if;
669 	struct mii_data		*mii = &sc->sc_mii;
670 
671 	if ((ifp->if_flags & IFF_RUNNING) == 0)
672 		return;
673 
674 	sc->sis_link = 0;
675 	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
676 	    (IFM_ACTIVE | IFM_AVALID)) {
677 		switch (IFM_SUBTYPE(mii->mii_media_active)) {
678 		case IFM_10_T:
679 			CSR_WRITE_4(sc, SIS_TX_CFG, SIS_TXCFG_10);
680 			sc->sis_link++;
681 			break;
682 		case IFM_100_TX:
683 			CSR_WRITE_4(sc, SIS_TX_CFG, SIS_TXCFG_100);
684 			sc->sis_link++;
685 			break;
686 		default:
687 			break;
688 		}
689 	}
690 
691 	if (!sc->sis_link) {
692 		/*
693 		 * Stopping the MACs seems to reset SIS_TX_LISTPTR and
694 		 * SIS_RX_LISTPTR, which in turn requires resetting the
695 		 * TX/RX buffers.  So just do nothing when the link is
696 		 * lost.
697 		 */
698 		return;
699 	}
700 
701 	/* Set full/half duplex mode. */
702 	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
703 		SIS_SETBIT(sc, SIS_TX_CFG,
704 		    (SIS_TXCFG_IGN_HBEAT | SIS_TXCFG_IGN_CARR));
705 		SIS_SETBIT(sc, SIS_RX_CFG, SIS_RXCFG_RX_TXPKTS);
706 	} else {
707 		SIS_CLRBIT(sc, SIS_TX_CFG,
708 		    (SIS_TXCFG_IGN_HBEAT | SIS_TXCFG_IGN_CARR));
709 		SIS_CLRBIT(sc, SIS_RX_CFG, SIS_RXCFG_RX_TXPKTS);
710 	}
711 
712 	if (sc->sis_type == SIS_TYPE_83815 && sc->sis_srr >= NS_SRR_16A) {
713 		/*
714 		 * MPII03.D: Half Duplex Excessive Collisions.
715 		 * Also page 49 in 83816 manual
716 		 */
717 		SIS_SETBIT(sc, SIS_TX_CFG, SIS_TXCFG_MPII03D);
718 	}
719 
720 	/*
721 	 * Some DP83815s experience problems when used with short
722 	 * (< 30m/100ft) Ethernet cables in 100baseTX mode.  This
723 	 * sequence adjusts the DSP's signal attenuation to fix the
724 	 * problem.
725 	 */
726 	if (sc->sis_type == SIS_TYPE_83815 && sc->sis_srr < NS_SRR_16A &&
727 	    IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX) {
728 		uint32_t reg;
729 
730 		CSR_WRITE_4(sc, NS_PHY_PAGE, 0x0001);
731 		reg = CSR_READ_4(sc, NS_PHY_DSPCFG) & 0xfff;
732 		CSR_WRITE_4(sc, NS_PHY_DSPCFG, reg | 0x1000);
733 		DELAY(100);
734 		reg = CSR_READ_4(sc, NS_PHY_TDATA) & 0xff;
735 		if ((reg & 0x0080) == 0 || (reg > 0xd8 && reg <= 0xff)) {
736 #ifdef DEBUG
737 			printf("%s: Applying short cable fix (reg=%x)\n",
738 			    sc->sc_dev.dv_xname, reg);
739 #endif
740 			CSR_WRITE_4(sc, NS_PHY_TDATA, 0x00e8);
741 			SIS_SETBIT(sc, NS_PHY_DSPCFG, 0x20);
742 		}
743 		CSR_WRITE_4(sc, NS_PHY_PAGE, 0);
744 	}
745 	/* Enable TX/RX MACs. */
746 	SIS_CLRBIT(sc, SIS_CSR, SIS_CSR_TX_DISABLE | SIS_CSR_RX_DISABLE);
747 	SIS_SETBIT(sc, SIS_CSR, SIS_CSR_TX_ENABLE | SIS_CSR_RX_ENABLE);
748 }
749 
750 u_int32_t
751 sis_mchash(struct sis_softc *sc, const uint8_t *addr)
752 {
753 	uint32_t		crc;
754 
755 	/* Compute CRC for the address value. */
756 	crc = ether_crc32_be(addr, ETHER_ADDR_LEN);
757 
758 	/*
759 	 * Return the filter bit position.
760 	 *
761 	 * The NatSemi chip has a 512-bit filter, which is
762 	 * different from the SiS parts, so we special-case it.
763 	 */
764 	if (sc->sis_type == SIS_TYPE_83815)
765 		return (crc >> 23);
766 	else if (sc->sis_rev >= SIS_REV_635 ||
767 	    sc->sis_rev == SIS_REV_900B)
768 		return (crc >> 24);
769 	else
770 		return (crc >> 25);
771 }
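/*
 * Worked example: the hash comes from the top bits of the big-endian CRC,
 * so the shift follows the table size -- a 512-bit table needs 9 index bits
 * (crc >> 23), a 256-bit table needs 8 (crc >> 24) and a 128-bit table
 * needs 7 (crc >> 25).  sis_iff_sis() below then uses the value like this
 * (sketch, not compiled in):
 */
#if 0
	h = sis_mchash(sc, enm->enm_addrlo);
	hashes[h >> 4] |= 1 << (h & 0xf);	/* word h >> 4, bit h & 0xf */
#endif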
772 
773 void
774 sis_iff(struct sis_softc *sc)
775 {
776 	if (sc->sis_type == SIS_TYPE_83815)
777 		sis_iff_ns(sc);
778 	else
779 		sis_iff_sis(sc);
780 }
781 
782 void
783 sis_iff_ns(struct sis_softc *sc)
784 {
785 	struct ifnet		*ifp = &sc->arpcom.ac_if;
786 	struct arpcom		*ac = &sc->arpcom;
787 	struct ether_multi	*enm;
788 	struct ether_multistep  step;
789 	u_int32_t		h = 0, i, rxfilt;
790 	int			bit, index;
791 
792 	rxfilt = CSR_READ_4(sc, SIS_RXFILT_CTL);
793 	if (rxfilt & SIS_RXFILTCTL_ENABLE) {
794 		/*
795 		 * Filter should be disabled to program other bits.
796 		 */
797 		CSR_WRITE_4(sc, SIS_RXFILT_CTL, rxfilt & ~SIS_RXFILTCTL_ENABLE);
798 		CSR_READ_4(sc, SIS_RXFILT_CTL);
799 	}
800 	rxfilt &= ~(SIS_RXFILTCTL_ALLMULTI | SIS_RXFILTCTL_ALLPHYS |
801 	    NS_RXFILTCTL_ARP | SIS_RXFILTCTL_BROAD | NS_RXFILTCTL_MCHASH |
802 	    NS_RXFILTCTL_PERFECT);
803 	ifp->if_flags &= ~IFF_ALLMULTI;
804 
805 	/*
806 	 * Always accept ARP frames.
807 	 * Always accept broadcast frames.
808 	 * Always accept frames destined to our station address.
809 	 */
810 	rxfilt |= NS_RXFILTCTL_ARP | SIS_RXFILTCTL_BROAD |
811 	    NS_RXFILTCTL_PERFECT;
812 
813 	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
814 		ifp->if_flags |= IFF_ALLMULTI;
815 		rxfilt |= SIS_RXFILTCTL_ALLMULTI;
816 		if (ifp->if_flags & IFF_PROMISC)
817 			rxfilt |= SIS_RXFILTCTL_ALLPHYS;
818 	} else {
819 		/*
820 		 * We have to explicitly enable the multicast hash table
821 		 * on the NatSemi chip if we want to use it, which we do.
822 		 */
823 		rxfilt |= NS_RXFILTCTL_MCHASH;
824 
825 		/* first, zot all the existing hash bits */
826 		for (i = 0; i < 32; i++) {
827 			CSR_WRITE_4(sc, SIS_RXFILT_CTL, NS_FILTADDR_FMEM_LO + (i * 2));
828 			CSR_WRITE_4(sc, SIS_RXFILT_DATA, 0);
829 		}
830 
831 		ETHER_FIRST_MULTI(step, ac, enm);
832 		while (enm != NULL) {
833 			h = sis_mchash(sc, enm->enm_addrlo);
834 
835 			index = h >> 3;
836 			bit = h & 0x1F;
837 
838 			CSR_WRITE_4(sc, SIS_RXFILT_CTL, NS_FILTADDR_FMEM_LO + index);
839 
840 			if (bit > 0xF)
841 				bit -= 0x10;
842 
843 			SIS_SETBIT(sc, SIS_RXFILT_DATA, (1 << bit));
844 
845 			ETHER_NEXT_MULTI(step, enm);
846 		}
847 	}
848 
849 	CSR_WRITE_4(sc, SIS_RXFILT_CTL, rxfilt);
850 	/* Turn the receive filter on. */
851 	CSR_WRITE_4(sc, SIS_RXFILT_CTL, rxfilt | SIS_RXFILTCTL_ENABLE);
852 	CSR_READ_4(sc, SIS_RXFILT_CTL);
853 }
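/*
 * Worked example for the hash programming above: with h = 0x155,
 * index = h >> 3 = 0x2a and bit = (h & 0x1f) = 0x15, which the
 * "bit -= 0x10" adjustment reduces to 5.  The write therefore sets bit 5
 * of the 16-bit filter word at NS_FILTADDR_FMEM_LO + 0x2a, i.e. bit
 * (h & 0xf) of word (h >> 4) in the 512-bit table.
 */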
854 
855 void
856 sis_iff_sis(struct sis_softc *sc)
857 {
858 	struct ifnet		*ifp = &sc->arpcom.ac_if;
859 	struct arpcom		*ac = &sc->arpcom;
860 	struct ether_multi	*enm;
861 	struct ether_multistep	step;
862 	u_int32_t		h, i, maxmulti, rxfilt;
863 	u_int16_t		hashes[16];
864 
865 	/* hash table size */
866 	if (sc->sis_rev >= SIS_REV_635 ||
867 	    sc->sis_rev == SIS_REV_900B)
868 		maxmulti = 16;
869 	else
870 		maxmulti = 8;
871 
872 	rxfilt = CSR_READ_4(sc, SIS_RXFILT_CTL);
873 	if (rxfilt & SIS_RXFILTCTL_ENABLE) {
874 		/*
875 		 * Filter should be disabled to program other bits.
876 		 */
877 		CSR_WRITE_4(sc, SIS_RXFILT_CTL, rxfilt & ~SIS_RXFILTCTL_ENABLE);
878 		CSR_READ_4(sc, SIS_RXFILT_CTL);
879 	}
880 	rxfilt &= ~(SIS_RXFILTCTL_ALLMULTI | SIS_RXFILTCTL_ALLPHYS |
881 	    SIS_RXFILTCTL_BROAD);
882 	ifp->if_flags &= ~IFF_ALLMULTI;
883 
884 	/*
885 	 * Always accept broadcast frames.
886 	 */
887 	rxfilt |= SIS_RXFILTCTL_BROAD;
888 
889 	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0 ||
890 	    ac->ac_multicnt > maxmulti) {
891 		ifp->if_flags |= IFF_ALLMULTI;
892 		rxfilt |= SIS_RXFILTCTL_ALLMULTI;
893 		if (ifp->if_flags & IFF_PROMISC)
894 			rxfilt |= SIS_RXFILTCTL_ALLPHYS;
895 
896 		for (i = 0; i < maxmulti; i++)
897 			hashes[i] = ~0;
898 	} else {
899 		for (i = 0; i < maxmulti; i++)
900 			hashes[i] = 0;
901 
902 		ETHER_FIRST_MULTI(step, ac, enm);
903 		while (enm != NULL) {
904 			h = sis_mchash(sc, enm->enm_addrlo);
905 
906 			hashes[h >> 4] |= 1 << (h & 0xf);
907 
908 			ETHER_NEXT_MULTI(step, enm);
909 		}
910 	}
911 
912 	for (i = 0; i < maxmulti; i++) {
913 		CSR_WRITE_4(sc, SIS_RXFILT_CTL, (4 + i) << 16);
914 		CSR_WRITE_4(sc, SIS_RXFILT_DATA, hashes[i]);
915 	}
916 
917 	CSR_WRITE_4(sc, SIS_RXFILT_CTL, rxfilt);
918 	/* Turn the receive filter on. */
919 	CSR_WRITE_4(sc, SIS_RXFILT_CTL, rxfilt | SIS_RXFILTCTL_ENABLE);
920 	CSR_READ_4(sc, SIS_RXFILT_CTL);
921 }
922 
923 void
924 sis_reset(struct sis_softc *sc)
925 {
926 	int			i;
927 
928 	SIS_SETBIT(sc, SIS_CSR, SIS_CSR_RESET);
929 
930 	for (i = 0; i < SIS_TIMEOUT; i++) {
931 		if (!(CSR_READ_4(sc, SIS_CSR) & SIS_CSR_RESET))
932 			break;
933 	}
934 
935 	if (i == SIS_TIMEOUT)
936 		printf("%s: reset never completed\n", sc->sc_dev.dv_xname);
937 
938 	/* Wait a little while for the chip to get its brains in order. */
939 	DELAY(1000);
940 
941 	/*
942 	 * If this is a NatSemi chip, make sure to clear
943 	 * PME mode.
944 	 */
945 	if (sc->sis_type == SIS_TYPE_83815) {
946 		CSR_WRITE_4(sc, NS_CLKRUN, NS_CLKRUN_PMESTS);
947 		CSR_WRITE_4(sc, NS_CLKRUN, 0);
948 	}
949 }
950 
951 /*
952  * Probe for an SiS chip. Check the PCI vendor and device
953  * IDs against our list and return a match if we find one.
954  */
955 int
956 sis_probe(struct device *parent, void *match, void *aux)
957 {
958 	return (pci_matchbyid((struct pci_attach_args *)aux, sis_devices,
959 	    nitems(sis_devices)));
960 }
961 
962 /*
963  * Attach the interface. Allocate softc structures, do ifmedia
964  * setup and ethernet/BPF attach.
965  */
966 void
967 sis_attach(struct device *parent, struct device *self, void *aux)
968 {
969 	int			i;
970 	const char		*intrstr = NULL;
971 	struct sis_softc	*sc = (struct sis_softc *)self;
972 	struct pci_attach_args	*pa = aux;
973 	pci_chipset_tag_t	pc = pa->pa_pc;
974 	pci_intr_handle_t	ih;
975 	struct ifnet		*ifp;
976 	bus_size_t		size;
977 
978 	sc->sis_stopped = 1;
979 
980 	pci_set_powerstate(pa->pa_pc, pa->pa_tag, PCI_PMCSR_STATE_D0);
981 
982 	/*
983 	 * Map control/status registers.
984 	 */
985 
986 #ifdef SIS_USEIOSPACE
987 	if (pci_mapreg_map(pa, SIS_PCI_LOIO, PCI_MAPREG_TYPE_IO, 0,
988 	    &sc->sis_btag, &sc->sis_bhandle, NULL, &size, 0)) {
989 		printf(": can't map i/o space\n");
990 		return;
991  	}
992 #else
993 	if (pci_mapreg_map(pa, SIS_PCI_LOMEM, PCI_MAPREG_TYPE_MEM, 0,
994 	    &sc->sis_btag, &sc->sis_bhandle, NULL, &size, 0)) {
995  		printf(": can't map mem space\n");
996 		return;
997  	}
998 #endif
999 
1000 	/* Allocate interrupt */
1001 	if (pci_intr_map(pa, &ih)) {
1002 		printf(": couldn't map interrupt\n");
1003 		goto fail_1;
1004 	}
1005 	intrstr = pci_intr_string(pc, ih);
1006 	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, sis_intr, sc,
1007 	    self->dv_xname);
1008 	if (sc->sc_ih == NULL) {
1009 		printf(": couldn't establish interrupt");
1010 		if (intrstr != NULL)
1011 			printf(" at %s", intrstr);
1012 		printf("\n");
1013 		goto fail_1;
1014 	}
1015 
1016 	switch (PCI_PRODUCT(pa->pa_id)) {
1017 	case PCI_PRODUCT_SIS_900:
1018 		sc->sis_type = SIS_TYPE_900;
1019 		break;
1020 	case PCI_PRODUCT_SIS_7016:
1021 		sc->sis_type = SIS_TYPE_7016;
1022 		break;
1023 	case PCI_PRODUCT_NS_DP83815:
1024 		sc->sis_type = SIS_TYPE_83815;
1025 		break;
1026 	default:
1027 		break;
1028 	}
1029 	sc->sis_rev = PCI_REVISION(pa->pa_class);
1030 
1031 	/* Reset the adapter. */
1032 	sis_reset(sc);
1033 
1034 	if (sc->sis_type == SIS_TYPE_900 &&
1035 	   (sc->sis_rev == SIS_REV_635 ||
1036 	    sc->sis_rev == SIS_REV_900B)) {
1037 		SIO_SET(SIS_CFG_RND_CNT);
1038 		SIO_SET(SIS_CFG_PERR_DETECT);
1039 	}
1040 
1041 	/*
1042 	 * Get station address from the EEPROM.
1043 	 */
1044 	switch (PCI_VENDOR(pa->pa_id)) {
1045 	case PCI_VENDOR_NS:
1046 		sc->sis_srr = CSR_READ_4(sc, NS_SRR);
1047 
1048 		if (sc->sis_srr == NS_SRR_15C)
1049 			printf(", DP83815C");
1050 		else if (sc->sis_srr == NS_SRR_15D)
1051 			printf(", DP83815D");
1052 		else if (sc->sis_srr == NS_SRR_16A)
1053 			printf(", DP83816A");
1054 		else
1055 			printf(", srr %x", sc->sis_srr);
1056 
1057 		/*
1058 		 * Reading the MAC address out of the EEPROM on
1059 		 * the NatSemi chip takes a bit more work than
1060 		 * you'd expect. The address spans 4 16-bit words,
1061 		 * with the first word containing only a single bit.
1062 		 * You have to shift everything over one bit to
1063 		 * get it aligned properly. Also, the bits are
1064 		 * stored backwards (the LSB is really the MSB,
1065 		 * and so on) so you have to reverse them in order
1066 		 * to get the MAC address into the form we want.
1067 		 * Why? Who the hell knows.
1068 		 */
1069 		{
1070 			u_int16_t		tmp[4];
1071 
1072 			sis_read_eeprom(sc, (caddr_t)&tmp, NS_EE_NODEADDR,
1073 			    4, 0);
1074 
1075 			/* Shift everything over one bit. */
1076 			tmp[3] = tmp[3] >> 1;
1077 			tmp[3] |= tmp[2] << 15;
1078 			tmp[2] = tmp[2] >> 1;
1079 			tmp[2] |= tmp[1] << 15;
1080 			tmp[1] = tmp[1] >> 1;
1081 			tmp[1] |= tmp[0] << 15;
1082 
1083 			/* Now reverse all the bits. */
1084 			tmp[3] = letoh16(sis_reverse(tmp[3]));
1085 			tmp[2] = letoh16(sis_reverse(tmp[2]));
1086 			tmp[1] = letoh16(sis_reverse(tmp[1]));
1087 
1088 			bcopy(&tmp[1], sc->arpcom.ac_enaddr,
1089 			    ETHER_ADDR_LEN);
1090 		}
1091 		break;
1092 	case PCI_VENDOR_SIS:
1093 	default:
1094 #if defined(__amd64__) || defined(__i386__)
1095 		/*
1096 		 * If this is a SiS 630E chipset with an embedded
1097 		 * SiS 900 controller, we have to read the MAC address
1098 		 * from the APC CMOS RAM. Our method for doing this
1099 		 * is very ugly since we have to reach out and grab
1100 		 * ahold of hardware for which we cannot properly
1101 		 * allocate resources. This code is only compiled on
1102 		 * the i386 and amd64 architectures, since the SiS 630E
1103 		 * chipset is found only on x86 boards. Note that there are
1104 		 * a lot of magic numbers in this hack. These are
1105 		 * taken from SiS's Linux driver. I'd like to replace
1106 		 * them with proper symbolic definitions, but that
1107 		 * requires some datasheets that I don't have access
1108 		 * to at the moment.
1109 		 */
1110 		if (sc->sis_rev == SIS_REV_630S ||
1111 		    sc->sis_rev == SIS_REV_630E)
1112 			sis_read_cmos(sc, pa, (caddr_t)&sc->arpcom.ac_enaddr,
1113 			    0x9, 6);
1114 		else
1115 #endif
1116 		if (sc->sis_rev == SIS_REV_96x)
1117 			sis_read96x_mac(sc);
1118 		else if (sc->sis_rev == SIS_REV_635 ||
1119 		    sc->sis_rev == SIS_REV_630ET ||
1120 		    sc->sis_rev == SIS_REV_630EA1)
1121 			sis_read_mac(sc, pa);
1122 		else
1123 			sis_read_eeprom(sc, (caddr_t)&sc->arpcom.ac_enaddr,
1124 			    SIS_EE_NODEADDR, 3, 1);
1125 		break;
1126 	}
1127 
1128 	printf(": %s, address %s\n", intrstr,
1129 	    ether_sprintf(sc->arpcom.ac_enaddr));
1130 
1131 	sc->sc_dmat = pa->pa_dmat;
1132 
1133 	if (bus_dmamem_alloc(sc->sc_dmat, sizeof(struct sis_list_data),
1134 	    PAGE_SIZE, 0, sc->sc_listseg, 1, &sc->sc_listnseg,
1135 	    BUS_DMA_NOWAIT | BUS_DMA_ZERO) != 0) {
1136 		printf(": can't alloc list mem\n");
1137 		goto fail_2;
1138 	}
1139 	if (bus_dmamem_map(sc->sc_dmat, sc->sc_listseg, sc->sc_listnseg,
1140 	    sizeof(struct sis_list_data), &sc->sc_listkva,
1141 	    BUS_DMA_NOWAIT) != 0) {
1142 		printf(": can't map list mem\n");
1143 		goto fail_2;
1144 	}
1145 	if (bus_dmamap_create(sc->sc_dmat, sizeof(struct sis_list_data), 1,
1146 	    sizeof(struct sis_list_data), 0, BUS_DMA_NOWAIT,
1147 	    &sc->sc_listmap) != 0) {
1148 		printf(": can't alloc list map\n");
1149 		goto fail_2;
1150 	}
1151 	if (bus_dmamap_load(sc->sc_dmat, sc->sc_listmap, sc->sc_listkva,
1152 	    sizeof(struct sis_list_data), NULL, BUS_DMA_NOWAIT) != 0) {
1153 		printf(": can't load list map\n");
1154 		goto fail_2;
1155 	}
1156 	sc->sis_ldata = (struct sis_list_data *)sc->sc_listkva;
1157 
1158 	for (i = 0; i < SIS_RX_LIST_CNT; i++) {
1159 		if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0,
1160 		    BUS_DMA_NOWAIT, &sc->sis_ldata->sis_rx_list[i].map) != 0) {
1161 			printf(": can't create rx map\n");
1162 			goto fail_2;
1163 		}
1164 	}
1165 
1166 	for (i = 0; i < SIS_TX_LIST_CNT; i++) {
1167 		if (bus_dmamap_create(sc->sc_dmat, MCLBYTES,
1168 		    SIS_MAXTXSEGS, MCLBYTES, 0, BUS_DMA_NOWAIT,
1169 		    &sc->sis_ldata->sis_tx_list[i].map) != 0) {
1170 			printf(": can't create tx map\n");
1171 			goto fail_2;
1172 		}
1173 	}
1174 	if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, SIS_MAXTXSEGS,
1175 	    MCLBYTES, 0, BUS_DMA_NOWAIT, &sc->sc_tx_sparemap) != 0) {
1176 		printf(": can't create tx spare map\n");
1177 		goto fail_2;
1178 	}
1179 
1180 	timeout_set(&sc->sis_timeout, sis_tick, sc);
1181 
1182 	ifp = &sc->arpcom.ac_if;
1183 	ifp->if_softc = sc;
1184 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1185 	ifp->if_ioctl = sis_ioctl;
1186 	ifp->if_start = sis_start;
1187 	ifp->if_watchdog = sis_watchdog;
1188 	ifq_set_maxlen(&ifp->if_snd, SIS_TX_LIST_CNT - 1);
1189 	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
1190 	ifp->if_hardmtu = 1518; /* determined experimentally on DP83815 */
1191 
1192 	ifp->if_capabilities = IFCAP_VLAN_MTU;
1193 
1194 	sc->sc_mii.mii_ifp = ifp;
1195 	sc->sc_mii.mii_readreg = sis_miibus_readreg;
1196 	sc->sc_mii.mii_writereg = sis_miibus_writereg;
1197 	sc->sc_mii.mii_statchg = sis_miibus_statchg;
1198 	ifmedia_init(&sc->sc_mii.mii_media, 0, sis_ifmedia_upd,sis_ifmedia_sts);
1199 	mii_attach(self, &sc->sc_mii, 0xffffffff, MII_PHY_ANY, MII_OFFSET_ANY,
1200 	    0);
1201 	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
1202 		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
1203 		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
1204 	} else
1205 		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
1206 
1207 	/*
1208 	 * Call MI attach routines.
1209 	 */
1210 	if_attach(ifp);
1211 	ether_ifattach(ifp);
1212 	return;
1213 
1214 fail_2:
1215 	pci_intr_disestablish(pc, sc->sc_ih);
1216 
1217 fail_1:
1218 	bus_space_unmap(sc->sis_btag, sc->sis_bhandle, size);
1219 }
1220 
1221 int
1222 sis_activate(struct device *self, int act)
1223 {
1224 	struct sis_softc *sc = (struct sis_softc *)self;
1225 	struct ifnet *ifp = &sc->arpcom.ac_if;
1226 	int rv = 0;
1227 
1228 	switch (act) {
1229 	case DVACT_SUSPEND:
1230 		if (ifp->if_flags & IFF_RUNNING)
1231 			sis_stop(sc);
1232 		rv = config_activate_children(self, act);
1233 		break;
1234 	case DVACT_RESUME:
1235 		if (ifp->if_flags & IFF_UP)
1236 			sis_init(sc);
1237 		break;
1238 	default:
1239 		rv = config_activate_children(self, act);
1240 		break;
1241 	}
1242 	return (rv);
1243 }
1244 
1245 /*
1246  * Initialize the TX and RX descriptors and allocate mbufs for them. Note that
1247  * we arrange the descriptors in a closed ring, so that the last descriptor
1248  * points back to the first.
1249  */
1250 int
1251 sis_ring_init(struct sis_softc *sc)
1252 {
1253 	struct sis_list_data	*ld;
1254 	struct sis_ring_data	*cd;
1255 	int			i, nexti;
1256 
1257 	cd = &sc->sis_cdata;
1258 	ld = sc->sis_ldata;
1259 
1260 	for (i = 0; i < SIS_TX_LIST_CNT; i++) {
1261 		if (i == (SIS_TX_LIST_CNT - 1))
1262 			nexti = 0;
1263 		else
1264 			nexti = i + 1;
1265 		ld->sis_tx_list[i].sis_nextdesc = &ld->sis_tx_list[nexti];
1266 		ld->sis_tx_list[i].sis_next =
1267 		    htole32(sc->sc_listmap->dm_segs[0].ds_addr +
1268 		      offsetof(struct sis_list_data, sis_tx_list[nexti]));
1269 		ld->sis_tx_list[i].sis_mbuf = NULL;
1270 		ld->sis_tx_list[i].sis_ptr = 0;
1271 		ld->sis_tx_list[i].sis_ctl = 0;
1272 	}
1273 
1274 	cd->sis_tx_prod = cd->sis_tx_cons = cd->sis_tx_cnt = 0;
1275 
1276 	for (i = 0; i < SIS_RX_LIST_CNT; i++) {
1277 		if (i == SIS_RX_LIST_CNT - 1)
1278 			nexti = 0;
1279 		else
1280 			nexti = i + 1;
1281 		ld->sis_rx_list[i].sis_nextdesc = &ld->sis_rx_list[nexti];
1282 		ld->sis_rx_list[i].sis_next =
1283 		    htole32(sc->sc_listmap->dm_segs[0].ds_addr +
1284 		      offsetof(struct sis_list_data, sis_rx_list[nexti]));
1285 		ld->sis_rx_list[i].sis_ctl = 0;
1286 	}
1287 
1288 	cd->sis_rx_prod = cd->sis_rx_cons = 0;
1289 	if_rxr_init(&cd->sis_rx_ring, 2, SIS_RX_LIST_CNT - 1);
1290 	sis_fill_rx_ring(sc);
1291 
1292 	return (0);
1293 }
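/*
 * Note on the physical "next" pointers above: the whole sis_list_data
 * structure is loaded into a single DMA segment, so the bus address of any
 * descriptor is simply the segment base plus its offset, e.g. for RX
 * descriptor i (sketch, not compiled in):
 */
#if 0
	bus_addr_t paddr = sc->sc_listmap->dm_segs[0].ds_addr +
	    offsetof(struct sis_list_data, sis_rx_list[i]);
#endif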
1294 
1295 void
1296 sis_fill_rx_ring(struct sis_softc *sc)
1297 {
1298 	struct sis_list_data    *ld;
1299 	struct sis_ring_data    *cd;
1300 	u_int			slots;
1301 
1302 	cd = &sc->sis_cdata;
1303 	ld = sc->sis_ldata;
1304 
1305 	for (slots = if_rxr_get(&cd->sis_rx_ring, SIS_RX_LIST_CNT);
1306 	    slots > 0; slots--) {
1307 		if (sis_newbuf(sc, &ld->sis_rx_list[cd->sis_rx_prod]))
1308 			break;
1309 
1310 		SIS_INC(cd->sis_rx_prod, SIS_RX_LIST_CNT);
1311 	}
1312 	if_rxr_put(&cd->sis_rx_ring, slots);
1313 }
1314 
1315 /*
1316  * Initialize an RX descriptor and attach an mbuf cluster.
1317  */
1318 int
1319 sis_newbuf(struct sis_softc *sc, struct sis_desc *c)
1320 {
1321 	struct mbuf		*m_new = NULL;
1322 
1323 	if (c == NULL)
1324 		return (EINVAL);
1325 
1326 	m_new = MCLGETL(NULL, M_DONTWAIT, MCLBYTES);
1327 	if (!m_new)
1328 		return (ENOBUFS);
1329 
1330 	m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
1331 
1332 	if (bus_dmamap_load_mbuf(sc->sc_dmat, c->map, m_new,
1333 	    BUS_DMA_NOWAIT)) {
1334 		m_free(m_new);
1335 		return (ENOBUFS);
1336 	}
1337 
1338 	bus_dmamap_sync(sc->sc_dmat, c->map, 0, c->map->dm_mapsize,
1339 	    BUS_DMASYNC_PREREAD);
1340 
1341 	c->sis_mbuf = m_new;
1342 	c->sis_ptr = htole32(c->map->dm_segs[0].ds_addr);
1343 
1344 	bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
1345 	    ((caddr_t)c - sc->sc_listkva), sizeof(struct sis_desc),
1346 	    BUS_DMASYNC_PREWRITE);
1347 
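	/*
	 * The size field in the command/status word tells the chip how
	 * many bytes it may DMA into this cluster.
	 */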
1348 	c->sis_ctl = htole32(ETHER_MAX_DIX_LEN);
1349 
1350 	bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
1351 	    ((caddr_t)c - sc->sc_listkva), sizeof(struct sis_desc),
1352 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1353 
1354 	return (0);
1355 }
1356 
1357 /*
1358  * A frame has been uploaded: pass the resulting mbuf chain up to
1359  * the higher level protocols.
1360  */
1361 void
1362 sis_rxeof(struct sis_softc *sc)
1363 {
1364 	struct mbuf_list	ml = MBUF_LIST_INITIALIZER();
1365 	struct mbuf		*m;
1366 	struct ifnet		*ifp;
1367 	struct sis_desc		*cur_rx;
1368 	int			total_len = 0;
1369 	u_int32_t		rxstat;
1370 
1371 	ifp = &sc->arpcom.ac_if;
1372 
1373 	while (if_rxr_inuse(&sc->sis_cdata.sis_rx_ring) > 0) {
1374 		cur_rx = &sc->sis_ldata->sis_rx_list[sc->sis_cdata.sis_rx_cons];
1375 		bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
1376 		    ((caddr_t)cur_rx - sc->sc_listkva),
1377 		    sizeof(struct sis_desc),
1378 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1379 		if (!SIS_OWNDESC(cur_rx))
1380 			break;
1381 
1382 		rxstat = letoh32(cur_rx->sis_rxstat);
1383 		m = cur_rx->sis_mbuf;
1384 		cur_rx->sis_mbuf = NULL;
1385 		total_len = SIS_RXBYTES(cur_rx);
1386 		/* from here on the buffer is consumed */
1387 		SIS_INC(sc->sis_cdata.sis_rx_cons, SIS_RX_LIST_CNT);
1388 		if_rxr_put(&sc->sis_cdata.sis_rx_ring, 1);
1389 
1390 		/*
1391 		 * DP83816A sometimes produces zero-length packets
1392 		 * shortly after initialisation.
1393 		 */
1394 		if (total_len == 0) {
1395 			m_freem(m);
1396 			continue;
1397 		}
1398 
1399 		/* The ethernet CRC is always included */
1400 		total_len -= ETHER_CRC_LEN;
1401 
1402 		/*
1403 		 * If an error occurs, update stats, clear the
1404 		 * status word and leave the mbuf cluster in place:
1405 		 * it should simply get re-used next time this descriptor
1406 	 	 * comes up in the ring. However, don't report long
1407 		 * frames as errors since they could be VLANs.
1408 		 */
1409 		if (rxstat & SIS_RXSTAT_GIANT &&
1410 		    total_len <= (ETHER_MAX_DIX_LEN - ETHER_CRC_LEN))
1411 			rxstat &= ~SIS_RXSTAT_GIANT;
1412 		if (SIS_RXSTAT_ERROR(rxstat)) {
1413 			ifp->if_ierrors++;
1414 			if (rxstat & SIS_RXSTAT_COLL)
1415 				ifp->if_collisions++;
1416 			m_freem(m);
1417 			continue;
1418 		}
1419 
1420 		/* No errors; receive the packet. */
1421 		bus_dmamap_sync(sc->sc_dmat, cur_rx->map, 0,
1422 		    cur_rx->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1423 #ifdef __STRICT_ALIGNMENT
1424 		/*
1425 		 * On architectures with strict alignment requirements,
1426 		 * copy the packet into a fresh, properly aligned mbuf
1427 		 * chain with m_devget() (the DMA buffer cannot be offset
1428 		 * to align the IP header) and free the original cluster.
1429 		 * On other architectures, pass the existing cluster up
1430 		 * as-is; sis_fill_rx_ring() will allocate a replacement
1431 		 * later.
1432 		 */
1433 		{
1434 			struct mbuf *m0;
1435 			m0 = m_devget(mtod(m, char *), total_len, ETHER_ALIGN);
1436 			m_freem(m);
1437 			if (m0 == NULL) {
1438 				ifp->if_ierrors++;
1439 				continue;
1440 			}
1441 			m = m0;
1442 		}
1443 #else
1444 		m->m_pkthdr.len = m->m_len = total_len;
1445 #endif
1446 
1447 		ml_enqueue(&ml, m);
1448 	}
1449 
1450 	if (ifiq_input(&ifp->if_rcv, &ml))
1451 		if_rxr_livelocked(&sc->sis_cdata.sis_rx_ring);
1452 
1453 	sis_fill_rx_ring(sc);
1454 }
1455 
1456 /*
1457  * A frame was downloaded to the chip. It's safe for us to clean up
1458  * the list buffers.
1459  */
1460 
1461 void
1462 sis_txeof(struct sis_softc *sc)
1463 {
1464 	struct ifnet		*ifp;
1465 	u_int32_t		idx, ctl, txstat;
1466 
1467 	ifp = &sc->arpcom.ac_if;
1468 
1469 	/*
1470 	 * Go through our tx list and free mbufs for those
1471 	 * frames that have been transmitted.
1472 	 */
1473 	for (idx = sc->sis_cdata.sis_tx_cons; sc->sis_cdata.sis_tx_cnt > 0;
1474 	    sc->sis_cdata.sis_tx_cnt--, SIS_INC(idx, SIS_TX_LIST_CNT)) {
1475 		struct sis_desc *cur_tx = &sc->sis_ldata->sis_tx_list[idx];
1476 
1477 		bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
1478 		    ((caddr_t)cur_tx - sc->sc_listkva),
1479 		    sizeof(struct sis_desc),
1480 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1481 
1482 		if (SIS_OWNDESC(cur_tx))
1483 			break;
1484 
1485 		ctl = letoh32(cur_tx->sis_ctl);
1486 
1487 		if (ctl & SIS_CMDSTS_MORE)
1488 			continue;
1489 
1490 		txstat = letoh32(cur_tx->sis_txstat);
1491 
1492 		if (!(ctl & SIS_CMDSTS_PKT_OK)) {
1493 			ifp->if_oerrors++;
1494 			if (txstat & SIS_TXSTAT_EXCESSCOLLS)
1495 				ifp->if_collisions++;
1496 			if (txstat & SIS_TXSTAT_OUTOFWINCOLL)
1497 				ifp->if_collisions++;
1498 		}
1499 
1500 		ifp->if_collisions += (txstat & SIS_TXSTAT_COLLCNT) >> 16;
1501 
1502 		if (cur_tx->map->dm_nsegs != 0) {
1503 			bus_dmamap_t map = cur_tx->map;
1504 
1505 			bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
1506 			    BUS_DMASYNC_POSTWRITE);
1507 			bus_dmamap_unload(sc->sc_dmat, map);
1508 		}
1509 		if (cur_tx->sis_mbuf != NULL) {
1510 			m_freem(cur_tx->sis_mbuf);
1511 			cur_tx->sis_mbuf = NULL;
1512 		}
1513 	}
1514 
1515 	if (idx != sc->sis_cdata.sis_tx_cons) {
1516 		/* we freed up some buffers */
1517 		sc->sis_cdata.sis_tx_cons = idx;
1518 		ifq_clr_oactive(&ifp->if_snd);
1519 	}
1520 
1521 	ifp->if_timer = (sc->sis_cdata.sis_tx_cnt == 0) ? 0 : 5;
1522 }
1523 
1524 void
1525 sis_tick(void *xsc)
1526 {
1527 	struct sis_softc	*sc = (struct sis_softc *)xsc;
1528 	struct mii_data		*mii;
1529 	int			s;
1530 
1531 	s = splnet();
1532 
1533 	mii = &sc->sc_mii;
1534 	mii_tick(mii);
1535 
1536 	if (!sc->sis_link)
1537 		sis_miibus_statchg(&sc->sc_dev);
1538 
1539 	timeout_add_sec(&sc->sis_timeout, 1);
1540 
1541 	splx(s);
1542 }
1543 
1544 int
1545 sis_intr(void *arg)
1546 {
1547 	struct sis_softc	*sc = arg;
1548 	struct ifnet		*ifp = &sc->arpcom.ac_if;
1549 	u_int32_t		status;
1550 
1551 	if (sc->sis_stopped)	/* Most likely shared interrupt */
1552 		return (0);
1553 
1554 	/* Reading the ISR register clears all interrupts. */
1555 	status = CSR_READ_4(sc, SIS_ISR);
1556 	if ((status & SIS_INTRS) == 0)
1557 		return (0);
1558 
1559 	if (status &
1560 	    (SIS_ISR_TX_DESC_OK | SIS_ISR_TX_ERR |
1561 	     SIS_ISR_TX_OK | SIS_ISR_TX_IDLE))
1562 		sis_txeof(sc);
1563 
1564 	if (status &
1565 	    (SIS_ISR_RX_DESC_OK | SIS_ISR_RX_OK |
1566 	     SIS_ISR_RX_ERR | SIS_ISR_RX_IDLE))
1567 		sis_rxeof(sc);
1568 
1569 	if (status & (SIS_ISR_RX_IDLE)) {
1570 		/* Consume what's there so that sis_rx_cons points
1571 		 * to the first HW-owned descriptor. */
1572 		sis_rxeof(sc);
1573 		/* reprogram the RX listptr */
1574 		CSR_WRITE_4(sc, SIS_RX_LISTPTR,
1575 		    sc->sc_listmap->dm_segs[0].ds_addr +
1576 		    offsetof(struct sis_list_data,
1577 		    sis_rx_list[sc->sis_cdata.sis_rx_cons]));
1578 	}
1579 
1580 	if (status & SIS_ISR_SYSERR)
1581 		sis_init(sc);
1582 
1583 	/*
1584 	 * XXX: Re-enable the RX engine every time, otherwise it occasionally
1585 	 * stops under unknown circumstances.
1586 	 */
1587 	SIS_SETBIT(sc, SIS_CSR, SIS_CSR_RX_ENABLE);
1588 
1589 	if (!ifq_empty(&ifp->if_snd))
1590 		sis_start(ifp);
1591 
1592 	return (1);
1593 }
1594 
1595 /*
1596  * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
1597  * pointers to the fragment pointers.
1598  */
1599 int
1600 sis_encap(struct sis_softc *sc, struct mbuf *m_head, u_int32_t *txidx)
1601 {
1602 	struct sis_desc		*f = NULL;
1603 	bus_dmamap_t		map;
1604 	int			frag, cur, i, error;
1605 
1606 	map = sc->sc_tx_sparemap;
1607 
1608 	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m_head,
1609 	    BUS_DMA_NOWAIT);
1610 	switch (error) {
1611 	case 0:
1612 		break;
1613 
1614 	case EFBIG:
1615 		if (m_defrag(m_head, M_DONTWAIT) == 0 &&
1616 		    bus_dmamap_load_mbuf(sc->sc_dmat, map, m_head,
1617 		    BUS_DMA_NOWAIT) == 0)
1618 			break;
1619 
1620 		/* FALLTHROUGH */
1621 	default:
1622 		return (ENOBUFS);
1623 	}
1624 
1625 	if ((SIS_TX_LIST_CNT - (sc->sis_cdata.sis_tx_cnt + map->dm_nsegs)) < 2) {
1626 		bus_dmamap_unload(sc->sc_dmat, map);
1627 		return (ENOBUFS);
1628 	}
1629 
1630 	/*
1631  	 * Start packing the mbufs in this chain into
1632 	 * the fragment pointers. Stop when we run out
1633  	 * of fragments or hit the end of the mbuf chain.
1634 	 */
1635 	cur = frag = *txidx;
1636 
1637 	for (i = 0; i < map->dm_nsegs; i++) {
1638 		f = &sc->sis_ldata->sis_tx_list[frag];
1639 		f->sis_ctl = htole32(SIS_CMDSTS_MORE | map->dm_segs[i].ds_len);
1640 		f->sis_ptr = htole32(map->dm_segs[i].ds_addr);
1641 		if (i != 0)
1642 			f->sis_ctl |= htole32(SIS_CMDSTS_OWN);
1643 		cur = frag;
1644 		SIS_INC(frag, SIS_TX_LIST_CNT);
1645 	}
1646 
1647 	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
1648 	    BUS_DMASYNC_PREWRITE);
1649 
1650 	sc->sis_ldata->sis_tx_list[cur].sis_mbuf = m_head;
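	/*
	 * Hand the chain over to the chip: record the mbuf on the last
	 * fragment, clear MORE there, and only then set OWN on the first
	 * fragment so the NIC never sees a partially built chain.
	 */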
1651 	sc->sis_ldata->sis_tx_list[cur].sis_ctl &= ~htole32(SIS_CMDSTS_MORE);
1652 	sc->sis_ldata->sis_tx_list[*txidx].sis_ctl |= htole32(SIS_CMDSTS_OWN);
1653 	sc->sis_cdata.sis_tx_cnt += map->dm_nsegs;
1654 	*txidx = frag;
1655 
1656 	bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
1657 	    offsetof(struct sis_list_data, sis_tx_list[0]),
1658 	    sizeof(struct sis_desc) * SIS_TX_LIST_CNT,
1659 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1660 
1661 	return (0);
1662 }
1663 
1664 /*
1665  * Main transmit routine. To avoid having to do mbuf copies, we put pointers
1666  * to the mbuf data regions directly in the transmit lists. We also save a
1667  * copy of the pointers since the transmit list fragment pointers are
1668  * physical addresses.
1669  */
1670 
1671 void
1672 sis_start(struct ifnet *ifp)
1673 {
1674 	struct sis_softc	*sc;
1675 	struct mbuf		*m_head = NULL;
1676 	u_int32_t		idx, queued = 0;
1677 
1678 	sc = ifp->if_softc;
1679 
1680 	if (!sc->sis_link)
1681 		return;
1682 
1683 	idx = sc->sis_cdata.sis_tx_prod;
1684 
1685 	if (ifq_is_oactive(&ifp->if_snd))
1686 		return;
1687 
1688 	while(sc->sis_ldata->sis_tx_list[idx].sis_mbuf == NULL) {
1689 		m_head = ifq_deq_begin(&ifp->if_snd);
1690 		if (m_head == NULL)
1691 			break;
1692 
1693 		if (sis_encap(sc, m_head, &idx)) {
1694 			ifq_deq_rollback(&ifp->if_snd, m_head);
1695 			ifq_set_oactive(&ifp->if_snd);
1696 			break;
1697 		}
1698 
1699 		/* now we are committed to transmit the packet */
1700 		ifq_deq_commit(&ifp->if_snd, m_head);
1701 
1702 		queued++;
1703 
1704 		/*
1705 		 * If there's a BPF listener, bounce a copy of this frame
1706 		 * to it.
1707 		 */
1708 #if NBPFILTER > 0
1709 		if (ifp->if_bpf)
1710 			bpf_mtap(ifp->if_bpf, m_head, BPF_DIRECTION_OUT);
1711 #endif
1712 	}
1713 
1714 	if (queued) {
1715 		/* Transmit */
1716 		sc->sis_cdata.sis_tx_prod = idx;
1717 		SIS_SETBIT(sc, SIS_CSR, SIS_CSR_TX_ENABLE);
1718 
1719 		/*
1720 		 * Set a timeout in case the chip goes out to lunch.
1721 		 */
1722 		ifp->if_timer = 5;
1723 	}
1724 }
1725 
1726 void
1727 sis_init(void *xsc)
1728 {
1729 	struct sis_softc	*sc = (struct sis_softc *)xsc;
1730 	struct ifnet		*ifp = &sc->arpcom.ac_if;
1731 	struct mii_data		*mii;
1732 	int			s;
1733 
1734 	s = splnet();
1735 
1736 	/*
1737 	 * Cancel pending I/O and free all RX/TX buffers.
1738 	 */
1739 	sis_stop(sc);
1740 
1741 	/*
1742 	 * Reset the chip to a known state.
1743 	 */
1744 	sis_reset(sc);
1745 
1746 #if NS_IHR_DELAY > 0
1747 	/* Configure interrupt holdoff register. */
1748 	if (sc->sis_type == SIS_TYPE_83815 && sc->sis_srr == NS_SRR_16A)
1749 		CSR_WRITE_4(sc, NS_IHR, NS_IHR_VALUE);
1750 #endif
1751 
1752 	mii = &sc->sc_mii;
1753 
1754 	/* Set MAC address */
1755 	if (sc->sis_type == SIS_TYPE_83815) {
1756 		CSR_WRITE_4(sc, SIS_RXFILT_CTL, NS_FILTADDR_PAR0);
1757 		CSR_WRITE_4(sc, SIS_RXFILT_DATA,
1758 		    htole16(((u_int16_t *)sc->arpcom.ac_enaddr)[0]));
1759 		CSR_WRITE_4(sc, SIS_RXFILT_CTL, NS_FILTADDR_PAR1);
1760 		CSR_WRITE_4(sc, SIS_RXFILT_DATA,
1761 		    htole16(((u_int16_t *)sc->arpcom.ac_enaddr)[1]));
1762 		CSR_WRITE_4(sc, SIS_RXFILT_CTL, NS_FILTADDR_PAR2);
1763 		CSR_WRITE_4(sc, SIS_RXFILT_DATA,
1764 		    htole16(((u_int16_t *)sc->arpcom.ac_enaddr)[2]));
1765 	} else {
1766 		CSR_WRITE_4(sc, SIS_RXFILT_CTL, SIS_FILTADDR_PAR0);
1767 		CSR_WRITE_4(sc, SIS_RXFILT_DATA,
1768 		    htole16(((u_int16_t *)sc->arpcom.ac_enaddr)[0]));
1769 		CSR_WRITE_4(sc, SIS_RXFILT_CTL, SIS_FILTADDR_PAR1);
1770 		CSR_WRITE_4(sc, SIS_RXFILT_DATA,
1771 		    htole16(((u_int16_t *)sc->arpcom.ac_enaddr)[1]));
1772 		CSR_WRITE_4(sc, SIS_RXFILT_CTL, SIS_FILTADDR_PAR2);
1773 		CSR_WRITE_4(sc, SIS_RXFILT_DATA,
1774 		    htole16(((u_int16_t *)sc->arpcom.ac_enaddr)[2]));
1775 	}
1776 
1777 	/* Init circular TX/RX lists. */
1778 	if (sis_ring_init(sc) != 0) {
1779 		printf("%s: initialization failed: no memory for rx buffers\n",
1780 		    sc->sc_dev.dv_xname);
1781 		sis_stop(sc);
1782 		splx(s);
1783 		return;
1784 	}
1785 
1786 	/*
1787 	 * Page 78 of the DP83815 data sheet (September 2002 version)
1788 	 * recommends the following register settings "for optimum
1789 	 * performance" for rev 15C.  The driver from NS also sets
1790 	 * the PHY_CR register for later versions.
1791 	 *
1792 	 * This resolves an issue with tons of errors in AcceptPerfectMatch
1793 	 * (non-IFF_PROMISC) mode.
1794 	 */
1795 	 if (sc->sis_type == SIS_TYPE_83815 && sc->sis_srr <= NS_SRR_15D) {
1796 		CSR_WRITE_4(sc, NS_PHY_PAGE, 0x0001);
1797 		CSR_WRITE_4(sc, NS_PHY_CR, 0x189C);
1798 		/* set val for c2 */
1799 		CSR_WRITE_4(sc, NS_PHY_TDATA, 0x0000);
1800 		/* load/kill c2 */
1801 		CSR_WRITE_4(sc, NS_PHY_DSPCFG, 0x5040);
1802 		/* raise SD off, from 4 to c */
1803 		CSR_WRITE_4(sc, NS_PHY_SDCFG, 0x008C);
1804 		CSR_WRITE_4(sc, NS_PHY_PAGE, 0);
1805 	}
1806 
1807 	/*
1808 	 * Program promiscuous mode and multicast filters.
1809 	 */
1810 	sis_iff(sc);
1811 
1812 	/*
1813 	 * Load the address of the RX and TX lists.
1814 	 */
1815 	CSR_WRITE_4(sc, SIS_RX_LISTPTR, sc->sc_listmap->dm_segs[0].ds_addr +
1816 	    offsetof(struct sis_list_data, sis_rx_list[0]));
1817 	CSR_WRITE_4(sc, SIS_TX_LISTPTR, sc->sc_listmap->dm_segs[0].ds_addr +
1818 	    offsetof(struct sis_list_data, sis_tx_list[0]));
1819 
1820 	/* SIS_CFG_EDB_MASTER_EN indicates the EDB bus is used instead of
1821 	 * the PCI bus. When this bit is set, the Max DMA Burst Size
1822 	 * for TX/RX DMA should be no larger than 16 double words.
1823 	 */
1824 	if (CSR_READ_4(sc, SIS_CFG) & SIS_CFG_EDB_MASTER_EN)
1825 		CSR_WRITE_4(sc, SIS_RX_CFG, SIS_RXCFG64);
1826 	else
1827 		CSR_WRITE_4(sc, SIS_RX_CFG, SIS_RXCFG256);
1828 
1829 	/* Accept Long Packets for VLAN support */
1830 	SIS_SETBIT(sc, SIS_RX_CFG, SIS_RXCFG_RX_JABBER);
1831 
1832 	/*
1833 	 * Assume a 100Mbps link; the actual MAC configuration is
1834 	 * done once a valid link has been established.
1835 	 */
1836 	CSR_WRITE_4(sc, SIS_TX_CFG, SIS_TXCFG_100);
1837 
1838 	/*
1839 	 * Enable interrupts.
1840 	 */
1841 	CSR_WRITE_4(sc, SIS_IMR, SIS_INTRS);
1842 	CSR_WRITE_4(sc, SIS_IER, 1);
1843 
1844 	/* Clear MAC disable. */
1845 	SIS_CLRBIT(sc, SIS_CSR, SIS_CSR_TX_DISABLE | SIS_CSR_RX_DISABLE);
1846 
1847 	sc->sis_link = 0;
1848 	mii_mediachg(mii);
1849 
1850 	sc->sis_stopped = 0;
1851 	ifp->if_flags |= IFF_RUNNING;
1852 	ifq_clr_oactive(&ifp->if_snd);
1853 
1854 	splx(s);
1855 
1856 	timeout_add_sec(&sc->sis_timeout, 1);
1857 }
1858 
1859 /*
1860  * Set media options.
1861  */
1862 int
1863 sis_ifmedia_upd(struct ifnet *ifp)
1864 {
1865 	struct sis_softc	*sc;
1866 	struct mii_data		*mii;
1867 
1868 	sc = ifp->if_softc;
1869 
1870 	mii = &sc->sc_mii;
1871 	if (mii->mii_instance) {
1872 		struct mii_softc	*miisc;
1873 		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
1874 			mii_phy_reset(miisc);
1875 	}
1876 	mii_mediachg(mii);
1877 
1878 	return (0);
1879 }
1880 
1881 /*
1882  * Report current media status.
1883  */
1884 void
1885 sis_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
1886 {
1887 	struct sis_softc	*sc;
1888 	struct mii_data		*mii;
1889 
1890 	sc = ifp->if_softc;
1891 
1892 	mii = &sc->sc_mii;
1893 	mii_pollstat(mii);
1894 	ifmr->ifm_active = mii->mii_media_active;
1895 	ifmr->ifm_status = mii->mii_media_status;
1896 }
1897 
1898 int
1899 sis_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
1900 {
1901 	struct sis_softc	*sc = ifp->if_softc;
1902 	struct ifreq		*ifr = (struct ifreq *) data;
1903 	struct mii_data		*mii;
1904 	int			s, error = 0;
1905 
1906 	s = splnet();
1907 
1908 	switch(command) {
1909 	case SIOCSIFADDR:
1910 		ifp->if_flags |= IFF_UP;
1911 		if (!(ifp->if_flags & IFF_RUNNING))
1912 			sis_init(sc);
1913 		break;
1914 
1915 	case SIOCSIFFLAGS:
1916 		if (ifp->if_flags & IFF_UP) {
1917 			if (ifp->if_flags & IFF_RUNNING)
1918 				error = ENETRESET;
1919 			else
1920 				sis_init(sc);
1921 		} else {
1922 			if (ifp->if_flags & IFF_RUNNING)
1923 				sis_stop(sc);
1924 		}
1925 		break;
1926 
1927 	case SIOCGIFMEDIA:
1928 	case SIOCSIFMEDIA:
1929 		mii = &sc->sc_mii;
1930 		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
1931 		break;
1932 
1933 	case SIOCGIFRXR:
1934 		error = if_rxr_ioctl((struct if_rxrinfo *)ifr->ifr_data,
1935 		    NULL, MCLBYTES, &sc->sis_cdata.sis_rx_ring);
1936 		break;
1937 
1938 	default:
1939 		error = ether_ioctl(ifp, &sc->arpcom, command, data);
1940 	}
1941 
1942 	if (error == ENETRESET) {
1943 		if (ifp->if_flags & IFF_RUNNING)
1944 			sis_iff(sc);
1945 		error = 0;
1946 	}
1947 
1948 	splx(s);
1949 	return(error);
1950 }
1951 
1952 void
1953 sis_watchdog(struct ifnet *ifp)
1954 {
1955 	struct sis_softc	*sc;
1956 	int			s;
1957 
1958 	sc = ifp->if_softc;
1959 
1960 	if (sc->sis_stopped)
1961 		return;
1962 
1963 	ifp->if_oerrors++;
1964 	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);
1965 
1966 	s = splnet();
1967 	sis_init(sc);
1968 
1969 	if (!ifq_empty(&ifp->if_snd))
1970 		sis_start(ifp);
1971 
1972 	splx(s);
1973 }
1974 
1975 /*
1976  * Stop the adapter and free any mbufs allocated to the
1977  * RX and TX lists.
1978  */
1979 void
1980 sis_stop(struct sis_softc *sc)
1981 {
1982 	int			i;
1983 	struct ifnet		*ifp;
1984 
1985 	if (sc->sis_stopped)
1986 		return;
1987 
1988 	ifp = &sc->arpcom.ac_if;
1989 	ifp->if_timer = 0;
1990 
1991 	timeout_del(&sc->sis_timeout);
1992 
1993 	ifp->if_flags &= ~IFF_RUNNING;
1994 	ifq_clr_oactive(&ifp->if_snd);
1995 	sc->sis_stopped = 1;
1996 
1997 	CSR_WRITE_4(sc, SIS_IER, 0);
1998 	CSR_WRITE_4(sc, SIS_IMR, 0);
1999 	CSR_READ_4(sc, SIS_ISR); /* clear any interrupts already pending */
2000 	SIS_SETBIT(sc, SIS_CSR, SIS_CSR_TX_DISABLE | SIS_CSR_RX_DISABLE);
2001 	DELAY(1000);
2002 	CSR_WRITE_4(sc, SIS_TX_LISTPTR, 0);
2003 	CSR_WRITE_4(sc, SIS_RX_LISTPTR, 0);
2004 
2005 	sc->sis_link = 0;
2006 
2007 	/*
2008 	 * Free data in the RX lists.
2009 	 */
2010 	for (i = 0; i < SIS_RX_LIST_CNT; i++) {
2011 		if (sc->sis_ldata->sis_rx_list[i].map->dm_nsegs != 0) {
2012 			bus_dmamap_t map = sc->sis_ldata->sis_rx_list[i].map;
2013 
2014 			bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
2015 			    BUS_DMASYNC_POSTREAD);
2016 			bus_dmamap_unload(sc->sc_dmat, map);
2017 		}
2018 		if (sc->sis_ldata->sis_rx_list[i].sis_mbuf != NULL) {
2019 			m_freem(sc->sis_ldata->sis_rx_list[i].sis_mbuf);
2020 			sc->sis_ldata->sis_rx_list[i].sis_mbuf = NULL;
2021 		}
2022 		bzero(&sc->sis_ldata->sis_rx_list[i],
2023 		    sizeof(struct sis_desc) - sizeof(bus_dmamap_t));
2024 	}
2025 
2026 	/*
2027 	 * Free the TX list buffers.
2028 	 */
2029 	for (i = 0; i < SIS_TX_LIST_CNT; i++) {
2030 		if (sc->sis_ldata->sis_tx_list[i].map->dm_nsegs != 0) {
2031 			bus_dmamap_t map = sc->sis_ldata->sis_tx_list[i].map;
2032 
2033 			bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
2034 			    BUS_DMASYNC_POSTWRITE);
2035 			bus_dmamap_unload(sc->sc_dmat, map);
2036 		}
2037 		if (sc->sis_ldata->sis_tx_list[i].sis_mbuf != NULL) {
2038 			m_freem(sc->sis_ldata->sis_tx_list[i].sis_mbuf);
2039 			sc->sis_ldata->sis_tx_list[i].sis_mbuf = NULL;
2040 		}
2041 		bzero(&sc->sis_ldata->sis_tx_list[i],
2042 		    sizeof(struct sis_desc) - sizeof(bus_dmamap_t));
2043 	}
2044 }
2045