1 /* $OpenBSD: if_sis.c,v 1.146 2024/08/31 16:23:09 deraadt Exp $ */
2 /*
3 * Copyright (c) 1997, 1998, 1999
4 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. All advertising materials mentioning features or use of this software
15 * must display the following acknowledgement:
16 * This product includes software developed by Bill Paul.
17 * 4. Neither the name of the author nor the names of any co-contributors
18 * may be used to endorse or promote products derived from this software
19 * without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
25 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
31 * THE POSSIBILITY OF SUCH DAMAGE.
32 *
33 * $FreeBSD: src/sys/pci/if_sis.c,v 1.30 2001/02/06 10:11:47 phk Exp $
34 */
35
36 /*
37 * SiS 900/SiS 7016 fast ethernet PCI NIC driver. Datasheets are
38 * available from http://www.sis.com.tw.
39 *
40 * This driver also supports the NatSemi DP83815. Datasheets are
41 * available from http://www.national.com.
42 *
43 * Written by Bill Paul <wpaul@ee.columbia.edu>
44 * Electrical Engineering Department
45 * Columbia University, New York City
46 */
47
48 /*
49 * The SiS 900 is a fairly simple chip. It uses bus master DMA with
50 * simple TX and RX descriptors of 3 longwords in size. The receiver
51 * has a single perfect filter entry for the station address and a
52 * 128-bit multicast hash table. The SiS 900 has a built-in MII-based
53 * transceiver while the 7016 requires an external transceiver chip.
54 * Both chips offer the standard bit-bang MII interface as well as
55 * an enhanced PHY interface which simplifies accessing MII registers.
56 *
57 * The only downside to this chipset is that RX descriptors must be
58 * longword aligned.
59 */
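/*
 * Rough sketch of the 3-longword descriptor layout manipulated below;
 * the authoritative definition (plus software-only bookkeeping fields
 * such as the mbuf pointer and DMA map) lives in if_sisreg.h:
 *
 *	sis_next - little-endian physical address of the next descriptor
 *	sis_ctl  - SIS_CMDSTS_OWN, command/status bits and buffer length
 *	           (read back as sis_rxstat/sis_txstat once the chip is done)
 *	sis_ptr  - little-endian physical address of the data buffer
 *
 * Ownership of a descriptor is handed between driver and chip through
 * the SIS_CMDSTS_OWN bit; see sis_encap() and sis_rxeof() below.
 */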
60
61 #include "bpfilter.h"
62
63 #include <sys/param.h>
64 #include <sys/systm.h>
65 #include <sys/mbuf.h>
66 #include <sys/ioctl.h>
67 #include <sys/errno.h>
68 #include <sys/timeout.h>
69
70 #include <net/if.h>
71
72 #include <netinet/in.h>
73 #include <netinet/if_ether.h>
74
75 #include <net/if_media.h>
76
77 #if NBPFILTER > 0
78 #include <net/bpf.h>
79 #endif
80
81 #include <sys/device.h>
82
83 #include <dev/mii/miivar.h>
84
85 #include <dev/pci/pcireg.h>
86 #include <dev/pci/pcivar.h>
87 #include <dev/pci/pcidevs.h>
88
89 #define SIS_USEIOSPACE
90
91 #include <dev/pci/if_sisreg.h>
92
93 int sis_probe(struct device *, void *, void *);
94 void sis_attach(struct device *, struct device *, void *);
95 int sis_activate(struct device *, int);
96
97 const struct cfattach sis_ca = {
98 sizeof(struct sis_softc), sis_probe, sis_attach, NULL,
99 sis_activate
100 };
101
102 struct cfdriver sis_cd = {
103 NULL, "sis", DV_IFNET
104 };
105
106 int sis_intr(void *);
107 void sis_fill_rx_ring(struct sis_softc *);
108 int sis_newbuf(struct sis_softc *, struct sis_desc *);
109 int sis_encap(struct sis_softc *, struct mbuf *, u_int32_t *);
110 void sis_rxeof(struct sis_softc *);
111 void sis_txeof(struct sis_softc *);
112 void sis_tick(void *);
113 void sis_start(struct ifnet *);
114 int sis_ioctl(struct ifnet *, u_long, caddr_t);
115 void sis_init(void *);
116 void sis_stop(struct sis_softc *);
117 void sis_watchdog(struct ifnet *);
118 int sis_ifmedia_upd(struct ifnet *);
119 void sis_ifmedia_sts(struct ifnet *, struct ifmediareq *);
120
121 u_int16_t sis_reverse(u_int16_t);
122 void sis_delay(struct sis_softc *);
123 void sis_eeprom_idle(struct sis_softc *);
124 void sis_eeprom_putbyte(struct sis_softc *, int);
125 void sis_eeprom_getword(struct sis_softc *, int, u_int16_t *);
126 #if defined(__amd64__) || defined(__i386__)
127 void sis_read_cmos(struct sis_softc *, struct pci_attach_args *, caddr_t, int, int);
128 #endif
129 void sis_read_mac(struct sis_softc *, struct pci_attach_args *);
130 void sis_read_eeprom(struct sis_softc *, caddr_t, int, int, int);
131 void sis_read96x_mac(struct sis_softc *);
132
133 void sis_mii_sync(struct sis_softc *);
134 void sis_mii_send(struct sis_softc *, u_int32_t, int);
135 int sis_mii_readreg(struct sis_softc *, struct sis_mii_frame *);
136 int sis_mii_writereg(struct sis_softc *, struct sis_mii_frame *);
137 int sis_miibus_readreg(struct device *, int, int);
138 void sis_miibus_writereg(struct device *, int, int, int);
139 void sis_miibus_statchg(struct device *);
140
141 u_int32_t sis_mchash(struct sis_softc *, const uint8_t *);
142 void sis_iff(struct sis_softc *);
143 void sis_iff_ns(struct sis_softc *);
144 void sis_iff_sis(struct sis_softc *);
145 void sis_reset(struct sis_softc *);
146 int sis_ring_init(struct sis_softc *);
147
148 #define SIS_SETBIT(sc, reg, x) \
149 CSR_WRITE_4(sc, reg, \
150 CSR_READ_4(sc, reg) | (x))
151
152 #define SIS_CLRBIT(sc, reg, x) \
153 CSR_WRITE_4(sc, reg, \
154 CSR_READ_4(sc, reg) & ~(x))
155
156 #define SIO_SET(x) \
157 CSR_WRITE_4(sc, SIS_EECTL, CSR_READ_4(sc, SIS_EECTL) | x)
158
159 #define SIO_CLR(x) \
160 CSR_WRITE_4(sc, SIS_EECTL, CSR_READ_4(sc, SIS_EECTL) & ~x)
161
162 const struct pci_matchid sis_devices[] = {
163 { PCI_VENDOR_SIS, PCI_PRODUCT_SIS_900 },
164 { PCI_VENDOR_SIS, PCI_PRODUCT_SIS_7016 },
165 { PCI_VENDOR_NS, PCI_PRODUCT_NS_DP83815 }
166 };
167
168 /*
169 * Routine to reverse the bits in a word. Stolen almost
170 * verbatim from /usr/games/fortune.
171 */
172 u_int16_t
173 sis_reverse(u_int16_t n)
174 {
175 n = ((n >> 1) & 0x5555) | ((n << 1) & 0xaaaa);
176 n = ((n >> 2) & 0x3333) | ((n << 2) & 0xcccc);
177 n = ((n >> 4) & 0x0f0f) | ((n << 4) & 0xf0f0);
178 n = ((n >> 8) & 0x00ff) | ((n << 8) & 0xff00);
179
180 return (n);
181 }
182
183 void
184 sis_delay(struct sis_softc *sc)
185 {
186 int idx;
187
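	/*
	 * Kill time with dummy CSR reads; the 300/33 constant suggests the
	 * intent is roughly a 300 ns settle time, budgeting about one
	 * 33 MHz PCI clock per read (an assumption, not from the datasheet).
	 */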
188 for (idx = (300 / 33) + 1; idx > 0; idx--)
189 CSR_READ_4(sc, SIS_CSR);
190 }
191
192 void
193 sis_eeprom_idle(struct sis_softc *sc)
194 {
195 int i;
196
197 SIO_SET(SIS_EECTL_CSEL);
198 sis_delay(sc);
199 SIO_SET(SIS_EECTL_CLK);
200 sis_delay(sc);
201
202 for (i = 0; i < 25; i++) {
203 SIO_CLR(SIS_EECTL_CLK);
204 sis_delay(sc);
205 SIO_SET(SIS_EECTL_CLK);
206 sis_delay(sc);
207 }
208
209 SIO_CLR(SIS_EECTL_CLK);
210 sis_delay(sc);
211 SIO_CLR(SIS_EECTL_CSEL);
212 sis_delay(sc);
213 CSR_WRITE_4(sc, SIS_EECTL, 0x00000000);
214 }
215
216 /*
217 * Send a read command and address to the EEPROM.
218 */
219 void
220 sis_eeprom_putbyte(struct sis_softc *sc, int addr)
221 {
222 int d, i;
223
224 d = addr | SIS_EECMD_READ;
225
226 /*
227 * Feed in each bit and strobe the clock.
228 */
229 for (i = 0x400; i; i >>= 1) {
230 if (d & i)
231 SIO_SET(SIS_EECTL_DIN);
232 else
233 SIO_CLR(SIS_EECTL_DIN);
234 sis_delay(sc);
235 SIO_SET(SIS_EECTL_CLK);
236 sis_delay(sc);
237 SIO_CLR(SIS_EECTL_CLK);
238 sis_delay(sc);
239 }
240 }
241
242 /*
243 * Read a word of data stored in the EEPROM at address 'addr.'
244 */
245 void
246 sis_eeprom_getword(struct sis_softc *sc, int addr, u_int16_t *dest)
247 {
248 int i;
249 u_int16_t word = 0;
250
251 /* Force EEPROM to idle state. */
252 sis_eeprom_idle(sc);
253
254 /* Enter EEPROM access mode. */
255 sis_delay(sc);
256 SIO_CLR(SIS_EECTL_CLK);
257 sis_delay(sc);
258 SIO_SET(SIS_EECTL_CSEL);
259 sis_delay(sc);
260
261 /*
262 * Send address of word we want to read.
263 */
264 sis_eeprom_putbyte(sc, addr);
265
266 /*
267 * Start reading bits from EEPROM.
268 */
269 for (i = 0x8000; i; i >>= 1) {
270 SIO_SET(SIS_EECTL_CLK);
271 sis_delay(sc);
272 if (CSR_READ_4(sc, SIS_EECTL) & SIS_EECTL_DOUT)
273 word |= i;
274 sis_delay(sc);
275 SIO_CLR(SIS_EECTL_CLK);
276 sis_delay(sc);
277 }
278
279 /* Turn off EEPROM access mode. */
280 sis_eeprom_idle(sc);
281
282 *dest = word;
283 }
284
285 /*
286 * Read a sequence of words from the EEPROM.
287 */
288 void
289 sis_read_eeprom(struct sis_softc *sc, caddr_t dest,
290 int off, int cnt, int swap)
291 {
292 int i;
293 u_int16_t word = 0, *ptr;
294
295 for (i = 0; i < cnt; i++) {
296 sis_eeprom_getword(sc, off + i, &word);
297 ptr = (u_int16_t *)(dest + (i * 2));
298 if (swap)
299 *ptr = letoh16(word);
300 else
301 *ptr = word;
302 }
303 }
304
305 #if defined(__amd64__) || defined(__i386__)
306 void
307 sis_read_cmos(struct sis_softc *sc, struct pci_attach_args *pa,
308 caddr_t dest, int off, int cnt)
309 {
310 u_int32_t reg;
311 int i;
312
313 reg = pci_conf_read(pa->pa_pc, pa->pa_tag, 0x48);
314 pci_conf_write(pa->pa_pc, pa->pa_tag, 0x48, reg | 0x40);
315
316 for (i = 0; i < cnt; i++) {
317 bus_space_write_1(pa->pa_iot, 0x0, 0x70, i + off);
318 *(dest + i) = bus_space_read_1(pa->pa_iot, 0x0, 0x71);
319 }
320
321 pci_conf_write(pa->pa_pc, pa->pa_tag, 0x48, reg & ~0x40);
322 }
323 #endif
324
325 void
326 sis_read_mac(struct sis_softc *sc, struct pci_attach_args *pa)
327 {
328 uint32_t rxfilt, csrsave;
329 u_int16_t *enaddr = (u_int16_t *) &sc->arpcom.ac_enaddr;
330
331 rxfilt = CSR_READ_4(sc, SIS_RXFILT_CTL);
332 csrsave = CSR_READ_4(sc, SIS_CSR);
333
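	/*
	 * Pulse SIS_CSR_RELOAD so the chip reloads its station address
	 * into the receive filter registers, then read it back out through
	 * the filter data port; PAR0-PAR2 each hold 16 bits of the address.
	 */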
334 CSR_WRITE_4(sc, SIS_CSR, SIS_CSR_RELOAD | csrsave);
335 CSR_WRITE_4(sc, SIS_CSR, 0);
336
337 CSR_WRITE_4(sc, SIS_RXFILT_CTL, rxfilt & ~SIS_RXFILTCTL_ENABLE);
338
339 CSR_WRITE_4(sc, SIS_RXFILT_CTL, SIS_FILTADDR_PAR0);
340 enaddr[0] = letoh16(CSR_READ_4(sc, SIS_RXFILT_DATA) & 0xffff);
341 CSR_WRITE_4(sc, SIS_RXFILT_CTL, SIS_FILTADDR_PAR1);
342 enaddr[1] = letoh16(CSR_READ_4(sc, SIS_RXFILT_DATA) & 0xffff);
343 CSR_WRITE_4(sc, SIS_RXFILT_CTL, SIS_FILTADDR_PAR2);
344 enaddr[2] = letoh16(CSR_READ_4(sc, SIS_RXFILT_DATA) & 0xffff);
345
346 CSR_WRITE_4(sc, SIS_RXFILT_CTL, rxfilt);
347 CSR_WRITE_4(sc, SIS_CSR, csrsave);
348 }
349
350 void
351 sis_read96x_mac(struct sis_softc *sc)
352 {
353 int i;
354
355 SIO_SET(SIS96x_EECTL_REQ);
356
357 for (i = 0; i < 2000; i++) {
358 if ((CSR_READ_4(sc, SIS_EECTL) & SIS96x_EECTL_GNT)) {
359 sis_read_eeprom(sc, (caddr_t)&sc->arpcom.ac_enaddr,
360 SIS_EE_NODEADDR, 3, 1);
361 break;
362 } else
363 DELAY(1);
364 }
365
366 SIO_SET(SIS96x_EECTL_DONE);
367 }
368
369 /*
370 * Sync the PHYs by setting data bit and strobing the clock 32 times.
371 */
372 void
373 sis_mii_sync(struct sis_softc *sc)
374 {
375 int i;
376
377 SIO_SET(SIS_MII_DIR|SIS_MII_DATA);
378
379 for (i = 0; i < 32; i++) {
380 SIO_SET(SIS_MII_CLK);
381 DELAY(1);
382 SIO_CLR(SIS_MII_CLK);
383 DELAY(1);
384 }
385 }
386
387 /*
388 * Clock a series of bits through the MII.
389 */
390 void
391 sis_mii_send(struct sis_softc *sc, u_int32_t bits, int cnt)
392 {
393 int i;
394
395 SIO_CLR(SIS_MII_CLK);
396
397 for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
398 if (bits & i)
399 SIO_SET(SIS_MII_DATA);
400 else
401 SIO_CLR(SIS_MII_DATA);
402 DELAY(1);
403 SIO_CLR(SIS_MII_CLK);
404 DELAY(1);
405 SIO_SET(SIS_MII_CLK);
406 }
407 }
408
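/*
 * The bit-banged management frames below follow the standard IEEE 802.3
 * clause 22 layout, which is why the sis_mii_send() calls use these widths:
 *
 *	start (2) | opcode (2) | PHY addr (5) | reg addr (5) |
 *	turnaround (2) | data (16)
 *
 * For reads the turnaround and data bits are driven by the PHY, so
 * sis_mii_readreg() releases the data line and samples it instead of
 * sending those fields.
 */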
409 /*
410 * Read a PHY register through the MII.
411 */
412 int
413 sis_mii_readreg(struct sis_softc *sc, struct sis_mii_frame *frame)
414 {
415 int i, ack, s;
416
417 s = splnet();
418
419 /*
420 * Set up frame for RX.
421 */
422 frame->mii_stdelim = SIS_MII_STARTDELIM;
423 frame->mii_opcode = SIS_MII_READOP;
424 frame->mii_turnaround = 0;
425 frame->mii_data = 0;
426
427 /*
428 * Turn on data xmit.
429 */
430 SIO_SET(SIS_MII_DIR);
431
432 sis_mii_sync(sc);
433
434 /*
435 * Send command/address info.
436 */
437 sis_mii_send(sc, frame->mii_stdelim, 2);
438 sis_mii_send(sc, frame->mii_opcode, 2);
439 sis_mii_send(sc, frame->mii_phyaddr, 5);
440 sis_mii_send(sc, frame->mii_regaddr, 5);
441
442 /* Idle bit */
443 SIO_CLR((SIS_MII_CLK|SIS_MII_DATA));
444 DELAY(1);
445 SIO_SET(SIS_MII_CLK);
446 DELAY(1);
447
448 /* Turn off xmit. */
449 SIO_CLR(SIS_MII_DIR);
450
451 /* Check for ack */
452 SIO_CLR(SIS_MII_CLK);
453 DELAY(1);
454 ack = CSR_READ_4(sc, SIS_EECTL) & SIS_MII_DATA;
455 SIO_SET(SIS_MII_CLK);
456 DELAY(1);
457
458 /*
459 * Now try reading data bits. If the ack failed, we still
460 * need to clock through 16 cycles to keep the PHY(s) in sync.
461 */
462 if (ack) {
463 for(i = 0; i < 16; i++) {
464 SIO_CLR(SIS_MII_CLK);
465 DELAY(1);
466 SIO_SET(SIS_MII_CLK);
467 DELAY(1);
468 }
469 goto fail;
470 }
471
472 for (i = 0x8000; i; i >>= 1) {
473 SIO_CLR(SIS_MII_CLK);
474 DELAY(1);
475 if (!ack) {
476 if (CSR_READ_4(sc, SIS_EECTL) & SIS_MII_DATA)
477 frame->mii_data |= i;
478 DELAY(1);
479 }
480 SIO_SET(SIS_MII_CLK);
481 DELAY(1);
482 }
483
484 fail:
485
486 SIO_CLR(SIS_MII_CLK);
487 DELAY(1);
488 SIO_SET(SIS_MII_CLK);
489 DELAY(1);
490
491 splx(s);
492
493 if (ack)
494 return (1);
495 return (0);
496 }
497
498 /*
499 * Write to a PHY register through the MII.
500 */
501 int
502 sis_mii_writereg(struct sis_softc *sc, struct sis_mii_frame *frame)
503 {
504 int s;
505
506 s = splnet();
507 /*
508 * Set up frame for TX.
509 */
510
511 frame->mii_stdelim = SIS_MII_STARTDELIM;
512 frame->mii_opcode = SIS_MII_WRITEOP;
513 frame->mii_turnaround = SIS_MII_TURNAROUND;
514
515 /*
516 * Turn on data output.
517 */
518 SIO_SET(SIS_MII_DIR);
519
520 sis_mii_sync(sc);
521
522 sis_mii_send(sc, frame->mii_stdelim, 2);
523 sis_mii_send(sc, frame->mii_opcode, 2);
524 sis_mii_send(sc, frame->mii_phyaddr, 5);
525 sis_mii_send(sc, frame->mii_regaddr, 5);
526 sis_mii_send(sc, frame->mii_turnaround, 2);
527 sis_mii_send(sc, frame->mii_data, 16);
528
529 /* Idle bit. */
530 SIO_SET(SIS_MII_CLK);
531 DELAY(1);
532 SIO_CLR(SIS_MII_CLK);
533 DELAY(1);
534
535 /*
536 * Turn off xmit.
537 */
538 SIO_CLR(SIS_MII_DIR);
539
540 splx(s);
541
542 return (0);
543 }
544
545 int
546 sis_miibus_readreg(struct device *self, int phy, int reg)
547 {
548 struct sis_softc *sc = (struct sis_softc *)self;
549 struct sis_mii_frame frame;
550
551 if (sc->sis_type == SIS_TYPE_83815) {
552 if (phy != 0)
553 return (0);
554 /*
555 * The NatSemi chip can take a while after
556 * a reset to come ready, during which the BMSR
557 * returns a value of 0. This is *never* supposed
558 * to happen: some of the BMSR bits are meant to
559 * be hardwired in the on position, and this can
560 * confuse the miibus code a bit during the probe
561 * and attach phase. So we make an effort to check
562 * for this condition and wait for it to clear.
563 */
564 if (!CSR_READ_4(sc, NS_BMSR))
565 DELAY(1000);
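		/*
		 * The DP83815 exposes its internal PHY registers as
		 * memory-mapped CSRs at 4-byte strides starting at NS_BMCR,
		 * so MII register 'reg' lives at NS_BMCR + reg * 4.
		 */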
566 return CSR_READ_4(sc, NS_BMCR + (reg * 4));
567 }
568
569 /*
570 * Chipsets older than SIS_635 seem unable to read/write
571 * through MDIO. Use the enhanced PHY access register
572 * for them instead.
573 */
574 if (sc->sis_type == SIS_TYPE_900 &&
575 sc->sis_rev < SIS_REV_635) {
576 int i, val = 0;
577
578 if (phy != 0)
579 return (0);
580
581 CSR_WRITE_4(sc, SIS_PHYCTL,
582 (phy << 11) | (reg << 6) | SIS_PHYOP_READ);
583 SIS_SETBIT(sc, SIS_PHYCTL, SIS_PHYCTL_ACCESS);
584
585 for (i = 0; i < SIS_TIMEOUT; i++) {
586 if (!(CSR_READ_4(sc, SIS_PHYCTL) & SIS_PHYCTL_ACCESS))
587 break;
588 }
589
590 if (i == SIS_TIMEOUT) {
591 printf("%s: PHY failed to come ready\n",
592 sc->sc_dev.dv_xname);
593 return (0);
594 }
595
596 val = (CSR_READ_4(sc, SIS_PHYCTL) >> 16) & 0xFFFF;
597
598 if (val == 0xFFFF)
599 return (0);
600
601 return (val);
602 } else {
603 bzero(&frame, sizeof(frame));
604
605 frame.mii_phyaddr = phy;
606 frame.mii_regaddr = reg;
607 sis_mii_readreg(sc, &frame);
608
609 return (frame.mii_data);
610 }
611 }
612
613 void
614 sis_miibus_writereg(struct device *self, int phy, int reg, int data)
615 {
616 struct sis_softc *sc = (struct sis_softc *)self;
617 struct sis_mii_frame frame;
618
619 if (sc->sis_type == SIS_TYPE_83815) {
620 if (phy != 0)
621 return;
622 CSR_WRITE_4(sc, NS_BMCR + (reg * 4), data);
623 return;
624 }
625
626 /*
627 * Chipsets older than SIS_635 seem unable to read/write
628 * through MDIO. Use the enhanced PHY access register
629 * for them instead.
630 */
631 if (sc->sis_type == SIS_TYPE_900 &&
632 sc->sis_rev < SIS_REV_635) {
633 int i;
634
635 if (phy != 0)
636 return;
637
638 CSR_WRITE_4(sc, SIS_PHYCTL, (data << 16) | (phy << 11) |
639 (reg << 6) | SIS_PHYOP_WRITE);
640 SIS_SETBIT(sc, SIS_PHYCTL, SIS_PHYCTL_ACCESS);
641
642 for (i = 0; i < SIS_TIMEOUT; i++) {
643 if (!(CSR_READ_4(sc, SIS_PHYCTL) & SIS_PHYCTL_ACCESS))
644 break;
645 }
646
647 if (i == SIS_TIMEOUT)
648 printf("%s: PHY failed to come ready\n",
649 sc->sc_dev.dv_xname);
650 } else {
651 bzero(&frame, sizeof(frame));
652
653 frame.mii_phyaddr = phy;
654 frame.mii_regaddr = reg;
655 frame.mii_data = data;
656 sis_mii_writereg(sc, &frame);
657 }
658 }
659
660 void
661 sis_miibus_statchg(struct device *self)
662 {
663 struct sis_softc *sc = (struct sis_softc *)self;
664 struct ifnet *ifp = &sc->arpcom.ac_if;
665 struct mii_data *mii = &sc->sc_mii;
666
667 if ((ifp->if_flags & IFF_RUNNING) == 0)
668 return;
669
670 sc->sis_link = 0;
671 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
672 (IFM_ACTIVE | IFM_AVALID)) {
673 switch (IFM_SUBTYPE(mii->mii_media_active)) {
674 case IFM_10_T:
675 CSR_WRITE_4(sc, SIS_TX_CFG, SIS_TXCFG_10);
676 sc->sis_link++;
677 break;
678 case IFM_100_TX:
679 CSR_WRITE_4(sc, SIS_TX_CFG, SIS_TXCFG_100);
680 sc->sis_link++;
681 break;
682 default:
683 break;
684 }
685 }
686
687 if (!sc->sis_link) {
688 /*
689 * Stopping the MACs seems to reset SIS_TX_LISTPTR and
690 * SIS_RX_LISTPTR, which in turn would require reinitialising
691 * the TX/RX rings. So just don't do anything when the
692 * link is lost.
693 */
694 return;
695 }
696
697 /* Set full/half duplex mode. */
698 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
699 SIS_SETBIT(sc, SIS_TX_CFG,
700 (SIS_TXCFG_IGN_HBEAT | SIS_TXCFG_IGN_CARR));
701 SIS_SETBIT(sc, SIS_RX_CFG, SIS_RXCFG_RX_TXPKTS);
702 } else {
703 SIS_CLRBIT(sc, SIS_TX_CFG,
704 (SIS_TXCFG_IGN_HBEAT | SIS_TXCFG_IGN_CARR));
705 SIS_CLRBIT(sc, SIS_RX_CFG, SIS_RXCFG_RX_TXPKTS);
706 }
707
708 if (sc->sis_type == SIS_TYPE_83815 && sc->sis_srr >= NS_SRR_16A) {
709 /*
710 * MPII03.D: Half Duplex Excessive Collisions.
711 * Also page 49 in 83816 manual
712 */
713 SIS_SETBIT(sc, SIS_TX_CFG, SIS_TXCFG_MPII03D);
714 }
715
716 /*
717 * Some DP83815s experience problems when used with short
718 * (< 30m/100ft) Ethernet cables in 100baseTX mode. This
719 * sequence adjusts the DSP's signal attenuation to fix the
720 * problem.
721 */
722 if (sc->sis_type == SIS_TYPE_83815 && sc->sis_srr < NS_SRR_16A &&
723 IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX) {
724 uint32_t reg;
725
726 CSR_WRITE_4(sc, NS_PHY_PAGE, 0x0001);
727 reg = CSR_READ_4(sc, NS_PHY_DSPCFG) & 0xfff;
728 CSR_WRITE_4(sc, NS_PHY_DSPCFG, reg | 0x1000);
729 DELAY(100);
730 reg = CSR_READ_4(sc, NS_PHY_TDATA) & 0xff;
731 if ((reg & 0x0080) == 0 || (reg > 0xd8 && reg <= 0xff)) {
732 #ifdef DEBUG
733 printf("%s: Applying short cable fix (reg=%x)\n",
734 sc->sc_dev.dv_xname, reg);
735 #endif
736 CSR_WRITE_4(sc, NS_PHY_TDATA, 0x00e8);
737 SIS_SETBIT(sc, NS_PHY_DSPCFG, 0x20);
738 }
739 CSR_WRITE_4(sc, NS_PHY_PAGE, 0);
740 }
741 /* Enable TX/RX MACs. */
742 SIS_CLRBIT(sc, SIS_CSR, SIS_CSR_TX_DISABLE | SIS_CSR_RX_DISABLE);
743 SIS_SETBIT(sc, SIS_CSR, SIS_CSR_TX_ENABLE | SIS_CSR_RX_ENABLE);
744 }
745
746 u_int32_t
747 sis_mchash(struct sis_softc *sc, const uint8_t *addr)
748 {
749 uint32_t crc;
750
751 /* Compute CRC for the address value. */
752 crc = ether_crc32_be(addr, ETHER_ADDR_LEN);
753
754 /*
755 * return the filter bit position
756 *
757 * The NatSemi chip has a 512-bit filter, which is
758 * different from the SiS parts, so we special-case it.
759 */
760 if (sc->sis_type == SIS_TYPE_83815)
761 return (crc >> 23);
762 else if (sc->sis_rev >= SIS_REV_635 ||
763 sc->sis_rev == SIS_REV_900B)
764 return (crc >> 24);
765 else
766 return (crc >> 25);
767 }
768
769 void
770 sis_iff(struct sis_softc *sc)
771 {
772 if (sc->sis_type == SIS_TYPE_83815)
773 sis_iff_ns(sc);
774 else
775 sis_iff_sis(sc);
776 }
777
778 void
779 sis_iff_ns(struct sis_softc *sc)
780 {
781 struct ifnet *ifp = &sc->arpcom.ac_if;
782 struct arpcom *ac = &sc->arpcom;
783 struct ether_multi *enm;
784 struct ether_multistep step;
785 u_int32_t h = 0, i, rxfilt;
786 int bit, index;
787
788 rxfilt = CSR_READ_4(sc, SIS_RXFILT_CTL);
789 if (rxfilt & SIS_RXFILTCTL_ENABLE) {
790 /*
791 * Filter should be disabled to program other bits.
792 */
793 CSR_WRITE_4(sc, SIS_RXFILT_CTL, rxfilt & ~SIS_RXFILTCTL_ENABLE);
794 CSR_READ_4(sc, SIS_RXFILT_CTL);
795 }
796 rxfilt &= ~(SIS_RXFILTCTL_ALLMULTI | SIS_RXFILTCTL_ALLPHYS |
797 NS_RXFILTCTL_ARP | SIS_RXFILTCTL_BROAD | NS_RXFILTCTL_MCHASH |
798 NS_RXFILTCTL_PERFECT);
799 ifp->if_flags &= ~IFF_ALLMULTI;
800
801 /*
802 * Always accept ARP frames.
803 * Always accept broadcast frames.
804 * Always accept frames destined to our station address.
805 */
806 rxfilt |= NS_RXFILTCTL_ARP | SIS_RXFILTCTL_BROAD |
807 NS_RXFILTCTL_PERFECT;
808
809 if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
810 ifp->if_flags |= IFF_ALLMULTI;
811 rxfilt |= SIS_RXFILTCTL_ALLMULTI;
812 if (ifp->if_flags & IFF_PROMISC)
813 rxfilt |= SIS_RXFILTCTL_ALLPHYS;
814 } else {
815 /*
816 * We have to explicitly enable the multicast hash table
817 * on the NatSemi chip if we want to use it, which we do.
818 */
819 rxfilt |= NS_RXFILTCTL_MCHASH;
820
821 /* first, zot all the existing hash bits */
822 for (i = 0; i < 32; i++) {
823 CSR_WRITE_4(sc, SIS_RXFILT_CTL, NS_FILTADDR_FMEM_LO + (i * 2));
824 CSR_WRITE_4(sc, SIS_RXFILT_DATA, 0);
825 }
826
827 ETHER_FIRST_MULTI(step, ac, enm);
828 while (enm != NULL) {
829 h = sis_mchash(sc, enm->enm_addrlo);
830
831 index = h >> 3;
832 bit = h & 0x1F;
833
834 CSR_WRITE_4(sc, SIS_RXFILT_CTL, NS_FILTADDR_FMEM_LO + index);
835
836 if (bit > 0xF)
837 bit -= 0x10;
838
839 SIS_SETBIT(sc, SIS_RXFILT_DATA, (1 << bit));
840
841 ETHER_NEXT_MULTI(step, enm);
842 }
843 }
844
845 CSR_WRITE_4(sc, SIS_RXFILT_CTL, rxfilt);
846 /* Turn the receive filter on. */
847 CSR_WRITE_4(sc, SIS_RXFILT_CTL, rxfilt | SIS_RXFILTCTL_ENABLE);
848 CSR_READ_4(sc, SIS_RXFILT_CTL);
849 }
850
851 void
852 sis_iff_sis(struct sis_softc *sc)
853 {
854 struct ifnet *ifp = &sc->arpcom.ac_if;
855 struct arpcom *ac = &sc->arpcom;
856 struct ether_multi *enm;
857 struct ether_multistep step;
858 u_int32_t h, i, maxmulti, rxfilt;
859 u_int16_t hashes[16];
860
861 /* hash table size */
862 if (sc->sis_rev >= SIS_REV_635 ||
863 sc->sis_rev == SIS_REV_900B)
864 maxmulti = 16;
865 else
866 maxmulti = 8;
867
868 rxfilt = CSR_READ_4(sc, SIS_RXFILT_CTL);
869 if (rxfilt & SIS_RXFILTCTL_ENABLE) {
870 /*
871 * Filter should be disabled to program other bits.
872 */
873 CSR_WRITE_4(sc, SIS_RXFILT_CTL, rxfilt & ~SIS_RXFILTCTL_ENABLE);
874 CSR_READ_4(sc, SIS_RXFILT_CTL);
875 }
876 rxfilt &= ~(SIS_RXFILTCTL_ALLMULTI | SIS_RXFILTCTL_ALLPHYS |
877 SIS_RXFILTCTL_BROAD);
878 ifp->if_flags &= ~IFF_ALLMULTI;
879
880 /*
881 * Always accept broadcast frames.
882 */
883 rxfilt |= SIS_RXFILTCTL_BROAD;
884
885 if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0 ||
886 ac->ac_multicnt > maxmulti) {
887 ifp->if_flags |= IFF_ALLMULTI;
888 rxfilt |= SIS_RXFILTCTL_ALLMULTI;
889 if (ifp->if_flags & IFF_PROMISC)
890 rxfilt |= SIS_RXFILTCTL_ALLPHYS;
891
892 for (i = 0; i < maxmulti; i++)
893 hashes[i] = ~0;
894 } else {
895 for (i = 0; i < maxmulti; i++)
896 hashes[i] = 0;
897
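		/*
		 * Each hash value selects one of the 16-bit filter words
		 * (h >> 4) and a bit within it (h & 0xf); the words are
		 * written out to the chip below.
		 */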
898 ETHER_FIRST_MULTI(step, ac, enm);
899 while (enm != NULL) {
900 h = sis_mchash(sc, enm->enm_addrlo);
901
902 hashes[h >> 4] |= 1 << (h & 0xf);
903
904 ETHER_NEXT_MULTI(step, enm);
905 }
906 }
907
908 for (i = 0; i < maxmulti; i++) {
909 CSR_WRITE_4(sc, SIS_RXFILT_CTL, (4 + i) << 16);
910 CSR_WRITE_4(sc, SIS_RXFILT_DATA, hashes[i]);
911 }
912
913 CSR_WRITE_4(sc, SIS_RXFILT_CTL, rxfilt);
914 /* Turn the receive filter on. */
915 CSR_WRITE_4(sc, SIS_RXFILT_CTL, rxfilt | SIS_RXFILTCTL_ENABLE);
916 CSR_READ_4(sc, SIS_RXFILT_CTL);
917 }
918
919 void
920 sis_reset(struct sis_softc *sc)
921 {
922 int i;
923
924 SIS_SETBIT(sc, SIS_CSR, SIS_CSR_RESET);
925
926 for (i = 0; i < SIS_TIMEOUT; i++) {
927 if (!(CSR_READ_4(sc, SIS_CSR) & SIS_CSR_RESET))
928 break;
929 }
930
931 if (i == SIS_TIMEOUT)
932 printf("%s: reset never completed\n", sc->sc_dev.dv_xname);
933
934 /* Wait a little while for the chip to get its brains in order. */
935 DELAY(1000);
936
937 /*
938 * If this is a NatSemi chip, make sure to clear
939 * PME mode.
940 */
941 if (sc->sis_type == SIS_TYPE_83815) {
942 CSR_WRITE_4(sc, NS_CLKRUN, NS_CLKRUN_PMESTS);
943 CSR_WRITE_4(sc, NS_CLKRUN, 0);
944 }
945 }
946
947 /*
948 * Probe for an SiS chip. Check the PCI vendor and device
949 * IDs against our list and return a device name if we find a match.
950 */
951 int
952 sis_probe(struct device *parent, void *match, void *aux)
953 {
954 return (pci_matchbyid((struct pci_attach_args *)aux, sis_devices,
955 nitems(sis_devices)));
956 }
957
958 /*
959 * Attach the interface. Allocate softc structures, do ifmedia
960 * setup and ethernet/BPF attach.
961 */
962 void
963 sis_attach(struct device *parent, struct device *self, void *aux)
964 {
965 int i;
966 const char *intrstr = NULL;
967 struct sis_softc *sc = (struct sis_softc *)self;
968 struct pci_attach_args *pa = aux;
969 pci_chipset_tag_t pc = pa->pa_pc;
970 pci_intr_handle_t ih;
971 struct ifnet *ifp;
972 bus_size_t size;
973
974 sc->sis_stopped = 1;
975
976 pci_set_powerstate(pa->pa_pc, pa->pa_tag, PCI_PMCSR_STATE_D0);
977
978 /*
979 * Map control/status registers.
980 */
981
982 #ifdef SIS_USEIOSPACE
983 if (pci_mapreg_map(pa, SIS_PCI_LOIO, PCI_MAPREG_TYPE_IO, 0,
984 &sc->sis_btag, &sc->sis_bhandle, NULL, &size, 0)) {
985 printf(": can't map i/o space\n");
986 return;
987 }
988 #else
989 if (pci_mapreg_map(pa, SIS_PCI_LOMEM, PCI_MAPREG_TYPE_MEM, 0,
990 &sc->sis_btag, &sc->sis_bhandle, NULL, &size, 0)) {
991 printf(": can't map mem space\n");
992 return;
993 }
994 #endif
995
996 /* Allocate interrupt */
997 if (pci_intr_map(pa, &ih)) {
998 printf(": couldn't map interrupt\n");
999 goto fail_1;
1000 }
1001 intrstr = pci_intr_string(pc, ih);
1002 sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, sis_intr, sc,
1003 self->dv_xname);
1004 if (sc->sc_ih == NULL) {
1005 printf(": couldn't establish interrupt");
1006 if (intrstr != NULL)
1007 printf(" at %s", intrstr);
1008 printf("\n");
1009 goto fail_1;
1010 }
1011
1012 switch (PCI_PRODUCT(pa->pa_id)) {
1013 case PCI_PRODUCT_SIS_900:
1014 sc->sis_type = SIS_TYPE_900;
1015 break;
1016 case PCI_PRODUCT_SIS_7016:
1017 sc->sis_type = SIS_TYPE_7016;
1018 break;
1019 case PCI_PRODUCT_NS_DP83815:
1020 sc->sis_type = SIS_TYPE_83815;
1021 break;
1022 default:
1023 break;
1024 }
1025 sc->sis_rev = PCI_REVISION(pa->pa_class);
1026
1027 /* Reset the adapter. */
1028 sis_reset(sc);
1029
1030 if (sc->sis_type == SIS_TYPE_900 &&
1031 (sc->sis_rev == SIS_REV_635 ||
1032 sc->sis_rev == SIS_REV_900B)) {
1033 SIO_SET(SIS_CFG_RND_CNT);
1034 SIO_SET(SIS_CFG_PERR_DETECT);
1035 }
1036
1037 /*
1038 * Get station address from the EEPROM.
1039 */
1040 switch (PCI_VENDOR(pa->pa_id)) {
1041 case PCI_VENDOR_NS:
1042 sc->sis_srr = CSR_READ_4(sc, NS_SRR);
1043
1044 if (sc->sis_srr == NS_SRR_15C)
1045 printf(", DP83815C");
1046 else if (sc->sis_srr == NS_SRR_15D)
1047 printf(", DP83815D");
1048 else if (sc->sis_srr == NS_SRR_16A)
1049 printf(", DP83816A");
1050 else
1051 printf(", srr %x", sc->sis_srr);
1052
1053 /*
1054 * Reading the MAC address out of the EEPROM on
1055 * the NatSemi chip takes a bit more work than
1056 * you'd expect. The address spans 4 16-bit words,
1057 * with the first word containing only a single bit.
1058 * You have to shift everything over one bit to
1059 * get it aligned properly. Also, the bits are
1060 * stored backwards (the LSB is really the MSB,
1061 * and so on) so you have to reverse them in order
1062 * to get the MAC address into the form we want.
1063 * Why? Who the hell knows.
1064 */
1065 {
1066 u_int16_t tmp[4];
1067
1068 sis_read_eeprom(sc, (caddr_t)&tmp, NS_EE_NODEADDR,
1069 4, 0);
1070
1071 /* Shift everything over one bit. */
1072 tmp[3] = tmp[3] >> 1;
1073 tmp[3] |= tmp[2] << 15;
1074 tmp[2] = tmp[2] >> 1;
1075 tmp[2] |= tmp[1] << 15;
1076 tmp[1] = tmp[1] >> 1;
1077 tmp[1] |= tmp[0] << 15;
1078
1079 /* Now reverse all the bits. */
1080 tmp[3] = letoh16(sis_reverse(tmp[3]));
1081 tmp[2] = letoh16(sis_reverse(tmp[2]));
1082 tmp[1] = letoh16(sis_reverse(tmp[1]));
1083
1084 bcopy(&tmp[1], sc->arpcom.ac_enaddr,
1085 ETHER_ADDR_LEN);
1086 }
1087 break;
1088 case PCI_VENDOR_SIS:
1089 default:
1090 #if defined(__amd64__) || defined(__i386__)
1091 /*
1092 * If this is a SiS 630E chipset with an embedded
1093 * SiS 900 controller, we have to read the MAC address
1094 * from the APC CMOS RAM. Our method for doing this
1095 * is very ugly since we have to reach out and grab
1096 * ahold of hardware for which we cannot properly
1097 * allocate resources. This code is only compiled on
1098 * the i386 architecture since the SiS 630E chipset
1099 * is for x86 motherboards only. Note that there are
1100 * a lot of magic numbers in this hack. These are
1101 * taken from SiS's Linux driver. I'd like to replace
1102 * them with proper symbolic definitions, but that
1103 * requires some datasheets that I don't have access
1104 * to at the moment.
1105 */
1106 if (sc->sis_rev == SIS_REV_630S ||
1107 sc->sis_rev == SIS_REV_630E)
1108 sis_read_cmos(sc, pa, (caddr_t)&sc->arpcom.ac_enaddr,
1109 0x9, 6);
1110 else
1111 #endif
1112 if (sc->sis_rev == SIS_REV_96x)
1113 sis_read96x_mac(sc);
1114 else if (sc->sis_rev == SIS_REV_635 ||
1115 sc->sis_rev == SIS_REV_630ET ||
1116 sc->sis_rev == SIS_REV_630EA1)
1117 sis_read_mac(sc, pa);
1118 else
1119 sis_read_eeprom(sc, (caddr_t)&sc->arpcom.ac_enaddr,
1120 SIS_EE_NODEADDR, 3, 1);
1121 break;
1122 }
1123
1124 printf(": %s, address %s\n", intrstr,
1125 ether_sprintf(sc->arpcom.ac_enaddr));
1126
1127 sc->sc_dmat = pa->pa_dmat;
1128
1129 if (bus_dmamem_alloc(sc->sc_dmat, sizeof(struct sis_list_data),
1130 PAGE_SIZE, 0, sc->sc_listseg, 1, &sc->sc_listnseg,
1131 BUS_DMA_NOWAIT | BUS_DMA_ZERO) != 0) {
1132 printf(": can't alloc list mem\n");
1133 goto fail_2;
1134 }
1135 if (bus_dmamem_map(sc->sc_dmat, sc->sc_listseg, sc->sc_listnseg,
1136 sizeof(struct sis_list_data), &sc->sc_listkva,
1137 BUS_DMA_NOWAIT) != 0) {
1138 printf(": can't map list mem\n");
1139 goto fail_2;
1140 }
1141 if (bus_dmamap_create(sc->sc_dmat, sizeof(struct sis_list_data), 1,
1142 sizeof(struct sis_list_data), 0, BUS_DMA_NOWAIT,
1143 &sc->sc_listmap) != 0) {
1144 printf(": can't alloc list map\n");
1145 goto fail_2;
1146 }
1147 if (bus_dmamap_load(sc->sc_dmat, sc->sc_listmap, sc->sc_listkva,
1148 sizeof(struct sis_list_data), NULL, BUS_DMA_NOWAIT) != 0) {
1149 printf(": can't load list map\n");
1150 goto fail_2;
1151 }
1152 sc->sis_ldata = (struct sis_list_data *)sc->sc_listkva;
1153
1154 for (i = 0; i < SIS_RX_LIST_CNT; i++) {
1155 if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0,
1156 BUS_DMA_NOWAIT, &sc->sis_ldata->sis_rx_list[i].map) != 0) {
1157 printf(": can't create rx map\n");
1158 goto fail_2;
1159 }
1160 }
1161
1162 for (i = 0; i < SIS_TX_LIST_CNT; i++) {
1163 if (bus_dmamap_create(sc->sc_dmat, MCLBYTES,
1164 SIS_MAXTXSEGS, MCLBYTES, 0, BUS_DMA_NOWAIT,
1165 &sc->sis_ldata->sis_tx_list[i].map) != 0) {
1166 printf(": can't create tx map\n");
1167 goto fail_2;
1168 }
1169 }
1170 if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, SIS_MAXTXSEGS,
1171 MCLBYTES, 0, BUS_DMA_NOWAIT, &sc->sc_tx_sparemap) != 0) {
1172 printf(": can't create tx spare map\n");
1173 goto fail_2;
1174 }
1175
1176 timeout_set(&sc->sis_timeout, sis_tick, sc);
1177
1178 ifp = &sc->arpcom.ac_if;
1179 ifp->if_softc = sc;
1180 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1181 ifp->if_ioctl = sis_ioctl;
1182 ifp->if_start = sis_start;
1183 ifp->if_watchdog = sis_watchdog;
1184 ifq_init_maxlen(&ifp->if_snd, SIS_TX_LIST_CNT - 1);
1185 bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
1186 ifp->if_hardmtu = 1518; /* determined experimentally on DP83815 */
1187
1188 ifp->if_capabilities = IFCAP_VLAN_MTU;
1189
1190 sc->sc_mii.mii_ifp = ifp;
1191 sc->sc_mii.mii_readreg = sis_miibus_readreg;
1192 sc->sc_mii.mii_writereg = sis_miibus_writereg;
1193 sc->sc_mii.mii_statchg = sis_miibus_statchg;
1194 ifmedia_init(&sc->sc_mii.mii_media, 0, sis_ifmedia_upd,sis_ifmedia_sts);
1195 mii_attach(self, &sc->sc_mii, 0xffffffff, MII_PHY_ANY, MII_OFFSET_ANY,
1196 0);
1197 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
1198 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
1199 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
1200 } else
1201 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
1202
1203 /*
1204 * Call MI attach routines.
1205 */
1206 if_attach(ifp);
1207 ether_ifattach(ifp);
1208 return;
1209
1210 fail_2:
1211 pci_intr_disestablish(pc, sc->sc_ih);
1212
1213 fail_1:
1214 bus_space_unmap(sc->sis_btag, sc->sis_bhandle, size);
1215 }
1216
1217 int
1218 sis_activate(struct device *self, int act)
1219 {
1220 struct sis_softc *sc = (struct sis_softc *)self;
1221 struct ifnet *ifp = &sc->arpcom.ac_if;
1222
1223 switch (act) {
1224 case DVACT_SUSPEND:
1225 if (ifp->if_flags & IFF_RUNNING)
1226 sis_stop(sc);
1227 break;
1228 case DVACT_RESUME:
1229 if (ifp->if_flags & IFF_UP)
1230 sis_init(sc);
1231 break;
1232 }
1233 return (0);
1234 }
1235
1236 /*
1237 * Initialize the TX and RX descriptors and allocate mbufs for them. Note that
1238 * we arrange the descriptors in a closed ring, so that the last descriptor
1239 * points back to the first.
1240 */
1241 int
1242 sis_ring_init(struct sis_softc *sc)
1243 {
1244 struct sis_list_data *ld;
1245 struct sis_ring_data *cd;
1246 int i, nexti;
1247
1248 cd = &sc->sis_cdata;
1249 ld = sc->sis_ldata;
1250
1251 for (i = 0; i < SIS_TX_LIST_CNT; i++) {
1252 if (i == (SIS_TX_LIST_CNT - 1))
1253 nexti = 0;
1254 else
1255 nexti = i + 1;
1256 ld->sis_tx_list[i].sis_nextdesc = &ld->sis_tx_list[nexti];
1257 ld->sis_tx_list[i].sis_next =
1258 htole32(sc->sc_listmap->dm_segs[0].ds_addr +
1259 offsetof(struct sis_list_data, sis_tx_list[nexti]));
1260 ld->sis_tx_list[i].sis_mbuf = NULL;
1261 ld->sis_tx_list[i].sis_ptr = 0;
1262 ld->sis_tx_list[i].sis_ctl = 0;
1263 }
1264
1265 cd->sis_tx_prod = cd->sis_tx_cons = cd->sis_tx_cnt = 0;
1266
1267 for (i = 0; i < SIS_RX_LIST_CNT; i++) {
1268 if (i == SIS_RX_LIST_CNT - 1)
1269 nexti = 0;
1270 else
1271 nexti = i + 1;
1272 ld->sis_rx_list[i].sis_nextdesc = &ld->sis_rx_list[nexti];
1273 ld->sis_rx_list[i].sis_next =
1274 htole32(sc->sc_listmap->dm_segs[0].ds_addr +
1275 offsetof(struct sis_list_data, sis_rx_list[nexti]));
1276 ld->sis_rx_list[i].sis_ctl = 0;
1277 }
1278
1279 cd->sis_rx_prod = cd->sis_rx_cons = 0;
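	/*
	 * Let if_rxr run the ring between a low water mark of 2 and a high
	 * water mark of SIS_RX_LIST_CNT - 1 filled descriptors.
	 */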
1280 if_rxr_init(&cd->sis_rx_ring, 2, SIS_RX_LIST_CNT - 1);
1281 sis_fill_rx_ring(sc);
1282
1283 return (0);
1284 }
1285
1286 void
1287 sis_fill_rx_ring(struct sis_softc *sc)
1288 {
1289 struct sis_list_data *ld;
1290 struct sis_ring_data *cd;
1291 u_int slots;
1292
1293 cd = &sc->sis_cdata;
1294 ld = sc->sis_ldata;
1295
1296 for (slots = if_rxr_get(&cd->sis_rx_ring, SIS_RX_LIST_CNT);
1297 slots > 0; slots--) {
1298 if (sis_newbuf(sc, &ld->sis_rx_list[cd->sis_rx_prod]))
1299 break;
1300
1301 SIS_INC(cd->sis_rx_prod, SIS_RX_LIST_CNT);
1302 }
1303 if_rxr_put(&cd->sis_rx_ring, slots);
1304 }
1305
1306 /*
1307 * Initialize an RX descriptor and attach an MBUF cluster.
1308 */
1309 int
1310 sis_newbuf(struct sis_softc *sc, struct sis_desc *c)
1311 {
1312 struct mbuf *m_new = NULL;
1313
1314 if (c == NULL)
1315 return (EINVAL);
1316
1317 m_new = MCLGETL(NULL, M_DONTWAIT, MCLBYTES);
1318 if (!m_new)
1319 return (ENOBUFS);
1320
1321 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
1322
1323 if (bus_dmamap_load_mbuf(sc->sc_dmat, c->map, m_new,
1324 BUS_DMA_NOWAIT)) {
1325 m_free(m_new);
1326 return (ENOBUFS);
1327 }
1328
1329 bus_dmamap_sync(sc->sc_dmat, c->map, 0, c->map->dm_mapsize,
1330 BUS_DMASYNC_PREREAD);
1331
1332 c->sis_mbuf = m_new;
1333 c->sis_ptr = htole32(c->map->dm_segs[0].ds_addr);
1334
1335 bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
1336 ((caddr_t)c - sc->sc_listkva), sizeof(struct sis_desc),
1337 BUS_DMASYNC_PREWRITE);
1338
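	/*
	 * The length field in sis_ctl tells the chip how much buffer space
	 * is available; the chip rewrites this word with the received length
	 * and status bits when it fills the descriptor (see SIS_RXBYTES()
	 * and sis_rxstat in sis_rxeof()).
	 */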
1339 c->sis_ctl = htole32(ETHER_MAX_DIX_LEN);
1340
1341 bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
1342 ((caddr_t)c - sc->sc_listkva), sizeof(struct sis_desc),
1343 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1344
1345 return (0);
1346 }
1347
1348 /*
1349 * A frame has been uploaded: pass the resulting mbuf chain up to
1350 * the higher level protocols.
1351 */
1352 void
1353 sis_rxeof(struct sis_softc *sc)
1354 {
1355 struct mbuf_list ml = MBUF_LIST_INITIALIZER();
1356 struct mbuf *m;
1357 struct ifnet *ifp;
1358 struct sis_desc *cur_rx;
1359 int total_len = 0;
1360 u_int32_t rxstat;
1361
1362 ifp = &sc->arpcom.ac_if;
1363
1364 while (if_rxr_inuse(&sc->sis_cdata.sis_rx_ring) > 0) {
1365 cur_rx = &sc->sis_ldata->sis_rx_list[sc->sis_cdata.sis_rx_cons];
1366 bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
1367 ((caddr_t)cur_rx - sc->sc_listkva),
1368 sizeof(struct sis_desc),
1369 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1370 if (!SIS_OWNDESC(cur_rx))
1371 break;
1372
1373 rxstat = letoh32(cur_rx->sis_rxstat);
1374 m = cur_rx->sis_mbuf;
1375 cur_rx->sis_mbuf = NULL;
1376 total_len = SIS_RXBYTES(cur_rx);
1377 /* from here on the buffer is consumed */
1378 SIS_INC(sc->sis_cdata.sis_rx_cons, SIS_RX_LIST_CNT);
1379 if_rxr_put(&sc->sis_cdata.sis_rx_ring, 1);
1380
1381 /*
1382 * DP83816A sometimes produces zero-length packets
1383 * shortly after initialisation.
1384 */
1385 if (total_len == 0) {
1386 m_freem(m);
1387 continue;
1388 }
1389
1390 /* The ethernet CRC is always included */
1391 total_len -= ETHER_CRC_LEN;
1392
1393 /*
1394 * If an error occurs, update stats, clear the
1395 * status word and leave the mbuf cluster in place:
1396 * it should simply get re-used next time this descriptor
1397 * comes up in the ring. However, don't report long
1398 * frames as errors since they could be VLANs.
1399 */
1400 if (rxstat & SIS_RXSTAT_GIANT &&
1401 total_len <= (ETHER_MAX_DIX_LEN - ETHER_CRC_LEN))
1402 rxstat &= ~SIS_RXSTAT_GIANT;
1403 if (SIS_RXSTAT_ERROR(rxstat)) {
1404 ifp->if_ierrors++;
1405 if (rxstat & SIS_RXSTAT_COLL)
1406 ifp->if_collisions++;
1407 m_freem(m);
1408 continue;
1409 }
1410
1411 /* No errors; receive the packet. */
1412 bus_dmamap_sync(sc->sc_dmat, cur_rx->map, 0,
1413 cur_rx->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1414 #ifdef __STRICT_ALIGNMENT
1415 /*
1416 * On architectures with strict alignment requirements the received
1417 * payload cannot be handed to the stack as-is, so copy it into a
1418 * properly aligned mbuf chain with m_devget() and free the original
1419 * cluster; if the allocation fails, drop the packet and count an
1420 * input error. On other architectures (the #else case) the cluster
1421 * is passed up directly, saving the expensive copy done by
1422 * m_devget().
1423 */
1424 {
1425 struct mbuf *m0;
1426 m0 = m_devget(mtod(m, char *), total_len, ETHER_ALIGN);
1427 m_freem(m);
1428 if (m0 == NULL) {
1429 ifp->if_ierrors++;
1430 continue;
1431 }
1432 m = m0;
1433 }
1434 #else
1435 m->m_pkthdr.len = m->m_len = total_len;
1436 #endif
1437
1438 ml_enqueue(&ml, m);
1439 }
1440
1441 if (ifiq_input(&ifp->if_rcv, &ml))
1442 if_rxr_livelocked(&sc->sis_cdata.sis_rx_ring);
1443
1444 sis_fill_rx_ring(sc);
1445 }
1446
1447 /*
1448 * A frame was downloaded to the chip. It's safe for us to clean up
1449 * the list buffers.
1450 */
1451
1452 void
1453 sis_txeof(struct sis_softc *sc)
1454 {
1455 struct ifnet *ifp;
1456 u_int32_t idx, ctl, txstat;
1457
1458 ifp = &sc->arpcom.ac_if;
1459
1460 /*
1461 * Go through our tx list and free mbufs for those
1462 * frames that have been transmitted.
1463 */
1464 for (idx = sc->sis_cdata.sis_tx_cons; sc->sis_cdata.sis_tx_cnt > 0;
1465 sc->sis_cdata.sis_tx_cnt--, SIS_INC(idx, SIS_TX_LIST_CNT)) {
1466 struct sis_desc *cur_tx = &sc->sis_ldata->sis_tx_list[idx];
1467
1468 bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
1469 ((caddr_t)cur_tx - sc->sc_listkva),
1470 sizeof(struct sis_desc),
1471 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1472
1473 if (SIS_OWNDESC(cur_tx))
1474 break;
1475
1476 ctl = letoh32(cur_tx->sis_ctl);
1477
1478 if (ctl & SIS_CMDSTS_MORE)
1479 continue;
1480
1481 txstat = letoh32(cur_tx->sis_txstat);
1482
1483 if (!(ctl & SIS_CMDSTS_PKT_OK)) {
1484 ifp->if_oerrors++;
1485 if (txstat & SIS_TXSTAT_EXCESSCOLLS)
1486 ifp->if_collisions++;
1487 if (txstat & SIS_TXSTAT_OUTOFWINCOLL)
1488 ifp->if_collisions++;
1489 }
1490
1491 ifp->if_collisions += (txstat & SIS_TXSTAT_COLLCNT) >> 16;
1492
1493 if (cur_tx->map->dm_nsegs != 0) {
1494 bus_dmamap_t map = cur_tx->map;
1495
1496 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
1497 BUS_DMASYNC_POSTWRITE);
1498 bus_dmamap_unload(sc->sc_dmat, map);
1499 }
1500 if (cur_tx->sis_mbuf != NULL) {
1501 m_freem(cur_tx->sis_mbuf);
1502 cur_tx->sis_mbuf = NULL;
1503 }
1504 }
1505
1506 if (idx != sc->sis_cdata.sis_tx_cons) {
1507 /* we freed up some buffers */
1508 sc->sis_cdata.sis_tx_cons = idx;
1509 ifq_clr_oactive(&ifp->if_snd);
1510 }
1511
1512 ifp->if_timer = (sc->sis_cdata.sis_tx_cnt == 0) ? 0 : 5;
1513 }
1514
1515 void
1516 sis_tick(void *xsc)
1517 {
1518 struct sis_softc *sc = (struct sis_softc *)xsc;
1519 struct mii_data *mii;
1520 int s;
1521
1522 s = splnet();
1523
1524 mii = &sc->sc_mii;
1525 mii_tick(mii);
1526
1527 if (!sc->sis_link)
1528 sis_miibus_statchg(&sc->sc_dev);
1529
1530 timeout_add_sec(&sc->sis_timeout, 1);
1531
1532 splx(s);
1533 }
1534
1535 int
1536 sis_intr(void *arg)
1537 {
1538 struct sis_softc *sc = arg;
1539 struct ifnet *ifp = &sc->arpcom.ac_if;
1540 u_int32_t status;
1541
1542 if (sc->sis_stopped) /* Most likely shared interrupt */
1543 return (0);
1544
1545 /* Reading the ISR register clears all interrupts. */
1546 status = CSR_READ_4(sc, SIS_ISR);
1547 if ((status & SIS_INTRS) == 0)
1548 return (0);
1549
1550 if (status &
1551 (SIS_ISR_TX_DESC_OK | SIS_ISR_TX_ERR |
1552 SIS_ISR_TX_OK | SIS_ISR_TX_IDLE))
1553 sis_txeof(sc);
1554
1555 if (status &
1556 (SIS_ISR_RX_DESC_OK | SIS_ISR_RX_OK |
1557 SIS_ISR_RX_ERR | SIS_ISR_RX_IDLE))
1558 sis_rxeof(sc);
1559
1560 if (status & (SIS_ISR_RX_IDLE)) {
1561 /* consume what's there so that sis_rx_cons points
1562 * to the first HW owned descriptor. */
1563 sis_rxeof(sc);
1564 /* reprogram the RX listptr */
1565 CSR_WRITE_4(sc, SIS_RX_LISTPTR,
1566 sc->sc_listmap->dm_segs[0].ds_addr +
1567 offsetof(struct sis_list_data,
1568 sis_rx_list[sc->sis_cdata.sis_rx_cons]));
1569 }
1570
1571 if (status & SIS_ISR_SYSERR)
1572 sis_init(sc);
1573
1574 /*
1575 * XXX: Re-enable the RX engine on every interrupt, otherwise it
1576 * occasionally stalls under unknown circumstances.
1577 */
1578 SIS_SETBIT(sc, SIS_CSR, SIS_CSR_RX_ENABLE);
1579
1580 if (!ifq_empty(&ifp->if_snd))
1581 sis_start(ifp);
1582
1583 return (1);
1584 }
1585
1586 /*
1587 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
1588 * pointers to the fragment pointers.
1589 */
1590 int
1591 sis_encap(struct sis_softc *sc, struct mbuf *m_head, u_int32_t *txidx)
1592 {
1593 struct sis_desc *f = NULL;
1594 bus_dmamap_t map;
1595 int frag, cur, i, error;
1596
1597 map = sc->sc_tx_sparemap;
1598
1599 error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m_head,
1600 BUS_DMA_NOWAIT);
1601 switch (error) {
1602 case 0:
1603 break;
1604
1605 case EFBIG:
1606 if (m_defrag(m_head, M_DONTWAIT) == 0 &&
1607 bus_dmamap_load_mbuf(sc->sc_dmat, map, m_head,
1608 BUS_DMA_NOWAIT) == 0)
1609 break;
1610
1611 /* FALLTHROUGH */
1612 default:
1613 return (ENOBUFS);
1614 }
1615
1616 if ((SIS_TX_LIST_CNT - (sc->sis_cdata.sis_tx_cnt + map->dm_nsegs)) < 2) {
1617 bus_dmamap_unload(sc->sc_dmat, map);
1618 return (ENOBUFS);
1619 }
1620
1621 /*
1622 * Start packing the mbufs in this chain into
1623 * the fragment pointers. Stop when we run out
1624 * of fragments or hit the end of the mbuf chain.
1625 */
1626 cur = frag = *txidx;
1627
1628 for (i = 0; i < map->dm_nsegs; i++) {
1629 f = &sc->sis_ldata->sis_tx_list[frag];
1630 f->sis_ctl = htole32(SIS_CMDSTS_MORE | map->dm_segs[i].ds_len);
1631 f->sis_ptr = htole32(map->dm_segs[i].ds_addr);
1632 if (i != 0)
1633 f->sis_ctl |= htole32(SIS_CMDSTS_OWN);
1634 cur = frag;
1635 SIS_INC(frag, SIS_TX_LIST_CNT);
1636 }
1637
1638 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
1639 BUS_DMASYNC_PREWRITE);
1640
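	/*
	 * Hand the chain to the chip: every descriptor except the first
	 * already carries SIS_CMDSTS_OWN, so setting OWN on the first one
	 * last ensures the hardware never sees a half-built chain.
	 */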
1641 sc->sis_ldata->sis_tx_list[cur].sis_mbuf = m_head;
1642 sc->sis_ldata->sis_tx_list[cur].sis_ctl &= ~htole32(SIS_CMDSTS_MORE);
1643 sc->sis_ldata->sis_tx_list[*txidx].sis_ctl |= htole32(SIS_CMDSTS_OWN);
1644 sc->sis_cdata.sis_tx_cnt += map->dm_nsegs;
1645 *txidx = frag;
1646
1647 bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
1648 offsetof(struct sis_list_data, sis_tx_list[0]),
1649 sizeof(struct sis_desc) * SIS_TX_LIST_CNT,
1650 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1651
1652 return (0);
1653 }
1654
1655 /*
1656 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
1657 * to the mbuf data regions directly in the transmit lists. We also save a
1658 * copy of the pointers since the transmit list fragment pointers are
1659 * physical addresses.
1660 */
1661
1662 void
1663 sis_start(struct ifnet *ifp)
1664 {
1665 struct sis_softc *sc;
1666 struct mbuf *m_head = NULL;
1667 u_int32_t idx, queued = 0;
1668
1669 sc = ifp->if_softc;
1670
1671 if (!sc->sis_link)
1672 return;
1673
1674 idx = sc->sis_cdata.sis_tx_prod;
1675
1676 if (ifq_is_oactive(&ifp->if_snd))
1677 return;
1678
1679 while(sc->sis_ldata->sis_tx_list[idx].sis_mbuf == NULL) {
1680 m_head = ifq_deq_begin(&ifp->if_snd);
1681 if (m_head == NULL)
1682 break;
1683
1684 if (sis_encap(sc, m_head, &idx)) {
1685 ifq_deq_rollback(&ifp->if_snd, m_head);
1686 ifq_set_oactive(&ifp->if_snd);
1687 break;
1688 }
1689
1690 /* now we are committed to transmit the packet */
1691 ifq_deq_commit(&ifp->if_snd, m_head);
1692
1693 queued++;
1694
1695 /*
1696 * If there's a BPF listener, bounce a copy of this frame
1697 * to him.
1698 */
1699 #if NBPFILTER > 0
1700 if (ifp->if_bpf)
1701 bpf_mtap(ifp->if_bpf, m_head, BPF_DIRECTION_OUT);
1702 #endif
1703 }
1704
1705 if (queued) {
1706 /* Transmit */
1707 sc->sis_cdata.sis_tx_prod = idx;
1708 SIS_SETBIT(sc, SIS_CSR, SIS_CSR_TX_ENABLE);
1709
1710 /*
1711 * Set a timeout in case the chip goes out to lunch.
1712 */
1713 ifp->if_timer = 5;
1714 }
1715 }
1716
1717 void
1718 sis_init(void *xsc)
1719 {
1720 struct sis_softc *sc = (struct sis_softc *)xsc;
1721 struct ifnet *ifp = &sc->arpcom.ac_if;
1722 struct mii_data *mii;
1723 int s;
1724
1725 s = splnet();
1726
1727 /*
1728 * Cancel pending I/O and free all RX/TX buffers.
1729 */
1730 sis_stop(sc);
1731
1732 /*
1733 * Reset the chip to a known state.
1734 */
1735 sis_reset(sc);
1736
1737 #if NS_IHR_DELAY > 0
1738 /* Configure interrupt holdoff register. */
1739 if (sc->sis_type == SIS_TYPE_83815 && sc->sis_srr == NS_SRR_16A)
1740 CSR_WRITE_4(sc, NS_IHR, NS_IHR_VALUE);
1741 #endif
1742
1743 mii = &sc->sc_mii;
1744
1745 /* Set MAC address */
1746 if (sc->sis_type == SIS_TYPE_83815) {
1747 CSR_WRITE_4(sc, SIS_RXFILT_CTL, NS_FILTADDR_PAR0);
1748 CSR_WRITE_4(sc, SIS_RXFILT_DATA,
1749 htole16(((u_int16_t *)sc->arpcom.ac_enaddr)[0]));
1750 CSR_WRITE_4(sc, SIS_RXFILT_CTL, NS_FILTADDR_PAR1);
1751 CSR_WRITE_4(sc, SIS_RXFILT_DATA,
1752 htole16(((u_int16_t *)sc->arpcom.ac_enaddr)[1]));
1753 CSR_WRITE_4(sc, SIS_RXFILT_CTL, NS_FILTADDR_PAR2);
1754 CSR_WRITE_4(sc, SIS_RXFILT_DATA,
1755 htole16(((u_int16_t *)sc->arpcom.ac_enaddr)[2]));
1756 } else {
1757 CSR_WRITE_4(sc, SIS_RXFILT_CTL, SIS_FILTADDR_PAR0);
1758 CSR_WRITE_4(sc, SIS_RXFILT_DATA,
1759 htole16(((u_int16_t *)sc->arpcom.ac_enaddr)[0]));
1760 CSR_WRITE_4(sc, SIS_RXFILT_CTL, SIS_FILTADDR_PAR1);
1761 CSR_WRITE_4(sc, SIS_RXFILT_DATA,
1762 htole16(((u_int16_t *)sc->arpcom.ac_enaddr)[1]));
1763 CSR_WRITE_4(sc, SIS_RXFILT_CTL, SIS_FILTADDR_PAR2);
1764 CSR_WRITE_4(sc, SIS_RXFILT_DATA,
1765 htole16(((u_int16_t *)sc->arpcom.ac_enaddr)[2]));
1766 }
1767
1768 /* Init circular TX/RX lists. */
1769 if (sis_ring_init(sc) != 0) {
1770 printf("%s: initialization failed: no memory for rx buffers\n",
1771 sc->sc_dev.dv_xname);
1772 sis_stop(sc);
1773 splx(s);
1774 return;
1775 }
1776
1777 /*
1778 * Page 78 of the DP83815 data sheet (September 2002 version)
1779 * recommends the following register settings "for optimum
1780 * performance" for rev 15C. The driver from NS also sets
1781 * the PHY_CR register for later versions.
1782 *
1783 * This resolves an issue with tons of errors in AcceptPerfectMatch
1784 * (non-IFF_PROMISC) mode.
1785 */
1786 if (sc->sis_type == SIS_TYPE_83815 && sc->sis_srr <= NS_SRR_15D) {
1787 CSR_WRITE_4(sc, NS_PHY_PAGE, 0x0001);
1788 CSR_WRITE_4(sc, NS_PHY_CR, 0x189C);
1789 /* set val for c2 */
1790 CSR_WRITE_4(sc, NS_PHY_TDATA, 0x0000);
1791 /* load/kill c2 */
1792 CSR_WRITE_4(sc, NS_PHY_DSPCFG, 0x5040);
1793 /* raise SD off, from 4 to c */
1794 CSR_WRITE_4(sc, NS_PHY_SDCFG, 0x008C);
1795 CSR_WRITE_4(sc, NS_PHY_PAGE, 0);
1796 }
1797
1798 /*
1799 * Program promiscuous mode and multicast filters.
1800 */
1801 sis_iff(sc);
1802
1803 /*
1804 * Load the address of the RX and TX lists.
1805 */
1806 CSR_WRITE_4(sc, SIS_RX_LISTPTR, sc->sc_listmap->dm_segs[0].ds_addr +
1807 offsetof(struct sis_list_data, sis_rx_list[0]));
1808 CSR_WRITE_4(sc, SIS_TX_LISTPTR, sc->sc_listmap->dm_segs[0].ds_addr +
1809 offsetof(struct sis_list_data, sis_tx_list[0]));
1810
1811 /* SIS_CFG_EDB_MASTER_EN indicates the EDB bus is used instead of
1812 * the PCI bus. When this bit is set, the Max DMA Burst Size
1813 * for TX/RX DMA should be no larger than 16 double words.
1814 */
1815 if (CSR_READ_4(sc, SIS_CFG) & SIS_CFG_EDB_MASTER_EN)
1816 CSR_WRITE_4(sc, SIS_RX_CFG, SIS_RXCFG64);
1817 else
1818 CSR_WRITE_4(sc, SIS_RX_CFG, SIS_RXCFG256);
1819
1820 /* Accept Long Packets for VLAN support */
1821 SIS_SETBIT(sc, SIS_RX_CFG, SIS_RXCFG_RX_JABBER);
1822
1823 /*
1824 * Assume 100Mbps link, actual MAC configuration is done
1825 * after getting a valid link.
1826 */
1827 CSR_WRITE_4(sc, SIS_TX_CFG, SIS_TXCFG_100);
1828
1829 /*
1830 * Enable interrupts.
1831 */
1832 CSR_WRITE_4(sc, SIS_IMR, SIS_INTRS);
1833 CSR_WRITE_4(sc, SIS_IER, 1);
1834
1835 /* Clear MAC disable. */
1836 SIS_CLRBIT(sc, SIS_CSR, SIS_CSR_TX_DISABLE | SIS_CSR_RX_DISABLE);
1837
1838 sc->sis_link = 0;
1839 mii_mediachg(mii);
1840
1841 sc->sis_stopped = 0;
1842 ifp->if_flags |= IFF_RUNNING;
1843 ifq_clr_oactive(&ifp->if_snd);
1844
1845 splx(s);
1846
1847 timeout_add_sec(&sc->sis_timeout, 1);
1848 }
1849
1850 /*
1851 * Set media options.
1852 */
1853 int
1854 sis_ifmedia_upd(struct ifnet *ifp)
1855 {
1856 struct sis_softc *sc;
1857 struct mii_data *mii;
1858
1859 sc = ifp->if_softc;
1860
1861 mii = &sc->sc_mii;
1862 if (mii->mii_instance) {
1863 struct mii_softc *miisc;
1864 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
1865 mii_phy_reset(miisc);
1866 }
1867 mii_mediachg(mii);
1868
1869 return (0);
1870 }
1871
1872 /*
1873 * Report current media status.
1874 */
1875 void
1876 sis_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
1877 {
1878 struct sis_softc *sc;
1879 struct mii_data *mii;
1880
1881 sc = ifp->if_softc;
1882
1883 mii = &sc->sc_mii;
1884 mii_pollstat(mii);
1885 ifmr->ifm_active = mii->mii_media_active;
1886 ifmr->ifm_status = mii->mii_media_status;
1887 }
1888
1889 int
1890 sis_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
1891 {
1892 struct sis_softc *sc = ifp->if_softc;
1893 struct ifreq *ifr = (struct ifreq *) data;
1894 struct mii_data *mii;
1895 int s, error = 0;
1896
1897 s = splnet();
1898
1899 switch(command) {
1900 case SIOCSIFADDR:
1901 ifp->if_flags |= IFF_UP;
1902 if (!(ifp->if_flags & IFF_RUNNING))
1903 sis_init(sc);
1904 break;
1905
1906 case SIOCSIFFLAGS:
1907 if (ifp->if_flags & IFF_UP) {
1908 if (ifp->if_flags & IFF_RUNNING)
1909 error = ENETRESET;
1910 else
1911 sis_init(sc);
1912 } else {
1913 if (ifp->if_flags & IFF_RUNNING)
1914 sis_stop(sc);
1915 }
1916 break;
1917
1918 case SIOCGIFMEDIA:
1919 case SIOCSIFMEDIA:
1920 mii = &sc->sc_mii;
1921 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
1922 break;
1923
1924 case SIOCGIFRXR:
1925 error = if_rxr_ioctl((struct if_rxrinfo *)ifr->ifr_data,
1926 NULL, MCLBYTES, &sc->sis_cdata.sis_rx_ring);
1927 break;
1928
1929 default:
1930 error = ether_ioctl(ifp, &sc->arpcom, command, data);
1931 }
1932
1933 if (error == ENETRESET) {
1934 if (ifp->if_flags & IFF_RUNNING)
1935 sis_iff(sc);
1936 error = 0;
1937 }
1938
1939 splx(s);
1940 return(error);
1941 }
1942
1943 void
1944 sis_watchdog(struct ifnet *ifp)
1945 {
1946 struct sis_softc *sc;
1947 int s;
1948
1949 sc = ifp->if_softc;
1950
1951 if (sc->sis_stopped)
1952 return;
1953
1954 ifp->if_oerrors++;
1955 printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);
1956
1957 s = splnet();
1958 sis_init(sc);
1959
1960 if (!ifq_empty(&ifp->if_snd))
1961 sis_start(ifp);
1962
1963 splx(s);
1964 }
1965
1966 /*
1967 * Stop the adapter and free any mbufs allocated to the
1968 * RX and TX lists.
1969 */
1970 void
1971 sis_stop(struct sis_softc *sc)
1972 {
1973 int i;
1974 struct ifnet *ifp;
1975
1976 if (sc->sis_stopped)
1977 return;
1978
1979 ifp = &sc->arpcom.ac_if;
1980 ifp->if_timer = 0;
1981
1982 timeout_del(&sc->sis_timeout);
1983
1984 ifp->if_flags &= ~IFF_RUNNING;
1985 ifq_clr_oactive(&ifp->if_snd);
1986 sc->sis_stopped = 1;
1987
1988 CSR_WRITE_4(sc, SIS_IER, 0);
1989 CSR_WRITE_4(sc, SIS_IMR, 0);
1990 CSR_READ_4(sc, SIS_ISR); /* clear any interrupts already pending */
1991 SIS_SETBIT(sc, SIS_CSR, SIS_CSR_TX_DISABLE | SIS_CSR_RX_DISABLE);
1992 DELAY(1000);
1993 CSR_WRITE_4(sc, SIS_TX_LISTPTR, 0);
1994 CSR_WRITE_4(sc, SIS_RX_LISTPTR, 0);
1995
1996 sc->sis_link = 0;
1997
1998 /*
1999 * Free data in the RX lists.
2000 */
2001 for (i = 0; i < SIS_RX_LIST_CNT; i++) {
2002 if (sc->sis_ldata->sis_rx_list[i].map->dm_nsegs != 0) {
2003 bus_dmamap_t map = sc->sis_ldata->sis_rx_list[i].map;
2004
2005 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
2006 BUS_DMASYNC_POSTREAD);
2007 bus_dmamap_unload(sc->sc_dmat, map);
2008 }
2009 if (sc->sis_ldata->sis_rx_list[i].sis_mbuf != NULL) {
2010 m_freem(sc->sis_ldata->sis_rx_list[i].sis_mbuf);
2011 sc->sis_ldata->sis_rx_list[i].sis_mbuf = NULL;
2012 }
2013 bzero(&sc->sis_ldata->sis_rx_list[i],
2014 sizeof(struct sis_desc) - sizeof(bus_dmamap_t));
2015 }
2016
2017 /*
2018 * Free the TX list buffers.
2019 */
2020 for (i = 0; i < SIS_TX_LIST_CNT; i++) {
2021 if (sc->sis_ldata->sis_tx_list[i].map->dm_nsegs != 0) {
2022 bus_dmamap_t map = sc->sis_ldata->sis_tx_list[i].map;
2023
2024 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
2025 BUS_DMASYNC_POSTWRITE);
2026 bus_dmamap_unload(sc->sc_dmat, map);
2027 }
2028 if (sc->sis_ldata->sis_tx_list[i].sis_mbuf != NULL) {
2029 m_freem(sc->sis_ldata->sis_tx_list[i].sis_mbuf);
2030 sc->sis_ldata->sis_tx_list[i].sis_mbuf = NULL;
2031 }
2032 bzero(&sc->sis_ldata->sis_tx_list[i],
2033 sizeof(struct sis_desc) - sizeof(bus_dmamap_t));
2034 }
2035 }
2036