1 /*	$NetBSD: rtl81x9.c,v 1.100 2016/06/10 13:27:13 ozaki-r Exp $	*/
2 
3 /*
4  * Copyright (c) 1997, 1998
5  *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. All advertising materials mentioning features or use of this software
16  *    must display the following acknowledgement:
17  *	This product includes software developed by Bill Paul.
18  * 4. Neither the name of the author nor the names of any co-contributors
19  *    may be used to endorse or promote products derived from this software
20  *    without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
26  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32  * THE POSSIBILITY OF SUCH DAMAGE.
33  *
34  *	FreeBSD Id: if_rl.c,v 1.17 1999/06/19 20:17:37 wpaul Exp
35  */
36 
37 /*
38  * RealTek 8129/8139 PCI NIC driver
39  *
40  * Supports several extremely cheap PCI 10/100 adapters based on
41  * the RealTek chipset. Datasheets can be obtained from
42  * www.realtek.com.tw.
43  *
44  * Written by Bill Paul <wpaul@ctr.columbia.edu>
45  * Electrical Engineering Department
46  * Columbia University, New York City
47  */
48 
49 /*
50  * The RealTek 8139 PCI NIC redefines the meaning of 'low end.' This is
51  * probably the worst PCI ethernet controller ever made, with the possible
52  * exception of the FEAST chip made by SMC. The 8139 supports bus-master
53  * DMA, but it has a terrible interface that nullifies any performance
54  * gains that bus-master DMA usually offers.
55  *
56  * For transmission, the chip offers a series of four TX descriptor
57  * registers. Each transmit frame must be in a contiguous buffer, aligned
58  * on a longword (32-bit) boundary. This means we almost always have to
59  * do mbuf copies in order to transmit a frame, except in the unlikely
60  * case where a) the packet fits into a single mbuf, and b) the packet
61  * is 32-bit aligned within the mbuf's data area. The presence of only
62  * four descriptor registers means that we can never have more than four
63  * packets queued for transmission at any one time.
64  *
65  * Reception is not much better. The driver has to allocate a single large
66  * buffer area (up to 64K in size) into which the chip will DMA received
67  * frames. Because we don't know where within this region received packets
68  * will begin or end, we have no choice but to copy data from the buffer
69  * area into mbufs in order to pass the packets up to the higher protocol
70  * levels.
71  *
72  * It's impossible given this rotten design to really achieve decent
73  * performance at 100Mbps, unless you happen to have a 400MHz PII or
74  * some equally overmuscled CPU to drive it.
75  *
76  * On the bright side, the 8139 does have a built-in PHY, although
77  * rather than using an MDIO serial interface like most other NICs, the
78  * PHY registers are directly accessible through the 8139's register
79  * space. The 8139 supports autonegotiation, as well as a 64-bit multicast
80  * filter.
81  *
82  * The 8129 chip is an older version of the 8139 that uses an external PHY
83  * chip. The 8129 has a serial MDIO interface for accessing the MII where
84  * the 8139 lets you directly access the on-board PHY registers. We need
85  * to select which interface to use depending on the chip type.
86  */
87 
88 #include <sys/cdefs.h>
89 __KERNEL_RCSID(0, "$NetBSD: rtl81x9.c,v 1.100 2016/06/10 13:27:13 ozaki-r Exp $");
90 
91 
92 #include <sys/param.h>
93 #include <sys/systm.h>
94 #include <sys/callout.h>
95 #include <sys/device.h>
96 #include <sys/sockio.h>
97 #include <sys/mbuf.h>
98 #include <sys/malloc.h>
99 #include <sys/kernel.h>
100 #include <sys/socket.h>
101 
102 #include <net/if.h>
103 #include <net/if_arp.h>
104 #include <net/if_ether.h>
105 #include <net/if_dl.h>
106 #include <net/if_media.h>
107 
108 #include <net/bpf.h>
109 #include <sys/rndsource.h>
110 
111 #include <sys/bus.h>
112 #include <machine/endian.h>
113 
114 #include <dev/mii/mii.h>
115 #include <dev/mii/miivar.h>
116 
117 #include <dev/ic/rtl81x9reg.h>
118 #include <dev/ic/rtl81x9var.h>
119 
120 static void rtk_reset(struct rtk_softc *);
121 static void rtk_rxeof(struct rtk_softc *);
122 static void rtk_txeof(struct rtk_softc *);
123 static void rtk_start(struct ifnet *);
124 static int rtk_ioctl(struct ifnet *, u_long, void *);
125 static int rtk_init(struct ifnet *);
126 static void rtk_stop(struct ifnet *, int);
127 
128 static void rtk_watchdog(struct ifnet *);
129 
130 static void rtk_eeprom_putbyte(struct rtk_softc *, int, int);
131 static void rtk_mii_sync(struct rtk_softc *);
132 static void rtk_mii_send(struct rtk_softc *, uint32_t, int);
133 static int rtk_mii_readreg(struct rtk_softc *, struct rtk_mii_frame *);
134 static int rtk_mii_writereg(struct rtk_softc *, struct rtk_mii_frame *);
135 
136 static int rtk_phy_readreg(device_t, int, int);
137 static void rtk_phy_writereg(device_t, int, int, int);
138 static void rtk_phy_statchg(struct ifnet *);
139 static void rtk_tick(void *);
140 
141 static int rtk_enable(struct rtk_softc *);
142 static void rtk_disable(struct rtk_softc *);
143 
144 static void rtk_list_tx_init(struct rtk_softc *);
145 
146 #define EE_SET(x)					\
147 	CSR_WRITE_1(sc, RTK_EECMD,			\
148 		CSR_READ_1(sc, RTK_EECMD) | (x))
149 
150 #define EE_CLR(x)					\
151 	CSR_WRITE_1(sc, RTK_EECMD,			\
152 		CSR_READ_1(sc, RTK_EECMD) & ~(x))
153 
154 #define EE_DELAY()	DELAY(100)
155 
156 #define ETHER_PAD_LEN (ETHER_MIN_LEN - ETHER_CRC_LEN)
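
/*
 * ETHER_PAD_LEN is the minimum frame length without the CRC
 * (64 - 4 = 60 bytes); rtk_start() zero-pads shorter frames to this
 * length before handing them to the chip.
 */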
157 
158 /*
159  * Send a read command and address to the EEPROM.
160  */
161 static void
162 rtk_eeprom_putbyte(struct rtk_softc *sc, int addr, int addr_len)
163 {
164 	int d, i;
165 
166 	d = (RTK_EECMD_READ << addr_len) | addr;
167 
168 	/*
169 	 * Feed in each bit and strobe the clock.
170 	 */
171 	for (i = RTK_EECMD_LEN + addr_len; i > 0; i--) {
172 		if (d & (1 << (i - 1))) {
173 			EE_SET(RTK_EE_DATAIN);
174 		} else {
175 			EE_CLR(RTK_EE_DATAIN);
176 		}
177 		EE_DELAY();
178 		EE_SET(RTK_EE_CLK);
179 		EE_DELAY();
180 		EE_CLR(RTK_EE_CLK);
181 		EE_DELAY();
182 	}
183 }
184 
185 /*
186  * Read a word of data stored in the EEPROM at address 'addr.'
187  */
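/*
 * In short, the routine bit-bangs a read opcode and address out to the
 * serial EEPROM via rtk_eeprom_putbyte() and then clocks 16 data bits
 * back in; the address width (addr_len) depends on whether a 9346- or
 * 9356-type part was detected (see rtk_attach()).
 */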
188 uint16_t
189 rtk_read_eeprom(struct rtk_softc *sc, int addr, int addr_len)
190 {
191 	uint16_t word;
192 	int i;
193 
194 	/* Enter EEPROM access mode. */
195 	CSR_WRITE_1(sc, RTK_EECMD, RTK_EEMODE_PROGRAM);
196 	EE_DELAY();
197 	EE_SET(RTK_EE_SEL);
198 
199 	/*
200 	 * Send address of word we want to read.
201 	 */
202 	rtk_eeprom_putbyte(sc, addr, addr_len);
203 
204 	/*
205 	 * Start reading bits from EEPROM.
206 	 */
207 	word = 0;
208 	for (i = 16; i > 0; i--) {
209 		EE_SET(RTK_EE_CLK);
210 		EE_DELAY();
211 		if (CSR_READ_1(sc, RTK_EECMD) & RTK_EE_DATAOUT)
212 			word |= 1 << (i - 1);
213 		EE_CLR(RTK_EE_CLK);
214 		EE_DELAY();
215 	}
216 
217 	/* Turn off EEPROM access mode. */
218 	CSR_WRITE_1(sc, RTK_EECMD, RTK_EEMODE_OFF);
219 
220 	return word;
221 }
222 
223 /*
224  * MII access routines are provided for the 8129, which
225  * doesn't have a built-in PHY. For the 8139, we fake things
226  * up by diverting rtk_phy_readreg()/rtk_phy_writereg() to the
227  * direct access PHY registers.
228  */
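/*
 * The bit-banged management frames below follow the usual MII format:
 * a 2-bit start delimiter, a 2-bit opcode, a 5-bit PHY address, a 5-bit
 * register address, a 2-bit turnaround and 16 data bits, matching the
 * field widths passed to rtk_mii_send() from rtk_mii_readreg() and
 * rtk_mii_writereg().
 */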
229 #define MII_SET(x)					\
230 	CSR_WRITE_1(sc, RTK_MII,			\
231 		CSR_READ_1(sc, RTK_MII) | (x))
232 
233 #define MII_CLR(x)					\
234 	CSR_WRITE_1(sc, RTK_MII,			\
235 		CSR_READ_1(sc, RTK_MII) & ~(x))
236 
237 /*
238  * Sync the PHYs by setting data bit and strobing the clock 32 times.
239  */
240 static void
241 rtk_mii_sync(struct rtk_softc *sc)
242 {
243 	int i;
244 
245 	MII_SET(RTK_MII_DIR|RTK_MII_DATAOUT);
246 
247 	for (i = 0; i < 32; i++) {
248 		MII_SET(RTK_MII_CLK);
249 		DELAY(1);
250 		MII_CLR(RTK_MII_CLK);
251 		DELAY(1);
252 	}
253 }
254 
255 /*
256  * Clock a series of bits through the MII.
257  */
258 static void
259 rtk_mii_send(struct rtk_softc *sc, uint32_t bits, int cnt)
260 {
261 	int i;
262 
263 	MII_CLR(RTK_MII_CLK);
264 
265 	for (i = cnt; i > 0; i--) {
266 		if (bits & (1 << (i - 1))) {
267 			MII_SET(RTK_MII_DATAOUT);
268 		} else {
269 			MII_CLR(RTK_MII_DATAOUT);
270 		}
271 		DELAY(1);
272 		MII_CLR(RTK_MII_CLK);
273 		DELAY(1);
274 		MII_SET(RTK_MII_CLK);
275 	}
276 }
277 
278 /*
279  * Read a PHY register through the MII.
280  */
281 static int
282 rtk_mii_readreg(struct rtk_softc *sc, struct rtk_mii_frame *frame)
283 {
284 	int i, ack, s;
285 
286 	s = splnet();
287 
288 	/*
289 	 * Set up frame for RX.
290 	 */
291 	frame->mii_stdelim = RTK_MII_STARTDELIM;
292 	frame->mii_opcode = RTK_MII_READOP;
293 	frame->mii_turnaround = 0;
294 	frame->mii_data = 0;
295 
296 	CSR_WRITE_2(sc, RTK_MII, 0);
297 
298 	/*
299 	 * Turn on data xmit.
300 	 */
301 	MII_SET(RTK_MII_DIR);
302 
303 	rtk_mii_sync(sc);
304 
305 	/*
306 	 * Send command/address info.
307 	 */
308 	rtk_mii_send(sc, frame->mii_stdelim, 2);
309 	rtk_mii_send(sc, frame->mii_opcode, 2);
310 	rtk_mii_send(sc, frame->mii_phyaddr, 5);
311 	rtk_mii_send(sc, frame->mii_regaddr, 5);
312 
313 	/* Idle bit */
314 	MII_CLR((RTK_MII_CLK|RTK_MII_DATAOUT));
315 	DELAY(1);
316 	MII_SET(RTK_MII_CLK);
317 	DELAY(1);
318 
319 	/* Turn off xmit. */
320 	MII_CLR(RTK_MII_DIR);
321 
322 	/* Check for ack */
323 	MII_CLR(RTK_MII_CLK);
324 	DELAY(1);
325 	ack = CSR_READ_2(sc, RTK_MII) & RTK_MII_DATAIN;
326 	MII_SET(RTK_MII_CLK);
327 	DELAY(1);
328 
329 	/*
330 	 * Now try reading data bits. If the ack failed, we still
331 	 * need to clock through 16 cycles to keep the PHY(s) in sync.
332 	 */
333 	if (ack) {
334 		for (i = 0; i < 16; i++) {
335 			MII_CLR(RTK_MII_CLK);
336 			DELAY(1);
337 			MII_SET(RTK_MII_CLK);
338 			DELAY(1);
339 		}
340 		goto fail;
341 	}
342 
343 	for (i = 16; i > 0; i--) {
344 		MII_CLR(RTK_MII_CLK);
345 		DELAY(1);
346 		if (!ack) {
347 			if (CSR_READ_2(sc, RTK_MII) & RTK_MII_DATAIN)
348 				frame->mii_data |= 1 << (i - 1);
349 			DELAY(1);
350 		}
351 		MII_SET(RTK_MII_CLK);
352 		DELAY(1);
353 	}
354 
355  fail:
356 	MII_CLR(RTK_MII_CLK);
357 	DELAY(1);
358 	MII_SET(RTK_MII_CLK);
359 	DELAY(1);
360 
361 	splx(s);
362 
363 	if (ack)
364 		return 1;
365 	return 0;
366 }
367 
368 /*
369  * Write to a PHY register through the MII.
370  */
371 static int
372 rtk_mii_writereg(struct rtk_softc *sc, struct rtk_mii_frame *frame)
373 {
374 	int s;
375 
376 	s = splnet();
377 	/*
378 	 * Set up frame for TX.
379 	 */
380 	frame->mii_stdelim = RTK_MII_STARTDELIM;
381 	frame->mii_opcode = RTK_MII_WRITEOP;
382 	frame->mii_turnaround = RTK_MII_TURNAROUND;
383 
384 	/*
385 	 * Turn on data output.
386 	 */
387 	MII_SET(RTK_MII_DIR);
388 
389 	rtk_mii_sync(sc);
390 
391 	rtk_mii_send(sc, frame->mii_stdelim, 2);
392 	rtk_mii_send(sc, frame->mii_opcode, 2);
393 	rtk_mii_send(sc, frame->mii_phyaddr, 5);
394 	rtk_mii_send(sc, frame->mii_regaddr, 5);
395 	rtk_mii_send(sc, frame->mii_turnaround, 2);
396 	rtk_mii_send(sc, frame->mii_data, 16);
397 
398 	/* Idle bit. */
399 	MII_SET(RTK_MII_CLK);
400 	DELAY(1);
401 	MII_CLR(RTK_MII_CLK);
402 	DELAY(1);
403 
404 	/*
405 	 * Turn off xmit.
406 	 */
407 	MII_CLR(RTK_MII_DIR);
408 
409 	splx(s);
410 
411 	return 0;
412 }
413 
414 static int
415 rtk_phy_readreg(device_t self, int phy, int reg)
416 {
417 	struct rtk_softc *sc = device_private(self);
418 	struct rtk_mii_frame frame;
419 	int rval;
420 	int rtk8139_reg;
421 
422 	if ((sc->sc_quirk & RTKQ_8129) == 0) {
423 		if (phy != 7)
424 			return 0;
425 
426 		switch (reg) {
427 		case MII_BMCR:
428 			rtk8139_reg = RTK_BMCR;
429 			break;
430 		case MII_BMSR:
431 			rtk8139_reg = RTK_BMSR;
432 			break;
433 		case MII_ANAR:
434 			rtk8139_reg = RTK_ANAR;
435 			break;
436 		case MII_ANER:
437 			rtk8139_reg = RTK_ANER;
438 			break;
439 		case MII_ANLPAR:
440 			rtk8139_reg = RTK_LPAR;
441 			break;
442 		default:
443 #if 0
444 			printf("%s: bad phy register\n", device_xname(self));
445 #endif
446 			return 0;
447 		}
448 		rval = CSR_READ_2(sc, rtk8139_reg);
449 		return rval;
450 	}
451 
452 	memset(&frame, 0, sizeof(frame));
453 
454 	frame.mii_phyaddr = phy;
455 	frame.mii_regaddr = reg;
456 	rtk_mii_readreg(sc, &frame);
457 
458 	return frame.mii_data;
459 }
460 
461 static void
462 rtk_phy_writereg(device_t self, int phy, int reg, int data)
463 {
464 	struct rtk_softc *sc = device_private(self);
465 	struct rtk_mii_frame frame;
466 	int rtk8139_reg;
467 
468 	if ((sc->sc_quirk & RTKQ_8129) == 0) {
469 		if (phy != 7)
470 			return;
471 
472 		switch (reg) {
473 		case MII_BMCR:
474 			rtk8139_reg = RTK_BMCR;
475 			break;
476 		case MII_BMSR:
477 			rtk8139_reg = RTK_BMSR;
478 			break;
479 		case MII_ANAR:
480 			rtk8139_reg = RTK_ANAR;
481 			break;
482 		case MII_ANER:
483 			rtk8139_reg = RTK_ANER;
484 			break;
485 		case MII_ANLPAR:
486 			rtk8139_reg = RTK_LPAR;
487 			break;
488 		default:
489 #if 0
490 			printf("%s: bad phy register\n", device_xname(self));
491 #endif
492 			return;
493 		}
494 		CSR_WRITE_2(sc, rtk8139_reg, data);
495 		return;
496 	}
497 
498 	memset(&frame, 0, sizeof(frame));
499 
500 	frame.mii_phyaddr = phy;
501 	frame.mii_regaddr = reg;
502 	frame.mii_data = data;
503 
504 	rtk_mii_writereg(sc, &frame);
505 }
506 
507 static void
508 rtk_phy_statchg(struct ifnet *ifp)
509 {
510 
511 	/* Nothing to do. */
512 }
513 
514 #define	rtk_calchash(addr) \
515 	(ether_crc32_be((addr), ETHER_ADDR_LEN) >> 26)
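
/*
 * rtk_calchash() takes the top six bits of the big-endian CRC-32 of an
 * Ethernet address, giving a bit index in the range 0..63 for the 64-bit
 * hash filter.  Roughly, rtk_setmulti() below uses it as:
 *
 *	h = rtk_calchash(enm->enm_addrlo);
 *	if (h < 32)
 *		hashes[0] |= 1 << h;		(ends up in RTK_MAR0)
 *	else
 *		hashes[1] |= 1 << (h - 32);	(ends up in RTK_MAR4)
 */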
516 
517 /*
518  * Program the 64-bit multicast hash filter.
519  */
520 void
521 rtk_setmulti(struct rtk_softc *sc)
522 {
523 	struct ifnet *ifp;
524 	uint32_t hashes[2] = { 0, 0 };
525 	uint32_t rxfilt;
526 	struct ether_multi *enm;
527 	struct ether_multistep step;
528 	int h, mcnt;
529 
530 	ifp = &sc->ethercom.ec_if;
531 
532 	rxfilt = CSR_READ_4(sc, RTK_RXCFG);
533 
534 	if (ifp->if_flags & IFF_PROMISC) {
535  allmulti:
536 		ifp->if_flags |= IFF_ALLMULTI;
537 		rxfilt |= RTK_RXCFG_RX_MULTI;
538 		CSR_WRITE_4(sc, RTK_RXCFG, rxfilt);
539 		CSR_WRITE_4(sc, RTK_MAR0, 0xFFFFFFFF);
540 		CSR_WRITE_4(sc, RTK_MAR4, 0xFFFFFFFF);
541 		return;
542 	}
543 
544 	/* first, zot all the existing hash bits */
545 	CSR_WRITE_4(sc, RTK_MAR0, 0);
546 	CSR_WRITE_4(sc, RTK_MAR4, 0);
547 
548 	/* now program new ones */
549 	ETHER_FIRST_MULTI(step, &sc->ethercom, enm);
550 	mcnt = 0;
551 	while (enm != NULL) {
552 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
553 		    ETHER_ADDR_LEN) != 0)
554 			goto allmulti;
555 
556 		h = rtk_calchash(enm->enm_addrlo);
557 		if (h < 32)
558 			hashes[0] |= (1 << h);
559 		else
560 			hashes[1] |= (1 << (h - 32));
561 		mcnt++;
562 		ETHER_NEXT_MULTI(step, enm);
563 	}
564 
565 	ifp->if_flags &= ~IFF_ALLMULTI;
566 
567 	if (mcnt)
568 		rxfilt |= RTK_RXCFG_RX_MULTI;
569 	else
570 		rxfilt &= ~RTK_RXCFG_RX_MULTI;
571 
572 	CSR_WRITE_4(sc, RTK_RXCFG, rxfilt);
573 
574 	/*
575 	 * For some unfathomable reason, RealTek decided to reverse
576 	 * the order of the multicast hash registers in the PCI Express
577 	 * parts. This means we have to write the hash pattern in reverse
578 	 * order for those devices.
579 	 */
580 	if ((sc->sc_quirk & RTKQ_PCIE) != 0) {
581 		CSR_WRITE_4(sc, RTK_MAR0, bswap32(hashes[1]));
582 		CSR_WRITE_4(sc, RTK_MAR4, bswap32(hashes[0]));
583 	} else {
584 		CSR_WRITE_4(sc, RTK_MAR0, hashes[0]);
585 		CSR_WRITE_4(sc, RTK_MAR4, hashes[1]);
586 	}
587 }
588 
589 void
590 rtk_reset(struct rtk_softc *sc)
591 {
592 	int i;
593 
594 	CSR_WRITE_1(sc, RTK_COMMAND, RTK_CMD_RESET);
595 
596 	for (i = 0; i < RTK_TIMEOUT; i++) {
597 		DELAY(10);
598 		if ((CSR_READ_1(sc, RTK_COMMAND) & RTK_CMD_RESET) == 0)
599 			break;
600 	}
601 	if (i == RTK_TIMEOUT)
602 		printf("%s: reset never completed!\n",
603 		    device_xname(sc->sc_dev));
604 }
605 
606 /*
607  * Attach the interface. Allocate softc structures, do ifmedia
608  * setup and ethernet/BPF attach.
609  */
610 void
611 rtk_attach(struct rtk_softc *sc)
612 {
613 	device_t self = sc->sc_dev;
614 	struct ifnet *ifp;
615 	struct rtk_tx_desc *txd;
616 	uint16_t val;
617 	uint8_t eaddr[ETHER_ADDR_LEN];
618 	int error;
619 	int i, addr_len;
620 
621 	callout_init(&sc->rtk_tick_ch, 0);
622 
623 	/*
624 	 * Check EEPROM type 9346 or 9356.
625 	 */
626 	if (rtk_read_eeprom(sc, RTK_EE_ID, RTK_EEADDR_LEN1) == 0x8129)
627 		addr_len = RTK_EEADDR_LEN1;
628 	else
629 		addr_len = RTK_EEADDR_LEN0;
630 
631 	/*
632 	 * Get station address.
633 	 */
634 	val = rtk_read_eeprom(sc, RTK_EE_EADDR0, addr_len);
635 	eaddr[0] = val & 0xff;
636 	eaddr[1] = val >> 8;
637 	val = rtk_read_eeprom(sc, RTK_EE_EADDR1, addr_len);
638 	eaddr[2] = val & 0xff;
639 	eaddr[3] = val >> 8;
640 	val = rtk_read_eeprom(sc, RTK_EE_EADDR2, addr_len);
641 	eaddr[4] = val & 0xff;
642 	eaddr[5] = val >> 8;
643 
644 	if ((error = bus_dmamem_alloc(sc->sc_dmat,
645 	    RTK_RXBUFLEN + 16, PAGE_SIZE, 0, &sc->sc_dmaseg, 1, &sc->sc_dmanseg,
646 	    BUS_DMA_NOWAIT)) != 0) {
647 		aprint_error_dev(self,
648 		    "can't allocate recv buffer, error = %d\n", error);
649 		goto fail_0;
650 	}
651 
652 	if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_dmaseg, sc->sc_dmanseg,
653 	    RTK_RXBUFLEN + 16, (void **)&sc->rtk_rx_buf,
654 	    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
655 		aprint_error_dev(self,
656 		    "can't map recv buffer, error = %d\n", error);
657 		goto fail_1;
658 	}
659 
660 	if ((error = bus_dmamap_create(sc->sc_dmat,
661 	    RTK_RXBUFLEN + 16, 1, RTK_RXBUFLEN + 16, 0, BUS_DMA_NOWAIT,
662 	    &sc->recv_dmamap)) != 0) {
663 		aprint_error_dev(self,
664 		    "can't create recv buffer DMA map, error = %d\n", error);
665 		goto fail_2;
666 	}
667 
668 	if ((error = bus_dmamap_load(sc->sc_dmat, sc->recv_dmamap,
669 	    sc->rtk_rx_buf, RTK_RXBUFLEN + 16,
670 	    NULL, BUS_DMA_READ|BUS_DMA_NOWAIT)) != 0) {
671 		aprint_error_dev(self,
672 		    "can't load recv buffer DMA map, error = %d\n", error);
673 		goto fail_3;
674 	}
675 
676 	for (i = 0; i < RTK_TX_LIST_CNT; i++) {
677 		txd = &sc->rtk_tx_descs[i];
678 		if ((error = bus_dmamap_create(sc->sc_dmat,
679 		    MCLBYTES, 1, MCLBYTES, 0, BUS_DMA_NOWAIT,
680 		    &txd->txd_dmamap)) != 0) {
681 			aprint_error_dev(self,
682 			    "can't create snd buffer DMA map, error = %d\n",
683 			    error);
684 			goto fail_4;
685 		}
686 		txd->txd_txaddr = RTK_TXADDR0 + (i * 4);
687 		txd->txd_txstat = RTK_TXSTAT0 + (i * 4);
688 	}
689 	SIMPLEQ_INIT(&sc->rtk_tx_free);
690 	SIMPLEQ_INIT(&sc->rtk_tx_dirty);
691 
692 	/*
693 	 * From this point forward, the attachment cannot fail. A failure
694 	 * before this point releases all resources that may have been
695 	 * allocated.
696 	 */
697 	sc->sc_flags |= RTK_ATTACHED;
698 
699 	/* Reset the adapter. */
700 	rtk_reset(sc);
701 
702 	aprint_normal_dev(self, "Ethernet address %s\n", ether_sprintf(eaddr));
703 
704 	ifp = &sc->ethercom.ec_if;
705 	ifp->if_softc = sc;
706 	strcpy(ifp->if_xname, device_xname(self));
707 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
708 	ifp->if_ioctl = rtk_ioctl;
709 	ifp->if_start = rtk_start;
710 	ifp->if_watchdog = rtk_watchdog;
711 	ifp->if_init = rtk_init;
712 	ifp->if_stop = rtk_stop;
713 	IFQ_SET_READY(&ifp->if_snd);
714 
715 	/*
716 	 * Do ifmedia setup.
717 	 */
718 	sc->mii.mii_ifp = ifp;
719 	sc->mii.mii_readreg = rtk_phy_readreg;
720 	sc->mii.mii_writereg = rtk_phy_writereg;
721 	sc->mii.mii_statchg = rtk_phy_statchg;
722 	sc->ethercom.ec_mii = &sc->mii;
723 	ifmedia_init(&sc->mii.mii_media, IFM_IMASK, ether_mediachange,
724 	    ether_mediastatus);
725 	mii_attach(self, &sc->mii, 0xffffffff,
726 	    MII_PHY_ANY, MII_OFFSET_ANY, 0);
727 
728 	/* Choose a default media. */
729 	if (LIST_FIRST(&sc->mii.mii_phys) == NULL) {
730 		ifmedia_add(&sc->mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
731 		ifmedia_set(&sc->mii.mii_media, IFM_ETHER|IFM_NONE);
732 	} else {
733 		ifmedia_set(&sc->mii.mii_media, IFM_ETHER|IFM_AUTO);
734 	}
735 
736 	/*
737 	 * Call MI attach routines.
738 	 */
739 	if_attach(ifp);
740 	ether_ifattach(ifp, eaddr);
741 
742 	rnd_attach_source(&sc->rnd_source, device_xname(self),
743 	    RND_TYPE_NET, RND_FLAG_DEFAULT);
744 
745 	return;
746  fail_4:
747 	for (i = 0; i < RTK_TX_LIST_CNT; i++) {
748 		txd = &sc->rtk_tx_descs[i];
749 		if (txd->txd_dmamap != NULL)
750 			bus_dmamap_destroy(sc->sc_dmat, txd->txd_dmamap);
751 	}
752  fail_3:
753 	bus_dmamap_destroy(sc->sc_dmat, sc->recv_dmamap);
754  fail_2:
755 	bus_dmamem_unmap(sc->sc_dmat, sc->rtk_rx_buf,
756 	    RTK_RXBUFLEN + 16);
757  fail_1:
758 	bus_dmamem_free(sc->sc_dmat, &sc->sc_dmaseg, sc->sc_dmanseg);
759  fail_0:
760 	return;
761 }
762 
763 /*
764  * Initialize the transmit descriptors.
765  */
766 static void
767 rtk_list_tx_init(struct rtk_softc *sc)
768 {
769 	struct rtk_tx_desc *txd;
770 	int i;
771 
772 	while ((txd = SIMPLEQ_FIRST(&sc->rtk_tx_dirty)) != NULL)
773 		SIMPLEQ_REMOVE_HEAD(&sc->rtk_tx_dirty, txd_q);
774 	while ((txd = SIMPLEQ_FIRST(&sc->rtk_tx_free)) != NULL)
775 		SIMPLEQ_REMOVE_HEAD(&sc->rtk_tx_free, txd_q);
776 
777 	for (i = 0; i < RTK_TX_LIST_CNT; i++) {
778 		txd = &sc->rtk_tx_descs[i];
779 		CSR_WRITE_4(sc, txd->txd_txaddr, 0);
780 		SIMPLEQ_INSERT_TAIL(&sc->rtk_tx_free, txd, txd_q);
781 	}
782 }
783 
784 /*
785  * rtk_activate:
786  *     Handle device activation/deactivation requests.
787  */
788 int
789 rtk_activate(device_t self, enum devact act)
790 {
791 	struct rtk_softc *sc = device_private(self);
792 
793 	switch (act) {
794 	case DVACT_DEACTIVATE:
795 		if_deactivate(&sc->ethercom.ec_if);
796 		return 0;
797 	default:
798 		return EOPNOTSUPP;
799 	}
800 }
801 
802 /*
803  * rtk_detach:
804  *     Detach a rtk interface.
805  */
806 int
807 rtk_detach(struct rtk_softc *sc)
808 {
809 	struct ifnet *ifp = &sc->ethercom.ec_if;
810 	struct rtk_tx_desc *txd;
811 	int i;
812 
813 	/*
814 	 * Succeed now if there isn't any work to do.
815 	 */
816 	if ((sc->sc_flags & RTK_ATTACHED) == 0)
817 		return 0;
818 
819 	/* Unhook our tick handler. */
820 	callout_stop(&sc->rtk_tick_ch);
821 
822 	/* Detach all PHYs. */
823 	mii_detach(&sc->mii, MII_PHY_ANY, MII_OFFSET_ANY);
824 
825 	/* Delete all remaining media. */
826 	ifmedia_delete_instance(&sc->mii.mii_media, IFM_INST_ANY);
827 
828 	rnd_detach_source(&sc->rnd_source);
829 
830 	ether_ifdetach(ifp);
831 	if_detach(ifp);
832 
833 	for (i = 0; i < RTK_TX_LIST_CNT; i++) {
834 		txd = &sc->rtk_tx_descs[i];
835 		if (txd->txd_dmamap != NULL)
836 			bus_dmamap_destroy(sc->sc_dmat, txd->txd_dmamap);
837 	}
838 	bus_dmamap_destroy(sc->sc_dmat, sc->recv_dmamap);
839 	bus_dmamem_unmap(sc->sc_dmat, sc->rtk_rx_buf,
840 	    RTK_RXBUFLEN + 16);
841 	bus_dmamem_free(sc->sc_dmat, &sc->sc_dmaseg, sc->sc_dmanseg);
842 
843 	/* we don't want to run again */
844 	sc->sc_flags &= ~RTK_ATTACHED;
845 
846 	return 0;
847 }
848 
849 /*
850  * rtk_enable:
851  *     Enable the RTL81X9 chip.
852  */
853 int
854 rtk_enable(struct rtk_softc *sc)
855 {
856 
857 	if (RTK_IS_ENABLED(sc) == 0 && sc->sc_enable != NULL) {
858 		if ((*sc->sc_enable)(sc) != 0) {
859 			printf("%s: device enable failed\n",
860 			    device_xname(sc->sc_dev));
861 			return EIO;
862 		}
863 		sc->sc_flags |= RTK_ENABLED;
864 	}
865 	return 0;
866 }
867 
868 /*
869  * rtk_disable:
870  *     Disable the RTL81X9 chip.
871  */
872 void
873 rtk_disable(struct rtk_softc *sc)
874 {
875 
876 	if (RTK_IS_ENABLED(sc) && sc->sc_disable != NULL) {
877 		(*sc->sc_disable)(sc);
878 		sc->sc_flags &= ~RTK_ENABLED;
879 	}
880 }
881 
882 /*
883  * A frame has been uploaded: pass the resulting mbuf chain up to
884  * the higher level protocols.
885  *
886  * You know there's something wrong with a PCI bus-master chip design.
887  *
888  * The receive operation is badly documented in the datasheet, so I'll
889  * attempt to document it here. The driver provides a buffer area and
890  * places its base address in the RX buffer start address register.
891  * The chip then begins copying frames into the RX buffer. Each frame
892  * is preceded by a 32-bit RX status word which specifies the length
893  * of the frame and certain other status bits. Each frame (starting with
894  * the status word) is also 32-bit aligned. The frame length is in the
895  * first 16 bits of the status word; the lower 15 bits correspond with
896  * the 'rx status register' mentioned in the datasheet.
897  *
898  * Note: to make the Alpha happy, the frame payload needs to be aligned
899  * on a 32-bit boundary. To achieve this, we copy the data into the mbuf
900  * shifted forward 2 bytes.
901  */
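
/*
 * A rough sketch of what rtk_rxeof() expects at each ring offset:
 *
 *	+--------------------------------+
 *	| 32-bit RX status word          |  bits 31:16: frame length (w/ CRC)
 *	|                                |  bits 15:0:  status flags (RXOK ...)
 *	+--------------------------------+
 *	| frame data, padded out to a    |
 *	| 32-bit boundary                |
 *	+--------------------------------+
 *
 * The walk starts at CURRXADDR + 16 and stops when the command register
 * reports RTK_CMD_EMPTY_RXBUF, or when a length of RTK_RXSTAT_UNFINISHED
 * indicates the chip is still copying the current frame.
 */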
902 static void
903 rtk_rxeof(struct rtk_softc *sc)
904 {
905 	struct mbuf *m;
906 	struct ifnet *ifp;
907 	uint8_t *rxbufpos, *dst;
908 	u_int total_len, wrap;
909 	uint32_t rxstat;
910 	uint16_t cur_rx, new_rx;
911 	uint16_t limit;
912 	uint16_t rx_bytes, max_bytes;
913 
914 	ifp = &sc->ethercom.ec_if;
915 
916 	cur_rx = (CSR_READ_2(sc, RTK_CURRXADDR) + 16) % RTK_RXBUFLEN;
917 
918 	/* Do not try to read past this point. */
919 	limit = CSR_READ_2(sc, RTK_CURRXBUF) % RTK_RXBUFLEN;
920 
921 	if (limit < cur_rx)
922 		max_bytes = (RTK_RXBUFLEN - cur_rx) + limit;
923 	else
924 		max_bytes = limit - cur_rx;
925 	rx_bytes = 0;
926 
927 	while ((CSR_READ_1(sc, RTK_COMMAND) & RTK_CMD_EMPTY_RXBUF) == 0) {
928 		rxbufpos = sc->rtk_rx_buf + cur_rx;
929 		bus_dmamap_sync(sc->sc_dmat, sc->recv_dmamap, cur_rx,
930 		    RTK_RXSTAT_LEN, BUS_DMASYNC_POSTREAD);
931 		rxstat = le32toh(*(uint32_t *)rxbufpos);
932 		bus_dmamap_sync(sc->sc_dmat, sc->recv_dmamap, cur_rx,
933 		    RTK_RXSTAT_LEN, BUS_DMASYNC_PREREAD);
934 
935 		/*
936 		 * Here's a totally undocumented fact for you. When the
937 		 * RealTek chip is in the process of copying a packet into
938 		 * RAM for you, the length will be 0xfff0. If you spot a
939 		 * packet header with this value, you need to stop. The
940 		 * datasheet makes absolutely no mention of this and
941 		 * RealTek should be shot for this.
942 		 */
943 		total_len = rxstat >> 16;
944 		if (total_len == RTK_RXSTAT_UNFINISHED)
945 			break;
946 
947 		if ((rxstat & RTK_RXSTAT_RXOK) == 0 ||
948 		    total_len < ETHER_MIN_LEN ||
949 		    total_len > (MCLBYTES - RTK_ETHER_ALIGN)) {
950 			ifp->if_ierrors++;
951 
952 			/*
953 			 * submitted by:[netbsd-pcmcia:00484]
954 			 *	Takahiro Kambe <taca@sky.yamashina.kyoto.jp>
955 			 * obtain from:
956 			 *     FreeBSD if_rl.c rev 1.24->1.25
957 			 *
958 			 */
959 #if 0
960 			if (rxstat & (RTK_RXSTAT_BADSYM|RTK_RXSTAT_RUNT|
961 			    RTK_RXSTAT_GIANT|RTK_RXSTAT_CRCERR|
962 			    RTK_RXSTAT_ALIGNERR)) {
963 				CSR_WRITE_2(sc, RTK_COMMAND, RTK_CMD_TX_ENB);
964 				CSR_WRITE_2(sc, RTK_COMMAND,
965 				    RTK_CMD_TX_ENB|RTK_CMD_RX_ENB);
966 				CSR_WRITE_4(sc, RTK_RXCFG, RTK_RXCFG_CONFIG);
967 				CSR_WRITE_4(sc, RTK_RXADDR,
968 				    sc->recv_dmamap->dm_segs[0].ds_addr);
969 				cur_rx = 0;
970 			}
971 			break;
972 #else
973 			rtk_init(ifp);
974 			return;
975 #endif
976 		}
977 
978 		/* No errors; receive the packet. */
979 		rx_bytes += total_len + RTK_RXSTAT_LEN;
980 
981 		/*
982 		 * Avoid trying to read more bytes than we know
983 		 * the chip has prepared for us.
984 		 */
985 		if (rx_bytes > max_bytes)
986 			break;
987 
988 		/*
989 		 * Skip the status word, wrapping around to the beginning
990 		 * of the Rx area, if necessary.
991 		 */
992 		cur_rx = (cur_rx + RTK_RXSTAT_LEN) % RTK_RXBUFLEN;
993 		rxbufpos = sc->rtk_rx_buf + cur_rx;
994 
995 		/*
996 		 * Compute the number of bytes at which the packet
997 		 * will wrap to the beginning of the ring buffer.
998 		 */
999 		wrap = RTK_RXBUFLEN - cur_rx;
1000 
1001 		/*
1002 		 * Compute where the next pending packet is.
1003 		 */
1004 		if (total_len > wrap)
1005 			new_rx = total_len - wrap;
1006 		else
1007 			new_rx = cur_rx + total_len;
1008 		/* Round up to 32-bit boundary. */
1009 		new_rx = roundup2(new_rx, sizeof(uint32_t)) % RTK_RXBUFLEN;
1010 
1011 		/*
1012 		 * The RealTek chip includes the CRC with every
1013 		 * incoming packet; trim it off here.
1014 		 */
1015 		total_len -= ETHER_CRC_LEN;
1016 
1017 		/*
1018 		 * Now allocate an mbuf (and possibly a cluster) to hold
1019 		 * the packet. Note we offset the packet 2 bytes so that
1020 		 * data after the Ethernet header will be 4-byte aligned.
1021 		 */
1022 		MGETHDR(m, M_DONTWAIT, MT_DATA);
1023 		if (m == NULL) {
1024 			printf("%s: unable to allocate Rx mbuf\n",
1025 			    device_xname(sc->sc_dev));
1026 			ifp->if_ierrors++;
1027 			goto next_packet;
1028 		}
1029 		if (total_len > (MHLEN - RTK_ETHER_ALIGN)) {
1030 			MCLGET(m, M_DONTWAIT);
1031 			if ((m->m_flags & M_EXT) == 0) {
1032 				printf("%s: unable to allocate Rx cluster\n",
1033 				    device_xname(sc->sc_dev));
1034 				ifp->if_ierrors++;
1035 				m_freem(m);
1036 				m = NULL;
1037 				goto next_packet;
1038 			}
1039 		}
1040 		m->m_data += RTK_ETHER_ALIGN;	/* for alignment */
1041 		m_set_rcvif(m, ifp);
1042 		m->m_pkthdr.len = m->m_len = total_len;
1043 		dst = mtod(m, void *);
1044 
1045 		/*
1046 		 * If the packet wraps, copy up to the wrapping point.
1047 		 */
1048 		if (total_len > wrap) {
1049 			bus_dmamap_sync(sc->sc_dmat, sc->recv_dmamap,
1050 			    cur_rx, wrap, BUS_DMASYNC_POSTREAD);
1051 			memcpy(dst, rxbufpos, wrap);
1052 			bus_dmamap_sync(sc->sc_dmat, sc->recv_dmamap,
1053 			    cur_rx, wrap, BUS_DMASYNC_PREREAD);
1054 			cur_rx = 0;
1055 			rxbufpos = sc->rtk_rx_buf;
1056 			total_len -= wrap;
1057 			dst += wrap;
1058 		}
1059 
1060 		/*
1061 		 * ...and now the rest.
1062 		 */
1063 		bus_dmamap_sync(sc->sc_dmat, sc->recv_dmamap,
1064 		    cur_rx, total_len, BUS_DMASYNC_POSTREAD);
1065 		memcpy(dst, rxbufpos, total_len);
1066 		bus_dmamap_sync(sc->sc_dmat, sc->recv_dmamap,
1067 		    cur_rx, total_len, BUS_DMASYNC_PREREAD);
1068 
1069  next_packet:
1070 		CSR_WRITE_2(sc, RTK_CURRXADDR, (new_rx - 16) % RTK_RXBUFLEN);
1071 		cur_rx = new_rx;
1072 
1073 		if (m == NULL)
1074 			continue;
1075 
1076 		ifp->if_ipackets++;
1077 
1078 		bpf_mtap(ifp, m);
1079 		/* pass it on. */
1080 		if_percpuq_enqueue(ifp->if_percpuq, m);
1081 	}
1082 }
1083 
1084 /*
1085  * A frame was downloaded to the chip. It's safe for us to clean up
1086  * the list buffers.
1087  */
1088 static void
1089 rtk_txeof(struct rtk_softc *sc)
1090 {
1091 	struct ifnet *ifp;
1092 	struct rtk_tx_desc *txd;
1093 	uint32_t txstat;
1094 
1095 	ifp = &sc->ethercom.ec_if;
1096 
1097 	/*
1098 	 * Go through our tx list and free mbufs for those
1099 	 * frames that have been transmitted.
1100 	 */
1101 	while ((txd = SIMPLEQ_FIRST(&sc->rtk_tx_dirty)) != NULL) {
1102 		txstat = CSR_READ_4(sc, txd->txd_txstat);
1103 		if ((txstat & (RTK_TXSTAT_TX_OK|
1104 		    RTK_TXSTAT_TX_UNDERRUN|RTK_TXSTAT_TXABRT)) == 0)
1105 			break;
1106 
1107 		SIMPLEQ_REMOVE_HEAD(&sc->rtk_tx_dirty, txd_q);
1108 
1109 		bus_dmamap_sync(sc->sc_dmat, txd->txd_dmamap, 0,
1110 		    txd->txd_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1111 		bus_dmamap_unload(sc->sc_dmat, txd->txd_dmamap);
1112 		m_freem(txd->txd_mbuf);
1113 		txd->txd_mbuf = NULL;
1114 
1115 		ifp->if_collisions += (txstat & RTK_TXSTAT_COLLCNT) >> 24;
1116 
1117 		if (txstat & RTK_TXSTAT_TX_OK)
1118 			ifp->if_opackets++;
1119 		else {
1120 			ifp->if_oerrors++;
1121 
1122 			/*
1123 			 * Increase the early TX threshold if an underrun occurred,
1124 			 * in steps of 64 bytes (sc_txthresh counts 32-byte units).
1125 			 */
1126 			if (txstat & RTK_TXSTAT_TX_UNDERRUN) {
1127 #ifdef DEBUG
1128 				printf("%s: transmit underrun;",
1129 				    device_xname(sc->sc_dev));
1130 #endif
1131 				if (sc->sc_txthresh < RTK_TXTH_MAX) {
1132 					sc->sc_txthresh += 2;
1133 #ifdef DEBUG
1134 					printf(" new threshold: %d bytes",
1135 					    sc->sc_txthresh * 32);
1136 #endif
1137 				}
1138 #ifdef DEBUG
1139 				printf("\n");
1140 #endif
1141 			}
1142 			if (txstat & (RTK_TXSTAT_TXABRT|RTK_TXSTAT_OUTOFWIN))
1143 				CSR_WRITE_4(sc, RTK_TXCFG, RTK_TXCFG_CONFIG);
1144 		}
1145 		SIMPLEQ_INSERT_TAIL(&sc->rtk_tx_free, txd, txd_q);
1146 		ifp->if_flags &= ~IFF_OACTIVE;
1147 	}
1148 
1149 	/* Clear the timeout timer if there is no pending packet. */
1150 	if (SIMPLEQ_EMPTY(&sc->rtk_tx_dirty))
1151 		ifp->if_timer = 0;
1152 
1153 }
1154 
1155 int
1156 rtk_intr(void *arg)
1157 {
1158 	struct rtk_softc *sc;
1159 	struct ifnet *ifp;
1160 	uint16_t status;
1161 	int handled;
1162 
1163 	sc = arg;
1164 	ifp = &sc->ethercom.ec_if;
1165 
1166 	if (!device_has_power(sc->sc_dev))
1167 		return 0;
1168 
1169 	/* Disable interrupts. */
1170 	CSR_WRITE_2(sc, RTK_IMR, 0x0000);
1171 
1172 	handled = 0;
1173 	for (;;) {
1174 
1175 		status = CSR_READ_2(sc, RTK_ISR);
1176 
1177 		if (status == 0xffff)
1178 			break; /* Card is gone... */
1179 
1180 		if (status)
1181 			CSR_WRITE_2(sc, RTK_ISR, status);
1182 
1183 		if ((status & RTK_INTRS) == 0)
1184 			break;
1185 
1186 		handled = 1;
1187 
1188 		if (status & RTK_ISR_RX_OK)
1189 			rtk_rxeof(sc);
1190 
1191 		if (status & RTK_ISR_RX_ERR)
1192 			rtk_rxeof(sc);
1193 
1194 		if (status & (RTK_ISR_TX_OK|RTK_ISR_TX_ERR))
1195 			rtk_txeof(sc);
1196 
1197 		if (status & RTK_ISR_SYSTEM_ERR) {
1198 			rtk_reset(sc);
1199 			rtk_init(ifp);
1200 		}
1201 	}
1202 
1203 	/* Re-enable interrupts. */
1204 	CSR_WRITE_2(sc, RTK_IMR, RTK_INTRS);
1205 
1206 	if (IFQ_IS_EMPTY(&ifp->if_snd) == 0)
1207 		rtk_start(ifp);
1208 
1209 	rnd_add_uint32(&sc->rnd_source, status);
1210 
1211 	return handled;
1212 }
1213 
1214 /*
1215  * Main transmit routine.
1216  */
1217 
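/*
 * Roughly: dequeue a frame, copy it into a fresh 32-bit aligned mbuf if
 * it cannot be DMA'd in place (padding short frames to ETHER_PAD_LEN),
 * and load it into one of the four TX descriptor slots tracked on the
 * rtk_tx_free/rtk_tx_dirty queues.
 */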
1218 static void
1219 rtk_start(struct ifnet *ifp)
1220 {
1221 	struct rtk_softc *sc;
1222 	struct rtk_tx_desc *txd;
1223 	struct mbuf *m_head, *m_new;
1224 	int error, len;
1225 
1226 	sc = ifp->if_softc;
1227 
1228 	while ((txd = SIMPLEQ_FIRST(&sc->rtk_tx_free)) != NULL) {
1229 		IFQ_POLL(&ifp->if_snd, m_head);
1230 		if (m_head == NULL)
1231 			break;
1232 		m_new = NULL;
1233 
1234 		/*
1235 		 * Load the DMA map.  If this fails, the packet didn't
1236 		 * fit in one DMA segment, and we need to copy.  Note,
1237 		 * the packet must also be aligned.
1238 		 * If the packet is too small, copy it too, so we're sure
1239 		 * to have enough room for the padding.
1240 		 */
1241 		if ((mtod(m_head, uintptr_t) & 3) != 0 ||
1242 		    m_head->m_pkthdr.len < ETHER_PAD_LEN ||
1243 		    bus_dmamap_load_mbuf(sc->sc_dmat, txd->txd_dmamap,
1244 			m_head, BUS_DMA_WRITE|BUS_DMA_NOWAIT) != 0) {
1245 			MGETHDR(m_new, M_DONTWAIT, MT_DATA);
1246 			if (m_new == NULL) {
1247 				printf("%s: unable to allocate Tx mbuf\n",
1248 				    device_xname(sc->sc_dev));
1249 				break;
1250 			}
1251 			if (m_head->m_pkthdr.len > MHLEN) {
1252 				MCLGET(m_new, M_DONTWAIT);
1253 				if ((m_new->m_flags & M_EXT) == 0) {
1254 					printf("%s: unable to allocate Tx "
1255 					    "cluster\n",
1256 					    device_xname(sc->sc_dev));
1257 					m_freem(m_new);
1258 					break;
1259 				}
1260 			}
1261 			m_copydata(m_head, 0, m_head->m_pkthdr.len,
1262 			    mtod(m_new, void *));
1263 			m_new->m_pkthdr.len = m_new->m_len =
1264 			    m_head->m_pkthdr.len;
1265 			if (m_head->m_pkthdr.len < ETHER_PAD_LEN) {
1266 				memset(
1267 				    mtod(m_new, char *) + m_head->m_pkthdr.len,
1268 				    0, ETHER_PAD_LEN - m_head->m_pkthdr.len);
1269 				m_new->m_pkthdr.len = m_new->m_len =
1270 				    ETHER_PAD_LEN;
1271 			}
1272 			error = bus_dmamap_load_mbuf(sc->sc_dmat,
1273 			    txd->txd_dmamap, m_new,
1274 			    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
1275 			if (error) {
1276 				printf("%s: unable to load Tx buffer, "
1277 				    "error = %d\n",
1278 				    device_xname(sc->sc_dev), error);
1279 				break;
1280 			}
1281 		}
1282 		IFQ_DEQUEUE(&ifp->if_snd, m_head);
1283 		/*
1284 		 * If there's a BPF listener, bounce a copy of this frame
1285 		 * to him.
1286 		 */
1287 		bpf_mtap(ifp, m_head);
1288 		if (m_new != NULL) {
1289 			m_freem(m_head);
1290 			m_head = m_new;
1291 		}
1292 		txd->txd_mbuf = m_head;
1293 
1294 		SIMPLEQ_REMOVE_HEAD(&sc->rtk_tx_free, txd_q);
1295 		SIMPLEQ_INSERT_TAIL(&sc->rtk_tx_dirty, txd, txd_q);
1296 
1297 		/*
1298 		 * Transmit the frame.
1299 		 */
1300 		bus_dmamap_sync(sc->sc_dmat,
1301 		    txd->txd_dmamap, 0, txd->txd_dmamap->dm_mapsize,
1302 		    BUS_DMASYNC_PREWRITE);
1303 
1304 		len = txd->txd_dmamap->dm_segs[0].ds_len;
1305 
1306 		CSR_WRITE_4(sc, txd->txd_txaddr,
1307 		    txd->txd_dmamap->dm_segs[0].ds_addr);
1308 		CSR_WRITE_4(sc, txd->txd_txstat,
1309 		    RTK_TXSTAT_THRESH(sc->sc_txthresh) | len);
1310 
1311 		/*
1312 		 * Set a timeout in case the chip goes out to lunch.
1313 		 */
1314 		ifp->if_timer = 5;
1315 	}
1316 
1317 	/*
1318 	 * We broke out of the loop because all our TX slots are
1319 	 * full. Mark the NIC as busy until it drains some of the
1320 	 * packets from the queue.
1321 	 */
1322 	if (SIMPLEQ_EMPTY(&sc->rtk_tx_free))
1323 		ifp->if_flags |= IFF_OACTIVE;
1324 }
1325 
1326 static int
1327 rtk_init(struct ifnet *ifp)
1328 {
1329 	struct rtk_softc *sc = ifp->if_softc;
1330 	int error, i;
1331 	uint32_t rxcfg;
1332 
1333 	if ((error = rtk_enable(sc)) != 0)
1334 		goto out;
1335 
1336 	/*
1337 	 * Cancel pending I/O.
1338 	 */
1339 	rtk_stop(ifp, 0);
1340 
1341 	/* Init our MAC address */
1342 	for (i = 0; i < ETHER_ADDR_LEN; i++) {
1343 		CSR_WRITE_1(sc, RTK_IDR0 + i, CLLADDR(ifp->if_sadl)[i]);
1344 	}
1345 
1346 	/* Init the RX buffer pointer register. */
1347 	bus_dmamap_sync(sc->sc_dmat, sc->recv_dmamap, 0,
1348 	    sc->recv_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
1349 	CSR_WRITE_4(sc, RTK_RXADDR, sc->recv_dmamap->dm_segs[0].ds_addr);
1350 
1351 	/* Init TX descriptors. */
1352 	rtk_list_tx_init(sc);
1353 
1354 	/* Init Early TX threshold. */
1355 	sc->sc_txthresh = RTK_TXTH_256;
1356 	/*
1357 	 * Enable transmit and receive.
1358 	 */
1359 	CSR_WRITE_1(sc, RTK_COMMAND, RTK_CMD_TX_ENB|RTK_CMD_RX_ENB);
1360 
1361 	/*
1362 	 * Set the initial TX and RX configuration.
1363 	 */
1364 	CSR_WRITE_4(sc, RTK_TXCFG, RTK_TXCFG_CONFIG);
1365 	CSR_WRITE_4(sc, RTK_RXCFG, RTK_RXCFG_CONFIG);
1366 
1367 	/* Set the individual bit to receive frames for this host only. */
1368 	rxcfg = CSR_READ_4(sc, RTK_RXCFG);
1369 	rxcfg |= RTK_RXCFG_RX_INDIV;
1370 
1371 	/* If we want promiscuous mode, set the allframes bit. */
1372 	if (ifp->if_flags & IFF_PROMISC) {
1373 		rxcfg |= RTK_RXCFG_RX_ALLPHYS;
1374 		CSR_WRITE_4(sc, RTK_RXCFG, rxcfg);
1375 	} else {
1376 		rxcfg &= ~RTK_RXCFG_RX_ALLPHYS;
1377 		CSR_WRITE_4(sc, RTK_RXCFG, rxcfg);
1378 	}
1379 
1380 	/*
1381 	 * Set capture broadcast bit to capture broadcast frames.
1382 	 */
1383 	if (ifp->if_flags & IFF_BROADCAST) {
1384 		rxcfg |= RTK_RXCFG_RX_BROAD;
1385 		CSR_WRITE_4(sc, RTK_RXCFG, rxcfg);
1386 	} else {
1387 		rxcfg &= ~RTK_RXCFG_RX_BROAD;
1388 		CSR_WRITE_4(sc, RTK_RXCFG, rxcfg);
1389 	}
1390 
1391 	/*
1392 	 * Program the multicast filter, if necessary.
1393 	 */
1394 	rtk_setmulti(sc);
1395 
1396 	/*
1397 	 * Enable interrupts.
1398 	 */
1399 	CSR_WRITE_2(sc, RTK_IMR, RTK_INTRS);
1400 
1401 	/* Start RX/TX process. */
1402 	CSR_WRITE_4(sc, RTK_MISSEDPKT, 0);
1403 
1404 	/* Enable receiver and transmitter. */
1405 	CSR_WRITE_1(sc, RTK_COMMAND, RTK_CMD_TX_ENB|RTK_CMD_RX_ENB);
1406 
1407 	CSR_WRITE_1(sc, RTK_CFG1, RTK_CFG1_DRVLOAD|RTK_CFG1_FULLDUPLEX);
1408 
1409 	/*
1410 	 * Set current media.
1411 	 */
1412 	if ((error = ether_mediachange(ifp)) != 0)
1413 		goto out;
1414 
1415 	ifp->if_flags |= IFF_RUNNING;
1416 	ifp->if_flags &= ~IFF_OACTIVE;
1417 
1418 	callout_reset(&sc->rtk_tick_ch, hz, rtk_tick, sc);
1419 
1420  out:
1421 	if (error) {
1422 		ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1423 		ifp->if_timer = 0;
1424 		printf("%s: interface not running\n", device_xname(sc->sc_dev));
1425 	}
1426 	return error;
1427 }
1428 
1429 static int
1430 rtk_ioctl(struct ifnet *ifp, u_long command, void *data)
1431 {
1432 	struct rtk_softc *sc = ifp->if_softc;
1433 	int s, error;
1434 
1435 	s = splnet();
1436 	error = ether_ioctl(ifp, command, data);
1437 	if (error == ENETRESET) {
1438 		if (ifp->if_flags & IFF_RUNNING) {
1439 			/*
1440 			 * Multicast list has changed.  Set the
1441 			 * hardware filter accordingly.
1442 			 */
1443 			rtk_setmulti(sc);
1444 		}
1445 		error = 0;
1446 	}
1447 	splx(s);
1448 
1449 	return error;
1450 }
1451 
1452 static void
1453 rtk_watchdog(struct ifnet *ifp)
1454 {
1455 	struct rtk_softc *sc;
1456 
1457 	sc = ifp->if_softc;
1458 
1459 	printf("%s: watchdog timeout\n", device_xname(sc->sc_dev));
1460 	ifp->if_oerrors++;
1461 	rtk_txeof(sc);
1462 	rtk_rxeof(sc);
1463 	rtk_init(ifp);
1464 }
1465 
1466 /*
1467  * Stop the adapter and free any mbufs allocated to the
1468  * RX and TX lists.
1469  */
1470 static void
1471 rtk_stop(struct ifnet *ifp, int disable)
1472 {
1473 	struct rtk_softc *sc = ifp->if_softc;
1474 	struct rtk_tx_desc *txd;
1475 
1476 	callout_stop(&sc->rtk_tick_ch);
1477 
1478 	mii_down(&sc->mii);
1479 
1480 	CSR_WRITE_1(sc, RTK_COMMAND, 0x00);
1481 	CSR_WRITE_2(sc, RTK_IMR, 0x0000);
1482 
1483 	/*
1484 	 * Free the TX list buffers.
1485 	 */
1486 	while ((txd = SIMPLEQ_FIRST(&sc->rtk_tx_dirty)) != NULL) {
1487 		SIMPLEQ_REMOVE_HEAD(&sc->rtk_tx_dirty, txd_q);
1488 		bus_dmamap_unload(sc->sc_dmat, txd->txd_dmamap);
1489 		m_freem(txd->txd_mbuf);
1490 		txd->txd_mbuf = NULL;
1491 		CSR_WRITE_4(sc, txd->txd_txaddr, 0);
1492 	}
1493 
1494 	if (disable)
1495 		rtk_disable(sc);
1496 
1497 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1498 	ifp->if_timer = 0;
1499 }
1500 
1501 static void
1502 rtk_tick(void *arg)
1503 {
1504 	struct rtk_softc *sc = arg;
1505 	int s;
1506 
1507 	s = splnet();
1508 	mii_tick(&sc->mii);
1509 	splx(s);
1510 
1511 	callout_reset(&sc->rtk_tick_ch, hz, rtk_tick, sc);
1512 }
1513