1 /*	$OpenBSD: if_rge.c,v 1.24 2024/04/13 23:44:11 jsg Exp $	*/
2 
3 /*
4  * Copyright (c) 2019, 2020, 2023 Kevin Lo <kevlo@openbsd.org>
5  *
6  * Permission to use, copy, modify, and distribute this software for any
7  * purpose with or without fee is hereby granted, provided that the above
8  * copyright notice and this permission notice appear in all copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include "bpfilter.h"
20 #include "vlan.h"
21 #include "kstat.h"
22 
23 #include <sys/param.h>
24 #include <sys/systm.h>
25 #include <sys/sockio.h>
26 #include <sys/mbuf.h>
27 #include <sys/malloc.h>
28 #include <sys/kernel.h>
29 #include <sys/socket.h>
30 #include <sys/device.h>
31 #include <sys/endian.h>
32 
33 #include <net/if.h>
34 #include <net/if_media.h>
35 
36 #include <netinet/in.h>
37 #include <netinet/if_ether.h>
38 
39 #if NBPFILTER > 0
40 #include <net/bpf.h>
41 #endif
42 
43 #if NKSTAT > 0
44 #include <sys/kstat.h>
45 #endif
46 
47 #include <machine/bus.h>
48 #include <machine/intr.h>
49 
50 #include <dev/mii/mii.h>
51 
52 #include <dev/pci/pcivar.h>
53 #include <dev/pci/pcireg.h>
54 #include <dev/pci/pcidevs.h>
55 
56 #include <dev/pci/if_rgereg.h>
57 
58 #ifdef RGE_DEBUG
59 #define DPRINTF(x)	do { if (rge_debug > 0) printf x; } while (0)
60 int rge_debug = 0;
61 #else
62 #define DPRINTF(x)
63 #endif
64 
65 int		rge_match(struct device *, void *, void *);
66 void		rge_attach(struct device *, struct device *, void *);
67 int		rge_activate(struct device *, int);
68 int		rge_intr(void *);
69 int		rge_encap(struct rge_queues *, struct mbuf *, int);
70 int		rge_ioctl(struct ifnet *, u_long, caddr_t);
71 void		rge_start(struct ifqueue *);
72 void		rge_watchdog(struct ifnet *);
73 void		rge_init(struct ifnet *);
74 void		rge_stop(struct ifnet *);
75 int		rge_ifmedia_upd(struct ifnet *);
76 void		rge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
77 int		rge_allocmem(struct rge_softc *);
78 int		rge_newbuf(struct rge_queues *);
79 void		rge_discard_rxbuf(struct rge_queues *, int);
80 void		rge_rx_list_init(struct rge_queues *);
81 void		rge_tx_list_init(struct rge_queues *);
82 void		rge_fill_rx_ring(struct rge_queues *);
83 int		rge_rxeof(struct rge_queues *);
84 int		rge_txeof(struct rge_queues *);
85 void		rge_reset(struct rge_softc *);
86 void		rge_iff(struct rge_softc *);
87 void		rge_chipinit(struct rge_softc *);
88 void		rge_set_phy_power(struct rge_softc *, int);
89 void		rge_ephy_config(struct rge_softc *);
90 void		rge_ephy_config_mac_cfg3(struct rge_softc *);
91 void		rge_ephy_config_mac_cfg5(struct rge_softc *);
92 int		rge_phy_config(struct rge_softc *);
93 void		rge_phy_config_mac_cfg3(struct rge_softc *);
94 void		rge_phy_config_mac_cfg5(struct rge_softc *);
95 void		rge_phy_config_mcu(struct rge_softc *, uint16_t);
96 void		rge_set_macaddr(struct rge_softc *, const uint8_t *);
97 void		rge_get_macaddr(struct rge_softc *, uint8_t *);
98 void		rge_hw_init(struct rge_softc *);
99 void		rge_hw_reset(struct rge_softc *);
100 void		rge_disable_phy_ocp_pwrsave(struct rge_softc *);
101 void		rge_patch_phy_mcu(struct rge_softc *, int);
102 void		rge_add_media_types(struct rge_softc *);
103 void		rge_config_imtype(struct rge_softc *, int);
104 void		rge_disable_aspm_clkreq(struct rge_softc *);
105 void		rge_disable_hw_im(struct rge_softc *);
106 void		rge_disable_sim_im(struct rge_softc *);
107 void		rge_setup_sim_im(struct rge_softc *);
108 void		rge_setup_intr(struct rge_softc *, int);
109 void		rge_switch_mcu_ram_page(struct rge_softc *, int);
110 void		rge_exit_oob(struct rge_softc *);
111 void		rge_write_csi(struct rge_softc *, uint32_t, uint32_t);
112 uint32_t	rge_read_csi(struct rge_softc *, uint32_t);
113 void		rge_write_mac_ocp(struct rge_softc *, uint16_t, uint16_t);
114 uint16_t	rge_read_mac_ocp(struct rge_softc *, uint16_t);
115 void		rge_write_ephy(struct rge_softc *, uint16_t, uint16_t);
116 uint16_t	rge_read_ephy(struct rge_softc *, uint16_t);
117 void		rge_write_phy(struct rge_softc *, uint16_t, uint16_t, uint16_t);
118 uint16_t	rge_read_phy(struct rge_softc *, uint16_t, uint16_t);
119 void		rge_write_phy_ocp(struct rge_softc *, uint16_t, uint16_t);
120 uint16_t	rge_read_phy_ocp(struct rge_softc *, uint16_t);
121 int		rge_get_link_status(struct rge_softc *);
122 void		rge_txstart(void *);
123 void		rge_tick(void *);
124 void		rge_link_state(struct rge_softc *);
125 #ifndef SMALL_KERNEL
126 int		rge_wol(struct ifnet *, int);
127 void		rge_wol_power(struct rge_softc *);
128 #endif
129 
130 #if NKSTAT > 0
131 void		rge_kstat_attach(struct rge_softc *);
132 #endif
133 
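/*
 * Register/value pairs written into the MAC's internal microcontroller
 * (MCU) for the MAC_CFG3 and MAC_CFG5 variants; the table contents are
 * macros, presumably provided by if_rgereg.h.
 */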
134 static const struct {
135 	uint16_t reg;
136 	uint16_t val;
137 }  rtl8125_mac_cfg3_mcu[] = {
138 	RTL8125_MAC_CFG3_MCU
139 }, rtl8125_mac_cfg5_mcu[] = {
140 	RTL8125_MAC_CFG5_MCU
141 };
142 
143 const struct cfattach rge_ca = {
144 	sizeof(struct rge_softc), rge_match, rge_attach, NULL, rge_activate
145 };
146 
147 struct cfdriver rge_cd = {
148 	NULL, "rge", DV_IFNET
149 };
150 
151 const struct pci_matchid rge_devices[] = {
152 	{ PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_E3000 },
153 	{ PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_RTL8125 }
154 };
155 
156 int
157 rge_match(struct device *parent, void *match, void *aux)
158 {
159 	return (pci_matchbyid((struct pci_attach_args *)aux, rge_devices,
160 	    nitems(rge_devices)));
161 }
162 
163 void
164 rge_attach(struct device *parent, struct device *self, void *aux)
165 {
166 	struct rge_softc *sc = (struct rge_softc *)self;
167 	struct pci_attach_args *pa = aux;
168 	pci_chipset_tag_t pc = pa->pa_pc;
169 	pci_intr_handle_t ih;
170 	const char *intrstr = NULL;
171 	struct ifnet *ifp;
172 	struct rge_queues *q;
173 	pcireg_t reg;
174 	uint32_t hwrev;
175 	uint8_t eaddr[ETHER_ADDR_LEN];
176 	int offset;
177 
178 	pci_set_powerstate(pa->pa_pc, pa->pa_tag, PCI_PMCSR_STATE_D0);
179 
180 	/*
181 	 * Map control/status registers.
182 	 */
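	/* Prefer the 64-bit memory BAR, then 32-bit, then I/O space. */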
183 	if (pci_mapreg_map(pa, RGE_PCI_BAR2, PCI_MAPREG_TYPE_MEM |
184 	    PCI_MAPREG_MEM_TYPE_64BIT, 0, &sc->rge_btag, &sc->rge_bhandle,
185 	    NULL, &sc->rge_bsize, 0)) {
186 		if (pci_mapreg_map(pa, RGE_PCI_BAR1, PCI_MAPREG_TYPE_MEM |
187 		    PCI_MAPREG_MEM_TYPE_32BIT, 0, &sc->rge_btag,
188 		    &sc->rge_bhandle, NULL, &sc->rge_bsize, 0)) {
189 			if (pci_mapreg_map(pa, RGE_PCI_BAR0, PCI_MAPREG_TYPE_IO,
190 			    0, &sc->rge_btag, &sc->rge_bhandle, NULL,
191 			    &sc->rge_bsize, 0)) {
192 				printf(": can't map mem or i/o space\n");
193 				return;
194 			}
195 		}
196 	}
197 
198 	q = malloc(sizeof(struct rge_queues), M_DEVBUF, M_NOWAIT | M_ZERO);
199 	if (q == NULL) {
200 		printf(": unable to allocate queue memory\n");
201 		return;
202 	}
203 	q->q_sc = sc;
204 	q->q_index = 0;
205 
206 	sc->sc_queues = q;
207 	sc->sc_nqueues = 1;
208 
209 	/*
210 	 * Allocate interrupt.
211 	 */
212 	if (pci_intr_map_msi(pa, &ih) == 0)
213 		sc->rge_flags |= RGE_FLAG_MSI;
214 	else if (pci_intr_map(pa, &ih) != 0) {
215 		printf(": couldn't map interrupt\n");
216 		return;
217 	}
218 	intrstr = pci_intr_string(pc, ih);
219 	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET | IPL_MPSAFE, rge_intr,
220 	    sc, sc->sc_dev.dv_xname);
221 	if (sc->sc_ih == NULL) {
222 		printf(": couldn't establish interrupt");
223 		if (intrstr != NULL)
224 			printf(" at %s", intrstr);
225 		printf("\n");
226 		return;
227 	}
228 	printf(": %s", intrstr);
229 
230 	sc->sc_dmat = pa->pa_dmat;
231 	sc->sc_pc = pa->pa_pc;
232 	sc->sc_tag = pa->pa_tag;
233 
234 	/* Determine hardware revision */
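	/*
	 * 0x60900000 appears to correspond to the RTL8125 (MAC_CFG3)
	 * and 0x64100000 to the RTL8125B (MAC_CFG5).
	 */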
235 	hwrev = RGE_READ_4(sc, RGE_TXCFG) & RGE_TXCFG_HWREV;
236 	switch (hwrev) {
237 	case 0x60900000:
238 		sc->rge_type = MAC_CFG3;
239 		break;
240 	case 0x64100000:
241 		sc->rge_type = MAC_CFG5;
242 		break;
243 	default:
244 		printf(": unknown version 0x%08x\n", hwrev);
245 		return;
246 	}
247 
248 	rge_config_imtype(sc, RGE_IMTYPE_SIM);
249 
250 	/*
251 	 * PCI Express check.
252 	 */
253 	if (pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_PCIEXPRESS,
254 	    &offset, NULL)) {
255 		/* Disable PCIe ASPM and ECPM. */
256 		reg = pci_conf_read(pa->pa_pc, pa->pa_tag,
257 		    offset + PCI_PCIE_LCSR);
258 		reg &= ~(PCI_PCIE_LCSR_ASPM_L0S | PCI_PCIE_LCSR_ASPM_L1 |
259 		    PCI_PCIE_LCSR_ECPM);
260 		pci_conf_write(pa->pa_pc, pa->pa_tag, offset + PCI_PCIE_LCSR,
261 		    reg);
262 	}
263 
264 	rge_chipinit(sc);
265 
266 	rge_get_macaddr(sc, eaddr);
267 	printf(", address %s\n", ether_sprintf(eaddr));
268 
269 	memcpy(sc->sc_arpcom.ac_enaddr, eaddr, ETHER_ADDR_LEN);
270 
271 	if (rge_allocmem(sc))
272 		return;
273 
274 	ifp = &sc->sc_arpcom.ac_if;
275 	ifp->if_softc = sc;
276 	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);
277 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
278 	ifp->if_xflags = IFXF_MPSAFE;
279 	ifp->if_ioctl = rge_ioctl;
280 	ifp->if_qstart = rge_start;
281 	ifp->if_watchdog = rge_watchdog;
282 	ifq_init_maxlen(&ifp->if_snd, RGE_TX_LIST_CNT - 1);
283 	ifp->if_hardmtu = RGE_JUMBO_MTU;
284 
285 	ifp->if_capabilities = IFCAP_VLAN_MTU | IFCAP_CSUM_IPv4 |
286 	    IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;
287 
288 #if NVLAN > 0
289 	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
290 #endif
291 
292 #ifndef SMALL_KERNEL
293 	ifp->if_capabilities |= IFCAP_WOL;
294 	ifp->if_wol = rge_wol;
295 	rge_wol(ifp, 0);
296 #endif
297 	timeout_set(&sc->sc_timeout, rge_tick, sc);
298 	task_set(&sc->sc_task, rge_txstart, sc);
299 
300 	/* Initialize ifmedia structures. */
301 	ifmedia_init(&sc->sc_media, IFM_IMASK, rge_ifmedia_upd,
302 	    rge_ifmedia_sts);
303 	rge_add_media_types(sc);
304 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
305 	ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);
306 	sc->sc_media.ifm_media = sc->sc_media.ifm_cur->ifm_media;
307 
308 	if_attach(ifp);
309 	ether_ifattach(ifp);
310 
311 #if NKSTAT > 0
312 	rge_kstat_attach(sc);
313 #endif
314 }
315 
316 int
317 rge_activate(struct device *self, int act)
318 {
319 #ifndef SMALL_KERNEL
320 	struct rge_softc *sc = (struct rge_softc *)self;
321 #endif
322 	int rv = 0;
323 
324 	switch (act) {
325 	case DVACT_POWERDOWN:
326 		rv = config_activate_children(self, act);
327 #ifndef SMALL_KERNEL
328 		rge_wol_power(sc);
329 #endif
330 		break;
331 	default:
332 		rv = config_activate_children(self, act);
333 		break;
334 	}
335 	return (rv);
336 }
337 
338 int
339 rge_intr(void *arg)
340 {
341 	struct rge_softc *sc = arg;
342 	struct rge_queues *q = sc->sc_queues;
343 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
344 	uint32_t status;
345 	int claimed = 0, rv;
346 
347 	if (!(ifp->if_flags & IFF_RUNNING))
348 		return (0);
349 
350 	/* Disable interrupts. */
351 	RGE_WRITE_4(sc, RGE_IMR, 0);
352 
353 	if (!(sc->rge_flags & RGE_FLAG_MSI)) {
354 		if ((RGE_READ_4(sc, RGE_ISR) & sc->rge_intrs) == 0)
355 			return (0);
356 	}
357 
358 	status = RGE_READ_4(sc, RGE_ISR);
359 	if (status)
360 		RGE_WRITE_4(sc, RGE_ISR, status);
361 
362 	if (status & RGE_ISR_PCS_TIMEOUT)
363 		claimed = 1;
364 
365 	rv = 0;
366 	if (status & sc->rge_intrs) {
367 		rv |= rge_rxeof(q);
368 		rv |= rge_txeof(q);
369 
370 		if (status & RGE_ISR_SYSTEM_ERR) {
371 			KERNEL_LOCK();
372 			rge_init(ifp);
373 			KERNEL_UNLOCK();
374 		}
375 		claimed = 1;
376 	}
377 
378 	if (sc->rge_timerintr) {
379 		if (!rv) {
380 			/*
381 			 * Nothing needs to be processed; fall back
382 			 * to plain TX/RX interrupts.
383 			 */
384 			rge_setup_intr(sc, RGE_IMTYPE_NONE);
385 
386 			/*
387 			 * Collect again, mainly to avoid a possible
388 			 * race introduced by changing the interrupt
389 			 * masks.
390 			 */
391 			rge_rxeof(q);
392 			rge_txeof(q);
393 		} else
394 			RGE_WRITE_4(sc, RGE_TIMERCNT, 1);
395 	} else if (rv) {
396 		/*
397 		 * Switch to simulated (hardware timer based) interrupt
398 		 * moderation on the assumption that it will reduce the
399 		 * interrupt rate.
400 		 */
401 		rge_setup_intr(sc, RGE_IMTYPE_SIM);
402 	}
403 
404 	RGE_WRITE_4(sc, RGE_IMR, sc->rge_intrs);
405 
406 	return (claimed);
407 }
408 
409 int
410 rge_encap(struct rge_queues *q, struct mbuf *m, int idx)
411 {
412 	struct rge_softc *sc = q->q_sc;
413 	struct rge_tx_desc *d = NULL;
414 	struct rge_txq *txq;
415 	bus_dmamap_t txmap;
416 	uint32_t cmdsts, cflags = 0;
417 	int cur, error, i, last, nsegs;
418 
419 	/*
420 	 * Set RGE_TDEXTSTS_IPCSUM if any checksum offloading is requested.
421 	 * Otherwise, RGE_TDEXTSTS_TCPCSUM / RGE_TDEXTSTS_UDPCSUM does not
422 	 * take affect.
423 	 */
424 	if ((m->m_pkthdr.csum_flags &
425 	    (M_IPV4_CSUM_OUT | M_TCP_CSUM_OUT | M_UDP_CSUM_OUT)) != 0) {
426 		cflags |= RGE_TDEXTSTS_IPCSUM;
427 		if (m->m_pkthdr.csum_flags & M_TCP_CSUM_OUT)
428 			cflags |= RGE_TDEXTSTS_TCPCSUM;
429 		if (m->m_pkthdr.csum_flags & M_UDP_CSUM_OUT)
430 			cflags |= RGE_TDEXTSTS_UDPCSUM;
431 	}
432 
433 	txq = &q->q_tx.rge_txq[idx];
434 	txmap = txq->txq_dmamap;
435 
436 	error = bus_dmamap_load_mbuf(sc->sc_dmat, txmap, m, BUS_DMA_NOWAIT);
437 	switch (error) {
438 	case 0:
439 		break;
440 	case EFBIG: /* mbuf chain is too fragmented */
441 		if (m_defrag(m, M_DONTWAIT) == 0 &&
442 		    bus_dmamap_load_mbuf(sc->sc_dmat, txmap, m,
443 		    BUS_DMA_NOWAIT) == 0)
444 			break;
445 
446 		/* FALLTHROUGH */
447 	default:
448 		return (0);
449 	}
450 
451 	bus_dmamap_sync(sc->sc_dmat, txmap, 0, txmap->dm_mapsize,
452 	    BUS_DMASYNC_PREWRITE);
453 
454 	nsegs = txmap->dm_nsegs;
455 
456 	/* Set up hardware VLAN tagging. */
457 #if NVLAN > 0
458 	if (m->m_flags & M_VLANTAG)
459 		cflags |= swap16(m->m_pkthdr.ether_vtag) | RGE_TDEXTSTS_VTAG;
460 #endif
461 
462 	cur = idx;
463 	cmdsts = RGE_TDCMDSTS_SOF;
464 
465 	for (i = 0; i < txmap->dm_nsegs; i++) {
466 		d = &q->q_tx.rge_tx_list[cur];
467 
468 		d->rge_extsts = htole32(cflags);
469 		d->rge_addrlo = htole32(RGE_ADDR_LO(txmap->dm_segs[i].ds_addr));
470 		d->rge_addrhi = htole32(RGE_ADDR_HI(txmap->dm_segs[i].ds_addr));
471 
472 		cmdsts |= txmap->dm_segs[i].ds_len;
473 
474 		if (cur == RGE_TX_LIST_CNT - 1)
475 			cmdsts |= RGE_TDCMDSTS_EOR;
476 
477 		d->rge_cmdsts = htole32(cmdsts);
478 
479 		last = cur;
480 		cmdsts = RGE_TDCMDSTS_OWN;
481 		cur = RGE_NEXT_TX_DESC(cur);
482 	}
483 
484 	/* Set EOF on the last descriptor. */
485 	d->rge_cmdsts |= htole32(RGE_TDCMDSTS_EOF);
486 
487 	/* Transfer ownership of packet to the chip. */
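	/*
	 * The OWN bit on the first (SOF) descriptor is set only after all
	 * the later descriptors have been filled in, so the chip never
	 * sees a partially built chain.
	 */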
488 	d = &q->q_tx.rge_tx_list[idx];
489 
490 	d->rge_cmdsts |= htole32(RGE_TDCMDSTS_OWN);
491 
492 	bus_dmamap_sync(sc->sc_dmat, q->q_tx.rge_tx_list_map,
493 	    cur * sizeof(struct rge_tx_desc), sizeof(struct rge_tx_desc),
494 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
495 
496 	/* Update info of TX queue and descriptors. */
497 	txq->txq_mbuf = m;
498 	txq->txq_descidx = last;
499 
500 	return (nsegs);
501 }
502 
503 int
504 rge_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
505 {
506 	struct rge_softc *sc = ifp->if_softc;
507 	struct ifreq *ifr = (struct ifreq *)data;
508 	int s, error = 0;
509 
510 	s = splnet();
511 
512 	switch (cmd) {
513 	case SIOCSIFADDR:
514 		ifp->if_flags |= IFF_UP;
515 		if (!(ifp->if_flags & IFF_RUNNING))
516 			rge_init(ifp);
517 		break;
518 	case SIOCSIFFLAGS:
519 		if (ifp->if_flags & IFF_UP) {
520 			if (ifp->if_flags & IFF_RUNNING)
521 				error = ENETRESET;
522 			else
523 				rge_init(ifp);
524 		} else {
525 			if (ifp->if_flags & IFF_RUNNING)
526 				rge_stop(ifp);
527 		}
528 		break;
529 	case SIOCGIFMEDIA:
530 	case SIOCSIFMEDIA:
531 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
532 		break;
533 	case SIOCGIFRXR:
534 		error = if_rxr_ioctl((struct if_rxrinfo *)ifr->ifr_data,
535 		    NULL, RGE_JUMBO_FRAMELEN, &sc->sc_queues->q_rx.rge_rx_ring);
536 		break;
537 	default:
538 		error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data);
539 	}
540 
541 	if (error == ENETRESET) {
542 		if (ifp->if_flags & IFF_RUNNING)
543 			rge_iff(sc);
544 		error = 0;
545 	}
546 
547 	splx(s);
548 	return (error);
549 }
550 
551 void
552 rge_start(struct ifqueue *ifq)
553 {
554 	struct ifnet *ifp = ifq->ifq_if;
555 	struct rge_softc *sc = ifp->if_softc;
556 	struct rge_queues *q = sc->sc_queues;
557 	struct mbuf *m;
558 	int free, idx, used;
559 	int queued = 0;
560 
561 	if (!LINK_STATE_IS_UP(ifp->if_link_state)) {
562 		ifq_purge(ifq);
563 		return;
564 	}
565 
566 	/* Calculate free space. */
567 	idx = q->q_tx.rge_txq_prodidx;
568 	free = q->q_tx.rge_txq_considx;
569 	if (free <= idx)
570 		free += RGE_TX_LIST_CNT;
571 	free -= idx;
572 
573 	for (;;) {
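		/*
		 * Stop filling the ring once fewer than RGE_TX_NSEGS - 1
		 * descriptors remain free; rge_txeof() restarts the queue.
		 */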
574 		if (RGE_TX_NSEGS >= free + 2) {
575 			ifq_set_oactive(&ifp->if_snd);
576 			break;
577 		}
578 
579 		m = ifq_dequeue(ifq);
580 		if (m == NULL)
581 			break;
582 
583 		used = rge_encap(q, m, idx);
584 		if (used == 0) {
585 			m_freem(m);
586 			continue;
587 		}
588 
589 		KASSERT(used <= free);
590 		free -= used;
591 
592 #if NBPFILTER > 0
593 		if (ifp->if_bpf)
594 			bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_OUT);
595 #endif
596 
597 		idx += used;
598 		if (idx >= RGE_TX_LIST_CNT)
599 			idx -= RGE_TX_LIST_CNT;
600 
601 		queued++;
602 	}
603 
604 	if (queued == 0)
605 		return;
606 
607 	/* Set a timeout in case the chip goes out to lunch. */
608 	ifp->if_timer = 5;
609 
610 	q->q_tx.rge_txq_prodidx = idx;
611 	ifq_serialize(ifq, &sc->sc_task);
612 }
613 
614 void
615 rge_watchdog(struct ifnet *ifp)
616 {
617 	struct rge_softc *sc = ifp->if_softc;
618 
619 	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);
620 	ifp->if_oerrors++;
621 
622 	rge_init(ifp);
623 }
624 
625 void
626 rge_init(struct ifnet *ifp)
627 {
628 	struct rge_softc *sc = ifp->if_softc;
629 	struct rge_queues *q = sc->sc_queues;
630 	uint32_t val;
631 	int i, num_miti;
632 
633 	rge_stop(ifp);
634 
635 	/* Set MAC address. */
636 	rge_set_macaddr(sc, sc->sc_arpcom.ac_enaddr);
637 
638 	/* Initialize the RX and TX descriptor lists. */
639 	rge_rx_list_init(q);
640 	rge_tx_list_init(q);
641 
642 	rge_chipinit(sc);
643 
644 	if (rge_phy_config(sc))
645 		return;
646 
647 	RGE_SETBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
648 
649 	RGE_CLRBIT_1(sc, 0xf1, 0x80);
650 	rge_disable_aspm_clkreq(sc);
651 	RGE_WRITE_2(sc, RGE_EEE_TXIDLE_TIMER,
652 	    RGE_JUMBO_MTU + ETHER_HDR_LEN + 32);
653 	RGE_CLRBIT_1(sc, RGE_CFG3, RGE_CFG3_RDY_TO_L23);
654 
655 	/* Load the addresses of the RX and TX lists into the chip. */
656 	RGE_WRITE_4(sc, RGE_RXDESC_ADDR_LO,
657 	    RGE_ADDR_LO(q->q_rx.rge_rx_list_map->dm_segs[0].ds_addr));
658 	RGE_WRITE_4(sc, RGE_RXDESC_ADDR_HI,
659 	    RGE_ADDR_HI(q->q_rx.rge_rx_list_map->dm_segs[0].ds_addr));
660 	RGE_WRITE_4(sc, RGE_TXDESC_ADDR_LO,
661 	    RGE_ADDR_LO(q->q_tx.rge_tx_list_map->dm_segs[0].ds_addr));
662 	RGE_WRITE_4(sc, RGE_TXDESC_ADDR_HI,
663 	    RGE_ADDR_HI(q->q_tx.rge_tx_list_map->dm_segs[0].ds_addr));
664 
665 	/* Set the initial RX and TX configurations. */
666 	RGE_WRITE_4(sc, RGE_RXCFG,
667 	    (sc->rge_type == MAC_CFG3) ? RGE_RXCFG_CONFIG :
668 	    RGE_RXCFG_CONFIG_8125B);
669 	RGE_WRITE_4(sc, RGE_TXCFG, RGE_TXCFG_CONFIG);
670 
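	/*
	 * Undocumented CSI and MAC OCP register tweaks; presumably taken
	 * from Realtek's reference driver.
	 */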
671 	val = rge_read_csi(sc, 0x70c) & ~0xff000000;
672 	rge_write_csi(sc, 0x70c, val | 0x27000000);
673 
674 	RGE_WRITE_2(sc, 0x0382, 0x221b);
675 
676 	RGE_WRITE_1(sc, RGE_RSS_CTRL, 0);
677 
678 	val = RGE_READ_2(sc, RGE_RXQUEUE_CTRL) & ~0x001c;
679 	RGE_WRITE_2(sc, RGE_RXQUEUE_CTRL, val | (fls(sc->sc_nqueues) - 1) << 2);
680 
681 	RGE_CLRBIT_1(sc, RGE_CFG1, RGE_CFG1_SPEED_DOWN);
682 
683 	rge_write_mac_ocp(sc, 0xc140, 0xffff);
684 	rge_write_mac_ocp(sc, 0xc142, 0xffff);
685 
686 	RGE_MAC_SETBIT(sc, 0xeb58, 0x0001);
687 
688 	val = rge_read_mac_ocp(sc, 0xe614) & ~0x0700;
689 	if (sc->rge_type == MAC_CFG3)
690 		rge_write_mac_ocp(sc, 0xe614, val | 0x0300);
691 	else
692 		rge_write_mac_ocp(sc, 0xe614, val | 0x0200);
693 
694 	val = rge_read_mac_ocp(sc, 0xe63e) & ~0x0c00;
695 	rge_write_mac_ocp(sc, 0xe63e, val |
696 	    ((fls(sc->sc_nqueues) - 1) & 0x03) << 10);
697 
698 	RGE_MAC_CLRBIT(sc, 0xe63e, 0x0030);
699 	if (sc->rge_type == MAC_CFG3)
700 		RGE_MAC_SETBIT(sc, 0xe63e, 0x0020);
701 
702 	RGE_MAC_CLRBIT(sc, 0xc0b4, 0x0001);
703 	RGE_MAC_SETBIT(sc, 0xc0b4, 0x0001);
704 
705 	RGE_MAC_SETBIT(sc, 0xc0b4, 0x000c);
706 
707 	val = rge_read_mac_ocp(sc, 0xeb6a) & ~0x00ff;
708 	rge_write_mac_ocp(sc, 0xeb6a, val | 0x0033);
709 
710 	val = rge_read_mac_ocp(sc, 0xeb50) & ~0x03e0;
711 	rge_write_mac_ocp(sc, 0xeb50, val | 0x0040);
712 
713 	RGE_MAC_CLRBIT(sc, 0xe056, 0x00f0);
714 
715 	RGE_WRITE_1(sc, RGE_TDFNR, 0x10);
716 
717 	RGE_MAC_CLRBIT(sc, 0xe040, 0x1000);
718 
719 	val = rge_read_mac_ocp(sc, 0xea1c) & ~0x0003;
720 	rge_write_mac_ocp(sc, 0xea1c, val | 0x0001);
721 
722 	rge_write_mac_ocp(sc, 0xe0c0, 0x4000);
723 
724 	RGE_MAC_SETBIT(sc, 0xe052, 0x0060);
725 	RGE_MAC_CLRBIT(sc, 0xe052, 0x0088);
726 
727 	val = rge_read_mac_ocp(sc, 0xd430) & ~0x0fff;
728 	rge_write_mac_ocp(sc, 0xd430, val | 0x045f);
729 
730 	RGE_SETBIT_1(sc, RGE_DLLPR, RGE_DLLPR_PFM_EN | RGE_DLLPR_TX_10M_PS_EN);
731 
732 	if (sc->rge_type == MAC_CFG3)
733 		RGE_SETBIT_1(sc, RGE_MCUCMD, 0x01);
734 
735 	/* Disable EEE plus. */
736 	RGE_MAC_CLRBIT(sc, 0xe080, 0x0002);
737 
738 	RGE_MAC_CLRBIT(sc, 0xea1c, 0x0004);
739 
740 	RGE_MAC_SETBIT(sc, 0xeb54, 0x0001);
741 	DELAY(1);
742 	RGE_MAC_CLRBIT(sc, 0xeb54, 0x0001);
743 
744 	RGE_CLRBIT_2(sc, 0x1880, 0x0030);
745 
746 	/* Config interrupt type for RTL8125B. */
747 	if (sc->rge_type == MAC_CFG5)
748 		RGE_CLRBIT_1(sc, RGE_INT_CFG0, RGE_INT_CFG0_EN);
749 
750 	/* Clear timer interrupts. */
751 	RGE_WRITE_4(sc, RGE_TIMERINT0, 0);
752 	RGE_WRITE_4(sc, RGE_TIMERINT1, 0);
753 	RGE_WRITE_4(sc, RGE_TIMERINT2, 0);
754 	RGE_WRITE_4(sc, RGE_TIMERINT3, 0);
755 
756 	num_miti = (sc->rge_type == MAC_CFG3) ? 64 : 32;
757 	/* Clear interrupt moderation timer. */
758 	for (i = 0; i < num_miti; i++)
759 		RGE_WRITE_4(sc, RGE_INTMITI(i), 0);
760 
761 	if (sc->rge_type == MAC_CFG5) {
762 		RGE_CLRBIT_1(sc, RGE_INT_CFG0,
763 		    RGE_INT_CFG0_TIMEOUT_BYPASS |
764 		    RGE_INT_CFG0_MITIGATION_BYPASS);
765 		RGE_WRITE_2(sc, RGE_INT_CFG1, 0);
766 	}
767 
768 	RGE_MAC_SETBIT(sc, 0xc0ac, 0x1f80);
769 
770 	rge_write_mac_ocp(sc, 0xe098, 0xc302);
771 
772 	RGE_MAC_CLRBIT(sc, 0xe032, 0x0003);
773 	val = rge_read_csi(sc, 0x98) & ~0x0000ff00;
774 	rge_write_csi(sc, 0x98, val);
775 
776 	val = rge_read_mac_ocp(sc, 0xe092) & ~0x00ff;
777 	rge_write_mac_ocp(sc, 0xe092, val);
778 
779 	if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)
780 		RGE_SETBIT_4(sc, RGE_RXCFG, RGE_RXCFG_VLANSTRIP);
781 
782 	RGE_SETBIT_2(sc, RGE_CPLUSCMD, RGE_CPLUSCMD_RXCSUM);
783 
784 	/* Set the maximum frame size. */
785 	RGE_WRITE_2(sc, RGE_RXMAXSIZE, RGE_JUMBO_FRAMELEN);
786 
787 	/* Disable RXDV gate. */
788 	RGE_CLRBIT_1(sc, RGE_PPSW, 0x08);
789 	DELAY(2000);
790 
791 	/* Program promiscuous mode and multicast filters. */
792 	rge_iff(sc);
793 
794 	rge_disable_aspm_clkreq(sc);
795 
796 	RGE_CLRBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
797 	DELAY(10);
798 
799 	rge_ifmedia_upd(ifp);
800 
801 	/* Enable transmit and receive. */
802 	RGE_WRITE_1(sc, RGE_CMD, RGE_CMD_TXENB | RGE_CMD_RXENB);
803 
804 	/* Enable interrupts. */
805 	rge_setup_intr(sc, RGE_IMTYPE_SIM);
806 
807 	ifp->if_flags |= IFF_RUNNING;
808 	ifq_clr_oactive(&ifp->if_snd);
809 
810 	timeout_add_sec(&sc->sc_timeout, 1);
811 }
812 
813 /*
814  * Stop the adapter and free any mbufs allocated to the RX and TX lists.
815  */
816 void
817 rge_stop(struct ifnet *ifp)
818 {
819 	struct rge_softc *sc = ifp->if_softc;
820 	struct rge_queues *q = sc->sc_queues;
821 	int i;
822 
823 	timeout_del(&sc->sc_timeout);
824 
825 	ifp->if_timer = 0;
826 	ifp->if_flags &= ~IFF_RUNNING;
827 	sc->rge_timerintr = 0;
828 
829 	RGE_CLRBIT_4(sc, RGE_RXCFG, RGE_RXCFG_ALLPHYS | RGE_RXCFG_INDIV |
830 	    RGE_RXCFG_MULTI | RGE_RXCFG_BROAD | RGE_RXCFG_RUNT |
831 	    RGE_RXCFG_ERRPKT);
832 
833 	rge_hw_reset(sc);
834 
835 	RGE_MAC_CLRBIT(sc, 0xc0ac, 0x1f80);
836 
837 	intr_barrier(sc->sc_ih);
838 	ifq_barrier(&ifp->if_snd);
839 	ifq_clr_oactive(&ifp->if_snd);
840 
841 	if (q->q_rx.rge_head != NULL) {
842 		m_freem(q->q_rx.rge_head);
843 		q->q_rx.rge_head = q->q_rx.rge_tail = NULL;
844 	}
845 
846 	/* Free the TX list buffers. */
847 	for (i = 0; i < RGE_TX_LIST_CNT; i++) {
848 		if (q->q_tx.rge_txq[i].txq_mbuf != NULL) {
849 			bus_dmamap_unload(sc->sc_dmat,
850 			    q->q_tx.rge_txq[i].txq_dmamap);
851 			m_freem(q->q_tx.rge_txq[i].txq_mbuf);
852 			q->q_tx.rge_txq[i].txq_mbuf = NULL;
853 		}
854 	}
855 
856 	/* Free the RX list buffers. */
857 	for (i = 0; i < RGE_RX_LIST_CNT; i++) {
858 		if (q->q_rx.rge_rxq[i].rxq_mbuf != NULL) {
859 			bus_dmamap_unload(sc->sc_dmat,
860 			    q->q_rx.rge_rxq[i].rxq_dmamap);
861 			m_freem(q->q_rx.rge_rxq[i].rxq_mbuf);
862 			q->q_rx.rge_rxq[i].rxq_mbuf = NULL;
863 		}
864 	}
865 }
866 
867 /*
868  * Set media options.
869  */
870 int
871 rge_ifmedia_upd(struct ifnet *ifp)
872 {
873 	struct rge_softc *sc = ifp->if_softc;
874 	struct ifmedia *ifm = &sc->sc_media;
875 	int anar, gig, val;
876 
877 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
878 		return (EINVAL);
879 
880 	/* Disable Gigabit Lite. */
881 	RGE_PHY_CLRBIT(sc, 0xa428, 0x0200);
882 	RGE_PHY_CLRBIT(sc, 0xa5ea, 0x0001);
883 
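	/* 2.5Gb advertisement lives in PHY OCP register 0xa5d4, not MII. */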
884 	val = rge_read_phy_ocp(sc, 0xa5d4);
885 	val &= ~RGE_ADV_2500TFDX;
886 
887 	anar = gig = 0;
888 	switch (IFM_SUBTYPE(ifm->ifm_media)) {
889 	case IFM_AUTO:
890 		anar = ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10;
891 		gig = GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX;
892 		val |= RGE_ADV_2500TFDX;
893 		break;
894 	case IFM_2500_T:
895 		anar = ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10;
896 		gig = GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX;
897 		val |= RGE_ADV_2500TFDX;
898 		ifp->if_baudrate = IF_Mbps(2500);
899 		break;
900 	case IFM_1000_T:
901 		anar = ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10;
902 		gig = GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX;
903 		ifp->if_baudrate = IF_Gbps(1);
904 		break;
905 	case IFM_100_TX:
906 		gig = rge_read_phy(sc, 0, MII_100T2CR) &
907 		    ~(GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX);
908 		anar = ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) ?
909 		    ANAR_TX | ANAR_TX_FD | ANAR_10_FD | ANAR_10 :
910 		    ANAR_TX | ANAR_10_FD | ANAR_10;
911 		ifp->if_baudrate = IF_Mbps(100);
912 		break;
913 	case IFM_10_T:
914 		gig = rge_read_phy(sc, 0, MII_100T2CR) &
915 		    ~(GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX);
916 		anar = ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) ?
917 		    ANAR_10_FD | ANAR_10 : ANAR_10;
918 		ifp->if_baudrate = IF_Mbps(10);
919 		break;
920 	default:
921 		printf("%s: unsupported media type\n", sc->sc_dev.dv_xname);
922 		return (EINVAL);
923 	}
924 
925 	rge_write_phy(sc, 0, MII_ANAR, anar | ANAR_PAUSE_ASYM | ANAR_FC);
926 	rge_write_phy(sc, 0, MII_100T2CR, gig);
927 	rge_write_phy_ocp(sc, 0xa5d4, val);
928 	rge_write_phy(sc, 0, MII_BMCR, BMCR_RESET | BMCR_AUTOEN |
929 	    BMCR_STARTNEG);
930 
931 	return (0);
932 }
933 
934 /*
935  * Report current media status.
936  */
937 void
938 rge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
939 {
940 	struct rge_softc *sc = ifp->if_softc;
941 	uint16_t status = 0;
942 
943 	ifmr->ifm_status = IFM_AVALID;
944 	ifmr->ifm_active = IFM_ETHER;
945 
946 	if (rge_get_link_status(sc)) {
947 		ifmr->ifm_status |= IFM_ACTIVE;
948 
949 		status = RGE_READ_2(sc, RGE_PHYSTAT);
950 		if ((status & RGE_PHYSTAT_FDX) ||
951 		    (status & RGE_PHYSTAT_2500MBPS))
952 			ifmr->ifm_active |= IFM_FDX;
953 		else
954 			ifmr->ifm_active |= IFM_HDX;
955 
956 		if (status & RGE_PHYSTAT_10MBPS)
957 			ifmr->ifm_active |= IFM_10_T;
958 		else if (status & RGE_PHYSTAT_100MBPS)
959 			ifmr->ifm_active |= IFM_100_TX;
960 		else if (status & RGE_PHYSTAT_1000MBPS)
961 			ifmr->ifm_active |= IFM_1000_T;
962 		else if (status & RGE_PHYSTAT_2500MBPS)
963 			ifmr->ifm_active |= IFM_2500_T;
964 	}
965 }
966 
967 /*
968  * Allocate memory for RX/TX rings.
969  */
970 int
971 rge_allocmem(struct rge_softc *sc)
972 {
973 	struct rge_queues *q = sc->sc_queues;
974 	int error, i;
975 
976 	/* Allocate DMA'able memory for the TX ring. */
977 	error = bus_dmamap_create(sc->sc_dmat, RGE_TX_LIST_SZ, 1,
978 	    RGE_TX_LIST_SZ, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
979 	    &q->q_tx.rge_tx_list_map);
980 	if (error) {
981 		printf("%s: can't create TX list map\n", sc->sc_dev.dv_xname);
982 		return (error);
983 	}
984 	error = bus_dmamem_alloc(sc->sc_dmat, RGE_TX_LIST_SZ, RGE_ALIGN, 0,
985 	    &q->q_tx.rge_tx_listseg, 1, &q->q_tx.rge_tx_listnseg,
986 	    BUS_DMA_NOWAIT | BUS_DMA_ZERO);
987 	if (error) {
988 		printf("%s: can't alloc TX list\n", sc->sc_dev.dv_xname);
989 		return (error);
990 	}
991 
992 	/* Load the map for the TX ring. */
993 	error = bus_dmamem_map(sc->sc_dmat, &q->q_tx.rge_tx_listseg,
994 	    q->q_tx.rge_tx_listnseg, RGE_TX_LIST_SZ,
995 	    (caddr_t *)&q->q_tx.rge_tx_list, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
996 	if (error) {
997 		printf("%s: can't map TX dma buffers\n", sc->sc_dev.dv_xname);
998 		bus_dmamem_free(sc->sc_dmat, &q->q_tx.rge_tx_listseg,
999 		    q->q_tx.rge_tx_listnseg);
1000 		return (error);
1001 	}
1002 	error = bus_dmamap_load(sc->sc_dmat, q->q_tx.rge_tx_list_map,
1003 	    q->q_tx.rge_tx_list, RGE_TX_LIST_SZ, NULL, BUS_DMA_NOWAIT);
1004 	if (error) {
1005 		printf("%s: can't load TX dma map\n", sc->sc_dev.dv_xname);
1006 		bus_dmamap_destroy(sc->sc_dmat, q->q_tx.rge_tx_list_map);
1007 		bus_dmamem_unmap(sc->sc_dmat,
1008 		    (caddr_t)q->q_tx.rge_tx_list, RGE_TX_LIST_SZ);
1009 		bus_dmamem_free(sc->sc_dmat, &q->q_tx.rge_tx_listseg,
1010 		    q->q_tx.rge_tx_listnseg);
1011 		return (error);
1012 	}
1013 
1014 	/* Create DMA maps for TX buffers. */
1015 	for (i = 0; i < RGE_TX_LIST_CNT; i++) {
1016 		error = bus_dmamap_create(sc->sc_dmat, RGE_JUMBO_FRAMELEN,
1017 		    RGE_TX_NSEGS, RGE_JUMBO_FRAMELEN, 0,
1018 		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1019 		    &q->q_tx.rge_txq[i].txq_dmamap);
1020 		if (error) {
1021 			printf("%s: can't create DMA map for TX\n",
1022 			    sc->sc_dev.dv_xname);
1023 			return (error);
1024 		}
1025 	}
1026 
1027 	/* Allocate DMA'able memory for the RX ring. */
1028 	error = bus_dmamap_create(sc->sc_dmat, RGE_RX_LIST_SZ, 1,
1029 	    RGE_RX_LIST_SZ, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1030 	    &q->q_rx.rge_rx_list_map);
1031 	if (error) {
1032 		printf("%s: can't create RX list map\n", sc->sc_dev.dv_xname);
1033 		return (error);
1034 	}
1035 	error = bus_dmamem_alloc(sc->sc_dmat, RGE_RX_LIST_SZ, RGE_ALIGN, 0,
1036 	    &q->q_rx.rge_rx_listseg, 1, &q->q_rx.rge_rx_listnseg,
1037 	    BUS_DMA_NOWAIT | BUS_DMA_ZERO);
1038 	if (error) {
1039 		printf("%s: can't alloc RX list\n", sc->sc_dev.dv_xname);
1040 		return (error);
1041 	}
1042 
1043 	/* Load the map for the RX ring. */
1044 	error = bus_dmamem_map(sc->sc_dmat, &q->q_rx.rge_rx_listseg,
1045 	    q->q_rx.rge_rx_listnseg, RGE_RX_LIST_SZ,
1046 	    (caddr_t *)&q->q_rx.rge_rx_list, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
1047 	if (error) {
1048 		printf("%s: can't map RX dma buffers\n", sc->sc_dev.dv_xname);
1049 		bus_dmamem_free(sc->sc_dmat, &q->q_rx.rge_rx_listseg,
1050 		    q->q_rx.rge_rx_listnseg);
1051 		return (error);
1052 	}
1053 	error = bus_dmamap_load(sc->sc_dmat, q->q_rx.rge_rx_list_map,
1054 	    q->q_rx.rge_rx_list, RGE_RX_LIST_SZ, NULL, BUS_DMA_NOWAIT);
1055 	if (error) {
1056 		printf("%s: can't load RX dma map\n", sc->sc_dev.dv_xname);
1057 		bus_dmamap_destroy(sc->sc_dmat, q->q_rx.rge_rx_list_map);
1058 		bus_dmamem_unmap(sc->sc_dmat,
1059 		    (caddr_t)q->q_rx.rge_rx_list, RGE_RX_LIST_SZ);
1060 		bus_dmamem_free(sc->sc_dmat, &q->q_rx.rge_rx_listseg,
1061 		    q->q_rx.rge_rx_listnseg);
1062 		return (error);
1063 	}
1064 
1065 	/* Create DMA maps for RX buffers. */
1066 	for (i = 0; i < RGE_RX_LIST_CNT; i++) {
1067 		error = bus_dmamap_create(sc->sc_dmat, RGE_JUMBO_FRAMELEN, 1,
1068 		    RGE_JUMBO_FRAMELEN, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1069 		    &q->q_rx.rge_rxq[i].rxq_dmamap);
1070 		if (error) {
1071 			printf("%s: can't create DMA map for RX\n",
1072 			    sc->sc_dev.dv_xname);
1073 			return (error);
1074 		}
1075 	}
1076 
1077 	return (error);
1078 }
1079 
1080 /*
1081  * Initialize the RX descriptor and attach an mbuf cluster.
1082  */
1083 int
1084 rge_newbuf(struct rge_queues *q)
1085 {
1086 	struct rge_softc *sc = q->q_sc;
1087 	struct mbuf *m;
1088 	struct rge_rx_desc *r;
1089 	struct rge_rxq *rxq;
1090 	bus_dmamap_t rxmap;
1091 	int idx;
1092 
1093 	m = MCLGETL(NULL, M_DONTWAIT, RGE_JUMBO_FRAMELEN);
1094 	if (m == NULL)
1095 		return (ENOBUFS);
1096 
1097 	m->m_len = m->m_pkthdr.len = RGE_JUMBO_FRAMELEN;
1098 
1099 	idx = q->q_rx.rge_rxq_prodidx;
1100 	rxq = &q->q_rx.rge_rxq[idx];
1101 	rxmap = rxq->rxq_dmamap;
1102 
1103 	if (bus_dmamap_load_mbuf(sc->sc_dmat, rxmap, m, BUS_DMA_NOWAIT)) {
1104 		m_freem(m);
1105 		return (ENOBUFS);
1106 	}
1107 
1108 	bus_dmamap_sync(sc->sc_dmat, rxmap, 0, rxmap->dm_mapsize,
1109 	    BUS_DMASYNC_PREREAD);
1110 
1111 	/* Map the segments into RX descriptors. */
1112 	r = &q->q_rx.rge_rx_list[idx];
1113 
1114 	rxq->rxq_mbuf = m;
1115 
1116 	r->hi_qword1.rx_qword4.rge_extsts = 0;
1117 	r->hi_qword0.rge_addr = htole64(rxmap->dm_segs[0].ds_addr);
1118 
1119 	r->hi_qword1.rx_qword4.rge_cmdsts = htole32(rxmap->dm_segs[0].ds_len);
1120 	if (idx == RGE_RX_LIST_CNT - 1)
1121 		r->hi_qword1.rx_qword4.rge_cmdsts |= htole32(RGE_RDCMDSTS_EOR);
1122 
1123 	r->hi_qword1.rx_qword4.rge_cmdsts |= htole32(RGE_RDCMDSTS_OWN);
1124 
1125 	bus_dmamap_sync(sc->sc_dmat, q->q_rx.rge_rx_list_map,
1126 	    idx * sizeof(struct rge_rx_desc), sizeof(struct rge_rx_desc),
1127 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1128 
1129 	q->q_rx.rge_rxq_prodidx = RGE_NEXT_RX_DESC(idx);
1130 
1131 	return (0);
1132 }
1133 
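/*
 * Give an RX descriptor back to the chip after a receive error, without
 * allocating a replacement mbuf.
 */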
1134 void
1135 rge_discard_rxbuf(struct rge_queues *q, int idx)
1136 {
1137 	struct rge_softc *sc = q->q_sc;
1138 	struct rge_rx_desc *r;
1139 
1140 	r = &q->q_rx.rge_rx_list[idx];
1141 
1142 	r->hi_qword1.rx_qword4.rge_cmdsts = htole32(RGE_JUMBO_FRAMELEN);
1143 	r->hi_qword1.rx_qword4.rge_extsts = 0;
1144 	if (idx == RGE_RX_LIST_CNT - 1)
1145 		r->hi_qword1.rx_qword4.rge_cmdsts |= htole32(RGE_RDCMDSTS_EOR);
1146 	r->hi_qword1.rx_qword4.rge_cmdsts |= htole32(RGE_RDCMDSTS_OWN);
1147 
1148 	bus_dmamap_sync(sc->sc_dmat, q->q_rx.rge_rx_list_map,
1149 	    idx * sizeof(struct rge_rx_desc), sizeof(struct rge_rx_desc),
1150 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1151 }
1152 
1153 void
1154 rge_rx_list_init(struct rge_queues *q)
1155 {
1156 	memset(q->q_rx.rge_rx_list, 0, RGE_RX_LIST_SZ);
1157 
1158 	q->q_rx.rge_rxq_prodidx = q->q_rx.rge_rxq_considx = 0;
1159 	q->q_rx.rge_head = q->q_rx.rge_tail = NULL;
1160 
1161 	if_rxr_init(&q->q_rx.rge_rx_ring, 32, RGE_RX_LIST_CNT);
1162 	rge_fill_rx_ring(q);
1163 }
1164 
1165 void
1166 rge_fill_rx_ring(struct rge_queues *q)
1167 {
1168 	struct if_rxring *rxr = &q->q_rx.rge_rx_ring;
1169 	int slots;
1170 
1171 	for (slots = if_rxr_get(rxr, RGE_RX_LIST_CNT); slots > 0; slots--) {
1172 		if (rge_newbuf(q))
1173 			break;
1174 	}
1175 	if_rxr_put(rxr, slots);
1176 }
1177 
1178 void
1179 rge_tx_list_init(struct rge_queues *q)
1180 {
1181 	struct rge_softc *sc = q->q_sc;
1182 	struct rge_tx_desc *d;
1183 	int i;
1184 
1185 	memset(q->q_tx.rge_tx_list, 0, RGE_TX_LIST_SZ);
1186 
1187 	for (i = 0; i < RGE_TX_LIST_CNT; i++)
1188 		q->q_tx.rge_txq[i].txq_mbuf = NULL;
1189 
1190 	d = &q->q_tx.rge_tx_list[RGE_TX_LIST_CNT - 1];
1191 	d->rge_cmdsts = htole32(RGE_TDCMDSTS_EOR);
1192 
1193 	bus_dmamap_sync(sc->sc_dmat, q->q_tx.rge_tx_list_map, 0,
1194 	    q->q_tx.rge_tx_list_map->dm_mapsize,
1195 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1196 
1197 	q->q_tx.rge_txq_prodidx = q->q_tx.rge_txq_considx = 0;
1198 }
1199 
1200 int
1201 rge_rxeof(struct rge_queues *q)
1202 {
1203 	struct rge_softc *sc = q->q_sc;
1204 	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
1205 	struct mbuf *m;
1206 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
1207 	struct if_rxring *rxr = &q->q_rx.rge_rx_ring;
1208 	struct rge_rx_desc *cur_rx;
1209 	struct rge_rxq *rxq;
1210 	uint32_t rxstat, extsts;
1211 	int i, total_len, rx = 0;
1212 
1213 	for (i = q->q_rx.rge_rxq_considx; if_rxr_inuse(rxr) > 0;
1214 	    i = RGE_NEXT_RX_DESC(i)) {
1215 		/* Invalidate the descriptor memory. */
1216 		bus_dmamap_sync(sc->sc_dmat, q->q_rx.rge_rx_list_map,
1217 		    i * sizeof(struct rge_rx_desc), sizeof(struct rge_rx_desc),
1218 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1219 
1220 		cur_rx = &q->q_rx.rge_rx_list[i];
1221 		rxstat = letoh32(cur_rx->hi_qword1.rx_qword4.rge_cmdsts);
1222 		extsts = letoh32(cur_rx->hi_qword1.rx_qword4.rge_extsts);
1223 
1224 		if (rxstat & RGE_RDCMDSTS_OWN)
1225 			break;
1226 
1227 		total_len = rxstat & RGE_RDCMDSTS_FRAGLEN;
1228 		rxq = &q->q_rx.rge_rxq[i];
1229 		m = rxq->rxq_mbuf;
1230 		rxq->rxq_mbuf = NULL;
1231 		if_rxr_put(rxr, 1);
1232 		rx = 1;
1233 
1234 		/* Invalidate the RX mbuf and unload its map. */
1235 		bus_dmamap_sync(sc->sc_dmat, rxq->rxq_dmamap, 0,
1236 		    rxq->rxq_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
1237 		bus_dmamap_unload(sc->sc_dmat, rxq->rxq_dmamap);
1238 
1239 		if ((rxstat & (RGE_RDCMDSTS_SOF | RGE_RDCMDSTS_EOF)) !=
1240 		    (RGE_RDCMDSTS_SOF | RGE_RDCMDSTS_EOF)) {
1241 			ifp->if_ierrors++;
1242 			m_freem(m);
1243 			rge_discard_rxbuf(q, i);
1244 			continue;
1245 		}
1246 
1247 		if (rxstat & RGE_RDCMDSTS_RXERRSUM) {
1248 			ifp->if_ierrors++;
1249 			/*
1250 			 * If this is part of a multi-fragment packet,
1251 			 * discard all the pieces.
1252 			 */
1253 			if (q->q_rx.rge_head != NULL) {
1254 				m_freem(q->q_rx.rge_head);
1255 				q->q_rx.rge_head = q->q_rx.rge_tail = NULL;
1256 			}
1257 			m_freem(m);
1258 			rge_discard_rxbuf(q, i);
1259 			continue;
1260 		}
1261 
1262 		if (q->q_rx.rge_head != NULL) {
1263 			m->m_len = total_len;
1264 			/*
1265 			 * Special case: if there are 4 bytes or fewer
1266 			 * in this buffer, the mbuf can be discarded:
1267 			 * the last 4 bytes are the CRC, which we don't
1268 			 * care about anyway.
1269 			 */
1270 			if (m->m_len <= ETHER_CRC_LEN) {
1271 				q->q_rx.rge_tail->m_len -=
1272 				    (ETHER_CRC_LEN - m->m_len);
1273 				m_freem(m);
1274 			} else {
1275 				m->m_len -= ETHER_CRC_LEN;
1276 				m->m_flags &= ~M_PKTHDR;
1277 				q->q_rx.rge_tail->m_next = m;
1278 			}
1279 			m = q->q_rx.rge_head;
1280 			q->q_rx.rge_head = q->q_rx.rge_tail = NULL;
1281 			m->m_pkthdr.len = total_len - ETHER_CRC_LEN;
1282 		} else
1283 			m->m_pkthdr.len = m->m_len =
1284 			    (total_len - ETHER_CRC_LEN);
1285 
1286 		/* Check IP header checksum. */
1287 		if (!(extsts & RGE_RDEXTSTS_IPCSUMERR) &&
1288 		    (extsts & RGE_RDEXTSTS_IPV4))
1289 			m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;
1290 
1291 		/* Check TCP/UDP checksum. */
1292 		if ((extsts & (RGE_RDEXTSTS_IPV4 | RGE_RDEXTSTS_IPV6)) &&
1293 		    (((extsts & RGE_RDEXTSTS_TCPPKT) &&
1294 		    !(extsts & RGE_RDEXTSTS_TCPCSUMERR)) ||
1295 		    ((extsts & RGE_RDEXTSTS_UDPPKT) &&
1296 		    !(extsts & RGE_RDEXTSTS_UDPCSUMERR))))
1297 			m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK |
1298 			    M_UDP_CSUM_IN_OK;
1299 
1300 #if NVLAN > 0
1301 		if (extsts & RGE_RDEXTSTS_VTAG) {
1302 			m->m_pkthdr.ether_vtag =
1303 			    ntohs(extsts & RGE_RDEXTSTS_VLAN_MASK);
1304 			m->m_flags |= M_VLANTAG;
1305 		}
1306 #endif
1307 
1308 		ml_enqueue(&ml, m);
1309 	}
1310 
1311 	if (ifiq_input(&ifp->if_rcv, &ml))
1312 		if_rxr_livelocked(rxr);
1313 
1314 	q->q_rx.rge_rxq_considx = i;
1315 	rge_fill_rx_ring(q);
1316 
1317 	return (rx);
1318 }
1319 
1320 int
1321 rge_txeof(struct rge_queues *q)
1322 {
1323 	struct rge_softc *sc = q->q_sc;
1324 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
1325 	struct rge_txq *txq;
1326 	uint32_t txstat;
1327 	int cons, idx, prod;
1328 	int free = 0;
1329 
1330 	prod = q->q_tx.rge_txq_prodidx;
1331 	cons = q->q_tx.rge_txq_considx;
1332 
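	/*
	 * free encodes the result: 0 = nothing reclaimed, 1 = reclaimed up
	 * to the producer index, 2 = stopped early on a descriptor the
	 * chip still owns.
	 */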
1333 	while (prod != cons) {
1334 		txq = &q->q_tx.rge_txq[cons];
1335 		idx = txq->txq_descidx;
1336 
1337 		bus_dmamap_sync(sc->sc_dmat, q->q_tx.rge_tx_list_map,
1338 		    idx * sizeof(struct rge_tx_desc),
1339 		    sizeof(struct rge_tx_desc),
1340 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1341 
1342 		txstat = letoh32(q->q_tx.rge_tx_list[idx].rge_cmdsts);
1343 
1344 		if (txstat & RGE_TDCMDSTS_OWN) {
1345 			free = 2;
1346 			break;
1347 		}
1348 
1349 		bus_dmamap_sync(sc->sc_dmat, txq->txq_dmamap, 0,
1350 		    txq->txq_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1351 		bus_dmamap_unload(sc->sc_dmat, txq->txq_dmamap);
1352 		m_freem(txq->txq_mbuf);
1353 		txq->txq_mbuf = NULL;
1354 
1355 		if (txstat & (RGE_TDCMDSTS_EXCESSCOLL | RGE_TDCMDSTS_COLL))
1356 			ifp->if_collisions++;
1357 		if (txstat & RGE_TDCMDSTS_TXERR)
1358 			ifp->if_oerrors++;
1359 
1360 		bus_dmamap_sync(sc->sc_dmat, q->q_tx.rge_tx_list_map,
1361 		    idx * sizeof(struct rge_tx_desc),
1362 		    sizeof(struct rge_tx_desc),
1363 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1364 
1365 		cons = RGE_NEXT_TX_DESC(idx);
1366 		free = 1;
1367 	}
1368 
1369 	if (free == 0)
1370 		return (0);
1371 
1372 	q->q_tx.rge_txq_considx = cons;
1373 
1374 	if (ifq_is_oactive(&ifp->if_snd))
1375 		ifq_restart(&ifp->if_snd);
1376 	else if (free == 2)
1377 		ifq_serialize(&ifp->if_snd, &sc->sc_task);
1378 	else
1379 		ifp->if_timer = 0;
1380 
1381 	return (1);
1382 }
1383 
1384 void
1385 rge_reset(struct rge_softc *sc)
1386 {
1387 	int i;
1388 
1389 	RGE_CLRBIT_4(sc, RGE_RXCFG, RGE_RXCFG_ALLPHYS | RGE_RXCFG_INDIV |
1390 	    RGE_RXCFG_MULTI | RGE_RXCFG_BROAD | RGE_RXCFG_RUNT |
1391 	    RGE_RXCFG_ERRPKT);
1392 
1393 	/* Enable RXDV gate. */
1394 	RGE_SETBIT_1(sc, RGE_PPSW, 0x08);
1395 	DELAY(2000);
1396 
1397 	RGE_SETBIT_1(sc, RGE_CMD, RGE_CMD_STOPREQ);
1398 	for (i = 0; i < 20; i++) {
1399 		DELAY(10);
1400 		if (!(RGE_READ_1(sc, RGE_CMD) & RGE_CMD_STOPREQ))
1401 			break;
1402 	}
1403 
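	/* Wait for the RX and TX FIFOs to drain. */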
1404 	for (i = 0; i < 3000; i++) {
1405 		DELAY(50);
1406 		if ((RGE_READ_1(sc, RGE_MCUCMD) & (RGE_MCUCMD_RXFIFO_EMPTY |
1407 		    RGE_MCUCMD_TXFIFO_EMPTY)) == (RGE_MCUCMD_RXFIFO_EMPTY |
1408 		    RGE_MCUCMD_TXFIFO_EMPTY))
1409 			break;
1410 	}
1411 	if (sc->rge_type != MAC_CFG3) {
1412 		for (i = 0; i < 3000; i++) {
1413 			DELAY(50);
1414 			if ((RGE_READ_2(sc, RGE_IM) & 0x0103) == 0x0103)
1415 				break;
1416 		}
1417 	}
1418 
1419 	DELAY(2000);
1420 
1421 	/* Soft reset. */
1422 	RGE_WRITE_1(sc, RGE_CMD, RGE_CMD_RESET);
1423 
1424 	for (i = 0; i < RGE_TIMEOUT; i++) {
1425 		DELAY(100);
1426 		if (!(RGE_READ_1(sc, RGE_CMD) & RGE_CMD_RESET))
1427 			break;
1428 	}
1429 	if (i == RGE_TIMEOUT)
1430 		printf("%s: reset never completed!\n", sc->sc_dev.dv_xname);
1431 }
1432 
1433 void
1434 rge_iff(struct rge_softc *sc)
1435 {
1436 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
1437 	struct arpcom *ac = &sc->sc_arpcom;
1438 	struct ether_multi *enm;
1439 	struct ether_multistep step;
1440 	uint32_t hashes[2];
1441 	uint32_t rxfilt;
1442 	int h = 0;
1443 
1444 	rxfilt = RGE_READ_4(sc, RGE_RXCFG);
1445 	rxfilt &= ~(RGE_RXCFG_ALLPHYS | RGE_RXCFG_MULTI);
1446 	ifp->if_flags &= ~IFF_ALLMULTI;
1447 
1448 	/*
1449 	 * Always accept frames destined to our station address.
1450 	 * Always accept broadcast frames.
1451 	 */
1452 	rxfilt |= RGE_RXCFG_INDIV | RGE_RXCFG_BROAD;
1453 
1454 	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
1455 		ifp->if_flags |= IFF_ALLMULTI;
1456 		rxfilt |= RGE_RXCFG_MULTI;
1457 		if (ifp->if_flags & IFF_PROMISC)
1458 			rxfilt |= RGE_RXCFG_ALLPHYS;
1459 		hashes[0] = hashes[1] = 0xffffffff;
1460 	} else {
1461 		rxfilt |= RGE_RXCFG_MULTI;
1462 		/* Program new filter. */
1463 		memset(hashes, 0, sizeof(hashes));
1464 
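		/*
		 * The 64-bit hash table is indexed by the top 6 bits of the
		 * big-endian CRC32 of each multicast address.
		 */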
1465 		ETHER_FIRST_MULTI(step, ac, enm);
1466 		while (enm != NULL) {
1467 			h = ether_crc32_be(enm->enm_addrlo,
1468 			    ETHER_ADDR_LEN) >> 26;
1469 
1470 			if (h < 32)
1471 				hashes[0] |= (1 << h);
1472 			else
1473 				hashes[1] |= (1 << (h - 32));
1474 
1475 			ETHER_NEXT_MULTI(step, enm);
1476 		}
1477 	}
1478 
1479 	RGE_WRITE_4(sc, RGE_RXCFG, rxfilt);
1480 	RGE_WRITE_4(sc, RGE_MAR0, swap32(hashes[1]));
1481 	RGE_WRITE_4(sc, RGE_MAR4, swap32(hashes[0]));
1482 }
1483 
1484 void
1485 rge_chipinit(struct rge_softc *sc)
1486 {
1487 	rge_exit_oob(sc);
1488 	rge_set_phy_power(sc, 1);
1489 	rge_hw_init(sc);
1490 	rge_hw_reset(sc);
1491 }
1492 
1493 void
1494 rge_set_phy_power(struct rge_softc *sc, int on)
1495 {
1496 	int i;
1497 
1498 	if (on) {
1499 		RGE_SETBIT_1(sc, RGE_PMCH, 0xc0);
1500 
1501 		rge_write_phy(sc, 0, MII_BMCR, BMCR_AUTOEN);
1502 
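		/*
		 * Poll PHY OCP register 0xa420 until its low bits read 3,
		 * which presumably indicates the PHY has powered up.
		 */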
1503 		for (i = 0; i < RGE_TIMEOUT; i++) {
1504 			if ((rge_read_phy_ocp(sc, 0xa420) & 0x0007) == 3)
1505 				break;
1506 			DELAY(1000);
1507 		}
1508 	} else {
1509 		rge_write_phy(sc, 0, MII_BMCR, BMCR_AUTOEN | BMCR_PDOWN);
1510 		RGE_CLRBIT_1(sc, RGE_PMCH, 0x80);
1511 		RGE_CLRBIT_1(sc, RGE_PPSW, 0x40);
1512 	}
1513 }
1514 
1515 void
1516 rge_ephy_config(struct rge_softc *sc)
1517 {
1518 	switch (sc->rge_type) {
1519 	case MAC_CFG3:
1520 		rge_ephy_config_mac_cfg3(sc);
1521 		break;
1522 	case MAC_CFG5:
1523 		rge_ephy_config_mac_cfg5(sc);
1524 		break;
1525 	default:
1526 		break;	/* Can't happen. */
1527 	}
1528 }
1529 
1530 void
1531 rge_ephy_config_mac_cfg3(struct rge_softc *sc)
1532 {
1533 	uint16_t val;
1534 	int i;
1535 
1536 	for (i = 0; i < nitems(rtl8125_mac_cfg3_ephy); i++)
1537 		rge_write_ephy(sc, rtl8125_mac_cfg3_ephy[i].reg,
1538 		    rtl8125_mac_cfg3_ephy[i].val);
1539 
1540 	val = rge_read_ephy(sc, 0x002a) & ~0x7000;
1541 	rge_write_ephy(sc, 0x002a, val | 0x3000);
1542 	RGE_EPHY_CLRBIT(sc, 0x0019, 0x0040);
1543 	RGE_EPHY_SETBIT(sc, 0x001b, 0x0e00);
1544 	RGE_EPHY_CLRBIT(sc, 0x001b, 0x7000);
1545 	rge_write_ephy(sc, 0x0002, 0x6042);
1546 	rge_write_ephy(sc, 0x0006, 0x0014);
1547 	val = rge_read_ephy(sc, 0x006a) & ~0x7000;
1548 	rge_write_ephy(sc, 0x006a, val | 0x3000);
1549 	RGE_EPHY_CLRBIT(sc, 0x0059, 0x0040);
1550 	RGE_EPHY_SETBIT(sc, 0x005b, 0x0e00);
1551 	RGE_EPHY_CLRBIT(sc, 0x005b, 0x7000);
1552 	rge_write_ephy(sc, 0x0042, 0x6042);
1553 	rge_write_ephy(sc, 0x0046, 0x0014);
1554 }
1555 
1556 void
1557 rge_ephy_config_mac_cfg5(struct rge_softc *sc)
1558 {
1559 	int i;
1560 
1561 	for (i = 0; i < nitems(rtl8125_mac_cfg5_ephy); i++)
1562 		rge_write_ephy(sc, rtl8125_mac_cfg5_ephy[i].reg,
1563 		    rtl8125_mac_cfg5_ephy[i].val);
1564 }
1565 
1566 int
1567 rge_phy_config(struct rge_softc *sc)
1568 {
1569 	int i;
1570 
1571 	rge_ephy_config(sc);
1572 
1573 	/* PHY reset. */
1574 	rge_write_phy(sc, 0, MII_ANAR,
1575 	    rge_read_phy(sc, 0, MII_ANAR) &
1576 	    ~(ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10));
1577 	rge_write_phy(sc, 0, MII_100T2CR,
1578 	    rge_read_phy(sc, 0, MII_100T2CR) &
1579 	    ~(GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX));
1580 	RGE_PHY_CLRBIT(sc, 0xa5d4, RGE_ADV_2500TFDX);
1581 	rge_write_phy(sc, 0, MII_BMCR, BMCR_RESET | BMCR_AUTOEN |
1582 	    BMCR_STARTNEG);
1583 	for (i = 0; i < 2500; i++) {
1584 		if (!(rge_read_phy(sc, 0, MII_BMCR) & BMCR_RESET))
1585 			break;
1586 		DELAY(1000);
1587 	}
1588 	if (i == 2500) {
1589 		printf("%s: PHY reset failed\n", sc->sc_dev.dv_xname);
1590 		return (ETIMEDOUT);
1591 	}
1592 
1593 	/* Read microcode version. */
1594 	rge_write_phy_ocp(sc, 0xa436, 0x801e);
1595 	sc->rge_mcodever = rge_read_phy_ocp(sc, 0xa438);
1596 
1597 	switch (sc->rge_type) {
1598 	case MAC_CFG3:
1599 		rge_phy_config_mac_cfg3(sc);
1600 		break;
1601 	case MAC_CFG5:
1602 		rge_phy_config_mac_cfg5(sc);
1603 		break;
1604 	default:
1605 		break;	/* Can't happen. */
1606 	}
1607 
1608 	RGE_PHY_CLRBIT(sc, 0xa5b4, 0x8000);
1609 
1610 	/* Disable EEE. */
1611 	RGE_MAC_CLRBIT(sc, 0xe040, 0x0003);
1612 	if (sc->rge_type == MAC_CFG3) {
1613 		RGE_MAC_CLRBIT(sc, 0xeb62, 0x0006);
1614 		RGE_PHY_CLRBIT(sc, 0xa432, 0x0010);
1615 	}
1616 	RGE_PHY_CLRBIT(sc, 0xa5d0, 0x0006);
1617 	RGE_PHY_CLRBIT(sc, 0xa6d4, 0x0001);
1618 	RGE_PHY_CLRBIT(sc, 0xa6d8, 0x0010);
1619 	RGE_PHY_CLRBIT(sc, 0xa428, 0x0080);
1620 	RGE_PHY_CLRBIT(sc, 0xa4a2, 0x0200);
1621 
1622 	/* Advanced EEE. */
1623 	rge_patch_phy_mcu(sc, 1);
1624 	RGE_MAC_CLRBIT(sc, 0xe052, 0x0001);
1625 	RGE_PHY_CLRBIT(sc, 0xa442, 0x3000);
1626 	RGE_PHY_CLRBIT(sc, 0xa430, 0x8000);
1627 	rge_patch_phy_mcu(sc, 0);
1628 
1629 	return (0);
1630 }
1631 
1632 void
1633 rge_phy_config_mac_cfg3(struct rge_softc *sc)
1634 {
1635 	uint16_t val;
1636 	int i;
1637 	static const uint16_t mac_cfg3_a438_value[] =
1638 	    { 0x0043, 0x00a7, 0x00d6, 0x00ec, 0x00f6, 0x00fb, 0x00fd, 0x00ff,
1639 	      0x00bb, 0x0058, 0x0029, 0x0013, 0x0009, 0x0004, 0x0002 };
1640 
1641 	static const uint16_t mac_cfg3_b88e_value[] =
1642 	    { 0xc091, 0x6e12, 0xc092, 0x1214, 0xc094, 0x1516, 0xc096, 0x171b,
1643 	      0xc098, 0x1b1c, 0xc09a, 0x1f1f, 0xc09c, 0x2021, 0xc09e, 0x2224,
1644 	      0xc0a0, 0x2424, 0xc0a2, 0x2424, 0xc0a4, 0x2424, 0xc018, 0x0af2,
1645 	      0xc01a, 0x0d4a, 0xc01c, 0x0f26, 0xc01e, 0x118d, 0xc020, 0x14f3,
1646 	      0xc022, 0x175a, 0xc024, 0x19c0, 0xc026, 0x1c26, 0xc089, 0x6050,
1647 	      0xc08a, 0x5f6e, 0xc08c, 0x6e6e, 0xc08e, 0x6e6e, 0xc090, 0x6e12 };
1648 
1649 	rge_phy_config_mcu(sc, RGE_MAC_CFG3_MCODE_VER);
1650 
1651 	RGE_PHY_SETBIT(sc, 0xad4e, 0x0010);
1652 	val = rge_read_phy_ocp(sc, 0xad16) & ~0x03ff;
1653 	rge_write_phy_ocp(sc, 0xad16, val | 0x03ff);
1654 	val = rge_read_phy_ocp(sc, 0xad32) & ~0x003f;
1655 	rge_write_phy_ocp(sc, 0xad32, val | 0x0006);
1656 	RGE_PHY_CLRBIT(sc, 0xac08, 0x1000);
1657 	RGE_PHY_CLRBIT(sc, 0xac08, 0x0100);
1658 	val = rge_read_phy_ocp(sc, 0xacc0) & ~0x0003;
1659 	rge_write_phy_ocp(sc, 0xacc0, val | 0x0002);
1660 	val = rge_read_phy_ocp(sc, 0xad40) & ~0x00e0;
1661 	rge_write_phy_ocp(sc, 0xad40, val | 0x0040);
1662 	val = rge_read_phy_ocp(sc, 0xad40) & ~0x0007;
1663 	rge_write_phy_ocp(sc, 0xad40, val | 0x0004);
1664 	RGE_PHY_CLRBIT(sc, 0xac14, 0x0080);
1665 	RGE_PHY_CLRBIT(sc, 0xac80, 0x0300);
1666 	val = rge_read_phy_ocp(sc, 0xac5e) & ~0x0007;
1667 	rge_write_phy_ocp(sc, 0xac5e, val | 0x0002);
1668 	rge_write_phy_ocp(sc, 0xad4c, 0x00a8);
1669 	rge_write_phy_ocp(sc, 0xac5c, 0x01ff);
1670 	val = rge_read_phy_ocp(sc, 0xac8a) & ~0x00f0;
1671 	rge_write_phy_ocp(sc, 0xac8a, val | 0x0030);
1672 	rge_write_phy_ocp(sc, 0xb87c, 0x8157);
1673 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
1674 	rge_write_phy_ocp(sc, 0xb87e, val | 0x0500);
1675 	rge_write_phy_ocp(sc, 0xb87c, 0x8159);
1676 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
1677 	rge_write_phy_ocp(sc, 0xb87e, val | 0x0700);
1678 	rge_write_phy_ocp(sc, 0xb87c, 0x80a2);
1679 	rge_write_phy_ocp(sc, 0xb87e, 0x0153);
1680 	rge_write_phy_ocp(sc, 0xb87c, 0x809c);
1681 	rge_write_phy_ocp(sc, 0xb87e, 0x0153);
1682 
1683 	rge_write_phy_ocp(sc, 0xa436, 0x81b3);
1684 	for (i = 0; i < nitems(mac_cfg3_a438_value); i++)
1685 		rge_write_phy_ocp(sc, 0xa438, mac_cfg3_a438_value[i]);
1686 	for (i = 0; i < 26; i++)
1687 		rge_write_phy_ocp(sc, 0xa438, 0);
1688 	rge_write_phy_ocp(sc, 0xa436, 0x8257);
1689 	rge_write_phy_ocp(sc, 0xa438, 0x020f);
1690 	rge_write_phy_ocp(sc, 0xa436, 0x80ea);
1691 	rge_write_phy_ocp(sc, 0xa438, 0x7843);
1692 
1693 	rge_patch_phy_mcu(sc, 1);
1694 	RGE_PHY_CLRBIT(sc, 0xb896, 0x0001);
1695 	RGE_PHY_CLRBIT(sc, 0xb892, 0xff00);
1696 	for (i = 0; i < nitems(mac_cfg3_b88e_value); i += 2) {
1697 		rge_write_phy_ocp(sc, 0xb88e, mac_cfg3_b88e_value[i]);
1698 		rge_write_phy_ocp(sc, 0xb890, mac_cfg3_b88e_value[i + 1]);
1699 	}
1700 	RGE_PHY_SETBIT(sc, 0xb896, 0x0001);
1701 	rge_patch_phy_mcu(sc, 0);
1702 
1703 	RGE_PHY_SETBIT(sc, 0xd068, 0x2000);
1704 	rge_write_phy_ocp(sc, 0xa436, 0x81a2);
1705 	RGE_PHY_SETBIT(sc, 0xa438, 0x0100);
1706 	val = rge_read_phy_ocp(sc, 0xb54c) & ~0xff00;
1707 	rge_write_phy_ocp(sc, 0xb54c, val | 0xdb00);
1708 	RGE_PHY_CLRBIT(sc, 0xa454, 0x0001);
1709 	RGE_PHY_SETBIT(sc, 0xa5d4, 0x0020);
1710 	RGE_PHY_CLRBIT(sc, 0xad4e, 0x0010);
1711 	RGE_PHY_CLRBIT(sc, 0xa86a, 0x0001);
1712 	RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
1713 	RGE_PHY_SETBIT(sc, 0xa424, 0x0008);
1714 }
1715 
1716 void
1717 rge_phy_config_mac_cfg5(struct rge_softc *sc)
1718 {
1719 	uint16_t val;
1720 	int i;
1721 
1722 	rge_phy_config_mcu(sc, RGE_MAC_CFG5_MCODE_VER);
1723 
1724 	RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
1725 	val = rge_read_phy_ocp(sc, 0xac46) & ~0x00f0;
1726 	rge_write_phy_ocp(sc, 0xac46, val | 0x0090);
1727 	val = rge_read_phy_ocp(sc, 0xad30) & ~0x0003;
1728 	rge_write_phy_ocp(sc, 0xad30, val | 0x0001);
1729 	rge_write_phy_ocp(sc, 0xb87c, 0x80f5);
1730 	rge_write_phy_ocp(sc, 0xb87e, 0x760e);
1731 	rge_write_phy_ocp(sc, 0xb87c, 0x8107);
1732 	rge_write_phy_ocp(sc, 0xb87e, 0x360e);
1733 	rge_write_phy_ocp(sc, 0xb87c, 0x8551);
1734 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
1735 	rge_write_phy_ocp(sc, 0xb87e, val | 0x0800);
1736 	val = rge_read_phy_ocp(sc, 0xbf00) & ~0xe000;
1737 	rge_write_phy_ocp(sc, 0xbf00, val | 0xa000);
1738 	val = rge_read_phy_ocp(sc, 0xbf46) & ~0x0f00;
1739 	rge_write_phy_ocp(sc, 0xbf46, val | 0x0300);
1740 	for (i = 0; i < 10; i++) {
1741 		rge_write_phy_ocp(sc, 0xa436, 0x8044 + i * 6);
1742 		rge_write_phy_ocp(sc, 0xa438, 0x2417);
1743 	}
1744 	RGE_PHY_SETBIT(sc, 0xa4ca, 0x0040);
1745 	val = rge_read_phy_ocp(sc, 0xbf84) & ~0xe000;
1746 	rge_write_phy_ocp(sc, 0xbf84, val | 0xa000);
1747 	rge_write_phy_ocp(sc, 0xa436, 0x8170);
1748 	val = rge_read_phy_ocp(sc, 0xa438) & ~0x2700;
1749 	rge_write_phy_ocp(sc, 0xa438, val | 0xd800);
1750 	RGE_PHY_SETBIT(sc, 0xa424, 0x0008);
1751 }
1752 
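/*
 * Load the PHY microcode, but only if the version already present in the
 * PHY (indirect register 0x801e) differs from mcode_version.
 */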
1753 void
1754 rge_phy_config_mcu(struct rge_softc *sc, uint16_t mcode_version)
1755 {
1756 	if (sc->rge_mcodever != mcode_version) {
1757 		int i;
1758 
1759 		rge_patch_phy_mcu(sc, 1);
1760 
1761 		if (sc->rge_type == MAC_CFG3) {
1762 			rge_write_phy_ocp(sc, 0xa436, 0x8024);
1763 			rge_write_phy_ocp(sc, 0xa438, 0x8601);
1764 			rge_write_phy_ocp(sc, 0xa436, 0xb82e);
1765 			rge_write_phy_ocp(sc, 0xa438, 0x0001);
1766 
1767 			RGE_PHY_SETBIT(sc, 0xb820, 0x0080);
1768 
1769 			for (i = 0; i < nitems(rtl8125_mac_cfg3_mcu); i++) {
1770 				rge_write_phy_ocp(sc,
1771 				    rtl8125_mac_cfg3_mcu[i].reg,
1772 				    rtl8125_mac_cfg3_mcu[i].val);
1773 			}
1774 
1775 			RGE_PHY_CLRBIT(sc, 0xb820, 0x0080);
1776 
1777 			rge_write_phy_ocp(sc, 0xa436, 0);
1778 			rge_write_phy_ocp(sc, 0xa438, 0);
1779 			RGE_PHY_CLRBIT(sc, 0xb82e, 0x0001);
1780 			rge_write_phy_ocp(sc, 0xa436, 0x8024);
1781 			rge_write_phy_ocp(sc, 0xa438, 0);
1782 		} else if (sc->rge_type == MAC_CFG5) {
1783 			for (i = 0; i < nitems(rtl8125_mac_cfg5_mcu); i++) {
1784 				rge_write_phy_ocp(sc,
1785 				    rtl8125_mac_cfg5_mcu[i].reg,
1786 				    rtl8125_mac_cfg5_mcu[i].val);
1787 			}
1788 		}
1789 
1790 		rge_patch_phy_mcu(sc, 0);
1791 
1792 		/* Write microcode version. */
1793 		rge_write_phy_ocp(sc, 0xa436, 0x801e);
1794 		rge_write_phy_ocp(sc, 0xa438, mcode_version);
1795 	}
1796 }
1797 
1798 void
1799 rge_set_macaddr(struct rge_softc *sc, const uint8_t *addr)
1800 {
1801 	RGE_SETBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
1802 	RGE_WRITE_4(sc, RGE_MAC0,
1803 	    addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
1804 	RGE_WRITE_4(sc, RGE_MAC4,
1805 	    addr[5] <<  8 | addr[4]);
1806 	RGE_CLRBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
1807 }
1808 
1809 void
1810 rge_get_macaddr(struct rge_softc *sc, uint8_t *addr)
1811 {
1812 	int i;
1813 
1814 	for (i = 0; i < ETHER_ADDR_LEN; i++)
1815 		addr[i] = RGE_READ_1(sc, RGE_MAC0 + i);
1816 
1817 	*(uint32_t *)&addr[0] = RGE_READ_4(sc, RGE_ADDR0);
1818 	*(uint16_t *)&addr[4] = RGE_READ_2(sc, RGE_ADDR1);
1819 
1820 	rge_set_macaddr(sc, addr);
1821 }
1822 
1823 void
1824 rge_hw_init(struct rge_softc *sc)
1825 {
1826 	uint16_t reg;
1827 	int i, npages;
1828 
1829 	rge_disable_aspm_clkreq(sc);
1830 	RGE_CLRBIT_1(sc, 0xf1, 0x80);
1831 
1832 	/* Disable UPS. */
1833 	RGE_MAC_CLRBIT(sc, 0xd40a, 0x0010);
1834 
1835 	/* Disable MAC MCU. */
1836 	rge_disable_aspm_clkreq(sc);
1837 	rge_write_mac_ocp(sc, 0xfc48, 0);
1838 	for (reg = 0xfc28; reg < 0xfc48; reg += 2)
1839 		rge_write_mac_ocp(sc, reg, 0);
1840 	DELAY(3000);
1841 	rge_write_mac_ocp(sc, 0xfc26, 0);
1842 
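	/*
	 * Load the MAC MCU patch: MAC_CFG3 walks three MCU RAM pages using
	 * the rtl8125_mac_bps table, while MAC_CFG5 loads rtl8125b_mac_bps
	 * into a single page.
	 */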
1843 	if (sc->rge_type == MAC_CFG3) {
1844 		for (npages = 0; npages < 3; npages++) {
1845 			rge_switch_mcu_ram_page(sc, npages);
1846 			for (i = 0; i < nitems(rtl8125_mac_bps); i++) {
1847 				if (npages == 0)
1848 					rge_write_mac_ocp(sc,
1849 					    rtl8125_mac_bps[i].reg,
1850 					    rtl8125_mac_bps[i].val);
1851 				else if (npages == 1)
1852 					rge_write_mac_ocp(sc,
1853 					    rtl8125_mac_bps[i].reg, 0);
1854 				else {
1855 					if (rtl8125_mac_bps[i].reg < 0xf9f8)
1856 						rge_write_mac_ocp(sc,
1857 						    rtl8125_mac_bps[i].reg, 0);
1858 				}
1859 			}
1860 			if (npages == 2) {
1861 				rge_write_mac_ocp(sc, 0xf9f8, 0x6486);
1862 				rge_write_mac_ocp(sc, 0xf9fa, 0x0b15);
1863 				rge_write_mac_ocp(sc, 0xf9fc, 0x090e);
1864 				rge_write_mac_ocp(sc, 0xf9fe, 0x1139);
1865 			}
1866 		}
1867 		rge_write_mac_ocp(sc, 0xfc26, 0x8000);
1868 		rge_write_mac_ocp(sc, 0xfc2a, 0x0540);
1869 		rge_write_mac_ocp(sc, 0xfc2e, 0x0a06);
1870 		rge_write_mac_ocp(sc, 0xfc30, 0x0eb8);
1871 		rge_write_mac_ocp(sc, 0xfc32, 0x3a5c);
1872 		rge_write_mac_ocp(sc, 0xfc34, 0x10a8);
1873 		rge_write_mac_ocp(sc, 0xfc40, 0x0d54);
1874 		rge_write_mac_ocp(sc, 0xfc42, 0x0e24);
1875 		rge_write_mac_ocp(sc, 0xfc48, 0x307a);
1876 	} else if (sc->rge_type == MAC_CFG5) {
1877 		rge_switch_mcu_ram_page(sc, 0);
1878 		for (i = 0; i < nitems(rtl8125b_mac_bps); i++) {
1879 			rge_write_mac_ocp(sc, rtl8125b_mac_bps[i].reg,
1880 			    rtl8125b_mac_bps[i].val);
1881 		}
1882 	}
1883 
1884 	/* Disable PHY power saving. */
1885 	rge_disable_phy_ocp_pwrsave(sc);
1886 
1887 	/* Set PCIe uncorrectable error status. */
1888 	rge_write_csi(sc, 0x108,
1889 	    rge_read_csi(sc, 0x108) | 0x00100000);
1890 }
1891 
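/* Mask and acknowledge all interrupts, clear the timers, reset the chip. */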
1892 void
1893 rge_hw_reset(struct rge_softc *sc)
1894 {
1895 	/* Disable interrupts. */
1896 	RGE_WRITE_4(sc, RGE_IMR, 0);
1897 	RGE_WRITE_4(sc, RGE_ISR, RGE_READ_4(sc, RGE_ISR));
1898 
1899 	/* Clear timer interrupts. */
1900 	RGE_WRITE_4(sc, RGE_TIMERINT0, 0);
1901 	RGE_WRITE_4(sc, RGE_TIMERINT1, 0);
1902 	RGE_WRITE_4(sc, RGE_TIMERINT2, 0);
1903 	RGE_WRITE_4(sc, RGE_TIMERINT3, 0);
1904 
1905 	rge_reset(sc);
1906 }
1907 
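/*
 * Disable PHY power saving by rewriting undocumented PHY OCP register
 * 0xc416 while PHY MCU patch mode is held.
 */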
1908 void
1909 rge_disable_phy_ocp_pwrsave(struct rge_softc *sc)
1910 {
1911 	if (rge_read_phy_ocp(sc, 0xc416) != 0x0500) {
1912 		rge_patch_phy_mcu(sc, 1);
1913 		rge_write_phy_ocp(sc, 0xc416, 0);
1914 		rge_write_phy_ocp(sc, 0xc416, 0x0500);
1915 		rge_patch_phy_mcu(sc, 0);
1916 	}
1917 }
1918 
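/*
 * Enter (set != 0) or leave PHY MCU patch mode, polling an undocumented
 * status bit until the PHY acknowledges the request.
 */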
1919 void
1920 rge_patch_phy_mcu(struct rge_softc *sc, int set)
1921 {
1922 	int i;
1923 
1924 	if (set)
1925 		RGE_PHY_SETBIT(sc, 0xb820, 0x0010);
1926 	else
1927 		RGE_PHY_CLRBIT(sc, 0xb820, 0x0010);
1928 
1929 	for (i = 0; i < 1000; i++) {
1930 		if (set) {
1931 			if ((rge_read_phy_ocp(sc, 0xb800) & 0x0040) != 0)
1932 				break;
1933 		} else {
1934 			if (!(rge_read_phy_ocp(sc, 0xb800) & 0x0040))
1935 				break;
1936 		}
1937 		DELAY(100);
1938 	}
1939 	if (i == 1000)
1940 		printf("%s: timeout waiting to patch phy mcu\n",
1941 		    sc->sc_dev.dv_xname);
1942 }
1943 
1944 void
1945 rge_add_media_types(struct rge_softc *sc)
1946 {
1947 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_10_T, 0, NULL);
1948 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_10_T | IFM_FDX, 0, NULL);
1949 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_100_TX, 0, NULL);
1950 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_100_TX | IFM_FDX, 0, NULL);
1951 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_1000_T, 0, NULL);
1952 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
1953 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_2500_T, 0, NULL);
1954 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_2500_T | IFM_FDX, 0, NULL);
1955 }
1956 
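/*
 * Select the interrupt sources to unmask: the default set for
 * RGE_IMTYPE_NONE, or the timer-based set when simulated interrupt
 * moderation is used.
 */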
1957 void
1958 rge_config_imtype(struct rge_softc *sc, int imtype)
1959 {
1960 	switch (imtype) {
1961 	case RGE_IMTYPE_NONE:
1962 		sc->rge_intrs = RGE_INTRS;
1963 		break;
1964 	case RGE_IMTYPE_SIM:
1965 		sc->rge_intrs = RGE_INTRS_TIMER;
1966 		break;
1967 	default:
1968 		panic("%s: unknown imtype %d", sc->sc_dev.dv_xname, imtype);
1969 	}
1970 }
1971 
1972 void
1973 rge_disable_aspm_clkreq(struct rge_softc *sc)
1974 {
1975 	RGE_SETBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
1976 	RGE_CLRBIT_1(sc, RGE_CFG2, RGE_CFG2_CLKREQ_EN);
1977 	RGE_CLRBIT_1(sc, RGE_CFG5, RGE_CFG5_PME_STS);
1978 	RGE_CLRBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
1979 }
1980 
1981 void
1982 rge_disable_hw_im(struct rge_softc *sc)
1983 {
1984 	RGE_WRITE_2(sc, RGE_IM, 0);
1985 }
1986 
1987 void
1988 rge_disable_sim_im(struct rge_softc *sc)
1989 {
1990 	RGE_WRITE_4(sc, RGE_TIMERINT0, 0);
1991 	sc->rge_timerintr = 0;
1992 }
1993 
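/*
 * Simulated interrupt moderation: arm the TIMERINT0 timer so receive
 * and transmit processing is paced by timer interrupts rather than by
 * per-packet interrupts.
 */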
1994 void
1995 rge_setup_sim_im(struct rge_softc *sc)
1996 {
1997 	RGE_WRITE_4(sc, RGE_TIMERINT0, 0x2600);
1998 	RGE_WRITE_4(sc, RGE_TIMERCNT, 1);
1999 	sc->rge_timerintr = 1;
2000 }
2001 
2002 void
2003 rge_setup_intr(struct rge_softc *sc, int imtype)
2004 {
2005 	rge_config_imtype(sc, imtype);
2006 
2007 	/* Enable interrupts. */
2008 	RGE_WRITE_4(sc, RGE_IMR, sc->rge_intrs);
2009 
2010 	switch (imtype) {
2011 	case RGE_IMTYPE_NONE:
2012 		rge_disable_sim_im(sc);
2013 		rge_disable_hw_im(sc);
2014 		break;
2015 	case RGE_IMTYPE_SIM:
2016 		rge_disable_hw_im(sc);
2017 		rge_setup_sim_im(sc);
2018 		break;
2019 	default:
2020 		panic("%s: unknown imtype %d", sc->sc_dev.dv_xname, imtype);
2021 	}
2022 }
2023 
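/* Select a MAC MCU RAM page via MAC OCP register 0xe446. */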
2024 void
2025 rge_switch_mcu_ram_page(struct rge_softc *sc, int page)
2026 {
2027 	uint16_t val;
2028 
2029 	val = rge_read_mac_ocp(sc, 0xe446) & ~0x0003;
2030 	val |= page;
2031 	rge_write_mac_ocp(sc, 0xe446, val);
2032 }
2033 
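/*
 * Take the chip out of out-of-band (management) mode so the driver can
 * use it: stop packet reception, disable RealWoW and OOB operation, and
 * wait for the undocumented handshake bits to settle.
 */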
2034 void
2035 rge_exit_oob(struct rge_softc *sc)
2036 {
2037 	int i;
2038 
2039 	RGE_CLRBIT_4(sc, RGE_RXCFG, RGE_RXCFG_ALLPHYS | RGE_RXCFG_INDIV |
2040 	    RGE_RXCFG_MULTI | RGE_RXCFG_BROAD | RGE_RXCFG_RUNT |
2041 	    RGE_RXCFG_ERRPKT);
2042 
2043 	/* Disable RealWoW. */
2044 	rge_write_mac_ocp(sc, 0xc0bc, 0x00ff);
2045 
2046 	rge_reset(sc);
2047 
2048 	/* Disable OOB. */
2049 	RGE_CLRBIT_1(sc, RGE_MCUCMD, RGE_MCUCMD_IS_OOB);
2050 
2051 	RGE_MAC_CLRBIT(sc, 0xe8de, 0x4000);
2052 
2053 	for (i = 0; i < 10; i++) {
2054 		DELAY(100);
2055 		if (RGE_READ_2(sc, RGE_TWICMD) & 0x0200)
2056 			break;
2057 	}
2058 
2059 	rge_write_mac_ocp(sc, 0xc0aa, 0x07d0);
2060 	rge_write_mac_ocp(sc, 0xc0a6, 0x01b5);
2061 	rge_write_mac_ocp(sc, 0xc01e, 0x5555);
2062 
2063 	for (i = 0; i < 10; i++) {
2064 		DELAY(100);
2065 		if (RGE_READ_2(sc, RGE_TWICMD) & 0x0200)
2066 			break;
2067 	}
2068 
2069 	if (rge_read_mac_ocp(sc, 0xd42c) & 0x0100) {
2070 		for (i = 0; i < RGE_TIMEOUT; i++) {
2071 			if ((rge_read_phy_ocp(sc, 0xa420) & 0x0007) == 2)
2072 				break;
2073 			DELAY(1000);
2074 		}
2075 		RGE_MAC_CLRBIT(sc, 0xd42c, 0x0100);
2076 		if (sc->rge_type != MAC_CFG3)
2077 			RGE_PHY_CLRBIT(sc, 0xa466, 0x0001);
2078 		RGE_PHY_CLRBIT(sc, 0xa468, 0x000a);
2079 	}
2080 }
2081 
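/*
 * CSI access: indirectly read and write PCIe configuration space
 * through the RGE_CSIAR/RGE_CSIDR register pair.  Writes poll for the
 * BUSY flag to clear; reads complete once the flag is set.
 */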
2082 void
2083 rge_write_csi(struct rge_softc *sc, uint32_t reg, uint32_t val)
2084 {
2085 	int i;
2086 
2087 	RGE_WRITE_4(sc, RGE_CSIDR, val);
2088 	RGE_WRITE_4(sc, RGE_CSIAR, (reg & RGE_CSIAR_ADDR_MASK) |
2089 	    (RGE_CSIAR_BYTE_EN << RGE_CSIAR_BYTE_EN_SHIFT) | RGE_CSIAR_BUSY);
2090 
2091 	for (i = 0; i < 20000; i++) {
2092 		DELAY(1);
2093 		if (!(RGE_READ_4(sc, RGE_CSIAR) & RGE_CSIAR_BUSY))
2094 			break;
2095 	}
2096 
2097 	DELAY(20);
2098 }
2099 
2100 uint32_t
2101 rge_read_csi(struct rge_softc *sc, uint32_t reg)
2102 {
2103 	int i;
2104 
2105 	RGE_WRITE_4(sc, RGE_CSIAR, (reg & RGE_CSIAR_ADDR_MASK) |
2106 	    (RGE_CSIAR_BYTE_EN << RGE_CSIAR_BYTE_EN_SHIFT));
2107 
2108 	for (i = 0; i < 20000; i++) {
2109 		DELAY(1);
2110 		if (RGE_READ_4(sc, RGE_CSIAR) & RGE_CSIAR_BUSY)
2111 			break;
2112 	}
2113 
2114 	DELAY(20);
2115 
2116 	return (RGE_READ_4(sc, RGE_CSIDR));
2117 }
2118 
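/*
 * MAC OCP access through RGE_MACOCP: the halved register offset goes
 * into the address field, the data into the low 16 bits, and the BUSY
 * flag distinguishes a write from a read.
 */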
2119 void
2120 rge_write_mac_ocp(struct rge_softc *sc, uint16_t reg, uint16_t val)
2121 {
2122 	uint32_t tmp;
2123 
2124 	tmp = (reg >> 1) << RGE_MACOCP_ADDR_SHIFT;
2125 	tmp += val;
2126 	tmp |= RGE_MACOCP_BUSY;
2127 	RGE_WRITE_4(sc, RGE_MACOCP, tmp);
2128 }
2129 
2130 uint16_t
2131 rge_read_mac_ocp(struct rge_softc *sc, uint16_t reg)
2132 {
2133 	uint32_t val;
2134 
2135 	val = (reg >> 1) << RGE_MACOCP_ADDR_SHIFT;
2136 	RGE_WRITE_4(sc, RGE_MACOCP, val);
2137 
2138 	return (RGE_READ_4(sc, RGE_MACOCP) & RGE_MACOCP_DATA_MASK);
2139 }
2140 
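/*
 * PCIe EPHY access through RGE_EPHYAR.  Writes poll for the BUSY flag
 * to clear; reads complete once the flag is set and the data field is
 * valid.
 */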
2141 void
2142 rge_write_ephy(struct rge_softc *sc, uint16_t reg, uint16_t val)
2143 {
2144 	uint32_t tmp;
2145 	int i;
2146 
2147 	tmp = (reg & RGE_EPHYAR_ADDR_MASK) << RGE_EPHYAR_ADDR_SHIFT;
2148 	tmp |= RGE_EPHYAR_BUSY | (val & RGE_EPHYAR_DATA_MASK);
2149 	RGE_WRITE_4(sc, RGE_EPHYAR, tmp);
2150 
2151 	for (i = 0; i < 10; i++) {
2152 		DELAY(100);
2153 		if (!(RGE_READ_4(sc, RGE_EPHYAR) & RGE_EPHYAR_BUSY))
2154 			break;
2155 	}
2156 
2157 	DELAY(20);
2158 }
2159 
2160 uint16_t
2161 rge_read_ephy(struct rge_softc *sc, uint16_t reg)
2162 {
2163 	uint32_t val;
2164 	int i;
2165 
2166 	val = (reg & RGE_EPHYAR_ADDR_MASK) << RGE_EPHYAR_ADDR_SHIFT;
2167 	RGE_WRITE_4(sc, RGE_EPHYAR, val);
2168 
2169 	for (i = 0; i < 10; i++) {
2170 		DELAY(100);
2171 		val = RGE_READ_4(sc, RGE_EPHYAR);
2172 		if (val & RGE_EPHYAR_BUSY)
2173 			break;
2174 	}
2175 
2176 	DELAY(20);
2177 
2178 	return (val & RGE_EPHYAR_DATA_MASK);
2179 }
2180 
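/*
 * Indirect PHY register access: translate the (addr, reg) pair into a
 * PHY OCP address and go through the PHY OCP accessors below.
 */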
2181 void
2182 rge_write_phy(struct rge_softc *sc, uint16_t addr, uint16_t reg, uint16_t val)
2183 {
2184 	uint16_t off, phyaddr;
2185 
2186 	phyaddr = addr ? addr : RGE_PHYBASE + (reg / 8);
2187 	phyaddr <<= 4;
2188 
2189 	off = addr ? reg : 0x10 + (reg % 8);
2190 
2191 	phyaddr += (off - 16) << 1;
2192 
2193 	rge_write_phy_ocp(sc, phyaddr, val);
2194 }
2195 
2196 uint16_t
2197 rge_read_phy(struct rge_softc *sc, uint16_t addr, uint16_t reg)
2198 {
2199 	uint16_t off, phyaddr;
2200 
2201 	phyaddr = addr ? addr : RGE_PHYBASE + (reg / 8);
2202 	phyaddr <<= 4;
2203 
2204 	off = addr ? reg : 0x10 + (reg % 8);
2205 
2206 	phyaddr += (off - 16) << 1;
2207 
2208 	return (rge_read_phy_ocp(sc, phyaddr));
2209 }
2210 
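/*
 * PHY OCP access through RGE_PHYOCP, polled like the MAC OCP and EPHY
 * accessors above.
 */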
2211 void
2212 rge_write_phy_ocp(struct rge_softc *sc, uint16_t reg, uint16_t val)
2213 {
2214 	uint32_t tmp;
2215 	int i;
2216 
2217 	tmp = (reg >> 1) << RGE_PHYOCP_ADDR_SHIFT;
2218 	tmp |= RGE_PHYOCP_BUSY | val;
2219 	RGE_WRITE_4(sc, RGE_PHYOCP, tmp);
2220 
2221 	for (i = 0; i < RGE_TIMEOUT; i++) {
2222 		DELAY(1);
2223 		if (!(RGE_READ_4(sc, RGE_PHYOCP) & RGE_PHYOCP_BUSY))
2224 			break;
2225 	}
2226 }
2227 
2228 uint16_t
2229 rge_read_phy_ocp(struct rge_softc *sc, uint16_t reg)
2230 {
2231 	uint32_t val;
2232 	int i;
2233 
2234 	val = (reg >> 1) << RGE_PHYOCP_ADDR_SHIFT;
2235 	RGE_WRITE_4(sc, RGE_PHYOCP, val);
2236 
2237 	for (i = 0; i < RGE_TIMEOUT; i++) {
2238 		DELAY(1);
2239 		val = RGE_READ_4(sc, RGE_PHYOCP);
2240 		if (val & RGE_PHYOCP_BUSY)
2241 			break;
2242 	}
2243 
2244 	return (val & RGE_PHYOCP_DATA_MASK);
2245 }
2246 
2247 int
2248 rge_get_link_status(struct rge_softc *sc)
2249 {
2250 	return ((RGE_READ_2(sc, RGE_PHYSTAT) & RGE_PHYSTAT_LINK) ? 1 : 0);
2251 }
2252 
2253 void
2254 rge_txstart(void *arg)
2255 {
2256 	struct rge_softc *sc = arg;
2257 
2258 	RGE_WRITE_2(sc, RGE_TXSTART, RGE_TXSTART_START);
2259 }
2260 
2261 void
2262 rge_tick(void *arg)
2263 {
2264 	struct rge_softc *sc = arg;
2265 	int s;
2266 
2267 	s = splnet();
2268 	rge_link_state(sc);
2269 	splx(s);
2270 
2271 	timeout_add_sec(&sc->sc_timeout, 1);
2272 }
2273 
2274 void
2275 rge_link_state(struct rge_softc *sc)
2276 {
2277 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
2278 	int link = LINK_STATE_DOWN;
2279 
2280 	if (rge_get_link_status(sc))
2281 		link = LINK_STATE_UP;
2282 
2283 	if (ifp->if_link_state != link) {
2284 		ifp->if_link_state = link;
2285 		if_link_state_change(ifp);
2286 	}
2287 }
2288 
2289 #ifndef SMALL_KERNEL
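/*
 * Arm or disarm wake-on-LAN.  WoL requires power management to be
 * enabled in RGE_CFG1; all wake sources other than LANWAKE are cleared
 * here.
 */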
2290 int
2291 rge_wol(struct ifnet *ifp, int enable)
2292 {
2293 	struct rge_softc *sc = ifp->if_softc;
2294 
2295 	if (enable) {
2296 		if (!(RGE_READ_1(sc, RGE_CFG1) & RGE_CFG1_PM_EN)) {
2297 			printf("%s: power management is disabled, "
2298 			    "cannot do WOL\n", sc->sc_dev.dv_xname);
2299 			return (ENOTSUP);
2300 		}
2302 	}
2303 
2304 	rge_iff(sc);
2305 
2306 	if (enable)
2307 		RGE_MAC_SETBIT(sc, 0xc0b6, 0x0001);
2308 	else
2309 		RGE_MAC_CLRBIT(sc, 0xc0b6, 0x0001);
2310 
2311 	RGE_SETBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
2312 	RGE_CLRBIT_1(sc, RGE_CFG5, RGE_CFG5_WOL_LANWAKE | RGE_CFG5_WOL_UCAST |
2313 	    RGE_CFG5_WOL_MCAST | RGE_CFG5_WOL_BCAST);
2314 	RGE_CLRBIT_1(sc, RGE_CFG3, RGE_CFG3_WOL_LINK | RGE_CFG3_WOL_MAGIC);
2315 	if (enable)
2316 		RGE_SETBIT_1(sc, RGE_CFG5, RGE_CFG5_WOL_LANWAKE);
2317 	RGE_CLRBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
2318 
2319 	return (0);
2320 }
2321 
2322 void
2323 rge_wol_power(struct rge_softc *sc)
2324 {
2325 	/* Disable RXDV gate. */
2326 	RGE_CLRBIT_1(sc, RGE_PPSW, 0x08);
2327 	DELAY(2000);
2328 
2329 	RGE_SETBIT_1(sc, RGE_CFG1, RGE_CFG1_PM_EN);
2330 	RGE_SETBIT_1(sc, RGE_CFG2, RGE_CFG2_PMSTS_EN);
2331 }
2332 #endif
2333 
2334 #if NKSTAT > 0
2335 
2336 #define RGE_DTCCR_CMD		(1U << 3)
2337 #define RGE_DTCCR_LO		0x10
2338 #define RGE_DTCCR_HI		0x14
2339 
2340 struct rge_kstats {
2341 	struct kstat_kv		tx_ok;
2342 	struct kstat_kv		rx_ok;
2343 	struct kstat_kv		tx_er;
2344 	struct kstat_kv		rx_er;
2345 	struct kstat_kv		miss_pkt;
2346 	struct kstat_kv		fae;
2347 	struct kstat_kv		tx_1col;
2348 	struct kstat_kv		tx_mcol;
2349 	struct kstat_kv		rx_ok_phy;
2350 	struct kstat_kv		rx_ok_brd;
2351 	struct kstat_kv		rx_ok_mul;
2352 	struct kstat_kv		tx_abt;
2353 	struct kstat_kv		tx_undrn;
2354 };
2355 
2356 static const struct rge_kstats rge_kstats_tpl = {
2357 	.tx_ok =	KSTAT_KV_UNIT_INITIALIZER("TxOk",
2358 			    KSTAT_KV_T_COUNTER64, KSTAT_KV_U_PACKETS),
2359 	.rx_ok =	KSTAT_KV_UNIT_INITIALIZER("RxOk",
2360 			    KSTAT_KV_T_COUNTER64, KSTAT_KV_U_PACKETS),
2361 	.tx_er =	KSTAT_KV_UNIT_INITIALIZER("TxEr",
2362 			    KSTAT_KV_T_COUNTER64, KSTAT_KV_U_PACKETS),
2363 	.rx_er =	KSTAT_KV_UNIT_INITIALIZER("RxEr",
2364 			    KSTAT_KV_T_COUNTER32, KSTAT_KV_U_PACKETS),
2365 	.miss_pkt =	KSTAT_KV_UNIT_INITIALIZER("MissPkt",
2366 			    KSTAT_KV_T_COUNTER16, KSTAT_KV_U_PACKETS),
2367 	.fae =		KSTAT_KV_UNIT_INITIALIZER("FAE",
2368 			    KSTAT_KV_T_COUNTER16, KSTAT_KV_U_PACKETS),
2369 	.tx_1col =	KSTAT_KV_UNIT_INITIALIZER("Tx1Col",
2370 			    KSTAT_KV_T_COUNTER32, KSTAT_KV_U_PACKETS),
2371 	.tx_mcol =	KSTAT_KV_UNIT_INITIALIZER("TxMCol",
2372 			    KSTAT_KV_T_COUNTER32, KSTAT_KV_U_PACKETS),
2373 	.rx_ok_phy =	KSTAT_KV_UNIT_INITIALIZER("RxOkPhy",
2374 			    KSTAT_KV_T_COUNTER64, KSTAT_KV_U_PACKETS),
2375 	.rx_ok_brd =	KSTAT_KV_UNIT_INITIALIZER("RxOkBrd",
2376 			    KSTAT_KV_T_COUNTER64, KSTAT_KV_U_PACKETS),
2377 	.rx_ok_mul =	KSTAT_KV_UNIT_INITIALIZER("RxOkMul",
2378 			    KSTAT_KV_T_COUNTER32, KSTAT_KV_U_PACKETS),
2379 	.tx_abt =	KSTAT_KV_UNIT_INITIALIZER("TxAbt",
2380 			    KSTAT_KV_T_COUNTER16, KSTAT_KV_U_PACKETS),
2381 	.tx_undrn =	KSTAT_KV_UNIT_INITIALIZER("TxUndrn",
2382 			    KSTAT_KV_T_COUNTER16, KSTAT_KV_U_PACKETS),
2383 };
2384 
2385 struct rge_kstat_softc {
2386 	struct rge_stats	*rge_ks_sc_stats;
2387 
2388 	bus_dmamap_t		 rge_ks_sc_map;
2389 	bus_dma_segment_t	 rge_ks_sc_seg;
2390 	int			 rge_ks_sc_nsegs;
2391 
2392 	struct rwlock		 rge_ks_sc_rwl;
2393 };
2394 
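/*
 * Ask the chip to DMA its tally counters into the pre-allocated buffer:
 * write the buffer's physical address together with the command bit to
 * the dump-tally-counter registers, then poll until the command bit
 * clears.
 */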
2395 static int
2396 rge_kstat_read(struct kstat *ks)
2397 {
2398 	struct rge_softc *sc = ks->ks_softc;
2399 	struct rge_kstat_softc *rge_ks_sc = ks->ks_ptr;
2400 	bus_dmamap_t map;
2401 	uint64_t cmd;
2402 	uint32_t reg;
2403 	uint8_t command;
2404 	int tmo;
2405 
2406 	command = RGE_READ_1(sc, RGE_CMD);
2407 	if (!ISSET(command, RGE_CMD_RXENB) || command == 0xff)
2408 		return (ENETDOWN);
2409 
2410 	map = rge_ks_sc->rge_ks_sc_map;
2411 	cmd = map->dm_segs[0].ds_addr | RGE_DTCCR_CMD;
2412 
2413 	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
2414 	    BUS_DMASYNC_PREREAD);
2415 
2416 	RGE_WRITE_4(sc, RGE_DTCCR_HI, cmd >> 32);
2417 	bus_space_barrier(sc->rge_btag, sc->rge_bhandle, RGE_DTCCR_HI, 8,
2418 	    BUS_SPACE_BARRIER_WRITE);
2419 	RGE_WRITE_4(sc, RGE_DTCCR_LO, cmd);
2420 	bus_space_barrier(sc->rge_btag, sc->rge_bhandle, RGE_DTCCR_LO, 4,
2421 	    BUS_SPACE_BARRIER_READ|BUS_SPACE_BARRIER_WRITE);
2422 
2423 	tmo = 1000;
2424 	do {
2425 		reg = RGE_READ_4(sc, RGE_DTCCR_LO);
2426 		if (!ISSET(reg, RGE_DTCCR_CMD))
2427 			break;
2428 
2429 		delay(10);
2430 		bus_space_barrier(sc->rge_btag, sc->rge_bhandle,
2431 		    RGE_DTCCR_LO, 4, BUS_SPACE_BARRIER_READ);
2432 	} while (--tmo);
2433 
2434 	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
2435 	    BUS_DMASYNC_POSTREAD);
2436 
2437 	if (ISSET(reg, RGE_DTCCR_CMD))
2438 		return (EIO);
2439 
2440 	nanouptime(&ks->ks_updated);
2441 
2442 	return (0);
2443 }
2444 
2445 static int
2446 rge_kstat_copy(struct kstat *ks, void *dst)
2447 {
2448 	struct rge_kstat_softc *rge_ks_sc = ks->ks_ptr;
2449 	struct rge_stats *rs = rge_ks_sc->rge_ks_sc_stats;
2450 	struct rge_kstats *kvs = dst;
2451 
2452 	*kvs = rge_kstats_tpl;
2453 	kstat_kv_u64(&kvs->tx_ok) = lemtoh64(&rs->rge_tx_ok);
2454 	kstat_kv_u64(&kvs->rx_ok) = lemtoh64(&rs->rge_rx_ok);
2455 	kstat_kv_u64(&kvs->tx_er) = lemtoh64(&rs->rge_tx_er);
2456 	kstat_kv_u32(&kvs->rx_er) = lemtoh32(&rs->rge_rx_er);
2457 	kstat_kv_u16(&kvs->miss_pkt) = lemtoh16(&rs->rge_miss_pkt);
2458 	kstat_kv_u16(&kvs->fae) = lemtoh16(&rs->rge_fae);
2459 	kstat_kv_u32(&kvs->tx_1col) = lemtoh32(&rs->rge_tx_1col);
2460 	kstat_kv_u32(&kvs->tx_mcol) = lemtoh32(&rs->rge_tx_mcol);
2461 	kstat_kv_u64(&kvs->rx_ok_phy) = lemtoh64(&rs->rge_rx_ok_phy);
2462 	kstat_kv_u64(&kvs->rx_ok_brd) = lemtoh64(&rs->rge_rx_ok_brd);
2463 	kstat_kv_u32(&kvs->rx_ok_mul) = lemtoh32(&rs->rge_rx_ok_mul);
2464 	kstat_kv_u16(&kvs->tx_abt) = lemtoh16(&rs->rge_tx_abt);
2465 	kstat_kv_u16(&kvs->tx_undrn) = lemtoh16(&rs->rge_tx_undrn);
2466 
2467 	return (0);
2468 }
2469 
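/*
 * Allocate DMA-safe memory for the hardware tally counters and register
 * the "re-stats" kstat that exports them.
 */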
2470 void
2471 rge_kstat_attach(struct rge_softc *sc)
2472 {
2473 	struct rge_kstat_softc *rge_ks_sc;
2474 	struct kstat *ks;
2475 
2476 	rge_ks_sc = malloc(sizeof(*rge_ks_sc), M_DEVBUF, M_NOWAIT);
2477 	if (rge_ks_sc == NULL) {
2478 		printf("%s: cannot allocate kstat softc\n",
2479 		    sc->sc_dev.dv_xname);
2480 		return;
2481 	}
2482 
2483 	if (bus_dmamap_create(sc->sc_dmat,
2484 	    sizeof(struct rge_stats), 1, sizeof(struct rge_stats), 0,
2485 	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT,
2486 	    &rge_ks_sc->rge_ks_sc_map) != 0) {
2487 		printf("%s: cannot create counter dma memory map\n",
2488 		    sc->sc_dev.dv_xname);
2489 		goto free;
2490 	}
2491 
2492 	if (bus_dmamem_alloc(sc->sc_dmat,
2493 	    sizeof(struct rge_stats), RGE_STATS_ALIGNMENT, 0,
2494 	    &rge_ks_sc->rge_ks_sc_seg, 1, &rge_ks_sc->rge_ks_sc_nsegs,
2495 	    BUS_DMA_NOWAIT | BUS_DMA_ZERO) != 0) {
2496 		printf("%s: cannot allocate counter dma memory\n",
2497 		    sc->sc_dev.dv_xname);
2498 		goto destroy;
2499 	}
2500 
2501 	if (bus_dmamem_map(sc->sc_dmat,
2502 	    &rge_ks_sc->rge_ks_sc_seg, rge_ks_sc->rge_ks_sc_nsegs,
2503 	    sizeof(struct rge_stats), (caddr_t *)&rge_ks_sc->rge_ks_sc_stats,
2504 	    BUS_DMA_NOWAIT) != 0) {
2505 		printf("%s: cannot map counter dma memory\n",
2506 		    sc->sc_dev.dv_xname);
2507 		goto freedma;
2508 	}
2509 
2510 	if (bus_dmamap_load(sc->sc_dmat, rge_ks_sc->rge_ks_sc_map,
2511 	    (caddr_t)rge_ks_sc->rge_ks_sc_stats, sizeof(struct rge_stats),
2512 	    NULL, BUS_DMA_NOWAIT) != 0) {
2513 		printf("%s: cannot load counter dma memory\n",
2514 		    sc->sc_dev.dv_xname);
2515 		goto unmap;
2516 	}
2517 
2518 	ks = kstat_create(sc->sc_dev.dv_xname, 0, "re-stats", 0,
2519 	    KSTAT_T_KV, 0);
2520 	if (ks == NULL) {
2521 		printf("%s: cannot create re-stats kstat\n",
2522 		    sc->sc_dev.dv_xname);
2523 		goto unload;
2524 	}
2525 
2526 	ks->ks_datalen = sizeof(rge_kstats_tpl);
2527 
2528 	rw_init(&rge_ks_sc->rge_ks_sc_rwl, "rgestats");
2529 	kstat_set_wlock(ks, &rge_ks_sc->rge_ks_sc_rwl);
2530 	ks->ks_softc = sc;
2531 	ks->ks_ptr = rge_ks_sc;
2532 	ks->ks_read = rge_kstat_read;
2533 	ks->ks_copy = rge_kstat_copy;
2534 
2535 	kstat_install(ks);
2536 
2537 	sc->sc_kstat = ks;
2538 
2539 	return;
2540 
2541 unload:
2542 	bus_dmamap_unload(sc->sc_dmat, rge_ks_sc->rge_ks_sc_map);
2543 unmap:
2544 	bus_dmamem_unmap(sc->sc_dmat,
2545 	    (caddr_t)rge_ks_sc->rge_ks_sc_stats, sizeof(struct rge_stats));
2546 freedma:
2547 	bus_dmamem_free(sc->sc_dmat, &rge_ks_sc->rge_ks_sc_seg, 1);
2548 destroy:
2549 	bus_dmamap_destroy(sc->sc_dmat, rge_ks_sc->rge_ks_sc_map);
2550 free:
2551 	free(rge_ks_sc, M_DEVBUF, sizeof(*rge_ks_sc));
2552 }
2553 #endif /* NKSTAT > 0 */
2554