/*	$OpenBSD: if_rge.c,v 1.16 2021/11/23 01:44:44 kevlo Exp $	*/

/*
 * Copyright (c) 2019, 2020 Kevin Lo <kevlo@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "bpfilter.h"
#include "vlan.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/device.h>
#include <sys/endian.h>

#include <net/if.h>
#include <net/if_media.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <machine/bus.h>
#include <machine/intr.h>

#include <dev/mii/mii.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_rgereg.h>

#ifdef RGE_DEBUG
#define DPRINTF(x)	do { if (rge_debug > 0) printf x; } while (0)
int rge_debug = 0;
#else
#define DPRINTF(x)
#endif

int		rge_match(struct device *, void *, void *);
void		rge_attach(struct device *, struct device *, void *);
int		rge_activate(struct device *, int);
int		rge_intr(void *);
int		rge_encap(struct rge_queues *, struct mbuf *, int);
int		rge_ioctl(struct ifnet *, u_long, caddr_t);
void		rge_start(struct ifqueue *);
void		rge_watchdog(struct ifnet *);
int		rge_init(struct ifnet *);
void		rge_stop(struct ifnet *);
int		rge_ifmedia_upd(struct ifnet *);
void		rge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
int		rge_allocmem(struct rge_softc *);
int		rge_newbuf(struct rge_queues *);
void		rge_discard_rxbuf(struct rge_queues *, int);
void		rge_rx_list_init(struct rge_queues *);
void		rge_tx_list_init(struct rge_queues *);
void		rge_fill_rx_ring(struct rge_queues *);
int		rge_rxeof(struct rge_queues *);
int		rge_txeof(struct rge_queues *);
void		rge_reset(struct rge_softc *);
void		rge_iff(struct rge_softc *);
void		rge_set_phy_power(struct rge_softc *, int);
void		rge_phy_config(struct rge_softc *);
void		rge_phy_config_mac_cfg2(struct rge_softc *);
void		rge_phy_config_mac_cfg3(struct rge_softc *);
void		rge_phy_config_mac_cfg4(struct rge_softc *);
void		rge_phy_config_mac_cfg5(struct rge_softc *);
void		rge_phy_config_mcu(struct rge_softc *, uint16_t);
void		rge_set_macaddr(struct rge_softc *, const uint8_t *);
void		rge_get_macaddr(struct rge_softc *, uint8_t *);
void		rge_hw_init(struct rge_softc *);
void		rge_disable_phy_ocp_pwrsave(struct rge_softc *);
void		rge_patch_phy_mcu(struct rge_softc *, int);
void		rge_add_media_types(struct rge_softc *);
void		rge_config_imtype(struct rge_softc *, int);
void		rge_disable_hw_im(struct rge_softc *);
void		rge_disable_sim_im(struct rge_softc *);
void		rge_setup_sim_im(struct rge_softc *);
void		rge_setup_intr(struct rge_softc *, int);
void		rge_exit_oob(struct rge_softc *);
void		rge_write_csi(struct rge_softc *, uint32_t, uint32_t);
uint32_t	rge_read_csi(struct rge_softc *, uint32_t);
void		rge_write_mac_ocp(struct rge_softc *, uint16_t, uint16_t);
uint16_t	rge_read_mac_ocp(struct rge_softc *, uint16_t);
void		rge_write_ephy(struct rge_softc *, uint16_t, uint16_t);
uint16_t	rge_read_ephy(struct rge_softc *, uint16_t);
void		rge_write_phy(struct rge_softc *, uint16_t, uint16_t, uint16_t);
uint16_t	rge_read_phy(struct rge_softc *, uint16_t, uint16_t);
void		rge_write_phy_ocp(struct rge_softc *, uint16_t, uint16_t);
uint16_t	rge_read_phy_ocp(struct rge_softc *, uint16_t);
int		rge_get_link_status(struct rge_softc *);
void		rge_txstart(void *);
void		rge_tick(void *);
void		rge_link_state(struct rge_softc *);
#ifndef SMALL_KERNEL
int		rge_wol(struct ifnet *, int);
void		rge_wol_power(struct rge_softc *);
#endif

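/*
 * Register/value pairs used to patch the chip microcode for each
 * supported MAC revision (presumably applied by rge_phy_config_mcu()).
 */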
static const struct {
	uint16_t reg;
	uint16_t val;
} rtl8125_mac_cfg2_mcu[] = {
	RTL8125_MAC_CFG2_MCU
}, rtl8125_mac_cfg3_mcu[] = {
	RTL8125_MAC_CFG3_MCU
}, rtl8125_mac_cfg4_mcu[] = {
	RTL8125_MAC_CFG4_MCU
}, rtl8125_mac_cfg5_mcu[] = {
	RTL8125_MAC_CFG5_MCU
};

struct cfattach rge_ca = {
	sizeof(struct rge_softc), rge_match, rge_attach, NULL, rge_activate
};

struct cfdriver rge_cd = {
	NULL, "rge", DV_IFNET
};

const struct pci_matchid rge_devices[] = {
	{ PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_E3000 },
	{ PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_RTL8125 }
};

int
rge_match(struct device *parent, void *match, void *aux)
{
	return (pci_matchbyid((struct pci_attach_args *)aux, rge_devices,
	    nitems(rge_devices)));
}

void
rge_attach(struct device *parent, struct device *self, void *aux)
{
	struct rge_softc *sc = (struct rge_softc *)self;
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	struct ifnet *ifp;
	struct rge_queues *q;
	pcireg_t reg;
	uint32_t hwrev;
	uint8_t eaddr[ETHER_ADDR_LEN];
	int offset;

	pci_set_powerstate(pa->pa_pc, pa->pa_tag, PCI_PMCSR_STATE_D0);

	/*
	 * Map control/status registers: prefer the 64-bit memory BAR,
	 * then fall back to the 32-bit memory BAR or to I/O space.
	 */
	if (pci_mapreg_map(pa, RGE_PCI_BAR2, PCI_MAPREG_TYPE_MEM |
	    PCI_MAPREG_MEM_TYPE_64BIT, 0, &sc->rge_btag, &sc->rge_bhandle,
	    NULL, &sc->rge_bsize, 0)) {
		if (pci_mapreg_map(pa, RGE_PCI_BAR1, PCI_MAPREG_TYPE_MEM |
		    PCI_MAPREG_MEM_TYPE_32BIT, 0, &sc->rge_btag,
		    &sc->rge_bhandle, NULL, &sc->rge_bsize, 0)) {
			if (pci_mapreg_map(pa, RGE_PCI_BAR0, PCI_MAPREG_TYPE_IO,
			    0, &sc->rge_btag, &sc->rge_bhandle, NULL,
			    &sc->rge_bsize, 0)) {
				printf(": can't map mem or i/o space\n");
				return;
			}
		}
	}

	q = malloc(sizeof(struct rge_queues), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (q == NULL) {
		printf(": unable to allocate queue memory\n");
		return;
	}
	q->q_sc = sc;
	q->q_index = 0;

	sc->sc_queues = q;
	sc->sc_nqueues = 1;

	/*
	 * Allocate interrupt.
	 */
	if (pci_intr_map_msi(pa, &ih) == 0)
		sc->rge_flags |= RGE_FLAG_MSI;
	else if (pci_intr_map(pa, &ih) != 0) {
		printf(": couldn't map interrupt\n");
		return;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET | IPL_MPSAFE, rge_intr,
	    sc, sc->sc_dev.dv_xname);
	if (sc->sc_ih == NULL) {
		printf(": couldn't establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return;
	}
	printf(": %s", intrstr);

	sc->sc_dmat = pa->pa_dmat;
	sc->sc_pc = pa->pa_pc;
	sc->sc_tag = pa->pa_tag;

	/* Determine hardware revision. */
	hwrev = RGE_READ_4(sc, RGE_TXCFG) & RGE_TXCFG_HWREV;
	switch (hwrev) {
	case 0x60800000:
		sc->rge_type = MAC_CFG2;
		break;
	case 0x60900000:
		sc->rge_type = MAC_CFG3;
		break;
	case 0x64000000:
		sc->rge_type = MAC_CFG4;
		break;
	case 0x64100000:
		sc->rge_type = MAC_CFG5;
		break;
	default:
		printf(": unknown version 0x%08x\n", hwrev);
		return;
	}

	rge_config_imtype(sc, RGE_IMTYPE_SIM);

	/*
	 * PCI Express check.
	 */
	if (pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_PCIEXPRESS,
	    &offset, NULL)) {
		/* Disable PCIe ASPM and ECPM. */
		reg = pci_conf_read(pa->pa_pc, pa->pa_tag,
		    offset + PCI_PCIE_LCSR);
		reg &= ~(PCI_PCIE_LCSR_ASPM_L0S | PCI_PCIE_LCSR_ASPM_L1 |
		    PCI_PCIE_LCSR_ECPM);
		pci_conf_write(pa->pa_pc, pa->pa_tag, offset + PCI_PCIE_LCSR,
		    reg);
	}

	rge_exit_oob(sc);
	rge_hw_init(sc);

	rge_get_macaddr(sc, eaddr);
	printf(", address %s\n", ether_sprintf(eaddr));

	memcpy(sc->sc_arpcom.ac_enaddr, eaddr, ETHER_ADDR_LEN);

	rge_set_phy_power(sc, 1);
	rge_phy_config(sc);

	if (rge_allocmem(sc))
		return;

	ifp = &sc->sc_arpcom.ac_if;
	ifp->if_softc = sc;
	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_xflags = IFXF_MPSAFE;
	ifp->if_ioctl = rge_ioctl;
	ifp->if_qstart = rge_start;
	ifp->if_watchdog = rge_watchdog;
	ifq_set_maxlen(&ifp->if_snd, RGE_TX_LIST_CNT - 1);
	ifp->if_hardmtu = RGE_JUMBO_MTU;

	ifp->if_capabilities = IFCAP_VLAN_MTU | IFCAP_CSUM_IPv4 |
	    IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;

#if NVLAN > 0
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
#endif

#ifndef SMALL_KERNEL
	ifp->if_capabilities |= IFCAP_WOL;
	ifp->if_wol = rge_wol;
	rge_wol(ifp, 0);
#endif
	timeout_set(&sc->sc_timeout, rge_tick, sc);
	task_set(&sc->sc_task, rge_txstart, sc);

	/* Initialize ifmedia structures. */
	ifmedia_init(&sc->sc_media, IFM_IMASK, rge_ifmedia_upd,
	    rge_ifmedia_sts);
	rge_add_media_types(sc);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);
	sc->sc_media.ifm_media = sc->sc_media.ifm_cur->ifm_media;

	if_attach(ifp);
	ether_ifattach(ifp);
}

int
rge_activate(struct device *self, int act)
{
#ifndef SMALL_KERNEL
	struct rge_softc *sc = (struct rge_softc *)self;
#endif
	int rv = 0;

	switch (act) {
	case DVACT_POWERDOWN:
		rv = config_activate_children(self, act);
#ifndef SMALL_KERNEL
		rge_wol_power(sc);
#endif
		break;
	default:
		rv = config_activate_children(self, act);
		break;
	}
	return (rv);
}

int
rge_intr(void *arg)
{
	struct rge_softc *sc = arg;
	struct rge_queues *q = sc->sc_queues;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	uint32_t status;
	int claimed = 0, rv;

	if (!(ifp->if_flags & IFF_RUNNING))
		return (0);

	/* Disable interrupts. */
	RGE_WRITE_4(sc, RGE_IMR, 0);

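	/*
	 * With legacy (possibly shared) interrupts, make sure the
	 * chip actually raised one of the interrupts we care about.
	 */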
	if (!(sc->rge_flags & RGE_FLAG_MSI)) {
		if ((RGE_READ_4(sc, RGE_ISR) & sc->rge_intrs) == 0)
			return (0);
	}

	status = RGE_READ_4(sc, RGE_ISR);
	if (status)
		RGE_WRITE_4(sc, RGE_ISR, status);

	if (status & RGE_ISR_PCS_TIMEOUT)
		claimed = 1;

	rv = 0;
	if (status & sc->rge_intrs) {
		rv |= rge_rxeof(q);
		rv |= rge_txeof(q);

		if (status & RGE_ISR_SYSTEM_ERR) {
			KERNEL_LOCK();
			rge_init(ifp);
			KERNEL_UNLOCK();
		}
		claimed = 1;
	}

	if (sc->rge_timerintr) {
		if (!rv) {
			/*
			 * Nothing needs to be processed; fall back
			 * to using TX/RX interrupts.
			 */
			rge_setup_intr(sc, RGE_IMTYPE_NONE);

			/*
			 * Collect again, mainly to avoid a possible
			 * race introduced by changing the interrupt
			 * masks.
			 */
			rge_rxeof(q);
			rge_txeof(q);
		} else
			RGE_WRITE_4(sc, RGE_TIMERCNT, 1);
	} else if (rv) {
		/*
		 * Assume that using simulated interrupt moderation
		 * (hardware timer based) could reduce the interrupt
		 * rate.
		 */
		rge_setup_intr(sc, RGE_IMTYPE_SIM);
	}

	RGE_WRITE_4(sc, RGE_IMR, sc->rge_intrs);

	return (claimed);
}

int
rge_encap(struct rge_queues *q, struct mbuf *m, int idx)
{
	struct rge_softc *sc = q->q_sc;
	struct rge_tx_desc *d = NULL;
	struct rge_txq *txq;
	bus_dmamap_t txmap;
	uint32_t cmdsts, cflags = 0;
	int cur, error, i, last, nsegs;

	/*
	 * Set RGE_TDEXTSTS_IPCSUM if any checksum offloading is requested.
	 * Otherwise, RGE_TDEXTSTS_TCPCSUM / RGE_TDEXTSTS_UDPCSUM does not
	 * take effect.
	 */
	if ((m->m_pkthdr.csum_flags &
	    (M_IPV4_CSUM_OUT | M_TCP_CSUM_OUT | M_UDP_CSUM_OUT)) != 0) {
		cflags |= RGE_TDEXTSTS_IPCSUM;
		if (m->m_pkthdr.csum_flags & M_TCP_CSUM_OUT)
			cflags |= RGE_TDEXTSTS_TCPCSUM;
		if (m->m_pkthdr.csum_flags & M_UDP_CSUM_OUT)
			cflags |= RGE_TDEXTSTS_UDPCSUM;
	}

	txq = &q->q_tx.rge_txq[idx];
	txmap = txq->txq_dmamap;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, txmap, m, BUS_DMA_NOWAIT);
	switch (error) {
	case 0:
		break;
	case EFBIG: /* mbuf chain is too fragmented */
		if (m_defrag(m, M_DONTWAIT) == 0 &&
		    bus_dmamap_load_mbuf(sc->sc_dmat, txmap, m,
		    BUS_DMA_NOWAIT) == 0)
			break;

		/* FALLTHROUGH */
	default:
		return (0);
	}

	bus_dmamap_sync(sc->sc_dmat, txmap, 0, txmap->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	nsegs = txmap->dm_nsegs;

	/* Set up hardware VLAN tagging. */
#if NVLAN > 0
	if (m->m_flags & M_VLANTAG)
		cflags |= swap16(m->m_pkthdr.ether_vtag | RGE_TDEXTSTS_VTAG);
#endif

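	/*
	 * Write one descriptor per DMA segment.  Only the first
	 * descriptor carries RGE_TDCMDSTS_SOF; its OWN bit is set
	 * last (below) so the chip never sees a half-built chain.
	 */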
	cur = idx;
	cmdsts = RGE_TDCMDSTS_SOF;

	for (i = 0; i < txmap->dm_nsegs; i++) {
		d = &q->q_tx.rge_tx_list[cur];

		d->rge_extsts = htole32(cflags);
		d->rge_addrlo = htole32(RGE_ADDR_LO(txmap->dm_segs[i].ds_addr));
		d->rge_addrhi = htole32(RGE_ADDR_HI(txmap->dm_segs[i].ds_addr));

		cmdsts |= txmap->dm_segs[i].ds_len;

		if (cur == RGE_TX_LIST_CNT - 1)
			cmdsts |= RGE_TDCMDSTS_EOR;

		d->rge_cmdsts = htole32(cmdsts);

		last = cur;
		cmdsts = RGE_TDCMDSTS_OWN;
		cur = RGE_NEXT_TX_DESC(cur);
	}

	/* Set EOF on the last descriptor. */
	d->rge_cmdsts |= htole32(RGE_TDCMDSTS_EOF);

	/* Transfer ownership of packet to the chip. */
	d = &q->q_tx.rge_tx_list[idx];

	d->rge_cmdsts |= htole32(RGE_TDCMDSTS_OWN);

	bus_dmamap_sync(sc->sc_dmat, q->q_tx.rge_tx_list_map,
	    cur * sizeof(struct rge_tx_desc), sizeof(struct rge_tx_desc),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Update info of TX queue and descriptors. */
	txq->txq_mbuf = m;
	txq->txq_descidx = last;

	return (nsegs);
}

int
rge_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct rge_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			rge_init(ifp);
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				rge_init(ifp);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				rge_stop(ifp);
		}
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
		break;
	case SIOCGIFRXR:
		error = if_rxr_ioctl((struct if_rxrinfo *)ifr->ifr_data,
		    NULL, RGE_JUMBO_FRAMELEN, &sc->sc_queues->q_rx.rge_rx_ring);
		break;
	default:
		error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data);
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			rge_iff(sc);
		error = 0;
	}

	splx(s);
	return (error);
}

void
rge_start(struct ifqueue *ifq)
{
	struct ifnet *ifp = ifq->ifq_if;
	struct rge_softc *sc = ifp->if_softc;
	struct rge_queues *q = sc->sc_queues;
	struct mbuf *m;
	int free, idx, used;
	int queued = 0;

	if (!LINK_STATE_IS_UP(ifp->if_link_state)) {
		ifq_purge(ifq);
		return;
	}

	/* Calculate free space. */
	idx = q->q_tx.rge_txq_prodidx;
	free = q->q_tx.rge_txq_considx;
	if (free <= idx)
		free += RGE_TX_LIST_CNT;
	free -= idx;

	for (;;) {
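		/*
		 * Stop while a maximally fragmented packet
		 * (RGE_TX_NSEGS segments) might not fit.
		 */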
		if (RGE_TX_NSEGS >= free + 2) {
			ifq_set_oactive(&ifp->if_snd);
			break;
		}

		m = ifq_dequeue(ifq);
		if (m == NULL)
			break;

		used = rge_encap(q, m, idx);
		if (used == 0) {
			m_freem(m);
			continue;
		}

		KASSERT(used <= free);
		free -= used;

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif

		idx += used;
		if (idx >= RGE_TX_LIST_CNT)
			idx -= RGE_TX_LIST_CNT;

		queued++;
	}

	if (queued == 0)
		return;

	/* Set a timeout in case the chip goes out to lunch. */
	ifp->if_timer = 5;

	q->q_tx.rge_txq_prodidx = idx;
	ifq_serialize(ifq, &sc->sc_task);
}

void
rge_watchdog(struct ifnet *ifp)
{
	struct rge_softc *sc = ifp->if_softc;

	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);
	ifp->if_oerrors++;

	rge_init(ifp);
}

int
rge_init(struct ifnet *ifp)
{
	struct rge_softc *sc = ifp->if_softc;
	struct rge_queues *q = sc->sc_queues;
	uint32_t val;
	int i;

	rge_stop(ifp);

	/* Set MAC address. */
	rge_set_macaddr(sc, sc->sc_arpcom.ac_enaddr);

	/* Set maximum frame size. */
	RGE_WRITE_2(sc, RGE_RXMAXSIZE, RGE_JUMBO_FRAMELEN);

	/* Initialize RX and TX descriptor lists. */
	rge_rx_list_init(q);
	rge_tx_list_init(q);

	/* Load the addresses of the RX and TX lists into the chip. */
	RGE_WRITE_4(sc, RGE_RXDESC_ADDR_LO,
	    RGE_ADDR_LO(q->q_rx.rge_rx_list_map->dm_segs[0].ds_addr));
	RGE_WRITE_4(sc, RGE_RXDESC_ADDR_HI,
	    RGE_ADDR_HI(q->q_rx.rge_rx_list_map->dm_segs[0].ds_addr));
	RGE_WRITE_4(sc, RGE_TXDESC_ADDR_LO,
	    RGE_ADDR_LO(q->q_tx.rge_tx_list_map->dm_segs[0].ds_addr));
	RGE_WRITE_4(sc, RGE_TXDESC_ADDR_HI,
	    RGE_ADDR_HI(q->q_tx.rge_tx_list_map->dm_segs[0].ds_addr));

	RGE_SETBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);

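	/*
	 * Note: many of the magic register values below are
	 * undocumented; they appear to mirror Realtek's reference
	 * driver.
	 */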
	RGE_CLRBIT_1(sc, 0xf1, 0x80);
	RGE_CLRBIT_1(sc, RGE_CFG2, RGE_CFG2_CLKREQ_EN);
	RGE_CLRBIT_1(sc, RGE_CFG5, RGE_CFG5_PME_STS);
	RGE_CLRBIT_1(sc, RGE_CFG3, RGE_CFG3_RDY_TO_L23);

	/* Clear interrupt moderation timer. */
	for (i = 0; i < 64; i++)
		RGE_WRITE_4(sc, RGE_INTMITI(i), 0);

	/* Set the initial RX and TX configurations. */
	RGE_WRITE_4(sc, RGE_RXCFG, RGE_RXCFG_CONFIG);
	RGE_WRITE_4(sc, RGE_TXCFG, RGE_TXCFG_CONFIG);

	val = rge_read_csi(sc, 0x70c) & ~0xff000000;
	rge_write_csi(sc, 0x70c, val | 0x27000000);

	/* Enable hardware optimization function. */
	val = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x78) & ~0x00007000;
	pci_conf_write(sc->sc_pc, sc->sc_tag, 0x78, val | 0x00005000);

	RGE_WRITE_2(sc, 0x0382, 0x221b);

	RGE_WRITE_1(sc, RGE_RSS_CTRL, 0);

	val = RGE_READ_2(sc, RGE_RXQUEUE_CTRL) & ~0x001c;
	RGE_WRITE_2(sc, RGE_RXQUEUE_CTRL, val | (fls(sc->sc_nqueues) - 1) << 2);

	RGE_CLRBIT_1(sc, RGE_CFG1, RGE_CFG1_SPEED_DOWN);

	rge_write_mac_ocp(sc, 0xc140, 0xffff);
	rge_write_mac_ocp(sc, 0xc142, 0xffff);

	RGE_MAC_SETBIT(sc, 0xeb58, 0x0001);

	val = rge_read_mac_ocp(sc, 0xe614) & ~0x0700;
	if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3)
		rge_write_mac_ocp(sc, 0xe614, val | 0x0400);
	else
		rge_write_mac_ocp(sc, 0xe614, val | 0x0200);

	val = rge_read_mac_ocp(sc, 0xe63e) & ~0x0c00;
	rge_write_mac_ocp(sc, 0xe63e, val |
	    ((fls(sc->sc_nqueues) - 1) & 0x03) << 10);

	RGE_MAC_CLRBIT(sc, 0xe63e, 0x0030);
	if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3)
		RGE_MAC_SETBIT(sc, 0xe63e, 0x0020);

	RGE_MAC_CLRBIT(sc, 0xc0b4, 0x0001);
	RGE_MAC_SETBIT(sc, 0xc0b4, 0x0001);
	RGE_MAC_SETBIT(sc, 0xc0b4, 0x000c);

	val = rge_read_mac_ocp(sc, 0xeb6a) & ~0x00ff;
	rge_write_mac_ocp(sc, 0xeb6a, val | 0x0033);

	val = rge_read_mac_ocp(sc, 0xeb50) & ~0x03e0;
	rge_write_mac_ocp(sc, 0xeb50, val | 0x0040);

	val = rge_read_mac_ocp(sc, 0xe056) & ~0x00f0;
	rge_write_mac_ocp(sc, 0xe056, val | 0x0030);

	RGE_WRITE_1(sc, RGE_TDFNR, 0x10);

	RGE_SETBIT_1(sc, RGE_DLLPR, RGE_DLLPR_TX_10M_PS_EN);

	RGE_MAC_CLRBIT(sc, 0xe040, 0x1000);

	val = rge_read_mac_ocp(sc, 0xea1c) & ~0x0003;
	rge_write_mac_ocp(sc, 0xea1c, val | 0x0001);

	val = rge_read_mac_ocp(sc, 0xe0c0) & ~0x4f0f;
	rge_write_mac_ocp(sc, 0xe0c0, val | 0x4403);

	RGE_MAC_SETBIT(sc, 0xe052, 0x0068);
	RGE_MAC_CLRBIT(sc, 0xe052, 0x0080);

	val = rge_read_mac_ocp(sc, 0xd430) & ~0x0fff;
	rge_write_mac_ocp(sc, 0xd430, val | 0x047f);

	RGE_SETBIT_1(sc, RGE_DLLPR, RGE_DLLPR_PFM_EN | RGE_DLLPR_TX_10M_PS_EN);

	if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3)
		RGE_SETBIT_1(sc, RGE_MCUCMD, 0x01);

	/* Disable EEE plus. */
	RGE_MAC_CLRBIT(sc, 0xe080, 0x0002);

	RGE_MAC_CLRBIT(sc, 0xea1c, 0x0004);

	RGE_MAC_SETBIT(sc, 0xeb54, 0x0001);
	DELAY(1);
	RGE_MAC_CLRBIT(sc, 0xeb54, 0x0001);

	RGE_CLRBIT_4(sc, 0x1880, 0x0030);

	rge_write_mac_ocp(sc, 0xe098, 0xc302);

	if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)
		RGE_SETBIT_4(sc, RGE_RXCFG, RGE_RXCFG_VLANSTRIP);

	RGE_SETBIT_2(sc, RGE_CPLUSCMD, RGE_CPLUSCMD_RXCSUM);

	for (i = 0; i < 10; i++) {
		if (!(rge_read_mac_ocp(sc, 0xe00e) & 0x2000))
			break;
		DELAY(1000);
	}

	/* Disable RXDV gate. */
	RGE_CLRBIT_1(sc, RGE_PPSW, 0x08);
	DELAY(2000);

	rge_ifmedia_upd(ifp);

	/* Enable transmit and receive. */
	RGE_WRITE_1(sc, RGE_CMD, RGE_CMD_TXENB | RGE_CMD_RXENB);

	/* Program promiscuous mode and multicast filters. */
	rge_iff(sc);

	RGE_CLRBIT_1(sc, RGE_CFG2, RGE_CFG2_CLKREQ_EN);
	RGE_CLRBIT_1(sc, RGE_CFG5, RGE_CFG5_PME_STS);

	RGE_CLRBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);

	/* Enable interrupts. */
	rge_setup_intr(sc, RGE_IMTYPE_SIM);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	timeout_add_sec(&sc->sc_timeout, 1);

	return (0);
}

/*
 * Stop the adapter and free any mbufs allocated to the RX and TX lists.
 */
void
rge_stop(struct ifnet *ifp)
{
	struct rge_softc *sc = ifp->if_softc;
	struct rge_queues *q = sc->sc_queues;
	int i;

	timeout_del(&sc->sc_timeout);

	ifp->if_timer = 0;
	ifp->if_flags &= ~IFF_RUNNING;
	sc->rge_timerintr = 0;

	RGE_CLRBIT_4(sc, RGE_RXCFG, RGE_RXCFG_ALLPHYS | RGE_RXCFG_INDIV |
	    RGE_RXCFG_MULTI | RGE_RXCFG_BROAD | RGE_RXCFG_RUNT |
	    RGE_RXCFG_ERRPKT);

	RGE_WRITE_4(sc, RGE_IMR, 0);
	RGE_WRITE_4(sc, RGE_ISR, 0);

	/* Clear timer interrupts. */
	RGE_WRITE_4(sc, RGE_TIMERINT0, 0);
	RGE_WRITE_4(sc, RGE_TIMERINT1, 0);
	RGE_WRITE_4(sc, RGE_TIMERINT2, 0);
	RGE_WRITE_4(sc, RGE_TIMERINT3, 0);

	rge_reset(sc);

	intr_barrier(sc->sc_ih);
	ifq_barrier(&ifp->if_snd);
	ifq_clr_oactive(&ifp->if_snd);

	if (q->q_rx.rge_head != NULL) {
		m_freem(q->q_rx.rge_head);
		q->q_rx.rge_head = q->q_rx.rge_tail = NULL;
	}

	/* Free the TX list buffers. */
	for (i = 0; i < RGE_TX_LIST_CNT; i++) {
		if (q->q_tx.rge_txq[i].txq_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat,
			    q->q_tx.rge_txq[i].txq_dmamap);
			m_freem(q->q_tx.rge_txq[i].txq_mbuf);
			q->q_tx.rge_txq[i].txq_mbuf = NULL;
		}
	}

	/* Free the RX list buffers. */
	for (i = 0; i < RGE_RX_LIST_CNT; i++) {
		if (q->q_rx.rge_rxq[i].rxq_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat,
			    q->q_rx.rge_rxq[i].rxq_dmamap);
			m_freem(q->q_rx.rge_rxq[i].rxq_mbuf);
			q->q_rx.rge_rxq[i].rxq_mbuf = NULL;
		}
	}
}

/*
 * Set media options.
 */
int
rge_ifmedia_upd(struct ifnet *ifp)
{
	struct rge_softc *sc = ifp->if_softc;
	struct ifmedia *ifm = &sc->sc_media;
	int anar, gig, val;

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	/* Disable Gigabit Lite. */
	RGE_PHY_CLRBIT(sc, 0xa428, 0x0200);
	RGE_PHY_CLRBIT(sc, 0xa5ea, 0x0001);

	val = rge_read_phy_ocp(sc, 0xa5d4);
	val &= ~RGE_ADV_2500TFDX;

	anar = gig = 0;
	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_AUTO:
		anar = ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10;
		gig = GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX;
		val |= RGE_ADV_2500TFDX;
		break;
	case IFM_2500_T:
		anar = ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10;
		gig = GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX;
		val |= RGE_ADV_2500TFDX;
		ifp->if_baudrate = IF_Mbps(2500);
		break;
	case IFM_1000_T:
		anar = ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10;
		gig = GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX;
		ifp->if_baudrate = IF_Gbps(1);
		break;
	case IFM_100_TX:
		gig = rge_read_phy(sc, 0, MII_100T2CR) &
		    ~(GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX);
		anar = ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) ?
		    ANAR_TX | ANAR_TX_FD | ANAR_10_FD | ANAR_10 :
		    ANAR_TX | ANAR_10_FD | ANAR_10;
		ifp->if_baudrate = IF_Mbps(100);
		break;
	case IFM_10_T:
		gig = rge_read_phy(sc, 0, MII_100T2CR) &
		    ~(GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX);
		anar = ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) ?
		    ANAR_10_FD | ANAR_10 : ANAR_10;
		ifp->if_baudrate = IF_Mbps(10);
		break;
	default:
		printf("%s: unsupported media type\n", sc->sc_dev.dv_xname);
		return (EINVAL);
	}

	rge_write_phy(sc, 0, MII_ANAR, anar | ANAR_PAUSE_ASYM | ANAR_FC);
	rge_write_phy(sc, 0, MII_100T2CR, gig);
	rge_write_phy_ocp(sc, 0xa5d4, val);
	rge_write_phy(sc, 0, MII_BMCR, BMCR_RESET | BMCR_AUTOEN |
	    BMCR_STARTNEG);

	return (0);
}

/*
 * Report current media status.
 */
void
rge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct rge_softc *sc = ifp->if_softc;
	uint16_t status = 0;

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (rge_get_link_status(sc)) {
		ifmr->ifm_status |= IFM_ACTIVE;

		status = RGE_READ_2(sc, RGE_PHYSTAT);
		if ((status & RGE_PHYSTAT_FDX) ||
		    (status & RGE_PHYSTAT_2500MBPS))
			ifmr->ifm_active |= IFM_FDX;
		else
			ifmr->ifm_active |= IFM_HDX;

		if (status & RGE_PHYSTAT_10MBPS)
			ifmr->ifm_active |= IFM_10_T;
		else if (status & RGE_PHYSTAT_100MBPS)
			ifmr->ifm_active |= IFM_100_TX;
		else if (status & RGE_PHYSTAT_1000MBPS)
			ifmr->ifm_active |= IFM_1000_T;
		else if (status & RGE_PHYSTAT_2500MBPS)
			ifmr->ifm_active |= IFM_2500_T;
	}
}

/*
 * Allocate memory for RX/TX rings.
 */
int
rge_allocmem(struct rge_softc *sc)
{
	struct rge_queues *q = sc->sc_queues;
	int error, i;

	/* Allocate DMA'able memory for the TX ring. */
	error = bus_dmamap_create(sc->sc_dmat, RGE_TX_LIST_SZ, 1,
	    RGE_TX_LIST_SZ, 0, BUS_DMA_NOWAIT, &q->q_tx.rge_tx_list_map);
	if (error) {
		printf("%s: can't create TX list map\n", sc->sc_dev.dv_xname);
		return (error);
	}
	error = bus_dmamem_alloc(sc->sc_dmat, RGE_TX_LIST_SZ, RGE_ALIGN, 0,
	    &q->q_tx.rge_tx_listseg, 1, &q->q_tx.rge_tx_listnseg,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO);
	if (error) {
		printf("%s: can't alloc TX list\n", sc->sc_dev.dv_xname);
		return (error);
	}

	/* Load the map for the TX ring. */
	error = bus_dmamem_map(sc->sc_dmat, &q->q_tx.rge_tx_listseg,
	    q->q_tx.rge_tx_listnseg, RGE_TX_LIST_SZ,
	    (caddr_t *)&q->q_tx.rge_tx_list, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
	if (error) {
		printf("%s: can't map TX dma buffers\n", sc->sc_dev.dv_xname);
		bus_dmamem_free(sc->sc_dmat, &q->q_tx.rge_tx_listseg,
		    q->q_tx.rge_tx_listnseg);
		return (error);
	}
	error = bus_dmamap_load(sc->sc_dmat, q->q_tx.rge_tx_list_map,
	    q->q_tx.rge_tx_list, RGE_TX_LIST_SZ, NULL, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: can't load TX dma map\n", sc->sc_dev.dv_xname);
		bus_dmamap_destroy(sc->sc_dmat, q->q_tx.rge_tx_list_map);
		bus_dmamem_unmap(sc->sc_dmat,
		    (caddr_t)q->q_tx.rge_tx_list, RGE_TX_LIST_SZ);
		bus_dmamem_free(sc->sc_dmat, &q->q_tx.rge_tx_listseg,
		    q->q_tx.rge_tx_listnseg);
		return (error);
	}

	/* Create DMA maps for TX buffers. */
	for (i = 0; i < RGE_TX_LIST_CNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, RGE_JUMBO_FRAMELEN,
		    RGE_TX_NSEGS, RGE_JUMBO_FRAMELEN, 0, BUS_DMA_NOWAIT,
		    &q->q_tx.rge_txq[i].txq_dmamap);
		if (error) {
			printf("%s: can't create DMA map for TX\n",
			    sc->sc_dev.dv_xname);
			return (error);
		}
	}

	/* Allocate DMA'able memory for the RX ring. */
	error = bus_dmamap_create(sc->sc_dmat, RGE_RX_LIST_SZ, 1,
	    RGE_RX_LIST_SZ, 0, BUS_DMA_NOWAIT, &q->q_rx.rge_rx_list_map);
	if (error) {
		printf("%s: can't create RX list map\n", sc->sc_dev.dv_xname);
		return (error);
	}
	error = bus_dmamem_alloc(sc->sc_dmat, RGE_RX_LIST_SZ, RGE_ALIGN, 0,
	    &q->q_rx.rge_rx_listseg, 1, &q->q_rx.rge_rx_listnseg,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO);
	if (error) {
		printf("%s: can't alloc RX list\n", sc->sc_dev.dv_xname);
		return (error);
	}

	/* Load the map for the RX ring. */
	error = bus_dmamem_map(sc->sc_dmat, &q->q_rx.rge_rx_listseg,
	    q->q_rx.rge_rx_listnseg, RGE_RX_LIST_SZ,
	    (caddr_t *)&q->q_rx.rge_rx_list, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
	if (error) {
		printf("%s: can't map RX dma buffers\n", sc->sc_dev.dv_xname);
		bus_dmamem_free(sc->sc_dmat, &q->q_rx.rge_rx_listseg,
		    q->q_rx.rge_rx_listnseg);
		return (error);
	}
	error = bus_dmamap_load(sc->sc_dmat, q->q_rx.rge_rx_list_map,
	    q->q_rx.rge_rx_list, RGE_RX_LIST_SZ, NULL, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: can't load RX dma map\n", sc->sc_dev.dv_xname);
		bus_dmamap_destroy(sc->sc_dmat, q->q_rx.rge_rx_list_map);
		bus_dmamem_unmap(sc->sc_dmat,
		    (caddr_t)q->q_rx.rge_rx_list, RGE_RX_LIST_SZ);
		bus_dmamem_free(sc->sc_dmat, &q->q_rx.rge_rx_listseg,
		    q->q_rx.rge_rx_listnseg);
		return (error);
	}

	/* Create DMA maps for RX buffers. */
	for (i = 0; i < RGE_RX_LIST_CNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, RGE_JUMBO_FRAMELEN, 1,
		    RGE_JUMBO_FRAMELEN, 0, BUS_DMA_NOWAIT,
		    &q->q_rx.rge_rxq[i].rxq_dmamap);
		if (error) {
			printf("%s: can't create DMA map for RX\n",
			    sc->sc_dev.dv_xname);
			return (error);
		}
	}

	return (error);
}

/*
 * Initialize the RX descriptor and attach an mbuf cluster.
 */
int
rge_newbuf(struct rge_queues *q)
{
	struct rge_softc *sc = q->q_sc;
	struct mbuf *m;
	struct rge_rx_desc *r;
	struct rge_rxq *rxq;
	bus_dmamap_t rxmap;
	int idx;

	m = MCLGETL(NULL, M_DONTWAIT, RGE_JUMBO_FRAMELEN);
	if (m == NULL)
		return (ENOBUFS);

	m->m_len = m->m_pkthdr.len = RGE_JUMBO_FRAMELEN;

	idx = q->q_rx.rge_rxq_prodidx;
	rxq = &q->q_rx.rge_rxq[idx];
	rxmap = rxq->rxq_dmamap;

	if (bus_dmamap_load_mbuf(sc->sc_dmat, rxmap, m, BUS_DMA_NOWAIT)) {
		m_freem(m);
		return (ENOBUFS);
	}

	bus_dmamap_sync(sc->sc_dmat, rxmap, 0, rxmap->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	/* Map the segments into RX descriptors. */
	r = &q->q_rx.rge_rx_list[idx];

	if (RGE_OWN(r)) {
		printf("%s: tried to map busy RX descriptor\n",
		    sc->sc_dev.dv_xname);
		m_freem(m);
		return (ENOBUFS);
	}

	rxq->rxq_mbuf = m;

	r->rge_extsts = 0;
	r->rge_addrlo = htole32(RGE_ADDR_LO(rxmap->dm_segs[0].ds_addr));
	r->rge_addrhi = htole32(RGE_ADDR_HI(rxmap->dm_segs[0].ds_addr));

	r->rge_cmdsts = htole32(rxmap->dm_segs[0].ds_len);
	if (idx == RGE_RX_LIST_CNT - 1)
		r->rge_cmdsts |= htole32(RGE_RDCMDSTS_EOR);

	r->rge_cmdsts |= htole32(RGE_RDCMDSTS_OWN);

	bus_dmamap_sync(sc->sc_dmat, q->q_rx.rge_rx_list_map,
	    idx * sizeof(struct rge_rx_desc), sizeof(struct rge_rx_desc),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	q->q_rx.rge_rxq_prodidx = RGE_NEXT_RX_DESC(idx);

	return (0);
}

void
rge_discard_rxbuf(struct rge_queues *q, int idx)
{
	struct rge_softc *sc = q->q_sc;
	struct rge_rx_desc *r;

	r = &q->q_rx.rge_rx_list[idx];

	r->rge_cmdsts = htole32(RGE_JUMBO_FRAMELEN);
	r->rge_extsts = 0;
	if (idx == RGE_RX_LIST_CNT - 1)
		r->rge_cmdsts |= htole32(RGE_RDCMDSTS_EOR);
	r->rge_cmdsts |= htole32(RGE_RDCMDSTS_OWN);

	bus_dmamap_sync(sc->sc_dmat, q->q_rx.rge_rx_list_map,
	    idx * sizeof(struct rge_rx_desc), sizeof(struct rge_rx_desc),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

void
rge_rx_list_init(struct rge_queues *q)
{
	memset(q->q_rx.rge_rx_list, 0, RGE_RX_LIST_SZ);

	q->q_rx.rge_rxq_prodidx = q->q_rx.rge_rxq_considx = 0;
	q->q_rx.rge_head = q->q_rx.rge_tail = NULL;

	if_rxr_init(&q->q_rx.rge_rx_ring, 2, RGE_RX_LIST_CNT - 1);
	rge_fill_rx_ring(q);
}

void
rge_fill_rx_ring(struct rge_queues *q)
{
	struct if_rxring *rxr = &q->q_rx.rge_rx_ring;
	int slots;

	for (slots = if_rxr_get(rxr, RGE_RX_LIST_CNT); slots > 0; slots--) {
		if (rge_newbuf(q) == ENOBUFS)
			break;
	}
	if_rxr_put(rxr, slots);
}

void
rge_tx_list_init(struct rge_queues *q)
{
	struct rge_softc *sc = q->q_sc;
	int i;

	memset(q->q_tx.rge_tx_list, 0, RGE_TX_LIST_SZ);

	for (i = 0; i < RGE_TX_LIST_CNT; i++)
		q->q_tx.rge_txq[i].txq_mbuf = NULL;

	bus_dmamap_sync(sc->sc_dmat, q->q_tx.rge_tx_list_map, 0,
	    q->q_tx.rge_tx_list_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	q->q_tx.rge_txq_prodidx = q->q_tx.rge_txq_considx = 0;
}

int
rge_rxeof(struct rge_queues *q)
{
	struct rge_softc *sc = q->q_sc;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct mbuf *m;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct if_rxring *rxr = &q->q_rx.rge_rx_ring;
	struct rge_rx_desc *cur_rx;
	struct rge_rxq *rxq;
	uint32_t rxstat, extsts;
	int i, total_len, rx = 0;

	for (i = q->q_rx.rge_rxq_considx; if_rxr_inuse(rxr) > 0;
	    i = RGE_NEXT_RX_DESC(i)) {
		/* Invalidate the descriptor memory. */
		bus_dmamap_sync(sc->sc_dmat, q->q_rx.rge_rx_list_map,
		    i * sizeof(struct rge_rx_desc), sizeof(struct rge_rx_desc),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		cur_rx = &q->q_rx.rge_rx_list[i];

		if (RGE_OWN(cur_rx))
			break;

		rxstat = letoh32(cur_rx->rge_cmdsts);
		extsts = letoh32(cur_rx->rge_extsts);

		total_len = RGE_RXBYTES(cur_rx);
		rxq = &q->q_rx.rge_rxq[i];
		m = rxq->rxq_mbuf;
		rxq->rxq_mbuf = NULL;
		if_rxr_put(rxr, 1);
		rx = 1;

		/* Invalidate the RX mbuf and unload its map. */
		bus_dmamap_sync(sc->sc_dmat, rxq->rxq_dmamap, 0,
		    rxq->rxq_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, rxq->rxq_dmamap);

		if ((rxstat & (RGE_RDCMDSTS_SOF | RGE_RDCMDSTS_EOF)) !=
		    (RGE_RDCMDSTS_SOF | RGE_RDCMDSTS_EOF)) {
			ifp->if_ierrors++;
			m_freem(m);
			rge_discard_rxbuf(q, i);
			continue;
		}

		if (rxstat & RGE_RDCMDSTS_RXERRSUM) {
			ifp->if_ierrors++;
			/*
			 * If this is part of a multi-fragment packet,
			 * discard all the pieces.
			 */
			if (q->q_rx.rge_head != NULL) {
				m_freem(q->q_rx.rge_head);
				q->q_rx.rge_head = q->q_rx.rge_tail = NULL;
			}
			m_freem(m);
			rge_discard_rxbuf(q, i);
			continue;
		}

		if (q->q_rx.rge_head != NULL) {
			m->m_len = total_len;
			/*
			 * Special case: if there are 4 bytes or fewer
			 * in this buffer, the mbuf can be discarded:
			 * the last 4 bytes are the CRC, which we don't
			 * care about anyway.
			 */
			if (m->m_len <= ETHER_CRC_LEN) {
				q->q_rx.rge_tail->m_len -=
				    (ETHER_CRC_LEN - m->m_len);
				m_freem(m);
			} else {
				m->m_len -= ETHER_CRC_LEN;
				m->m_flags &= ~M_PKTHDR;
				q->q_rx.rge_tail->m_next = m;
			}
			m = q->q_rx.rge_head;
			q->q_rx.rge_head = q->q_rx.rge_tail = NULL;
			m->m_pkthdr.len = total_len - ETHER_CRC_LEN;
		} else
			m->m_pkthdr.len = m->m_len =
			    (total_len - ETHER_CRC_LEN);

		/* Check IP header checksum. */
		if (!(rxstat & RGE_RDCMDSTS_IPCSUMERR) &&
		    (extsts & RGE_RDEXTSTS_IPV4))
			m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;

		/* Check TCP/UDP checksum. */
		if ((extsts & (RGE_RDEXTSTS_IPV4 | RGE_RDEXTSTS_IPV6)) &&
		    (((rxstat & RGE_RDCMDSTS_TCPPKT) &&
		    !(rxstat & RGE_RDCMDSTS_TCPCSUMERR)) ||
		    ((rxstat & RGE_RDCMDSTS_UDPPKT) &&
		    !(rxstat & RGE_RDCMDSTS_UDPCSUMERR))))
			m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK |
			    M_UDP_CSUM_IN_OK;

#if NVLAN > 0
		if (extsts & RGE_RDEXTSTS_VTAG) {
			m->m_pkthdr.ether_vtag =
			    ntohs(extsts & RGE_RDEXTSTS_VLAN_MASK);
			m->m_flags |= M_VLANTAG;
		}
#endif

		ml_enqueue(&ml, m);
	}

	if (ifiq_input(&ifp->if_rcv, &ml))
		if_rxr_livelocked(rxr);

	q->q_rx.rge_rxq_considx = i;
	rge_fill_rx_ring(q);

	return (rx);
}

int
rge_txeof(struct rge_queues *q)
{
	struct rge_softc *sc = q->q_sc;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct rge_txq *txq;
	uint32_t txstat;
	int cons, idx, prod;
	int free = 0;

	prod = q->q_tx.rge_txq_prodidx;
	cons = q->q_tx.rge_txq_considx;

	while (prod != cons) {
		txq = &q->q_tx.rge_txq[cons];
		idx = txq->txq_descidx;

		bus_dmamap_sync(sc->sc_dmat, q->q_tx.rge_tx_list_map,
		    idx * sizeof(struct rge_tx_desc),
		    sizeof(struct rge_tx_desc),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		txstat = letoh32(q->q_tx.rge_tx_list[idx].rge_cmdsts);

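		/*
		 * The chip still owns this descriptor; free == 2
		 * makes the code below kick the transmit queue again.
		 */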
		if (txstat & RGE_TDCMDSTS_OWN) {
			free = 2;
			break;
		}

		bus_dmamap_sync(sc->sc_dmat, txq->txq_dmamap, 0,
		    txq->txq_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, txq->txq_dmamap);
		m_freem(txq->txq_mbuf);
		txq->txq_mbuf = NULL;

		if (txstat & (RGE_TDCMDSTS_EXCESSCOLL | RGE_TDCMDSTS_COLL))
			ifp->if_collisions++;
		if (txstat & RGE_TDCMDSTS_TXERR)
			ifp->if_oerrors++;

		bus_dmamap_sync(sc->sc_dmat, q->q_tx.rge_tx_list_map,
		    idx * sizeof(struct rge_tx_desc),
		    sizeof(struct rge_tx_desc),
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		cons = RGE_NEXT_TX_DESC(idx);
		free = 1;
	}

	if (free == 0)
		return (0);

	q->q_tx.rge_txq_considx = cons;

	if (ifq_is_oactive(&ifp->if_snd))
		ifq_restart(&ifp->if_snd);
	else if (free == 2)
		ifq_serialize(&ifp->if_snd, &sc->sc_task);
	else
		ifp->if_timer = 0;

	return (1);
}

void
rge_reset(struct rge_softc *sc)
{
	int i;

	/* Enable RXDV gate. */
	RGE_SETBIT_1(sc, RGE_PPSW, 0x08);
	DELAY(2000);

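	/* Wait for the RX and TX FIFOs to drain. */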
	for (i = 0; i < 3000; i++) {
		DELAY(50);
		if ((RGE_READ_1(sc, RGE_MCUCMD) & (RGE_MCUCMD_RXFIFO_EMPTY |
		    RGE_MCUCMD_TXFIFO_EMPTY)) == (RGE_MCUCMD_RXFIFO_EMPTY |
		    RGE_MCUCMD_TXFIFO_EMPTY))
			break;
	}
	if (sc->rge_type == MAC_CFG4 || sc->rge_type == MAC_CFG5) {
		for (i = 0; i < 3000; i++) {
			DELAY(50);
			if ((RGE_READ_2(sc, RGE_IM) & 0x0103) == 0x0103)
				break;
		}
	}

	DELAY(2000);

	/* Soft reset. */
	RGE_WRITE_1(sc, RGE_CMD, RGE_CMD_RESET);

	for (i = 0; i < RGE_TIMEOUT; i++) {
		DELAY(100);
		if (!(RGE_READ_1(sc, RGE_CMD) & RGE_CMD_RESET))
			break;
	}
	if (i == RGE_TIMEOUT)
		printf("%s: reset never completed!\n", sc->sc_dev.dv_xname);
}

void
rge_iff(struct rge_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct arpcom *ac = &sc->sc_arpcom;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint32_t hashes[2];
	uint32_t rxfilt;
	int h = 0;

	rxfilt = RGE_READ_4(sc, RGE_RXCFG);
	rxfilt &= ~(RGE_RXCFG_ALLPHYS | RGE_RXCFG_MULTI);
	ifp->if_flags &= ~IFF_ALLMULTI;

	/*
	 * Always accept frames destined for our station address.
	 * Always accept broadcast frames.
	 */
	rxfilt |= RGE_RXCFG_INDIV | RGE_RXCFG_BROAD;

	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
		ifp->if_flags |= IFF_ALLMULTI;
		rxfilt |= RGE_RXCFG_MULTI;
		if (ifp->if_flags & IFF_PROMISC)
			rxfilt |= RGE_RXCFG_ALLPHYS;
		hashes[0] = hashes[1] = 0xffffffff;
	} else {
		rxfilt |= RGE_RXCFG_MULTI;
		/* Program new filter. */
		memset(hashes, 0, sizeof(hashes));

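		/*
		 * The upper 6 bits of the big-endian CRC32 of each
		 * multicast address index a 64-bit hash table.
		 */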
		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			h = ether_crc32_be(enm->enm_addrlo,
			    ETHER_ADDR_LEN) >> 26;

			if (h < 32)
				hashes[0] |= (1 << h);
			else
				hashes[1] |= (1 << (h - 32));

			ETHER_NEXT_MULTI(step, enm);
		}
	}

	RGE_WRITE_4(sc, RGE_RXCFG, rxfilt);
	RGE_WRITE_4(sc, RGE_MAR0, swap32(hashes[1]));
	RGE_WRITE_4(sc, RGE_MAR4, swap32(hashes[0]));
}

void
rge_set_phy_power(struct rge_softc *sc, int on)
{
	int i;

	if (on) {
		RGE_SETBIT_1(sc, RGE_PMCH, 0xc0);

		rge_write_phy(sc, 0, MII_BMCR, BMCR_AUTOEN);

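		/* Wait for the PHY power state to settle. */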
		for (i = 0; i < RGE_TIMEOUT; i++) {
			if ((rge_read_phy_ocp(sc, 0xa420) & 0x0007) == 3)
				break;
			DELAY(1000);
		}
	} else {
		rge_write_phy(sc, 0, MII_BMCR, BMCR_AUTOEN | BMCR_PDOWN);
		RGE_CLRBIT_1(sc, RGE_PMCH, 0x80);
		RGE_CLRBIT_1(sc, RGE_PPSW, 0x40);
	}
}

void
rge_phy_config(struct rge_softc *sc)
{
	/* Read microcode version. */
	rge_write_phy_ocp(sc, 0xa436, 0x801e);
	sc->rge_mcodever = rge_read_phy_ocp(sc, 0xa438);

	switch (sc->rge_type) {
	case MAC_CFG2:
		rge_phy_config_mac_cfg2(sc);
		break;
	case MAC_CFG3:
		rge_phy_config_mac_cfg3(sc);
		break;
	case MAC_CFG4:
		rge_phy_config_mac_cfg4(sc);
		break;
	case MAC_CFG5:
		rge_phy_config_mac_cfg5(sc);
		break;
	default:
		break;	/* Can't happen. */
	}

	rge_write_phy(sc, 0x0a5b, 0x12,
	    rge_read_phy(sc, 0x0a5b, 0x12) & ~0x8000);

	/* Disable EEE. */
	RGE_MAC_CLRBIT(sc, 0xe040, 0x0003);
	if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3) {
		RGE_MAC_CLRBIT(sc, 0xeb62, 0x0006);
		RGE_PHY_CLRBIT(sc, 0xa432, 0x0010);
	}
	RGE_PHY_CLRBIT(sc, 0xa5d0, 0x0006);
	RGE_PHY_CLRBIT(sc, 0xa6d4, 0x0001);
	RGE_PHY_CLRBIT(sc, 0xa6d8, 0x0010);
	RGE_PHY_CLRBIT(sc, 0xa428, 0x0080);
	RGE_PHY_CLRBIT(sc, 0xa4a2, 0x0200);

	rge_patch_phy_mcu(sc, 1);
	RGE_MAC_CLRBIT(sc, 0xe052, 0x0001);
	RGE_PHY_CLRBIT(sc, 0xa442, 0x3000);
	RGE_PHY_CLRBIT(sc, 0xa430, 0x8000);
	rge_patch_phy_mcu(sc, 0);
}

void
rge_phy_config_mac_cfg2(struct rge_softc *sc)
{
	uint16_t val;
	int i;

	for (i = 0; i < nitems(rtl8125_mac_cfg2_ephy); i++)
		rge_write_ephy(sc, rtl8125_mac_cfg2_ephy[i].reg,
		    rtl8125_mac_cfg2_ephy[i].val);

	rge_phy_config_mcu(sc, RGE_MAC_CFG2_MCODE_VER);

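	/*
	 * The PHY tuning writes below use undocumented OCP registers;
	 * the values appear to mirror Realtek's reference driver.
	 */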
	val = rge_read_phy_ocp(sc, 0xad40) & ~0x03ff;
	rge_write_phy_ocp(sc, 0xad40, val | 0x0084);
	RGE_PHY_SETBIT(sc, 0xad4e, 0x0010);
	val = rge_read_phy_ocp(sc, 0xad16) & ~0x03ff;
	rge_write_phy_ocp(sc, 0xad16, val | 0x0006);
	val = rge_read_phy_ocp(sc, 0xad32) & ~0x03ff;
	rge_write_phy_ocp(sc, 0xad32, val | 0x0006);
	RGE_PHY_CLRBIT(sc, 0xac08, 0x1100);
	val = rge_read_phy_ocp(sc, 0xac8a) & ~0xf000;
	rge_write_phy_ocp(sc, 0xac8a, val | 0x7000);
	RGE_PHY_SETBIT(sc, 0xad18, 0x0400);
	RGE_PHY_SETBIT(sc, 0xad1a, 0x03ff);
	RGE_PHY_SETBIT(sc, 0xad1c, 0x03ff);

	rge_write_phy_ocp(sc, 0xa436, 0x80ea);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0xc400);
	rge_write_phy_ocp(sc, 0xa436, 0x80eb);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0x0700;
	rge_write_phy_ocp(sc, 0xa438, val | 0x0300);
	rge_write_phy_ocp(sc, 0xa436, 0x80f8);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x1c00);
	rge_write_phy_ocp(sc, 0xa436, 0x80f1);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x3000);
	rge_write_phy_ocp(sc, 0xa436, 0x80fe);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0xa500);
	rge_write_phy_ocp(sc, 0xa436, 0x8102);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x5000);
	rge_write_phy_ocp(sc, 0xa436, 0x8105);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x3300);
	rge_write_phy_ocp(sc, 0xa436, 0x8100);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x7000);
	rge_write_phy_ocp(sc, 0xa436, 0x8104);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0xf000);
	rge_write_phy_ocp(sc, 0xa436, 0x8106);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x6500);
	rge_write_phy_ocp(sc, 0xa436, 0x80dc);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0xed00);
	rge_write_phy_ocp(sc, 0xa436, 0x80df);
	RGE_PHY_SETBIT(sc, 0xa438, 0x0100);
	rge_write_phy_ocp(sc, 0xa436, 0x80e1);
	RGE_PHY_CLRBIT(sc, 0xa438, 0x0100);
	val = rge_read_phy_ocp(sc, 0xbf06) & ~0x003f;
	rge_write_phy_ocp(sc, 0xbf06, val | 0x0038);
	rge_write_phy_ocp(sc, 0xa436, 0x819f);
	rge_write_phy_ocp(sc, 0xa438, 0xd0b6);
	rge_write_phy_ocp(sc, 0xbc34, 0x5555);
	val = rge_read_phy_ocp(sc, 0xbf0a) & ~0x0e00;
	rge_write_phy_ocp(sc, 0xbf0a, val | 0x0a00);
	RGE_PHY_CLRBIT(sc, 0xa5c0, 0x0400);
	RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
}

void
rge_phy_config_mac_cfg3(struct rge_softc *sc)
{
	uint16_t val;
	int i;
	static const uint16_t mac_cfg3_a438_value[] =
	    { 0x0043, 0x00a7, 0x00d6, 0x00ec, 0x00f6, 0x00fb, 0x00fd, 0x00ff,
	      0x00bb, 0x0058, 0x0029, 0x0013, 0x0009, 0x0004, 0x0002 };

	static const uint16_t mac_cfg3_b88e_value[] =
	    { 0xc091, 0x6e12, 0xc092, 0x1214, 0xc094, 0x1516, 0xc096, 0x171b,
	      0xc098, 0x1b1c, 0xc09a, 0x1f1f, 0xc09c, 0x2021, 0xc09e, 0x2224,
	      0xc0a0, 0x2424, 0xc0a2, 0x2424, 0xc0a4, 0x2424, 0xc018, 0x0af2,
	      0xc01a, 0x0d4a, 0xc01c, 0x0f26, 0xc01e, 0x118d, 0xc020, 0x14f3,
	      0xc022, 0x175a, 0xc024, 0x19c0, 0xc026, 0x1c26, 0xc089, 0x6050,
	      0xc08a, 0x5f6e, 0xc08c, 0x6e6e, 0xc08e, 0x6e6e, 0xc090, 0x6e12 };

	for (i = 0; i < nitems(rtl8125_mac_cfg3_ephy); i++)
		rge_write_ephy(sc, rtl8125_mac_cfg3_ephy[i].reg,
		    rtl8125_mac_cfg3_ephy[i].val);

	val = rge_read_ephy(sc, 0x002a) & ~0x7000;
	rge_write_ephy(sc, 0x002a, val | 0x3000);
	RGE_EPHY_CLRBIT(sc, 0x0019, 0x0040);
	RGE_EPHY_SETBIT(sc, 0x001b, 0x0e00);
	RGE_EPHY_CLRBIT(sc, 0x001b, 0x7000);
	rge_write_ephy(sc, 0x0002, 0x6042);
	rge_write_ephy(sc, 0x0006, 0x0014);
	val = rge_read_ephy(sc, 0x006a) & ~0x7000;
	rge_write_ephy(sc, 0x006a, val | 0x3000);
	RGE_EPHY_CLRBIT(sc, 0x0059, 0x0040);
	RGE_EPHY_SETBIT(sc, 0x005b, 0x0e00);
	RGE_EPHY_CLRBIT(sc, 0x005b, 0x7000);
	rge_write_ephy(sc, 0x0042, 0x6042);
	rge_write_ephy(sc, 0x0046, 0x0014);

	rge_phy_config_mcu(sc, RGE_MAC_CFG3_MCODE_VER);

	RGE_PHY_SETBIT(sc, 0xad4e, 0x0010);
	val = rge_read_phy_ocp(sc, 0xad16) & ~0x03ff;
	rge_write_phy_ocp(sc, 0xad16, val | 0x03ff);
	val = rge_read_phy_ocp(sc, 0xad32) & ~0x003f;
	rge_write_phy_ocp(sc, 0xad32, val | 0x0006);
	RGE_PHY_CLRBIT(sc, 0xac08, 0x1000);
	RGE_PHY_CLRBIT(sc, 0xac08, 0x0100);
	val = rge_read_phy_ocp(sc, 0xacc0) & ~0x0003;
	rge_write_phy_ocp(sc, 0xacc0, val | 0x0002);
	val = rge_read_phy_ocp(sc, 0xad40) & ~0x00e0;
	rge_write_phy_ocp(sc, 0xad40, val | 0x0040);
	val = rge_read_phy_ocp(sc, 0xad40) & ~0x0007;
	rge_write_phy_ocp(sc, 0xad40, val | 0x0004);
	RGE_PHY_CLRBIT(sc, 0xac14, 0x0080);
	RGE_PHY_CLRBIT(sc, 0xac80, 0x0300);
	val = rge_read_phy_ocp(sc, 0xac5e) & ~0x0007;
	rge_write_phy_ocp(sc, 0xac5e, val | 0x0002);
	rge_write_phy_ocp(sc, 0xad4c, 0x00a8);
	rge_write_phy_ocp(sc, 0xac5c, 0x01ff);
	val = rge_read_phy_ocp(sc, 0xac8a) & ~0x00f0;
	rge_write_phy_ocp(sc, 0xac8a, val | 0x0030);
	rge_write_phy_ocp(sc, 0xb87c, 0x8157);
	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
	rge_write_phy_ocp(sc, 0xb87e, val | 0x0500);
	rge_write_phy_ocp(sc, 0xb87c, 0x8159);
	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
	rge_write_phy_ocp(sc, 0xb87e, val | 0x0700);
	RGE_WRITE_2(sc, RGE_EEE_TXIDLE_TIMER, RGE_JUMBO_MTU + ETHER_HDR_LEN +
	    32);
	rge_write_phy_ocp(sc, 0xb87c, 0x80a2);
	rge_write_phy_ocp(sc, 0xb87e, 0x0153);
	rge_write_phy_ocp(sc, 0xb87c, 0x809c);
	rge_write_phy_ocp(sc, 0xb87e, 0x0153);

	rge_write_phy_ocp(sc, 0xa436, 0x81b3);
	for (i = 0; i < nitems(mac_cfg3_a438_value); i++)
		rge_write_phy_ocp(sc, 0xa438, mac_cfg3_a438_value[i]);
	for (i = 0; i < 26; i++)
		rge_write_phy_ocp(sc, 0xa438, 0);
	rge_write_phy_ocp(sc, 0xa436, 0x8257);
	rge_write_phy_ocp(sc, 0xa438, 0x020f);
	rge_write_phy_ocp(sc, 0xa436, 0x80ea);
	rge_write_phy_ocp(sc, 0xa438, 0x7843);

	rge_patch_phy_mcu(sc, 1);
	RGE_PHY_CLRBIT(sc, 0xb896, 0x0001);
	RGE_PHY_CLRBIT(sc, 0xb892, 0xff00);
	for (i = 0; i < nitems(mac_cfg3_b88e_value); i += 2) {
		rge_write_phy_ocp(sc, 0xb88e, mac_cfg3_b88e_value[i]);
		rge_write_phy_ocp(sc, 0xb890, mac_cfg3_b88e_value[i + 1]);
	}
	RGE_PHY_SETBIT(sc, 0xb896, 0x0001);
	rge_patch_phy_mcu(sc, 0);

	RGE_PHY_SETBIT(sc, 0xd068, 0x2000);
	rge_write_phy_ocp(sc, 0xa436, 0x81a2);
	RGE_PHY_SETBIT(sc, 0xa438, 0x0100);
	val = rge_read_phy_ocp(sc, 0xb54c) & ~0xff00;
	rge_write_phy_ocp(sc, 0xb54c, val | 0xdb00);
	RGE_PHY_CLRBIT(sc, 0xa454, 0x0001);
	RGE_PHY_SETBIT(sc, 0xa5d4, 0x0020);
	RGE_PHY_CLRBIT(sc, 0xad4e, 0x0010);
	RGE_PHY_CLRBIT(sc, 0xa86a, 0x0001);
	RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
}

void
rge_phy_config_mac_cfg4(struct rge_softc *sc)
{
	uint16_t val;
	int i;
	static const uint16_t mac_cfg4_b87c_value[] =
	    { 0x8013, 0x0700, 0x8fb9, 0x2801, 0x8fba, 0x0100, 0x8fbc, 0x1900,
	      0x8fbe, 0xe100, 0x8fc0, 0x0800, 0x8fc2, 0xe500, 0x8fc4, 0x0f00,
	      0x8fc6, 0xf100, 0x8fc8, 0x0400, 0x8fca, 0xf300, 0x8fcc, 0xfd00,
	      0x8fce, 0xff00, 0x8fd0, 0xfb00, 0x8fd2, 0x0100, 0x8fd4, 0xf400,
	      0x8fd6, 0xff00, 0x8fd8, 0xf600, 0x813d, 0x390e, 0x814f, 0x790e,
	      0x80b0, 0x0f31 };

	for (i = 0; i < nitems(rtl8125_mac_cfg4_ephy); i++)
		rge_write_ephy(sc, rtl8125_mac_cfg4_ephy[i].reg,
		    rtl8125_mac_cfg4_ephy[i].val);

	rge_write_phy_ocp(sc, 0xbf86, 0x9000);
	RGE_PHY_SETBIT(sc, 0xc402, 0x0400);
	RGE_PHY_CLRBIT(sc, 0xc402, 0x0400);
	rge_write_phy_ocp(sc, 0xbd86, 0x1010);
	rge_write_phy_ocp(sc, 0xbd88, 0x1010);
	val = rge_read_phy_ocp(sc, 0xbd4e) & ~0x0c00;
	rge_write_phy_ocp(sc, 0xbd4e, val | 0x0800);
	val = rge_read_phy_ocp(sc, 0xbf46) & ~0x0f00;
	rge_write_phy_ocp(sc, 0xbf46, val | 0x0700);

	rge_phy_config_mcu(sc, RGE_MAC_CFG4_MCODE_VER);

	RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
	RGE_PHY_SETBIT(sc, 0xbc08, 0x000c);
	rge_write_phy_ocp(sc, 0xa436, 0x8fff);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x0400);
	for (i = 0; i < 6; i++) {
		rge_write_phy_ocp(sc, 0xb87c, 0x8560 + i * 2);
		if (i < 3)
			rge_write_phy_ocp(sc, 0xb87e, 0x19cc);
		else
			rge_write_phy_ocp(sc, 0xb87e, 0x147d);
	}
	rge_write_phy_ocp(sc, 0xb87c, 0x8ffe);
	rge_write_phy_ocp(sc, 0xb87e, 0x0907);
	val = rge_read_phy_ocp(sc, 0xacda) & ~0xff00;
	rge_write_phy_ocp(sc, 0xacda, val | 0xff00);
	val = rge_read_phy_ocp(sc, 0xacde) & ~0xf000;
	rge_write_phy_ocp(sc, 0xacde, val | 0xf000);
	rge_write_phy_ocp(sc, 0xb87c, 0x80d6);
	rge_write_phy_ocp(sc, 0xb87e, 0x2801);
	rge_write_phy_ocp(sc, 0xb87c, 0x80f2);
	rge_write_phy_ocp(sc, 0xb87e, 0x2801);
	rge_write_phy_ocp(sc, 0xb87c, 0x80f4);
	rge_write_phy_ocp(sc, 0xb87e, 0x6077);
	rge_write_phy_ocp(sc, 0xb506, 0x01e7);
	rge_write_phy_ocp(sc, 0xac8c, 0x0ffc);
	rge_write_phy_ocp(sc, 0xac46, 0xb7b4);
	rge_write_phy_ocp(sc, 0xac50, 0x0fbc);
	rge_write_phy_ocp(sc, 0xac3c, 0x9240);
	rge_write_phy_ocp(sc, 0xac4e, 0x0db4);
	rge_write_phy_ocp(sc, 0xacc6, 0x0707);
	rge_write_phy_ocp(sc, 0xacc8, 0xa0d3);
	rge_write_phy_ocp(sc, 0xad08, 0x0007);
	for (i = 0; i < nitems(mac_cfg4_b87c_value); i += 2) {
		rge_write_phy_ocp(sc, 0xb87c, mac_cfg4_b87c_value[i]);
		rge_write_phy_ocp(sc, 0xb87e, mac_cfg4_b87c_value[i + 1]);
	}
	RGE_PHY_SETBIT(sc, 0xbf4c, 0x0002);
	RGE_PHY_SETBIT(sc, 0xbcca, 0x0300);
	rge_write_phy_ocp(sc, 0xb87c, 0x8141);
	rge_write_phy_ocp(sc, 0xb87e, 0x320e);
	rge_write_phy_ocp(sc, 0xb87c, 0x8153);
	rge_write_phy_ocp(sc, 0xb87e, 0x720e);
	RGE_PHY_CLRBIT(sc, 0xa432, 0x0040);
	rge_write_phy_ocp(sc, 0xb87c, 0x8529);
	rge_write_phy_ocp(sc, 0xb87e, 0x050e);
	RGE_WRITE_2(sc, RGE_EEE_TXIDLE_TIMER, RGE_JUMBO_MTU + ETHER_HDR_LEN +
	    32);
	rge_write_phy_ocp(sc, 0xa436, 0x816c);
	rge_write_phy_ocp(sc, 0xa438, 0xc4a0);
	rge_write_phy_ocp(sc, 0xa436, 0x8170);
	rge_write_phy_ocp(sc, 0xa438, 0xc4a0);
	rge_write_phy_ocp(sc, 0xa436, 0x8174);
	rge_write_phy_ocp(sc, 0xa438, 0x04a0);
	rge_write_phy_ocp(sc, 0xa436, 0x8178);
	rge_write_phy_ocp(sc, 0xa438, 0x04a0);
	rge_write_phy_ocp(sc, 0xa436, 0x817c);
	rge_write_phy_ocp(sc, 0xa438, 0x0719);
	rge_write_phy_ocp(sc, 0xa436, 0x8ff4);
	rge_write_phy_ocp(sc, 0xa438, 0x0400);
	rge_write_phy_ocp(sc, 0xa436, 0x8ff1);
	rge_write_phy_ocp(sc, 0xa438, 0x0404);
	rge_write_phy_ocp(sc, 0xbf4a, 0x001b);
	for (i = 0; i < 6; i++) {
		rge_write_phy_ocp(sc, 0xb87c, 0x8033 + i * 4);
		if (i == 2)
			rge_write_phy_ocp(sc, 0xb87e, 0xfc32);
		else
			rge_write_phy_ocp(sc, 0xb87e, 0x7c13);
	}
	rge_write_phy_ocp(sc, 0xb87c, 0x8145);
	rge_write_phy_ocp(sc, 0xb87e, 0x370e);
	rge_write_phy_ocp(sc, 0xb87c, 0x8157);
	rge_write_phy_ocp(sc, 0xb87e, 0x770e);
	rge_write_phy_ocp(sc, 0xb87c, 0x8169);
	rge_write_phy_ocp(sc, 0xb87e, 0x0d0a);
	rge_write_phy_ocp(sc, 0xb87c, 0x817b);
	rge_write_phy_ocp(sc, 0xb87e, 0x1d0a);
	rge_write_phy_ocp(sc, 0xa436, 0x8217);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x5000);
	rge_write_phy_ocp(sc, 0xa436, 0x821a);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x5000);
	rge_write_phy_ocp(sc, 0xa436, 0x80da);
	rge_write_phy_ocp(sc, 0xa438, 0x0403);
	rge_write_phy_ocp(sc, 0xa436, 0x80dc);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x1000);
	rge_write_phy_ocp(sc, 0xa436, 0x80b3);
	rge_write_phy_ocp(sc, 0xa438, 0x0384);
	rge_write_phy_ocp(sc, 0xa436, 0x80b7);
	rge_write_phy_ocp(sc, 0xa438, 0x2007);
	rge_write_phy_ocp(sc, 0xa436, 0x80ba);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x6c00);
	rge_write_phy_ocp(sc, 0xa436, 0x80b5);
	rge_write_phy_ocp(sc, 0xa438, 0xf009);
	rge_write_phy_ocp(sc, 0xa436, 0x80bd);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x9f00);
	rge_write_phy_ocp(sc, 0xa436, 0x80c7);
	rge_write_phy_ocp(sc, 0xa438, 0xf083);
	rge_write_phy_ocp(sc, 0xa436, 0x80dd);
	rge_write_phy_ocp(sc, 0xa438, 0x03f0);
	rge_write_phy_ocp(sc, 0xa436, 0x80df);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x1000);
	rge_write_phy_ocp(sc, 0xa436, 0x80cb);
	rge_write_phy_ocp(sc, 0xa438, 0x2007);
	rge_write_phy_ocp(sc, 0xa436, 0x80ce);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x6c00);
	rge_write_phy_ocp(sc, 0xa436, 0x80c9);
	rge_write_phy_ocp(sc, 0xa438, 0x8009);
	rge_write_phy_ocp(sc, 0xa436, 0x80d1);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x8000);
	rge_write_phy_ocp(sc, 0xa436, 0x80a3);
	rge_write_phy_ocp(sc, 0xa438, 0x200a);
	rge_write_phy_ocp(sc, 0xa436, 0x80a5);
	rge_write_phy_ocp(sc, 0xa438, 0xf0ad);
	rge_write_phy_ocp(sc, 0xa436, 0x809f);
	rge_write_phy_ocp(sc, 0xa438, 0x6073);
	rge_write_phy_ocp(sc, 0xa436, 0x80a1);
	rge_write_phy_ocp(sc, 0xa438, 0x000b);
	rge_write_phy_ocp(sc, 0xa436, 0x80a9);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0xc000);
	rge_patch_phy_mcu(sc, 1);
	RGE_PHY_CLRBIT(sc, 0xb896, 0x0001);
	RGE_PHY_CLRBIT(sc, 0xb892, 0xff00);
	rge_write_phy_ocp(sc, 0xb88e, 0xc23e);
	rge_write_phy_ocp(sc, 0xb890, 0x0000);
	rge_write_phy_ocp(sc, 0xb88e, 0xc240);
	rge_write_phy_ocp(sc, 0xb890, 0x0103);
1869 	rge_write_phy_ocp(sc, 0xb88e, 0xc242);
1870 	rge_write_phy_ocp(sc, 0xb890, 0x0507);
1871 	rge_write_phy_ocp(sc, 0xb88e, 0xc244);
1872 	rge_write_phy_ocp(sc, 0xb890, 0x090b);
1873 	rge_write_phy_ocp(sc, 0xb88e, 0xc246);
1874 	rge_write_phy_ocp(sc, 0xb890, 0x0c0e);
1875 	rge_write_phy_ocp(sc, 0xb88e, 0xc248);
1876 	rge_write_phy_ocp(sc, 0xb890, 0x1012);
1877 	rge_write_phy_ocp(sc, 0xb88e, 0xc24a);
1878 	rge_write_phy_ocp(sc, 0xb890, 0x1416);
1879 	RGE_PHY_SETBIT(sc, 0xb896, 0x0001);
1880 	rge_patch_phy_mcu(sc, 0);
1881 	RGE_PHY_SETBIT(sc, 0xa86a, 0x0001);
1882 	RGE_PHY_SETBIT(sc, 0xa6f0, 0x0001);
1883 	rge_write_phy_ocp(sc, 0xbfa0, 0xd70d);
1884 	rge_write_phy_ocp(sc, 0xbfa2, 0x4100);
1885 	rge_write_phy_ocp(sc, 0xbfa4, 0xe868);
1886 	rge_write_phy_ocp(sc, 0xbfa6, 0xdc59);
1887 	rge_write_phy_ocp(sc, 0xb54c, 0x3c18);
1888 	RGE_PHY_CLRBIT(sc, 0xbfa4, 0x0020);
1889 	rge_write_phy_ocp(sc, 0xa436, 0x817d);
1890 	RGE_PHY_SETBIT(sc, 0xa438, 0x1000);
1891 }
1892 
1893 void
1894 rge_phy_config_mac_cfg5(struct rge_softc *sc)
1895 {
1896 	uint16_t val;
1897 	int i;
1898 
1899 	for (i = 0; i < nitems(rtl8125_mac_cfg5_ephy); i++)
1900 		rge_write_ephy(sc, rtl8125_mac_cfg5_ephy[i].reg,
1901 		    rtl8125_mac_cfg5_ephy[i].val);
1902 
1903 	rge_phy_config_mcu(sc, RGE_MAC_CFG5_MCODE_VER);
1904 
1905 	RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
1906 	val = rge_read_phy_ocp(sc, 0xac46) & ~0x00f0;
1907 	rge_write_phy_ocp(sc, 0xac46, val | 0x0090);
1908 	val = rge_read_phy_ocp(sc, 0xad30) & ~0x0003;
1909 	rge_write_phy_ocp(sc, 0xad30, val | 0x0001);
1910 	RGE_WRITE_2(sc, RGE_EEE_TXIDLE_TIMER, RGE_JUMBO_MTU + ETHER_HDR_LEN +
1911 	    32);
1912 	rge_write_phy_ocp(sc, 0xb87c, 0x80f5);
1913 	rge_write_phy_ocp(sc, 0xb87e, 0x760e);
1914 	rge_write_phy_ocp(sc, 0xb87c, 0x8107);
1915 	rge_write_phy_ocp(sc, 0xb87e, 0x360e);
1916 	rge_write_phy_ocp(sc, 0xb87c, 0x8551);
1917 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
1918 	rge_write_phy_ocp(sc, 0xb87e, val | 0x0800);
1919 	val = rge_read_phy_ocp(sc, 0xbf00) & ~0xe000;
1920 	rge_write_phy_ocp(sc, 0xbf00, val | 0xa000);
1921 	val = rge_read_phy_ocp(sc, 0xbf46) & ~0x0f00;
1922 	rge_write_phy_ocp(sc, 0xbf46, val | 0x0300);
1923 	for (i = 0; i < 10; i++) {
1924 		rge_write_phy_ocp(sc, 0xa436, 0x8044 + i * 6);
1925 		rge_write_phy_ocp(sc, 0xa438, 0x2417);
1926 	}
1927 	RGE_PHY_SETBIT(sc, 0xa4ca, 0x0040);
1928 	val = rge_read_phy_ocp(sc, 0xbf84) & ~0xe000;
1929 	rge_write_phy_ocp(sc, 0xbf84, val | 0xa000);
1930 	rge_write_phy_ocp(sc, 0xa436, 0x8170);
1931 	val = rge_read_phy_ocp(sc, 0xa438) & ~0x2700;
1932 	rge_write_phy_ocp(sc, 0xa438, val | 0xd800);
1933 }
1934 
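/*
 * Load the PHY MCU patch if the version recorded in the PHY (extended
 * register 0x801e, cached in sc->rge_mcodever at attach time) differs
 * from the one this driver carries.
 */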
1935 void
1936 rge_phy_config_mcu(struct rge_softc *sc, uint16_t mcode_version)
1937 {
1938 	if (sc->rge_mcodever != mcode_version) {
1939 		int i;
1940 
1941 		rge_patch_phy_mcu(sc, 1);
1942 
1943 		if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3) {
1944 			rge_write_phy_ocp(sc, 0xa436, 0x8024);
1945 			if (sc->rge_type == MAC_CFG2)
1946 				rge_write_phy_ocp(sc, 0xa438, 0x8600);
1947 			else
1948 				rge_write_phy_ocp(sc, 0xa438, 0x8601);
1949 			rge_write_phy_ocp(sc, 0xa436, 0xb82e);
1950 			rge_write_phy_ocp(sc, 0xa438, 0x0001);
1951 
1952 			RGE_PHY_SETBIT(sc, 0xb820, 0x0080);
1953 		}
1954 
1955 		if (sc->rge_type == MAC_CFG2) {
1956 			for (i = 0; i < nitems(rtl8125_mac_cfg2_mcu); i++) {
1957 				rge_write_phy_ocp(sc,
1958 				    rtl8125_mac_cfg2_mcu[i].reg,
1959 				    rtl8125_mac_cfg2_mcu[i].val);
1960 			}
1961 		} else if (sc->rge_type == MAC_CFG3) {
1962 			for (i = 0; i < nitems(rtl8125_mac_cfg3_mcu); i++) {
1963 				rge_write_phy_ocp(sc,
1964 				    rtl8125_mac_cfg3_mcu[i].reg,
1965 				    rtl8125_mac_cfg3_mcu[i].val);
1966 			}
1967 		} else if (sc->rge_type == MAC_CFG4) {
1968 			for (i = 0; i < nitems(rtl8125_mac_cfg4_mcu); i++) {
1969 				rge_write_phy_ocp(sc,
1970 				    rtl8125_mac_cfg4_mcu[i].reg,
1971 				    rtl8125_mac_cfg4_mcu[i].val);
1972 			}
1973 		} else if (sc->rge_type == MAC_CFG5) {
1974 			for (i = 0; i < nitems(rtl8125_mac_cfg5_mcu); i++) {
1975 				rge_write_phy_ocp(sc,
1976 				    rtl8125_mac_cfg5_mcu[i].reg,
1977 				    rtl8125_mac_cfg5_mcu[i].val);
1978 			}
1979 		}
1980 
1981 		if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3) {
1982 			RGE_PHY_CLRBIT(sc, 0xb820, 0x0080);
1983 
1984 			rge_write_phy_ocp(sc, 0xa436, 0);
1985 			rge_write_phy_ocp(sc, 0xa438, 0);
1986 			RGE_PHY_CLRBIT(sc, 0xb82e, 0x0001);
1987 			rge_write_phy_ocp(sc, 0xa436, 0x8024);
1988 			rge_write_phy_ocp(sc, 0xa438, 0);
1989 		}
1990 
1991 		rge_patch_phy_mcu(sc, 0);
1992 
1993 		/* Write microcode version. */
1994 		rge_write_phy_ocp(sc, 0xa436, 0x801e);
1995 		rge_write_phy_ocp(sc, 0xa438, mcode_version);
1996 	}
1997 }
1998 
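/*
 * Program the station address.  RGE_MAC0/RGE_MAC4 take the address in
 * little-endian order (e.g. 00:11:22:33:44:55 is written as 0x33221100
 * and 0x00005544); the registers are unlocked by RGE_EECMD_WRITECFG.
 */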
1999 void
2000 rge_set_macaddr(struct rge_softc *sc, const uint8_t *addr)
2001 {
2002 	RGE_SETBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
2003 	RGE_WRITE_4(sc, RGE_MAC0,
2004 	    addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
2005 	RGE_WRITE_4(sc, RGE_MAC4,
2006 	    addr[5] <<  8 | addr[4]);
2007 	RGE_CLRBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
2008 }
2009 
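/*
 * Read back the station address.  Note that the 32- and 16-bit loads
 * assume addr points to a buffer of at least 6 suitably aligned bytes.
 */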
2010 void
2011 rge_get_macaddr(struct rge_softc *sc, uint8_t *addr)
2012 {
2013 	*(uint32_t *)&addr[0] = RGE_READ_4(sc, RGE_ADDR0);
2014 	*(uint16_t *)&addr[4] = RGE_READ_2(sc, RGE_ADDR1);
2015 }
2016 
2017 void
2018 rge_hw_init(struct rge_softc *sc)
2019 {
2020 	int i;
2021 
2022 	RGE_SETBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
2023 	RGE_CLRBIT_1(sc, RGE_CFG5, RGE_CFG5_PME_STS);
2024 	RGE_CLRBIT_1(sc, RGE_CFG2, RGE_CFG2_CLKREQ_EN);
2025 	RGE_CLRBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
2026 	RGE_CLRBIT_1(sc, 0xf1, 0x80);
2027 
2028 	/* Disable UPS. */
2029 	RGE_MAC_CLRBIT(sc, 0xd40a, 0x0010);
2030 
2031 	/* Configure MAC MCU. */
2032 	rge_write_mac_ocp(sc, 0xfc38, 0);
2033 
2034 	for (i = 0xfc28; i < 0xfc38; i += 2)
2035 		rge_write_mac_ocp(sc, i, 0);
2036 
2037 	DELAY(3000);
2038 	rge_write_mac_ocp(sc, 0xfc26, 0);
2039 
2040 	if (sc->rge_type == MAC_CFG3) {
2041 		for (i = 0; i < nitems(rtl8125_mac_bps); i++) {
2042 			rge_write_mac_ocp(sc, rtl8125_mac_bps[i].reg,
2043 			    rtl8125_mac_bps[i].val);
2044 		}
2045 	} else if (sc->rge_type == MAC_CFG5) {
2046 		for (i = 0; i < nitems(rtl8125b_mac_bps); i++) {
2047 			rge_write_mac_ocp(sc, rtl8125b_mac_bps[i].reg,
2048 			    rtl8125b_mac_bps[i].val);
2049 		}
2050 	}
2051 
2052 	/* Disable PHY power saving. */
2053 	rge_disable_phy_ocp_pwrsave(sc);
2054 
2055 	/* Set bit 20 (likely the AER unsupported-request mask) in CSI 0x108. */
2056 	rge_write_csi(sc, 0x108,
2057 	    rge_read_csi(sc, 0x108) | 0x00100000);
2059 }
2060 
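/*
 * Write 0x0500 to PHY OCP register 0xc416, apparently disabling a PHY
 * power-save mode; the write is bracketed by PHY MCU patch mode, as in
 * the vendor driver.
 */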
2061 void
2062 rge_disable_phy_ocp_pwrsave(struct rge_softc *sc)
2063 {
2064 	if (rge_read_phy_ocp(sc, 0xc416) != 0x0500) {
2065 		rge_patch_phy_mcu(sc, 1);
2066 		rge_write_phy_ocp(sc, 0xc416, 0);
2067 		rge_write_phy_ocp(sc, 0xc416, 0x0500);
2068 		rge_patch_phy_mcu(sc, 0);
2069 	}
2070 }
2071 
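/*
 * Enter (set != 0) or leave PHY MCU patch mode via bit 0x0010 of
 * 0xb820, then poll the ready flag (0xb800 bit 0x0040) for up to
 * 100ms until it reflects the requested state.
 */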
2072 void
2073 rge_patch_phy_mcu(struct rge_softc *sc, int set)
2074 {
2075 	int i;
2076 
2077 	if (set)
2078 		RGE_PHY_SETBIT(sc, 0xb820, 0x0010);
2079 	else
2080 		RGE_PHY_CLRBIT(sc, 0xb820, 0x0010);
2081 
2082 	for (i = 0; i < 1000; i++) {
2083 		if ((rge_read_phy_ocp(sc, 0xb800) & 0x0040) ==
2084 		    (set ? 0x0040 : 0))
2085 			break;
2086 		DELAY(100);
2087 	}
2088 	if (i == 1000)
2089 		DPRINTF(("timeout waiting for phy mcu patch request\n"));
2091 }
2092 
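/*
 * Advertise every media type the RTL8125 supports, from 10BASE-T up to
 * 2500BASE-T full duplex.
 */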
2093 void
2094 rge_add_media_types(struct rge_softc *sc)
2095 {
2096 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_10_T, 0, NULL);
2097 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_10_T | IFM_FDX, 0, NULL);
2098 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_100_TX, 0, NULL);
2099 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_100_TX | IFM_FDX, 0, NULL);
2100 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_1000_T, 0, NULL);
2101 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
2102 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_2500_T, 0, NULL);
2103 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_2500_T | IFM_FDX, 0, NULL);
2104 }
2105 
2106 void
2107 rge_config_imtype(struct rge_softc *sc, int imtype)
2108 {
2109 	switch (imtype) {
2110 	case RGE_IMTYPE_NONE:
2111 		sc->rge_intrs = RGE_INTRS;
2112 		break;
2113 	case RGE_IMTYPE_SIM:
2114 		sc->rge_intrs = RGE_INTRS_TIMER;
2115 		break;
2116 	default:
2117 		panic("%s: unknown imtype %d", sc->sc_dev.dv_xname, imtype);
2118 	}
2119 }
2120 
2121 void
2122 rge_disable_hw_im(struct rge_softc *sc)
2123 {
2124 	RGE_WRITE_2(sc, RGE_IM, 0);
2125 }
2126 
2127 void
2128 rge_disable_sim_im(struct rge_softc *sc)
2129 {
2130 	RGE_WRITE_4(sc, RGE_TIMERINT0, 0);
2131 	sc->rge_timerintr = 0;
2132 }
2133 
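/*
 * Enable simulated interrupt moderation: RGE_TIMERINT0 appears to hold
 * the timer period and the RGE_TIMERCNT write to (re)arm the timer, so
 * completion interrupts are deferred to the timer tick instead of
 * being raised per packet.
 */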
2134 void
2135 rge_setup_sim_im(struct rge_softc *sc)
2136 {
2137 	RGE_WRITE_4(sc, RGE_TIMERINT0, 0x2600);
2138 	RGE_WRITE_4(sc, RGE_TIMERCNT, 1);
2139 	sc->rge_timerintr = 1;
2140 }
2141 
2142 void
2143 rge_setup_intr(struct rge_softc *sc, int imtype)
2144 {
2145 	rge_config_imtype(sc, imtype);
2146 
2147 	/* Enable interrupts. */
2148 	RGE_WRITE_4(sc, RGE_IMR, sc->rge_intrs);
2149 
2150 	switch (imtype) {
2151 	case RGE_IMTYPE_NONE:
2152 		rge_disable_sim_im(sc);
2153 		rge_disable_hw_im(sc);
2154 		break;
2155 	case RGE_IMTYPE_SIM:
2156 		rge_disable_hw_im(sc);
2157 		rge_setup_sim_im(sc);
2158 		break;
2159 	default:
2160 		panic("%s: unknown imtype %d", sc->sc_dev.dv_xname, imtype);
2161 	}
2162 }
2163 
2164 void
2165 rge_exit_oob(struct rge_softc *sc)
2166 {
2167 	int i;
2168 
2169 	RGE_CLRBIT_4(sc, RGE_RXCFG, RGE_RXCFG_ALLPHYS | RGE_RXCFG_INDIV |
2170 	    RGE_RXCFG_MULTI | RGE_RXCFG_BROAD | RGE_RXCFG_RUNT |
2171 	    RGE_RXCFG_ERRPKT);
2172 
2173 	/* Disable RealWoW. */
2174 	rge_write_mac_ocp(sc, 0xc0bc, 0x00ff);
2175 
2176 	rge_reset(sc);
2177 
2178 	/* Disable OOB. */
2179 	RGE_CLRBIT_1(sc, RGE_MCUCMD, RGE_MCUCMD_IS_OOB);
2180 
2181 	RGE_MAC_CLRBIT(sc, 0xe8de, 0x4000);
2182 
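	/*
	 * Poll (up to 1ms) for the acknowledge bit in RGE_TWICMD; this
	 * and the MAC OCP writes below are undocumented magic,
	 * presumably from the vendor driver.
	 */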
2183 	for (i = 0; i < 10; i++) {
2184 		DELAY(100);
2185 		if (RGE_READ_2(sc, RGE_TWICMD) & 0x0200)
2186 			break;
2187 	}
2188 
2189 	rge_write_mac_ocp(sc, 0xc0aa, 0x07d0);
2190 	rge_write_mac_ocp(sc, 0xc0a6, 0x01b5);
2191 	rge_write_mac_ocp(sc, 0xc01e, 0x5555);
2192 
2193 	for (i = 0; i < 10; i++) {
2194 		DELAY(100);
2195 		if (RGE_READ_2(sc, RGE_TWICMD) & 0x0200)
2196 			break;
2197 	}
2198 
2199 	if (rge_read_mac_ocp(sc, 0xd42c) & 0x0100) {
2200 		printf("%s: UPS resume detected, waiting for PHY ready\n",
2201 		    sc->sc_dev.dv_xname);
2202 		for (i = 0; i < RGE_TIMEOUT; i++) {
2203 			if ((rge_read_phy_ocp(sc, 0xa420) & 0x0007) == 2)
2204 				break;
2205 			DELAY(1000);
2206 		}
2207 		RGE_MAC_CLRBIT(sc, 0xd408, 0x0100);
2208 		if (sc->rge_type == MAC_CFG4 || sc->rge_type == MAC_CFG5)
2209 			RGE_PHY_CLRBIT(sc, 0xa466, 0x0001);
2210 		RGE_PHY_CLRBIT(sc, 0xa468, 0x000a);
2211 	}
2212 }
2213 
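/*
 * CSI access: the CSIAR/CSIDR pair provides an indirect window that
 * appears to map the device's PCIe configuration space (rge_hw_init()
 * uses it to reach extended config offset 0x108).
 */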
2214 void
2215 rge_write_csi(struct rge_softc *sc, uint32_t reg, uint32_t val)
2216 {
2217 	int i;
2218 
2219 	RGE_WRITE_4(sc, RGE_CSIDR, val);
2220 	RGE_WRITE_4(sc, RGE_CSIAR, (reg & RGE_CSIAR_ADDR_MASK) |
2221 	    (RGE_CSIAR_BYTE_EN << RGE_CSIAR_BYTE_EN_SHIFT) | RGE_CSIAR_BUSY);
2222 
2223 	for (i = 0; i < 10; i++) {
2224 		DELAY(100);
2225 		if (!(RGE_READ_4(sc, RGE_CSIAR) & RGE_CSIAR_BUSY))
2226 			break;
2227 	}
2228 
2229 	DELAY(20);
2230 }
2231 
2232 uint32_t
2233 rge_read_csi(struct rge_softc *sc, uint32_t reg)
2234 {
2235 	int i;
2236 
2237 	RGE_WRITE_4(sc, RGE_CSIAR, (reg & RGE_CSIAR_ADDR_MASK) |
2238 	    (RGE_CSIAR_BYTE_EN << RGE_CSIAR_BYTE_EN_SHIFT));
2239 
2240 	for (i = 0; i < 10; i++) {
2241 		DELAY(100);
2242 		if (RGE_READ_4(sc, RGE_CSIAR) & RGE_CSIAR_BUSY)
2243 			break;
2244 	}
2245 
2246 	DELAY(20);
2247 
2248 	return (RGE_READ_4(sc, RGE_CSIDR));
2249 }
2250 
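/*
 * MAC OCP registers are 16 bits wide at even addresses: the address
 * field carries reg >> 1, the data occupies the low 16 bits, and
 * RGE_MACOCP_BUSY triggers the write.
 */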
2251 void
2252 rge_write_mac_ocp(struct rge_softc *sc, uint16_t reg, uint16_t val)
2253 {
2254 	uint32_t tmp;
2255 
2256 	tmp = (reg >> 1) << RGE_MACOCP_ADDR_SHIFT;
2257 	tmp += val;
2258 	tmp |= RGE_MACOCP_BUSY;
2259 	RGE_WRITE_4(sc, RGE_MACOCP, tmp);
2260 }
2261 
2262 uint16_t
2263 rge_read_mac_ocp(struct rge_softc *sc, uint16_t reg)
2264 {
2265 	uint32_t val;
2266 
2267 	val = (reg >> 1) << RGE_MACOCP_ADDR_SHIFT;
2268 	RGE_WRITE_4(sc, RGE_MACOCP, val);
2269 
2270 	return (RGE_READ_4(sc, RGE_MACOCP) & RGE_MACOCP_DATA_MASK);
2271 }
2272 
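/*
 * EPHY register access through the RGE_EPHYAR indirection register;
 * each operation is given up to 1ms to complete.
 */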
2273 void
2274 rge_write_ephy(struct rge_softc *sc, uint16_t reg, uint16_t val)
2275 {
2276 	uint32_t tmp;
2277 	int i;
2278 
2279 	tmp = (reg & RGE_EPHYAR_ADDR_MASK) << RGE_EPHYAR_ADDR_SHIFT;
2280 	tmp |= RGE_EPHYAR_BUSY | (val & RGE_EPHYAR_DATA_MASK);
2281 	RGE_WRITE_4(sc, RGE_EPHYAR, tmp);
2282 
2283 	for (i = 0; i < 10; i++) {
2284 		DELAY(100);
2285 		if (!(RGE_READ_4(sc, RGE_EPHYAR) & RGE_EPHYAR_BUSY))
2286 			break;
2287 	}
2288 
2289 	DELAY(20);
2290 }
2291 
2292 uint16_t
2293 rge_read_ephy(struct rge_softc *sc, uint16_t reg)
2294 {
2295 	uint32_t val;
2296 	int i;
2297 
2298 	val = (reg & RGE_EPHYAR_ADDR_MASK) << RGE_EPHYAR_ADDR_SHIFT;
2299 	RGE_WRITE_4(sc, RGE_EPHYAR, val);
2300 
2301 	for (i = 0; i < 10; i++) {
2302 		DELAY(100);
2303 		val = RGE_READ_4(sc, RGE_EPHYAR);
2304 		if (val & RGE_EPHYAR_BUSY)
2305 			break;
2306 	}
2307 
2308 	DELAY(20);
2309 
2310 	return (val & RGE_EPHYAR_DATA_MASK);
2311 }
2312 
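/*
 * MII-style PHY access.  For the internal PHY (addr == 0) the MII
 * register number is translated into the standard PHY OCP window:
 * assuming RGE_PHYBASE is 0xa40, MII register r maps to OCP address
 * 0xa400 + 2 * r (e.g. BMCR, register 0, becomes 0xa400).
 */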
2313 void
2314 rge_write_phy(struct rge_softc *sc, uint16_t addr, uint16_t reg, uint16_t val)
2315 {
2316 	uint16_t off, phyaddr;
2317 
2318 	phyaddr = addr ? addr : RGE_PHYBASE + (reg / 8);
2319 	phyaddr <<= 4;
2320 
2321 	off = addr ? reg : 0x10 + (reg % 8);
2322 
2323 	phyaddr += (off - 16) << 1;
2324 
2325 	rge_write_phy_ocp(sc, phyaddr, val);
2326 }
2327 
2328 uint16_t
2329 rge_read_phy(struct rge_softc *sc, uint16_t addr, uint16_t reg)
2330 {
2331 	uint16_t off, phyaddr;
2332 
2333 	phyaddr = addr ? addr : RGE_PHYBASE + (reg / 8);
2334 	phyaddr <<= 4;
2335 
2336 	off = addr ? reg : 0x10 + (reg % 8);
2337 
2338 	phyaddr += (off - 16) << 1;
2339 
2340 	return (rge_read_phy_ocp(sc, phyaddr));
2341 }
2342 
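/*
 * PHY OCP access mirrors the MAC OCP layout: the (even) register
 * address is carried as reg >> 1 in the upper bits of RGE_PHYOCP, the
 * 16-bit data in the lower bits, and RGE_PHYOCP_BUSY marks a pending
 * operation.
 */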
2343 void
2344 rge_write_phy_ocp(struct rge_softc *sc, uint16_t reg, uint16_t val)
2345 {
2346 	uint32_t tmp;
2347 	int i;
2348 
2349 	tmp = (reg >> 1) << RGE_PHYOCP_ADDR_SHIFT;
2350 	tmp |= RGE_PHYOCP_BUSY | val;
2351 	RGE_WRITE_4(sc, RGE_PHYOCP, tmp);
2352 
2353 	for (i = 0; i < RGE_TIMEOUT; i++) {
2354 		DELAY(1);
2355 		if (!(RGE_READ_4(sc, RGE_PHYOCP) & RGE_PHYOCP_BUSY))
2356 			break;
2357 	}
2358 }
2359 
2360 uint16_t
2361 rge_read_phy_ocp(struct rge_softc *sc, uint16_t reg)
2362 {
2363 	uint32_t val;
2364 	int i;
2365 
2366 	val = (reg >> 1) << RGE_PHYOCP_ADDR_SHIFT;
2367 	RGE_WRITE_4(sc, RGE_PHYOCP, val);
2368 
2369 	for (i = 0; i < RGE_TIMEOUT; i++) {
2370 		DELAY(1);
2371 		val = RGE_READ_4(sc, RGE_PHYOCP);
2372 		if (val & RGE_PHYOCP_BUSY)
2373 			break;
2374 	}
2375 
2376 	return (val & RGE_PHYOCP_DATA_MASK);
2377 }
2378 
2379 int
2380 rge_get_link_status(struct rge_softc *sc)
2381 {
2382 	return ((RGE_READ_2(sc, RGE_PHYSTAT) & RGE_PHYSTAT_LINK) ? 1 : 0);
2383 }
2384 
2385 void
2386 rge_txstart(void *arg)
2387 {
2388 	struct rge_softc *sc = arg;
2389 
2390 	RGE_WRITE_2(sc, RGE_TXSTART, RGE_TXSTART_START);
2391 }
2392 
2393 void
2394 rge_tick(void *arg)
2395 {
2396 	struct rge_softc *sc = arg;
2397 	int s;
2398 
2399 	s = splnet();
2400 	rge_link_state(sc);
2401 	splx(s);
2402 
2403 	timeout_add_sec(&sc->sc_timeout, 1);
2404 }
2405 
2406 void
2407 rge_link_state(struct rge_softc *sc)
2408 {
2409 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
2410 	int link = LINK_STATE_DOWN;
2411 
2412 	if (rge_get_link_status(sc))
2413 		link = LINK_STATE_UP;
2414 
2415 	if (ifp->if_link_state != link) {
2416 		ifp->if_link_state = link;
2417 		if_link_state_change(ifp);
2418 	}
2419 }
2420 
2421 #ifndef SMALL_KERNEL
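/*
 * Configure Wake-on-LAN.  The CFG3/CFG5 wake bits are writable only
 * within the RGE_EECMD_WRITECFG bracket; bit 0x0001 of MAC OCP
 * register 0xc0b6 is undocumented but appears to gate wake events.
 */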
2422 int
2423 rge_wol(struct ifnet *ifp, int enable)
2424 {
2425 	struct rge_softc *sc = ifp->if_softc;
2426 
2427 	if (enable) {
2428 		if (!(RGE_READ_1(sc, RGE_CFG1) & RGE_CFG1_PM_EN)) {
2429 			printf("%s: power management is disabled, "
2430 			    "cannot do WOL\n", sc->sc_dev.dv_xname);
2431 			return (ENOTSUP);
2432 		}
2434 	}
2435 
2436 	rge_iff(sc);
2437 
2438 	if (enable)
2439 		RGE_MAC_SETBIT(sc, 0xc0b6, 0x0001);
2440 	else
2441 		RGE_MAC_CLRBIT(sc, 0xc0b6, 0x0001);
2442 
2443 	RGE_SETBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
2444 	RGE_CLRBIT_1(sc, RGE_CFG5, RGE_CFG5_WOL_LANWAKE | RGE_CFG5_WOL_UCAST |
2445 	    RGE_CFG5_WOL_MCAST | RGE_CFG5_WOL_BCAST);
2446 	RGE_CLRBIT_1(sc, RGE_CFG3, RGE_CFG3_WOL_LINK | RGE_CFG3_WOL_MAGIC);
2447 	if (enable)
2448 		RGE_SETBIT_1(sc, RGE_CFG5, RGE_CFG5_WOL_LANWAKE);
2449 	RGE_CLRBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
2450 
2451 	return (0);
2452 }
2453 
2454 void
2455 rge_wol_power(struct rge_softc *sc)
2456 {
2457 	/* Disable RXDV gate. */
2458 	RGE_CLRBIT_1(sc, RGE_PPSW, 0x08);
2459 	DELAY(2000);
2460 
2461 	RGE_SETBIT_1(sc, RGE_CFG1, RGE_CFG1_PM_EN);
2462 	RGE_SETBIT_1(sc, RGE_CFG2, RGE_CFG2_PMSTS_EN);
2463 }
2464 #endif
2465