/*	$OpenBSD: if_rge.c,v 1.11 2020/12/24 06:34:03 deraadt Exp $	*/

/*
 * Copyright (c) 2019, 2020 Kevin Lo <kevlo@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "bpfilter.h"
#include "vlan.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/device.h>
#include <sys/endian.h>

#include <net/if.h>
#include <net/if_media.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <machine/bus.h>
#include <machine/intr.h>

#include <dev/mii/mii.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_rgereg.h>

#ifdef RGE_DEBUG
#define DPRINTF(x)	do { if (rge_debug > 0) printf x; } while (0)
int rge_debug = 0;
#else
#define DPRINTF(x)
#endif

int		rge_match(struct device *, void *, void *);
void		rge_attach(struct device *, struct device *, void *);
int		rge_activate(struct device *, int);
int		rge_intr(void *);
int		rge_encap(struct rge_softc *, struct mbuf *, int);
int		rge_ioctl(struct ifnet *, u_long, caddr_t);
void		rge_start(struct ifqueue *);
void		rge_watchdog(struct ifnet *);
int		rge_init(struct ifnet *);
void		rge_stop(struct ifnet *);
int		rge_ifmedia_upd(struct ifnet *);
void		rge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
int		rge_allocmem(struct rge_softc *);
int		rge_newbuf(struct rge_softc *);
void		rge_discard_rxbuf(struct rge_softc *, int);
void		rge_rx_list_init(struct rge_softc *);
void		rge_tx_list_init(struct rge_softc *);
void		rge_fill_rx_ring(struct rge_softc *);
int		rge_rxeof(struct rge_softc *);
int		rge_txeof(struct rge_softc *);
void		rge_reset(struct rge_softc *);
void		rge_iff(struct rge_softc *);
void		rge_set_phy_power(struct rge_softc *, int);
void		rge_phy_config(struct rge_softc *);
void		rge_phy_config_mac_cfg2(struct rge_softc *);
void		rge_phy_config_mac_cfg3(struct rge_softc *);
void		rge_phy_config_mac_cfg4(struct rge_softc *);
void		rge_phy_config_mac_cfg5(struct rge_softc *);
void		rge_phy_config_mcu(struct rge_softc *, uint16_t);
void		rge_set_macaddr(struct rge_softc *, const uint8_t *);
void		rge_get_macaddr(struct rge_softc *, uint8_t *);
void		rge_hw_init(struct rge_softc *);
void		rge_disable_phy_ocp_pwrsave(struct rge_softc *);
void		rge_patch_phy_mcu(struct rge_softc *, int);
void		rge_add_media_types(struct rge_softc *);
void		rge_config_imtype(struct rge_softc *, int);
void		rge_disable_hw_im(struct rge_softc *);
void		rge_disable_sim_im(struct rge_softc *);
void		rge_setup_sim_im(struct rge_softc *);
void		rge_setup_intr(struct rge_softc *, int);
void		rge_exit_oob(struct rge_softc *);
void		rge_write_csi(struct rge_softc *, uint32_t, uint32_t);
uint32_t	rge_read_csi(struct rge_softc *, uint32_t);
void		rge_write_mac_ocp(struct rge_softc *, uint16_t, uint16_t);
uint16_t	rge_read_mac_ocp(struct rge_softc *, uint16_t);
void		rge_write_ephy(struct rge_softc *, uint16_t, uint16_t);
uint16_t	rge_read_ephy(struct rge_softc *, uint16_t);
void		rge_write_phy(struct rge_softc *, uint16_t, uint16_t, uint16_t);
uint16_t	rge_read_phy(struct rge_softc *, uint16_t, uint16_t);
void		rge_write_phy_ocp(struct rge_softc *, uint16_t, uint16_t);
uint16_t	rge_read_phy_ocp(struct rge_softc *, uint16_t);
int		rge_get_link_status(struct rge_softc *);
void		rge_txstart(void *);
void		rge_tick(void *);
void		rge_link_state(struct rge_softc *);
#ifndef SMALL_KERNEL
int		rge_wol(struct ifnet *, int);
void		rge_wol_power(struct rge_softc *);
#endif

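/*
 * Register/value pairs used by rge_phy_config_mcu() to patch the
 * microcontroller; the tables expand from macros in if_rgereg.h.
 */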
static const struct {
	uint16_t reg;
	uint16_t val;
} rtl8125_mac_cfg2_mcu[] = {
	RTL8125_MAC_CFG2_MCU
}, rtl8125_mac_cfg3_mcu[] = {
	RTL8125_MAC_CFG3_MCU
}, rtl8125_mac_cfg4_mcu[] = {
	RTL8125_MAC_CFG4_MCU
}, rtl8125_mac_cfg5_mcu[] = {
	RTL8125_MAC_CFG5_MCU
};

struct cfattach rge_ca = {
	sizeof(struct rge_softc), rge_match, rge_attach, NULL, rge_activate
};

struct cfdriver rge_cd = {
	NULL, "rge", DV_IFNET
};

const struct pci_matchid rge_devices[] = {
	{ PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_E3000 },
	{ PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_RTL8125 }
};

int
rge_match(struct device *parent, void *match, void *aux)
{
	return (pci_matchbyid((struct pci_attach_args *)aux, rge_devices,
	    nitems(rge_devices)));
}

void
rge_attach(struct device *parent, struct device *self, void *aux)
{
	struct rge_softc *sc = (struct rge_softc *)self;
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	struct ifnet *ifp;
	pcireg_t reg;
	uint32_t hwrev;
	uint8_t eaddr[ETHER_ADDR_LEN];
	int offset;

	pci_set_powerstate(pa->pa_pc, pa->pa_tag, PCI_PMCSR_STATE_D0);

	/*
	 * Map control/status registers.
	 */
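	/* Prefer 64-bit mem (BAR2), then 32-bit mem (BAR1), then i/o (BAR0). */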
	if (pci_mapreg_map(pa, RGE_PCI_BAR2, PCI_MAPREG_TYPE_MEM |
	    PCI_MAPREG_MEM_TYPE_64BIT, 0, &sc->rge_btag, &sc->rge_bhandle,
	    NULL, &sc->rge_bsize, 0)) {
		if (pci_mapreg_map(pa, RGE_PCI_BAR1, PCI_MAPREG_TYPE_MEM |
		    PCI_MAPREG_MEM_TYPE_32BIT, 0, &sc->rge_btag,
		    &sc->rge_bhandle, NULL, &sc->rge_bsize, 0)) {
			if (pci_mapreg_map(pa, RGE_PCI_BAR0, PCI_MAPREG_TYPE_IO,
			    0, &sc->rge_btag, &sc->rge_bhandle, NULL,
			    &sc->rge_bsize, 0)) {
				printf(": can't map mem or i/o space\n");
				return;
			}
		}
	}

	/*
	 * Allocate interrupt.
	 */
	if (pci_intr_map_msi(pa, &ih) == 0)
		sc->rge_flags |= RGE_FLAG_MSI;
	else if (pci_intr_map(pa, &ih) != 0) {
		printf(": couldn't map interrupt\n");
		return;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET | IPL_MPSAFE, rge_intr,
	    sc, sc->sc_dev.dv_xname);
	if (sc->sc_ih == NULL) {
		printf(": couldn't establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return;
	}
	printf(": %s", intrstr);

	sc->sc_dmat = pa->pa_dmat;
	sc->sc_pc = pa->pa_pc;
	sc->sc_tag = pa->pa_tag;

	/* Determine hardware revision */
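	/* (These values appear to distinguish RTL8125A/B chip steppings.) */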
	hwrev = RGE_READ_4(sc, RGE_TXCFG) & RGE_TXCFG_HWREV;
	switch (hwrev) {
	case 0x60800000:
		sc->rge_type = MAC_CFG2;
		break;
	case 0x60900000:
		sc->rge_type = MAC_CFG3;
		break;
	case 0x64000000:
		sc->rge_type = MAC_CFG4;
		break;
	case 0x64100000:
		sc->rge_type = MAC_CFG5;
		break;
	default:
		printf(": unknown version 0x%08x\n", hwrev);
		return;
	}

	rge_config_imtype(sc, RGE_IMTYPE_SIM);

	/*
	 * PCI Express check.
	 */
	if (pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_PCIEXPRESS,
	    &offset, NULL)) {
		/* Disable PCIe ASPM and ECPM. */
		reg = pci_conf_read(pa->pa_pc, pa->pa_tag,
		    offset + PCI_PCIE_LCSR);
		reg &= ~(PCI_PCIE_LCSR_ASPM_L0S | PCI_PCIE_LCSR_ASPM_L1 |
		    PCI_PCIE_LCSR_ECPM);
		pci_conf_write(pa->pa_pc, pa->pa_tag, offset + PCI_PCIE_LCSR,
		    reg);
	}

	rge_exit_oob(sc);
	rge_hw_init(sc);

	rge_get_macaddr(sc, eaddr);
	printf(", address %s\n", ether_sprintf(eaddr));

	memcpy(sc->sc_arpcom.ac_enaddr, eaddr, ETHER_ADDR_LEN);

	rge_set_phy_power(sc, 1);
	rge_phy_config(sc);

	if (rge_allocmem(sc))
		return;

	ifp = &sc->sc_arpcom.ac_if;
	ifp->if_softc = sc;
	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_xflags = IFXF_MPSAFE;
	ifp->if_ioctl = rge_ioctl;
	ifp->if_qstart = rge_start;
	ifp->if_watchdog = rge_watchdog;
	ifq_set_maxlen(&ifp->if_snd, RGE_TX_LIST_CNT - 1);
	ifp->if_hardmtu = RGE_JUMBO_MTU;

	ifp->if_capabilities = IFCAP_VLAN_MTU | IFCAP_CSUM_IPv4 |
	    IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;

#if NVLAN > 0
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
#endif

#ifndef SMALL_KERNEL
	ifp->if_capabilities |= IFCAP_WOL;
	ifp->if_wol = rge_wol;
	rge_wol(ifp, 0);
#endif
	timeout_set(&sc->sc_timeout, rge_tick, sc);
	task_set(&sc->sc_task, rge_txstart, sc);

	/* Initialize ifmedia structures. */
	ifmedia_init(&sc->sc_media, IFM_IMASK, rge_ifmedia_upd,
	    rge_ifmedia_sts);
	rge_add_media_types(sc);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);
	sc->sc_media.ifm_media = sc->sc_media.ifm_cur->ifm_media;

	if_attach(ifp);
	ether_ifattach(ifp);
}

int
rge_activate(struct device *self, int act)
{
#ifndef SMALL_KERNEL
	struct rge_softc *sc = (struct rge_softc *)self;
#endif
	int rv = 0;

	switch (act) {
	case DVACT_POWERDOWN:
		rv = config_activate_children(self, act);
#ifndef SMALL_KERNEL
		rge_wol_power(sc);
#endif
		break;
	default:
		rv = config_activate_children(self, act);
		break;
	}
	return (rv);
}

int
rge_intr(void *arg)
{
	struct rge_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	uint32_t status;
	int claimed = 0, rx, tx;

	if (!(ifp->if_flags & IFF_RUNNING))
		return (0);

	/* Disable interrupts. */
	RGE_WRITE_4(sc, RGE_IMR, 0);

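	/* With INTx the line may be shared; bail out if the event isn't ours. */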
	if (!(sc->rge_flags & RGE_FLAG_MSI)) {
		if ((RGE_READ_4(sc, RGE_ISR) & sc->rge_intrs) == 0)
			return (0);
	}

	status = RGE_READ_4(sc, RGE_ISR);
	if (status)
		RGE_WRITE_4(sc, RGE_ISR, status);

	if (status & RGE_ISR_PCS_TIMEOUT)
		claimed = 1;

	rx = tx = 0;
	if (status & sc->rge_intrs) {
		if (status &
		    (sc->rge_rx_ack | RGE_ISR_RX_ERR | RGE_ISR_RX_FIFO_OFLOW)) {
			rx |= rge_rxeof(sc);
			claimed = 1;
		}

		if (status & (sc->rge_tx_ack | RGE_ISR_TX_ERR)) {
			tx |= rge_txeof(sc);
			claimed = 1;
		}

		if (status & RGE_ISR_SYSTEM_ERR) {
			KERNEL_LOCK();
			rge_init(ifp);
			KERNEL_UNLOCK();
			claimed = 1;
		}
	}

	if (sc->rge_timerintr) {
		if ((tx | rx) == 0) {
			/*
			 * Nothing needs to be processed; fall back
			 * to using TX/RX interrupts.
			 */
			rge_setup_intr(sc, RGE_IMTYPE_NONE);

			/*
			 * Recollect, mainly to avoid the possible
			 * race introduced by changing interrupt
			 * masks.
			 */
			rge_rxeof(sc);
			rge_txeof(sc);
		} else
			RGE_WRITE_4(sc, RGE_TIMERCNT, 1);
	} else if (tx | rx) {
		/*
		 * Assume that using simulated interrupt moderation
		 * (hardware timer based) could reduce the interrupt
		 * rate.
		 */
		rge_setup_intr(sc, RGE_IMTYPE_SIM);
	}

	RGE_WRITE_4(sc, RGE_IMR, sc->rge_intrs);

	return (claimed);
}

int
rge_encap(struct rge_softc *sc, struct mbuf *m, int idx)
{
	struct rge_tx_desc *d = NULL;
	struct rge_txq *txq;
	bus_dmamap_t txmap;
	uint32_t cmdsts, cflags = 0;
	int cur, error, i, last, nsegs;

	/*
	 * Set RGE_TDEXTSTS_IPCSUM if any checksum offloading is requested.
	 * Otherwise, RGE_TDEXTSTS_TCPCSUM / RGE_TDEXTSTS_UDPCSUM does not
	 * take effect.
	 */
	if ((m->m_pkthdr.csum_flags &
	    (M_IPV4_CSUM_OUT | M_TCP_CSUM_OUT | M_UDP_CSUM_OUT)) != 0) {
		cflags |= RGE_TDEXTSTS_IPCSUM;
		if (m->m_pkthdr.csum_flags & M_TCP_CSUM_OUT)
			cflags |= RGE_TDEXTSTS_TCPCSUM;
		if (m->m_pkthdr.csum_flags & M_UDP_CSUM_OUT)
			cflags |= RGE_TDEXTSTS_UDPCSUM;
	}

	txq = &sc->rge_ldata.rge_txq[idx];
	txmap = txq->txq_dmamap;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, txmap, m, BUS_DMA_NOWAIT);
	switch (error) {
	case 0:
		break;
	case EFBIG: /* mbuf chain is too fragmented */
		if (m_defrag(m, M_DONTWAIT) == 0 &&
		    bus_dmamap_load_mbuf(sc->sc_dmat, txmap, m,
		    BUS_DMA_NOWAIT) == 0)
			break;

		/* FALLTHROUGH */
	default:
		return (0);
	}

	bus_dmamap_sync(sc->sc_dmat, txmap, 0, txmap->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	nsegs = txmap->dm_nsegs;

	/* Set up hardware VLAN tagging. */
#if NVLAN > 0
	if (m->m_flags & M_VLANTAG)
		cflags |= swap16(m->m_pkthdr.ether_vtag | RGE_TDEXTSTS_VTAG);
#endif

	cur = idx;
	cmdsts = RGE_TDCMDSTS_SOF;

	for (i = 0; i < txmap->dm_nsegs; i++) {
		d = &sc->rge_ldata.rge_tx_list[cur];

		d->rge_extsts = htole32(cflags);
		d->rge_addrlo = htole32(RGE_ADDR_LO(txmap->dm_segs[i].ds_addr));
		d->rge_addrhi = htole32(RGE_ADDR_HI(txmap->dm_segs[i].ds_addr));

		cmdsts |= txmap->dm_segs[i].ds_len;

		if (cur == RGE_TX_LIST_CNT - 1)
			cmdsts |= RGE_TDCMDSTS_EOR;

		d->rge_cmdsts = htole32(cmdsts);

		last = cur;
		cmdsts = RGE_TDCMDSTS_OWN;
		cur = RGE_NEXT_TX_DESC(cur);
	}

	/* Set EOF on the last descriptor. */
	d->rge_cmdsts |= htole32(RGE_TDCMDSTS_EOF);

	/* Transfer ownership of packet to the chip. */
	d = &sc->rge_ldata.rge_tx_list[idx];

	d->rge_cmdsts |= htole32(RGE_TDCMDSTS_OWN);

	bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map,
	    cur * sizeof(struct rge_tx_desc), sizeof(struct rge_tx_desc),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Update info of TX queue and descriptors. */
	txq->txq_mbuf = m;
	txq->txq_descidx = last;

	return (nsegs);
}

int
rge_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct rge_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			rge_init(ifp);
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				rge_init(ifp);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				rge_stop(ifp);
		}
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
		break;
	case SIOCSIFMTU:
		if (ifr->ifr_mtu > ifp->if_hardmtu) {
			error = EINVAL;
			break;
		}
		ifp->if_mtu = ifr->ifr_mtu;
		break;
	case SIOCGIFRXR:
		error = if_rxr_ioctl((struct if_rxrinfo *)ifr->ifr_data,
		    NULL, RGE_JUMBO_FRAMELEN, &sc->rge_ldata.rge_rx_ring);
		break;
	default:
		error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data);
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			rge_iff(sc);
		error = 0;
	}

	splx(s);
	return (error);
}

void
rge_start(struct ifqueue *ifq)
{
	struct ifnet *ifp = ifq->ifq_if;
	struct rge_softc *sc = ifp->if_softc;
	struct mbuf *m;
	int free, idx, used;
	int queued = 0;

	if (!LINK_STATE_IS_UP(ifp->if_link_state)) {
		ifq_purge(ifq);
		return;
	}

	/* Calculate free space. */
	idx = sc->rge_ldata.rge_txq_prodidx;
	free = sc->rge_ldata.rge_txq_considx;
	if (free <= idx)
		free += RGE_TX_LIST_CNT;
	free -= idx;

	for (;;) {
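		/*
		 * Stop if there might not be room for a maximally
		 * fragmented packet (RGE_TX_NSEGS segments).
		 */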
		if (RGE_TX_NSEGS >= free + 2) {
			ifq_set_oactive(&ifp->if_snd);
			break;
		}

		m = ifq_dequeue(ifq);
		if (m == NULL)
			break;

		used = rge_encap(sc, m, idx);
		if (used == 0) {
			m_freem(m);
			continue;
		}

		KASSERT(used <= free);
		free -= used;

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif

		idx += used;
		if (idx >= RGE_TX_LIST_CNT)
			idx -= RGE_TX_LIST_CNT;

		queued++;
	}

	if (queued == 0)
		return;

	/* Set a timeout in case the chip goes out to lunch. */
	ifp->if_timer = 5;

	sc->rge_ldata.rge_txq_prodidx = idx;
	ifq_serialize(ifq, &sc->sc_task);
}

void
rge_watchdog(struct ifnet *ifp)
{
	struct rge_softc *sc = ifp->if_softc;

	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);
	ifp->if_oerrors++;

	rge_init(ifp);
}

int
rge_init(struct ifnet *ifp)
{
	struct rge_softc *sc = ifp->if_softc;
	uint32_t val;
	int i;

	rge_stop(ifp);

	/* Set MAC address. */
	rge_set_macaddr(sc, sc->sc_arpcom.ac_enaddr);

	/* Set maximum frame size. */
	RGE_WRITE_2(sc, RGE_RXMAXSIZE, RGE_JUMBO_FRAMELEN);

	/* Initialize RX and TX descriptor lists. */
	rge_rx_list_init(sc);
	rge_tx_list_init(sc);

	/* Load the addresses of the RX and TX lists into the chip. */
	RGE_WRITE_4(sc, RGE_RXDESC_ADDR_LO,
	    RGE_ADDR_LO(sc->rge_ldata.rge_rx_list_map->dm_segs[0].ds_addr));
	RGE_WRITE_4(sc, RGE_RXDESC_ADDR_HI,
	    RGE_ADDR_HI(sc->rge_ldata.rge_rx_list_map->dm_segs[0].ds_addr));
	RGE_WRITE_4(sc, RGE_TXDESC_ADDR_LO,
	    RGE_ADDR_LO(sc->rge_ldata.rge_tx_list_map->dm_segs[0].ds_addr));
	RGE_WRITE_4(sc, RGE_TXDESC_ADDR_HI,
	    RGE_ADDR_HI(sc->rge_ldata.rge_tx_list_map->dm_segs[0].ds_addr));

	RGE_SETBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);

	RGE_CLRBIT_1(sc, 0xf1, 0x80);
	RGE_CLRBIT_1(sc, RGE_CFG2, RGE_CFG2_CLKREQ_EN);
	RGE_CLRBIT_1(sc, RGE_CFG5, RGE_CFG5_PME_STS);
	RGE_CLRBIT_1(sc, RGE_CFG3, RGE_CFG3_RDY_TO_L23);

	/* Clear interrupt moderation timer. */
	for (i = 0; i < 64; i++)
		RGE_WRITE_4(sc, RGE_INTMITI(i), 0);

	/* Set the initial RX and TX configurations. */
	RGE_WRITE_4(sc, RGE_RXCFG, RGE_RXCFG_CONFIG);
	RGE_WRITE_4(sc, RGE_TXCFG, RGE_TXCFG_CONFIG);

	val = rge_read_csi(sc, 0x70c) & ~0xff000000;
	rge_write_csi(sc, 0x70c, val | 0x27000000);

	/* Enable hardware optimization function. */
	val = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x78) & ~0x00007000;
	pci_conf_write(sc->sc_pc, sc->sc_tag, 0x78, val | 0x00005000);

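	/* Undocumented initialization values, presumably from the vendor driver. */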
	RGE_WRITE_2(sc, 0x0382, 0x221b);
	RGE_WRITE_1(sc, 0x4500, 0);
	RGE_WRITE_2(sc, 0x4800, 0);
	RGE_CLRBIT_1(sc, RGE_CFG1, RGE_CFG1_SPEED_DOWN);

	rge_write_mac_ocp(sc, 0xc140, 0xffff);
	rge_write_mac_ocp(sc, 0xc142, 0xffff);

	val = rge_read_mac_ocp(sc, 0xd3e2) & ~0x0fff;
	rge_write_mac_ocp(sc, 0xd3e2, val | 0x03a9);

	RGE_MAC_CLRBIT(sc, 0xd3e4, 0x00ff);
	RGE_MAC_SETBIT(sc, 0xe860, 0x0080);
	RGE_MAC_SETBIT(sc, 0xeb58, 0x0001);

	val = rge_read_mac_ocp(sc, 0xe614) & ~0x0700;
	if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3)
		rge_write_mac_ocp(sc, 0xe614, val | 0x0400);
	else
		rge_write_mac_ocp(sc, 0xe614, val | 0x0200);

	RGE_MAC_CLRBIT(sc, 0xe63e, 0x0c00);

	if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3) {
		val = rge_read_mac_ocp(sc, 0xe63e) & ~0x0030;
		rge_write_mac_ocp(sc, 0xe63e, val | 0x0020);
	} else
		RGE_MAC_CLRBIT(sc, 0xe63e, 0x0030);

	RGE_MAC_SETBIT(sc, 0xc0b4, 0x000c);

	val = rge_read_mac_ocp(sc, 0xeb6a) & ~0x00ff;
	rge_write_mac_ocp(sc, 0xeb6a, val | 0x0033);

	val = rge_read_mac_ocp(sc, 0xeb50) & ~0x03e0;
	rge_write_mac_ocp(sc, 0xeb50, val | 0x0040);

	val = rge_read_mac_ocp(sc, 0xe056) & ~0x00f0;
	rge_write_mac_ocp(sc, 0xe056, val | 0x0030);

	RGE_WRITE_1(sc, RGE_TDFNR, 0x10);

	RGE_SETBIT_1(sc, RGE_DLLPR, RGE_DLLPR_TX_10M_PS_EN);

	RGE_MAC_CLRBIT(sc, 0xe040, 0x1000);

	val = rge_read_mac_ocp(sc, 0xea1c) & ~0x0003;
	rge_write_mac_ocp(sc, 0xea1c, val | 0x0001);

	val = rge_read_mac_ocp(sc, 0xe0c0) & ~0x4f0f;
	rge_write_mac_ocp(sc, 0xe0c0, val | 0x4403);

	RGE_MAC_SETBIT(sc, 0xe052, 0x0068);
	RGE_MAC_CLRBIT(sc, 0xe052, 0x0080);

	val = rge_read_mac_ocp(sc, 0xc0ac) & ~0x0080;
	rge_write_mac_ocp(sc, 0xc0ac, val | 0x1f00);

	val = rge_read_mac_ocp(sc, 0xd430) & ~0x0fff;
	rge_write_mac_ocp(sc, 0xd430, val | 0x047f);

	val = rge_read_mac_ocp(sc, 0xe84c) & ~0x0040;
	if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3)
		rge_write_mac_ocp(sc, 0xe84c, 0x00c0);
	else
		rge_write_mac_ocp(sc, 0xe84c, 0x0080);

	RGE_SETBIT_1(sc, RGE_DLLPR, RGE_DLLPR_PFM_EN);

	if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3)
		RGE_SETBIT_1(sc, RGE_MCUCMD, 0x01);

	/* Disable EEE plus. */
	RGE_MAC_CLRBIT(sc, 0xe080, 0x0002);

	RGE_MAC_CLRBIT(sc, 0xea1c, 0x0004);

	RGE_MAC_SETBIT(sc, 0xeb54, 0x0001);
	DELAY(1);
	RGE_MAC_CLRBIT(sc, 0xeb54, 0x0001);

	RGE_CLRBIT_4(sc, 0x1880, 0x0030);

	rge_write_mac_ocp(sc, 0xe098, 0xc302);

	if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)
		RGE_SETBIT_4(sc, RGE_RXCFG, RGE_RXCFG_VLANSTRIP);

	RGE_SETBIT_2(sc, RGE_CPLUSCMD, RGE_CPLUSCMD_RXCSUM);

	for (i = 0; i < 10; i++) {
		if (!(rge_read_mac_ocp(sc, 0xe00e) & 0x2000))
			break;
		DELAY(1000);
	}

	/* Disable RXDV gate. */
	RGE_CLRBIT_1(sc, RGE_PPSW, 0x08);
	DELAY(2000);

	rge_ifmedia_upd(ifp);

	/* Enable transmit and receive. */
	RGE_WRITE_1(sc, RGE_CMD, RGE_CMD_TXENB | RGE_CMD_RXENB);

	/* Program promiscuous mode and multicast filters. */
	rge_iff(sc);

	RGE_CLRBIT_1(sc, RGE_CFG2, RGE_CFG2_CLKREQ_EN);
	RGE_CLRBIT_1(sc, RGE_CFG5, RGE_CFG5_PME_STS);

	RGE_CLRBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);

	/* Enable interrupts. */
	rge_setup_intr(sc, RGE_IMTYPE_SIM);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	timeout_add_sec(&sc->sc_timeout, 1);

	return (0);
}

/*
 * Stop the adapter and free any mbufs allocated to the RX and TX lists.
 */
void
rge_stop(struct ifnet *ifp)
{
	struct rge_softc *sc = ifp->if_softc;
	int i;

	timeout_del(&sc->sc_timeout);

	ifp->if_timer = 0;
	ifp->if_flags &= ~IFF_RUNNING;
	sc->rge_timerintr = 0;

	RGE_CLRBIT_4(sc, RGE_RXCFG, RGE_RXCFG_ALLPHYS | RGE_RXCFG_INDIV |
	    RGE_RXCFG_MULTI | RGE_RXCFG_BROAD | RGE_RXCFG_RUNT |
	    RGE_RXCFG_ERRPKT);

	RGE_WRITE_4(sc, RGE_IMR, 0);

	/* Clear timer interrupts. */
	RGE_WRITE_4(sc, RGE_TIMERINT0, 0);
	RGE_WRITE_4(sc, RGE_TIMERINT1, 0);
	RGE_WRITE_4(sc, RGE_TIMERINT2, 0);
	RGE_WRITE_4(sc, RGE_TIMERINT3, 0);

	rge_reset(sc);

	intr_barrier(sc->sc_ih);
	ifq_barrier(&ifp->if_snd);
	ifq_clr_oactive(&ifp->if_snd);

	if (sc->rge_head != NULL) {
		m_freem(sc->rge_head);
		sc->rge_head = sc->rge_tail = NULL;
	}

	/* Free the TX list buffers. */
	for (i = 0; i < RGE_TX_LIST_CNT; i++) {
		if (sc->rge_ldata.rge_txq[i].txq_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat,
			    sc->rge_ldata.rge_txq[i].txq_dmamap);
			m_freem(sc->rge_ldata.rge_txq[i].txq_mbuf);
			sc->rge_ldata.rge_txq[i].txq_mbuf = NULL;
		}
	}

	/* Free the RX list buffers. */
	for (i = 0; i < RGE_RX_LIST_CNT; i++) {
		if (sc->rge_ldata.rge_rxq[i].rxq_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat,
			    sc->rge_ldata.rge_rxq[i].rxq_dmamap);
			m_freem(sc->rge_ldata.rge_rxq[i].rxq_mbuf);
			sc->rge_ldata.rge_rxq[i].rxq_mbuf = NULL;
		}
	}
}

/*
 * Set media options.
 */
int
rge_ifmedia_upd(struct ifnet *ifp)
{
	struct rge_softc *sc = ifp->if_softc;
	struct ifmedia *ifm = &sc->sc_media;
	int anar, gig, val;

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	/* Disable Gigabit Lite. */
	RGE_PHY_CLRBIT(sc, 0xa428, 0x0200);
	RGE_PHY_CLRBIT(sc, 0xa5ea, 0x0001);

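	/* The 2.5Gb/s advertisement bit lives in PHY OCP register 0xa5d4. */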
	val = rge_read_phy_ocp(sc, 0xa5d4);
	val &= ~RGE_ADV_2500TFDX;

	anar = gig = 0;
	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_AUTO:
		anar = ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10;
		gig = GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX;
		val |= RGE_ADV_2500TFDX;
		break;
	case IFM_2500_T:
		anar = ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10;
		gig = GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX;
		val |= RGE_ADV_2500TFDX;
		ifp->if_baudrate = IF_Mbps(2500);
		break;
	case IFM_1000_T:
		anar = ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10;
		gig = GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX;
		ifp->if_baudrate = IF_Gbps(1);
		break;
	case IFM_100_TX:
		gig = rge_read_phy(sc, 0, MII_100T2CR) &
		    ~(GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX);
		anar = ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) ?
		    ANAR_TX | ANAR_TX_FD | ANAR_10_FD | ANAR_10 :
		    ANAR_TX | ANAR_10_FD | ANAR_10;
		ifp->if_baudrate = IF_Mbps(100);
		break;
	case IFM_10_T:
		gig = rge_read_phy(sc, 0, MII_100T2CR) &
		    ~(GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX);
		anar = ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) ?
		    ANAR_10_FD | ANAR_10 : ANAR_10;
		ifp->if_baudrate = IF_Mbps(10);
		break;
	default:
		printf("%s: unsupported media type\n", sc->sc_dev.dv_xname);
		return (EINVAL);
	}

	rge_write_phy(sc, 0, MII_ANAR, anar | ANAR_PAUSE_ASYM | ANAR_FC);
	rge_write_phy(sc, 0, MII_100T2CR, gig);
	rge_write_phy_ocp(sc, 0xa5d4, val);
	rge_write_phy(sc, 0, MII_BMCR, BMCR_RESET | BMCR_AUTOEN |
	    BMCR_STARTNEG);

	return (0);
}

/*
 * Report current media status.
 */
void
rge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct rge_softc *sc = ifp->if_softc;
	uint16_t status = 0;

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (rge_get_link_status(sc)) {
		ifmr->ifm_status |= IFM_ACTIVE;

		status = RGE_READ_2(sc, RGE_PHYSTAT);
		if ((status & RGE_PHYSTAT_FDX) ||
		    (status & RGE_PHYSTAT_2500MBPS))
			ifmr->ifm_active |= IFM_FDX;
		else
			ifmr->ifm_active |= IFM_HDX;

		if (status & RGE_PHYSTAT_10MBPS)
			ifmr->ifm_active |= IFM_10_T;
		else if (status & RGE_PHYSTAT_100MBPS)
			ifmr->ifm_active |= IFM_100_TX;
		else if (status & RGE_PHYSTAT_1000MBPS)
			ifmr->ifm_active |= IFM_1000_T;
		else if (status & RGE_PHYSTAT_2500MBPS)
			ifmr->ifm_active |= IFM_2500_T;
	}
}

/*
 * Allocate memory for RX/TX rings.
 */
int
rge_allocmem(struct rge_softc *sc)
{
	int error, i;

	/* Allocate DMA'able memory for the TX ring. */
	error = bus_dmamap_create(sc->sc_dmat, RGE_TX_LIST_SZ, 1,
	    RGE_TX_LIST_SZ, 0, BUS_DMA_NOWAIT, &sc->rge_ldata.rge_tx_list_map);
	if (error) {
		printf("%s: can't create TX list map\n", sc->sc_dev.dv_xname);
		return (error);
	}
	error = bus_dmamem_alloc(sc->sc_dmat, RGE_TX_LIST_SZ, RGE_ALIGN, 0,
	    &sc->rge_ldata.rge_tx_listseg, 1, &sc->rge_ldata.rge_tx_listnseg,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO);
	if (error) {
		printf("%s: can't alloc TX list\n", sc->sc_dev.dv_xname);
		return (error);
	}

	/* Load the map for the TX ring. */
	error = bus_dmamem_map(sc->sc_dmat, &sc->rge_ldata.rge_tx_listseg,
	    sc->rge_ldata.rge_tx_listnseg, RGE_TX_LIST_SZ,
	    (caddr_t *)&sc->rge_ldata.rge_tx_list,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
	if (error) {
		printf("%s: can't map TX dma buffers\n", sc->sc_dev.dv_xname);
		bus_dmamem_free(sc->sc_dmat, &sc->rge_ldata.rge_tx_listseg,
		    sc->rge_ldata.rge_tx_listnseg);
		return (error);
	}
	error = bus_dmamap_load(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map,
	    sc->rge_ldata.rge_tx_list, RGE_TX_LIST_SZ, NULL, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: can't load TX dma map\n", sc->sc_dev.dv_xname);
		bus_dmamap_destroy(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map);
		bus_dmamem_unmap(sc->sc_dmat,
		    (caddr_t)sc->rge_ldata.rge_tx_list, RGE_TX_LIST_SZ);
		bus_dmamem_free(sc->sc_dmat, &sc->rge_ldata.rge_tx_listseg,
		    sc->rge_ldata.rge_tx_listnseg);
		return (error);
	}

	/* Create DMA maps for TX buffers. */
	for (i = 0; i < RGE_TX_LIST_CNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, RGE_JUMBO_FRAMELEN,
		    RGE_TX_NSEGS, RGE_JUMBO_FRAMELEN, 0, 0,
		    &sc->rge_ldata.rge_txq[i].txq_dmamap);
		if (error) {
			printf("%s: can't create DMA map for TX\n",
			    sc->sc_dev.dv_xname);
			return (error);
		}
	}

	/* Allocate DMA'able memory for the RX ring. */
	error = bus_dmamap_create(sc->sc_dmat, RGE_RX_LIST_SZ, 1,
	    RGE_RX_LIST_SZ, 0, 0, &sc->rge_ldata.rge_rx_list_map);
	if (error) {
		printf("%s: can't create RX list map\n", sc->sc_dev.dv_xname);
		return (error);
	}
	error = bus_dmamem_alloc(sc->sc_dmat, RGE_RX_LIST_SZ, RGE_ALIGN, 0,
	    &sc->rge_ldata.rge_rx_listseg, 1, &sc->rge_ldata.rge_rx_listnseg,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO);
	if (error) {
		printf("%s: can't alloc RX list\n", sc->sc_dev.dv_xname);
		return (error);
	}

	/* Load the map for the RX ring. */
	error = bus_dmamem_map(sc->sc_dmat, &sc->rge_ldata.rge_rx_listseg,
	    sc->rge_ldata.rge_rx_listnseg, RGE_RX_LIST_SZ,
	    (caddr_t *)&sc->rge_ldata.rge_rx_list,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
	if (error) {
		printf("%s: can't map RX dma buffers\n", sc->sc_dev.dv_xname);
		bus_dmamem_free(sc->sc_dmat, &sc->rge_ldata.rge_rx_listseg,
		    sc->rge_ldata.rge_rx_listnseg);
		return (error);
	}
	error = bus_dmamap_load(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map,
	    sc->rge_ldata.rge_rx_list, RGE_RX_LIST_SZ, NULL, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: can't load RX dma map\n", sc->sc_dev.dv_xname);
		bus_dmamap_destroy(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map);
		bus_dmamem_unmap(sc->sc_dmat,
		    (caddr_t)sc->rge_ldata.rge_rx_list, RGE_RX_LIST_SZ);
		bus_dmamem_free(sc->sc_dmat, &sc->rge_ldata.rge_rx_listseg,
		    sc->rge_ldata.rge_rx_listnseg);
		return (error);
	}

	/* Create DMA maps for RX buffers. */
	for (i = 0; i < RGE_RX_LIST_CNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, RGE_JUMBO_FRAMELEN, 1,
		    RGE_JUMBO_FRAMELEN, 0, 0,
		    &sc->rge_ldata.rge_rxq[i].rxq_dmamap);
		if (error) {
			printf("%s: can't create DMA map for RX\n",
			    sc->sc_dev.dv_xname);
			return (error);
		}
	}

	return (error);
}

/*
 * Initialize the RX descriptor and attach an mbuf cluster.
 */
int
rge_newbuf(struct rge_softc *sc)
{
	struct mbuf *m;
	struct rge_rx_desc *r;
	struct rge_rxq *rxq;
	bus_dmamap_t rxmap;
	int idx;

	m = MCLGETL(NULL, M_DONTWAIT, RGE_JUMBO_FRAMELEN);
	if (m == NULL)
		return (ENOBUFS);

	m->m_len = m->m_pkthdr.len = RGE_JUMBO_FRAMELEN;

	idx = sc->rge_ldata.rge_rxq_prodidx;
	rxq = &sc->rge_ldata.rge_rxq[idx];
	rxmap = rxq->rxq_dmamap;

	if (bus_dmamap_load_mbuf(sc->sc_dmat, rxmap, m, BUS_DMA_NOWAIT)) {
		m_freem(m);
		return (ENOBUFS);
	}

	bus_dmamap_sync(sc->sc_dmat, rxmap, 0, rxmap->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	/* Map the segments into RX descriptors. */
	r = &sc->rge_ldata.rge_rx_list[idx];

	if (RGE_OWN(r)) {
		printf("%s: tried to map busy RX descriptor\n",
		    sc->sc_dev.dv_xname);
		m_freem(m);
		return (ENOBUFS);
	}

	rxq->rxq_mbuf = m;

	r->rge_extsts = 0;
	r->rge_addrlo = htole32(RGE_ADDR_LO(rxmap->dm_segs[0].ds_addr));
	r->rge_addrhi = htole32(RGE_ADDR_HI(rxmap->dm_segs[0].ds_addr));

	r->rge_cmdsts = htole32(rxmap->dm_segs[0].ds_len);
	if (idx == RGE_RX_LIST_CNT - 1)
		r->rge_cmdsts |= htole32(RGE_RDCMDSTS_EOR);

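	/* Hand the descriptor to the chip only after it is fully set up. */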
	r->rge_cmdsts |= htole32(RGE_RDCMDSTS_OWN);

	bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map,
	    idx * sizeof(struct rge_rx_desc), sizeof(struct rge_rx_desc),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	sc->rge_ldata.rge_rxq_prodidx = RGE_NEXT_RX_DESC(idx);

	return (0);
}

void
rge_discard_rxbuf(struct rge_softc *sc, int idx)
{
	struct rge_rx_desc *r;

	r = &sc->rge_ldata.rge_rx_list[idx];

	r->rge_cmdsts = htole32(RGE_JUMBO_FRAMELEN);
	r->rge_extsts = 0;
	if (idx == RGE_RX_LIST_CNT - 1)
		r->rge_cmdsts |= htole32(RGE_RDCMDSTS_EOR);
	r->rge_cmdsts |= htole32(RGE_RDCMDSTS_OWN);

	bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map,
	    idx * sizeof(struct rge_rx_desc), sizeof(struct rge_rx_desc),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

void
rge_rx_list_init(struct rge_softc *sc)
{
	memset(sc->rge_ldata.rge_rx_list, 0, RGE_RX_LIST_SZ);

	sc->rge_ldata.rge_rxq_prodidx = sc->rge_ldata.rge_rxq_considx = 0;
	sc->rge_head = sc->rge_tail = NULL;

	if_rxr_init(&sc->rge_ldata.rge_rx_ring, 2, RGE_RX_LIST_CNT - 1);
	rge_fill_rx_ring(sc);
}

void
rge_fill_rx_ring(struct rge_softc *sc)
{
	struct if_rxring *rxr = &sc->rge_ldata.rge_rx_ring;
	int slots;

	for (slots = if_rxr_get(rxr, RGE_RX_LIST_CNT); slots > 0; slots--) {
		if (rge_newbuf(sc) == ENOBUFS)
			break;
	}
	if_rxr_put(rxr, slots);
}

void
rge_tx_list_init(struct rge_softc *sc)
{
	int i;

	memset(sc->rge_ldata.rge_tx_list, 0, RGE_TX_LIST_SZ);

	for (i = 0; i < RGE_TX_LIST_CNT; i++)
		sc->rge_ldata.rge_txq[i].txq_mbuf = NULL;

	bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map, 0,
	    sc->rge_ldata.rge_tx_list_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	sc->rge_ldata.rge_txq_prodidx = sc->rge_ldata.rge_txq_considx = 0;
}

int
rge_rxeof(struct rge_softc *sc)
{
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct mbuf *m;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct if_rxring *rxr = &sc->rge_ldata.rge_rx_ring;
	struct rge_rx_desc *cur_rx;
	struct rge_rxq *rxq;
	uint32_t rxstat, extsts;
	int i, total_len, rx = 0;

	for (i = sc->rge_ldata.rge_rxq_considx; if_rxr_inuse(rxr) > 0;
	    i = RGE_NEXT_RX_DESC(i)) {
		/* Invalidate the descriptor memory. */
		bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map,
		    i * sizeof(struct rge_rx_desc), sizeof(struct rge_rx_desc),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		cur_rx = &sc->rge_ldata.rge_rx_list[i];

		if (RGE_OWN(cur_rx))
			break;

		rxstat = letoh32(cur_rx->rge_cmdsts);
		extsts = letoh32(cur_rx->rge_extsts);

		total_len = RGE_RXBYTES(cur_rx);
		rxq = &sc->rge_ldata.rge_rxq[i];
		m = rxq->rxq_mbuf;
		rxq->rxq_mbuf = NULL;
		if_rxr_put(rxr, 1);
		rx = 1;

		/* Invalidate the RX mbuf and unload its map. */
		bus_dmamap_sync(sc->sc_dmat, rxq->rxq_dmamap, 0,
		    rxq->rxq_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, rxq->rxq_dmamap);

		if ((rxstat & (RGE_RDCMDSTS_SOF | RGE_RDCMDSTS_EOF)) !=
		    (RGE_RDCMDSTS_SOF | RGE_RDCMDSTS_EOF)) {
			rge_discard_rxbuf(sc, i);
			continue;
		}

		if (rxstat & RGE_RDCMDSTS_RXERRSUM) {
			ifp->if_ierrors++;
			/*
			 * If this is part of a multi-fragment packet,
			 * discard all the pieces.
			 */
			if (sc->rge_head != NULL) {
				m_freem(sc->rge_head);
				sc->rge_head = sc->rge_tail = NULL;
			}
			rge_discard_rxbuf(sc, i);
			continue;
		}

		if (sc->rge_head != NULL) {
			m->m_len = total_len;
			/*
			 * Special case: if there are 4 or fewer bytes
			 * in this buffer, the mbuf can be discarded:
			 * the last 4 bytes are the CRC, which we don't
			 * care about anyway.
			 */
			if (m->m_len <= ETHER_CRC_LEN) {
				sc->rge_tail->m_len -=
				    (ETHER_CRC_LEN - m->m_len);
				m_freem(m);
			} else {
				m->m_len -= ETHER_CRC_LEN;
				m->m_flags &= ~M_PKTHDR;
				sc->rge_tail->m_next = m;
			}
			m = sc->rge_head;
			sc->rge_head = sc->rge_tail = NULL;
			m->m_pkthdr.len = total_len - ETHER_CRC_LEN;
		} else
			m->m_pkthdr.len = m->m_len =
			    (total_len - ETHER_CRC_LEN);

		/* Check IP header checksum. */
		if (!(rxstat & RGE_RDCMDSTS_IPCSUMERR) &&
		    (extsts & RGE_RDEXTSTS_IPV4))
			m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;

		/* Check TCP/UDP checksum. */
		if ((extsts & (RGE_RDEXTSTS_IPV4 | RGE_RDEXTSTS_IPV6)) &&
		    (((rxstat & RGE_RDCMDSTS_TCPPKT) &&
		    !(rxstat & RGE_RDCMDSTS_TCPCSUMERR)) ||
		    ((rxstat & RGE_RDCMDSTS_UDPPKT) &&
		    !(rxstat & RGE_RDCMDSTS_UDPCSUMERR))))
			m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK |
			    M_UDP_CSUM_IN_OK;

#if NVLAN > 0
		if (extsts & RGE_RDEXTSTS_VTAG) {
			m->m_pkthdr.ether_vtag =
			    ntohs(extsts & RGE_RDEXTSTS_VLAN_MASK);
			m->m_flags |= M_VLANTAG;
		}
#endif

		ml_enqueue(&ml, m);
	}

	if (ifiq_input(&ifp->if_rcv, &ml))
		if_rxr_livelocked(rxr);

	sc->rge_ldata.rge_rxq_considx = i;
	rge_fill_rx_ring(sc);

	return (rx);
}

int
rge_txeof(struct rge_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct rge_txq *txq;
	uint32_t txstat;
	int cons, idx, prod;
	int free = 0;

	prod = sc->rge_ldata.rge_txq_prodidx;
	cons = sc->rge_ldata.rge_txq_considx;

	while (prod != cons) {
		txq = &sc->rge_ldata.rge_txq[cons];
		idx = txq->txq_descidx;

		bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map,
		    idx * sizeof(struct rge_tx_desc),
		    sizeof(struct rge_tx_desc),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		txstat = letoh32(sc->rge_ldata.rge_tx_list[idx].rge_cmdsts);

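		/*
		 * The chip still owns this descriptor, so the packet
		 * is still in flight; free == 2 records that so the
		 * transmit task is rescheduled below.
		 */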
		if (txstat & RGE_TDCMDSTS_OWN) {
			free = 2;
			break;
		}

		bus_dmamap_sync(sc->sc_dmat, txq->txq_dmamap, 0,
		    txq->txq_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, txq->txq_dmamap);
		m_freem(txq->txq_mbuf);
		txq->txq_mbuf = NULL;

		if (txstat & (RGE_TDCMDSTS_EXCESSCOLL | RGE_TDCMDSTS_COLL))
			ifp->if_collisions++;
		if (txstat & RGE_TDCMDSTS_TXERR)
			ifp->if_oerrors++;

		bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map,
		    idx * sizeof(struct rge_tx_desc),
		    sizeof(struct rge_tx_desc),
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		cons = RGE_NEXT_TX_DESC(idx);
		free = 1;
	}

	if (free == 0)
		return (0);

	sc->rge_ldata.rge_txq_considx = cons;

	if (ifq_is_oactive(&ifp->if_snd))
		ifq_restart(&ifp->if_snd);
	else if (free == 2)
		ifq_serialize(&ifp->if_snd, &sc->sc_task);
	else
		ifp->if_timer = 0;

	return (1);
}

void
rge_reset(struct rge_softc *sc)
{
	int i;

	/* Enable RXDV gate. */
	RGE_SETBIT_1(sc, RGE_PPSW, 0x08);
	DELAY(2000);

	for (i = 0; i < 3000; i++) {
		DELAY(50);
		if ((RGE_READ_1(sc, RGE_MCUCMD) & (RGE_MCUCMD_RXFIFO_EMPTY |
		    RGE_MCUCMD_TXFIFO_EMPTY)) == (RGE_MCUCMD_RXFIFO_EMPTY |
		    RGE_MCUCMD_TXFIFO_EMPTY))
			break;
	}
	if (sc->rge_type == MAC_CFG4 || sc->rge_type == MAC_CFG5) {
		for (i = 0; i < 3000; i++) {
			DELAY(50);
			if ((RGE_READ_2(sc, RGE_IM) & 0x0103) == 0x0103)
				break;
		}
	}

	DELAY(2000);

	/* Soft reset. */
	RGE_WRITE_1(sc, RGE_CMD, RGE_CMD_RESET);

	for (i = 0; i < RGE_TIMEOUT; i++) {
		DELAY(100);
		if (!(RGE_READ_1(sc, RGE_CMD) & RGE_CMD_RESET))
			break;
	}
	if (i == RGE_TIMEOUT)
		printf("%s: reset never completed!\n", sc->sc_dev.dv_xname);
}

void
rge_iff(struct rge_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct arpcom *ac = &sc->sc_arpcom;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint32_t hashes[2];
	uint32_t rxfilt;
	int h = 0;

	rxfilt = RGE_READ_4(sc, RGE_RXCFG);
	rxfilt &= ~(RGE_RXCFG_ALLPHYS | RGE_RXCFG_MULTI);
	ifp->if_flags &= ~IFF_ALLMULTI;

	/*
	 * Always accept frames destined to our station address.
	 * Always accept broadcast frames.
	 */
	rxfilt |= RGE_RXCFG_INDIV | RGE_RXCFG_BROAD;

	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
		ifp->if_flags |= IFF_ALLMULTI;
		rxfilt |= RGE_RXCFG_MULTI;
		if (ifp->if_flags & IFF_PROMISC)
			rxfilt |= RGE_RXCFG_ALLPHYS;
		hashes[0] = hashes[1] = 0xffffffff;
	} else {
		rxfilt |= RGE_RXCFG_MULTI;
		/* Program new filter. */
		memset(hashes, 0, sizeof(hashes));

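		/*
		 * Hash each multicast address into the 64-bit filter
		 * using the upper 6 bits of its big-endian CRC32.
		 */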
		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			h = ether_crc32_be(enm->enm_addrlo,
			    ETHER_ADDR_LEN) >> 26;

			if (h < 32)
				hashes[0] |= (1 << h);
			else
				hashes[1] |= (1 << (h - 32));

			ETHER_NEXT_MULTI(step, enm);
		}
	}

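	/* The hash words are written byte-swapped, high word first. */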
	RGE_WRITE_4(sc, RGE_RXCFG, rxfilt);
	RGE_WRITE_4(sc, RGE_MAR0, swap32(hashes[1]));
	RGE_WRITE_4(sc, RGE_MAR4, swap32(hashes[0]));
}

void
rge_set_phy_power(struct rge_softc *sc, int on)
{
	int i;

	if (on) {
		RGE_SETBIT_1(sc, RGE_PMCH, 0xc0);

		rge_write_phy(sc, 0, MII_BMCR, BMCR_AUTOEN);

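		/*
		 * Poll OCP register 0xa420 until the PHY reports
		 * state 3, apparently meaning fully powered up.
		 */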
		for (i = 0; i < RGE_TIMEOUT; i++) {
			if ((rge_read_phy_ocp(sc, 0xa420) & 0x0007) == 3)
				break;
			DELAY(1000);
		}
	} else {
		rge_write_phy(sc, 0, MII_BMCR, BMCR_AUTOEN | BMCR_PDOWN);
		RGE_CLRBIT_1(sc, RGE_PMCH, 0x80);
		RGE_CLRBIT_1(sc, RGE_PPSW, 0x40);
	}
}

void
rge_phy_config(struct rge_softc *sc)
{
	/* Read microcode version. */
	rge_write_phy_ocp(sc, 0xa436, 0x801e);
	sc->rge_mcodever = rge_read_phy_ocp(sc, 0xa438);

	switch (sc->rge_type) {
	case MAC_CFG2:
		rge_phy_config_mac_cfg2(sc);
		break;
	case MAC_CFG3:
		rge_phy_config_mac_cfg3(sc);
		break;
	case MAC_CFG4:
		rge_phy_config_mac_cfg4(sc);
		break;
	case MAC_CFG5:
		rge_phy_config_mac_cfg5(sc);
		break;
	default:
		break;	/* Can't happen. */
	}

	rge_write_phy(sc, 0x0a5b, 0x12,
	    rge_read_phy(sc, 0x0a5b, 0x12) & ~0x8000);

	/* Disable EEE. */
	RGE_MAC_CLRBIT(sc, 0xe040, 0x0003);
	if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3) {
		RGE_MAC_CLRBIT(sc, 0xeb62, 0x0006);
		RGE_PHY_CLRBIT(sc, 0xa432, 0x0010);
	}
	RGE_PHY_CLRBIT(sc, 0xa5d0, 0x0006);
	RGE_PHY_CLRBIT(sc, 0xa6d4, 0x0001);
	RGE_PHY_CLRBIT(sc, 0xa6d8, 0x0010);
	RGE_PHY_CLRBIT(sc, 0xa428, 0x0080);
	RGE_PHY_CLRBIT(sc, 0xa4a2, 0x0200);

	rge_patch_phy_mcu(sc, 1);
	RGE_MAC_CLRBIT(sc, 0xe052, 0x0001);
	RGE_PHY_CLRBIT(sc, 0xa442, 0x3000);
	RGE_PHY_CLRBIT(sc, 0xa430, 0x8000);
	rge_patch_phy_mcu(sc, 0);
}

void
rge_phy_config_mac_cfg2(struct rge_softc *sc)
{
	uint16_t val;
	int i;

	for (i = 0; i < nitems(rtl8125_mac_cfg2_ephy); i++)
		rge_write_ephy(sc, rtl8125_mac_cfg2_ephy[i].reg,
		    rtl8125_mac_cfg2_ephy[i].val);

	rge_phy_config_mcu(sc, RGE_MAC_CFG2_MCODE_VER);

	val = rge_read_phy_ocp(sc, 0xad40) & ~0x03ff;
	rge_write_phy_ocp(sc, 0xad40, val | 0x0084);
	RGE_PHY_SETBIT(sc, 0xad4e, 0x0010);
	val = rge_read_phy_ocp(sc, 0xad16) & ~0x03ff;
	rge_write_phy_ocp(sc, 0xad16, val | 0x0006);
	val = rge_read_phy_ocp(sc, 0xad32) & ~0x03ff;
	rge_write_phy_ocp(sc, 0xad32, val | 0x0006);
	RGE_PHY_CLRBIT(sc, 0xac08, 0x1100);
	val = rge_read_phy_ocp(sc, 0xac8a) & ~0xf000;
	rge_write_phy_ocp(sc, 0xac8a, val | 0x7000);
	RGE_PHY_SETBIT(sc, 0xad18, 0x0400);
	RGE_PHY_SETBIT(sc, 0xad1a, 0x03ff);
	RGE_PHY_SETBIT(sc, 0xad1c, 0x03ff);

	rge_write_phy_ocp(sc, 0xa436, 0x80ea);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0xc400);
	rge_write_phy_ocp(sc, 0xa436, 0x80eb);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0x0700;
	rge_write_phy_ocp(sc, 0xa438, val | 0x0300);
	rge_write_phy_ocp(sc, 0xa436, 0x80f8);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x1c00);
	rge_write_phy_ocp(sc, 0xa436, 0x80f1);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x3000);
	rge_write_phy_ocp(sc, 0xa436, 0x80fe);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0xa500);
	rge_write_phy_ocp(sc, 0xa436, 0x8102);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x5000);
	rge_write_phy_ocp(sc, 0xa436, 0x8105);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x3300);
	rge_write_phy_ocp(sc, 0xa436, 0x8100);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x7000);
	rge_write_phy_ocp(sc, 0xa436, 0x8104);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0xf000);
	rge_write_phy_ocp(sc, 0xa436, 0x8106);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x6500);
	rge_write_phy_ocp(sc, 0xa436, 0x80dc);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0xed00);
	rge_write_phy_ocp(sc, 0xa436, 0x80df);
	RGE_PHY_SETBIT(sc, 0xa438, 0x0100);
	rge_write_phy_ocp(sc, 0xa436, 0x80e1);
	RGE_PHY_CLRBIT(sc, 0xa438, 0x0100);
	val = rge_read_phy_ocp(sc, 0xbf06) & ~0x003f;
	rge_write_phy_ocp(sc, 0xbf06, val | 0x0038);
	rge_write_phy_ocp(sc, 0xa436, 0x819f);
	rge_write_phy_ocp(sc, 0xa438, 0xd0b6);
	rge_write_phy_ocp(sc, 0xbc34, 0x5555);
	val = rge_read_phy_ocp(sc, 0xbf0a) & ~0x0e00;
	rge_write_phy_ocp(sc, 0xbf0a, val | 0x0a00);
	RGE_PHY_CLRBIT(sc, 0xa5c0, 0x0400);
	RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
}

void
rge_phy_config_mac_cfg3(struct rge_softc *sc)
{
	uint16_t val;
	int i;
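	/* PHY parameter tables, apparently vendor-supplied; meanings undocumented. */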
1604 	static const uint16_t mac_cfg3_a438_value[] =
1605 	    { 0x0043, 0x00a7, 0x00d6, 0x00ec, 0x00f6, 0x00fb, 0x00fd, 0x00ff,
1606 	      0x00bb, 0x0058, 0x0029, 0x0013, 0x0009, 0x0004, 0x0002 };
1607 
1608 	static const uint16_t mac_cfg3_b88e_value[] =
1609 	    { 0xc091, 0x6e12, 0xc092, 0x1214, 0xc094, 0x1516, 0xc096, 0x171b,
1610 	      0xc098, 0x1b1c, 0xc09a, 0x1f1f, 0xc09c, 0x2021, 0xc09e, 0x2224,
1611 	      0xc0a0, 0x2424, 0xc0a2, 0x2424, 0xc0a4, 0x2424, 0xc018, 0x0af2,
1612 	      0xc01a, 0x0d4a, 0xc01c, 0x0f26, 0xc01e, 0x118d, 0xc020, 0x14f3,
1613 	      0xc022, 0x175a, 0xc024, 0x19c0, 0xc026, 0x1c26, 0xc089, 0x6050,
1614 	      0xc08a, 0x5f6e, 0xc08c, 0x6e6e, 0xc08e, 0x6e6e, 0xc090, 0x6e12 };
1615 
1616 	for (i = 0; i < nitems(rtl8125_mac_cfg3_ephy); i++)
1617 		rge_write_ephy(sc, rtl8125_mac_cfg3_ephy[i].reg,
1618 		    rtl8125_mac_cfg3_ephy[i].val);
1619 
1620 	val = rge_read_ephy(sc, 0x002a) & ~0x7000;
1621 	rge_write_ephy(sc, 0x002a, val | 0x3000);
1622 	RGE_EPHY_CLRBIT(sc, 0x0019, 0x0040);
1623 	RGE_EPHY_SETBIT(sc, 0x001b, 0x0e00);
1624 	RGE_EPHY_CLRBIT(sc, 0x001b, 0x7000);
1625 	rge_write_ephy(sc, 0x0002, 0x6042);
1626 	rge_write_ephy(sc, 0x0006, 0x0014);
1627 	val = rge_read_ephy(sc, 0x006a) & ~0x7000;
1628 	rge_write_ephy(sc, 0x006a, val | 0x3000);
1629 	RGE_EPHY_CLRBIT(sc, 0x0059, 0x0040);
1630 	RGE_EPHY_SETBIT(sc, 0x005b, 0x0e00);
1631 	RGE_EPHY_CLRBIT(sc, 0x005b, 0x7000);
1632 	rge_write_ephy(sc, 0x0042, 0x6042);
1633 	rge_write_ephy(sc, 0x0046, 0x0014);
1634 
1635 	rge_phy_config_mcu(sc, RGE_MAC_CFG3_MCODE_VER);
1636 
1637 	RGE_PHY_SETBIT(sc, 0xad4e, 0x0010);
1638 	val = rge_read_phy_ocp(sc, 0xad16) & ~0x03ff;
1639 	rge_write_phy_ocp(sc, 0xad16, val | 0x03ff);
1640 	val = rge_read_phy_ocp(sc, 0xad32) & ~0x003f;
1641 	rge_write_phy_ocp(sc, 0xad32, val | 0x0006);
1642 	RGE_PHY_CLRBIT(sc, 0xac08, 0x1000);
1643 	RGE_PHY_CLRBIT(sc, 0xac08, 0x0100);
1644 	val = rge_read_phy_ocp(sc, 0xacc0) & ~0x0003;
1645 	rge_write_phy_ocp(sc, 0xacc0, val | 0x0002);
1646 	val = rge_read_phy_ocp(sc, 0xad40) & ~0x00e0;
1647 	rge_write_phy_ocp(sc, 0xad40, val | 0x0040);
1648 	val = rge_read_phy_ocp(sc, 0xad40) & ~0x0007;
1649 	rge_write_phy_ocp(sc, 0xad40, val | 0x0004);
1650 	RGE_PHY_CLRBIT(sc, 0xac14, 0x0080);
1651 	RGE_PHY_CLRBIT(sc, 0xac80, 0x0300);
1652 	val = rge_read_phy_ocp(sc, 0xac5e) & ~0x0007;
1653 	rge_write_phy_ocp(sc, 0xac5e, val | 0x0002);
1654 	rge_write_phy_ocp(sc, 0xad4c, 0x00a8);
1655 	rge_write_phy_ocp(sc, 0xac5c, 0x01ff);
1656 	val = rge_read_phy_ocp(sc, 0xac8a) & ~0x00f0;
1657 	rge_write_phy_ocp(sc, 0xac8a, val | 0x0030);
1658 	rge_write_phy_ocp(sc, 0xb87c, 0x8157);
1659 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
1660 	rge_write_phy_ocp(sc, 0xb87e, val | 0x0500);
1661 	rge_write_phy_ocp(sc, 0xb87c, 0x8159);
1662 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
1663 	rge_write_phy_ocp(sc, 0xb87e, val | 0x0700);
1664 	RGE_WRITE_2(sc, RGE_EEE_TXIDLE_TIMER, RGE_JUMBO_MTU + ETHER_HDR_LEN +
1665 	    32);
1666 	rge_write_phy_ocp(sc, 0xb87c, 0x80a2);
1667 	rge_write_phy_ocp(sc, 0xb87e, 0x0153);
1668 	rge_write_phy_ocp(sc, 0xb87c, 0x809c);
1669 	rge_write_phy_ocp(sc, 0xb87e, 0x0153);
1670 
1671 	rge_write_phy_ocp(sc, 0xa436, 0x81b3);
1672 	for (i = 0; i < nitems(mac_cfg3_a438_value); i++)
1673 		rge_write_phy_ocp(sc, 0xa438, mac_cfg3_a438_value[i]);
1674 	for (i = 0; i < 26; i++)
1675 		rge_write_phy_ocp(sc, 0xa438, 0);
1676 	rge_write_phy_ocp(sc, 0xa436, 0x8257);
1677 	rge_write_phy_ocp(sc, 0xa438, 0x020f);
1678 	rge_write_phy_ocp(sc, 0xa436, 0x80ea);
1679 	rge_write_phy_ocp(sc, 0xa438, 0x7843);
1680 
1681 	rge_patch_phy_mcu(sc, 1);
1682 	RGE_PHY_CLRBIT(sc, 0xb896, 0x0001);
1683 	RGE_PHY_CLRBIT(sc, 0xb892, 0xff00);
1684 	for (i = 0; i < nitems(mac_cfg3_b88e_value); i += 2) {
1685 		rge_write_phy_ocp(sc, 0xb88e, mac_cfg3_b88e_value[i]);
1686 		rge_write_phy_ocp(sc, 0xb890, mac_cfg3_b88e_value[i + 1]);
1687 	}
1688 	RGE_PHY_SETBIT(sc, 0xb896, 0x0001);
1689 	rge_patch_phy_mcu(sc, 0);
1690 
1691 	RGE_PHY_SETBIT(sc, 0xd068, 0x2000);
1692 	rge_write_phy_ocp(sc, 0xa436, 0x81a2);
1693 	RGE_PHY_SETBIT(sc, 0xa438, 0x0100);
1694 	val = rge_read_phy_ocp(sc, 0xb54c) & ~0xff00;
1695 	rge_write_phy_ocp(sc, 0xb54c, val | 0xdb00);
1696 	RGE_PHY_CLRBIT(sc, 0xa454, 0x0001);
1697 	RGE_PHY_SETBIT(sc, 0xa5d4, 0x0020);
1698 	RGE_PHY_CLRBIT(sc, 0xad4e, 0x0010);
1699 	RGE_PHY_CLRBIT(sc, 0xa86a, 0x0001);
1700 	RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
1701 }
1702 
1703 void
1704 rge_phy_config_mac_cfg4(struct rge_softc *sc)
1705 {
1706 	uint16_t val;
1707 	int i;
1708 	static const uint16_t mac_cfg4_b87c_value[] =
1709 	    { 0x8013, 0x0700, 0x8fb9, 0x2801, 0x8fba, 0x0100, 0x8fbc, 0x1900,
1710 	      0x8fbe, 0xe100, 0x8fc0, 0x0800, 0x8fc2, 0xe500, 0x8fc4, 0x0f00,
1711 	      0x8fc6, 0xf100, 0x8fc8, 0x0400, 0x8fca, 0xf300, 0x8fcc, 0xfd00,
1712 	      0x8fce, 0xff00, 0x8fd0, 0xfb00, 0x8fd2, 0x0100, 0x8fd4, 0xf400,
1713 	      0x8fd6, 0xff00, 0x8fd8, 0xf600, 0x813d, 0x390e, 0x814f, 0x790e,
1714 	      0x80b0, 0x0f31 };
1715 
1716 	for (i = 0; i < nitems(rtl8125_mac_cfg4_ephy); i++)
1717 		rge_write_ephy(sc, rtl8125_mac_cfg4_ephy[i].reg,
1718 		    rtl8125_mac_cfg4_ephy[i].val);
1719 
1720 	rge_write_phy_ocp(sc, 0xbf86, 0x9000);
1721 	RGE_PHY_SETBIT(sc, 0xc402, 0x0400);
1722 	RGE_PHY_CLRBIT(sc, 0xc402, 0x0400);
1723 	rge_write_phy_ocp(sc, 0xbd86, 0x1010);
1724 	rge_write_phy_ocp(sc, 0xbd88, 0x1010);
1725 	val = rge_read_phy_ocp(sc, 0xbd4e) & ~0x0c00;
1726 	rge_write_phy_ocp(sc, 0xbd4e, val | 0x0800);
1727 	val = rge_read_phy_ocp(sc, 0xbf46) & ~0x0f00;
1728 	rge_write_phy_ocp(sc, 0xbf46, val | 0x0700);
1729 
1730 	rge_phy_config_mcu(sc, RGE_MAC_CFG4_MCODE_VER);
1731 
1732 	RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
1733 	RGE_PHY_SETBIT(sc, 0xbc08, 0x000c);
1734 	rge_write_phy_ocp(sc, 0xa436, 0x8fff);
1735 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1736 	rge_write_phy_ocp(sc, 0xa438, val | 0x0400);
1737 	for (i = 0; i < 6; i++) {
1738 		rge_write_phy_ocp(sc, 0xb87c, 0x8560 + i * 2);
1739 		if (i < 3)
1740 			rge_write_phy_ocp(sc, 0xb87e, 0x19cc);
1741 		else
1742 			rge_write_phy_ocp(sc, 0xb87e, 0x147d);
1743 	}
1744 	rge_write_phy_ocp(sc, 0xb87c, 0x8ffe);
1745 	rge_write_phy_ocp(sc, 0xb87e, 0x0907);
1746 	val = rge_read_phy_ocp(sc, 0xacda) & ~0xff00;
1747 	rge_write_phy_ocp(sc, 0xacda, val | 0xff00);
1748 	val = rge_read_phy_ocp(sc, 0xacde) & ~0xf000;
1749 	rge_write_phy_ocp(sc, 0xacde, val | 0xf000);
1750 	rge_write_phy_ocp(sc, 0xb87c, 0x80d6);
1751 	rge_write_phy_ocp(sc, 0xb87e, 0x2801);
1752 	rge_write_phy_ocp(sc, 0xb87c, 0x80F2);
1753 	rge_write_phy_ocp(sc, 0xb87e, 0x2801);
1754 	rge_write_phy_ocp(sc, 0xb87c, 0x80f4);
1755 	rge_write_phy_ocp(sc, 0xb87e, 0x6077);
1756 	rge_write_phy_ocp(sc, 0xb506, 0x01e7);
1757 	rge_write_phy_ocp(sc, 0xac8c, 0x0ffc);
1758 	rge_write_phy_ocp(sc, 0xac46, 0xb7b4);
1759 	rge_write_phy_ocp(sc, 0xac50, 0x0fbc);
1760 	rge_write_phy_ocp(sc, 0xac3c, 0x9240);
1761 	rge_write_phy_ocp(sc, 0xac4E, 0x0db4);
1762 	rge_write_phy_ocp(sc, 0xacc6, 0x0707);
1763 	rge_write_phy_ocp(sc, 0xacc8, 0xa0d3);
1764 	rge_write_phy_ocp(sc, 0xad08, 0x0007);
1765 	for (i = 0; i < nitems(mac_cfg4_b87c_value); i += 2) {
1766 		rge_write_phy_ocp(sc, 0xb87c, mac_cfg4_b87c_value[i]);
1767 		rge_write_phy_ocp(sc, 0xb87e, mac_cfg4_b87c_value[i + 1]);
1768 	}
1769 	RGE_PHY_SETBIT(sc, 0xbf4c, 0x0002);
1770 	RGE_PHY_SETBIT(sc, 0xbcca, 0x0300);
1771 	rge_write_phy_ocp(sc, 0xb87c, 0x8141);
1772 	rge_write_phy_ocp(sc, 0xb87e, 0x320e);
1773 	rge_write_phy_ocp(sc, 0xb87c, 0x8153);
1774 	rge_write_phy_ocp(sc, 0xb87e, 0x720e);
1775 	RGE_PHY_CLRBIT(sc, 0xa432, 0x0040);
1776 	rge_write_phy_ocp(sc, 0xb87c, 0x8529);
1777 	rge_write_phy_ocp(sc, 0xb87e, 0x050e);
1778 	RGE_WRITE_2(sc, RGE_EEE_TXIDLE_TIMER, RGE_JUMBO_MTU + ETHER_HDR_LEN +
1779 	    32);
1780 	rge_write_phy_ocp(sc, 0xa436, 0x816c);
1781 	rge_write_phy_ocp(sc, 0xa438, 0xc4a0);
1782 	rge_write_phy_ocp(sc, 0xa436, 0x8170);
1783 	rge_write_phy_ocp(sc, 0xa438, 0xc4a0);
1784 	rge_write_phy_ocp(sc, 0xa436, 0x8174);
1785 	rge_write_phy_ocp(sc, 0xa438, 0x04a0);
1786 	rge_write_phy_ocp(sc, 0xa436, 0x8178);
1787 	rge_write_phy_ocp(sc, 0xa438, 0x04a0);
1788 	rge_write_phy_ocp(sc, 0xa436, 0x817c);
1789 	rge_write_phy_ocp(sc, 0xa438, 0x0719);
1790 	rge_write_phy_ocp(sc, 0xa436, 0x8ff4);
1791 	rge_write_phy_ocp(sc, 0xa438, 0x0400);
1792 	rge_write_phy_ocp(sc, 0xa436, 0x8ff1);
1793 	rge_write_phy_ocp(sc, 0xa438, 0x0404);
1794 	rge_write_phy_ocp(sc, 0xbf4a, 0x001b);
1795 	for (i = 0; i < 6; i++) {
1796 		rge_write_phy_ocp(sc, 0xb87c, 0x8033 + i * 4);
1797 		if (i == 2)
1798 			rge_write_phy_ocp(sc, 0xb87e, 0xfc32);
1799 		else
1800 			rge_write_phy_ocp(sc, 0xb87e, 0x7c13);
1801 	}
1802 	rge_write_phy_ocp(sc, 0xb87c, 0x8145);
1803 	rge_write_phy_ocp(sc, 0xb87e, 0x370e);
1804 	rge_write_phy_ocp(sc, 0xb87c, 0x8157);
1805 	rge_write_phy_ocp(sc, 0xb87e, 0x770e);
1806 	rge_write_phy_ocp(sc, 0xb87c, 0x8169);
1807 	rge_write_phy_ocp(sc, 0xb87e, 0x0d0a);
1808 	rge_write_phy_ocp(sc, 0xb87c, 0x817b);
1809 	rge_write_phy_ocp(sc, 0xb87e, 0x1d0a);
1810 	rge_write_phy_ocp(sc, 0xa436, 0x8217);
1811 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1812 	rge_write_phy_ocp(sc, 0xa438, val | 0x5000);
1813 	rge_write_phy_ocp(sc, 0xa436, 0x821a);
1814 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1815 	rge_write_phy_ocp(sc, 0xa438, val | 0x5000);
1816 	rge_write_phy_ocp(sc, 0xa436, 0x80da);
1817 	rge_write_phy_ocp(sc, 0xa438, 0x0403);
1818 	rge_write_phy_ocp(sc, 0xa436, 0x80dc);
1819 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1820 	rge_write_phy_ocp(sc, 0xa438, val | 0x1000);
1821 	rge_write_phy_ocp(sc, 0xa436, 0x80b3);
1822 	rge_write_phy_ocp(sc, 0xa438, 0x0384);
1823 	rge_write_phy_ocp(sc, 0xa436, 0x80b7);
1824 	rge_write_phy_ocp(sc, 0xa438, 0x2007);
1825 	rge_write_phy_ocp(sc, 0xa436, 0x80ba);
1826 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1827 	rge_write_phy_ocp(sc, 0xa438, val | 0x6c00);
1828 	rge_write_phy_ocp(sc, 0xa436, 0x80b5);
1829 	rge_write_phy_ocp(sc, 0xa438, 0xf009);
1830 	rge_write_phy_ocp(sc, 0xa436, 0x80bd);
1831 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1832 	rge_write_phy_ocp(sc, 0xa438, val | 0x9f00);
1833 	rge_write_phy_ocp(sc, 0xa436, 0x80c7);
1834 	rge_write_phy_ocp(sc, 0xa438, 0xf083);
1835 	rge_write_phy_ocp(sc, 0xa436, 0x80dd);
1836 	rge_write_phy_ocp(sc, 0xa438, 0x03f0);
1837 	rge_write_phy_ocp(sc, 0xa436, 0x80df);
1838 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1839 	rge_write_phy_ocp(sc, 0xa438, val | 0x1000);
1840 	rge_write_phy_ocp(sc, 0xa436, 0x80cb);
1841 	rge_write_phy_ocp(sc, 0xa438, 0x2007);
1842 	rge_write_phy_ocp(sc, 0xa436, 0x80ce);
1843 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1844 	rge_write_phy_ocp(sc, 0xa438, val | 0x6c00);
1845 	rge_write_phy_ocp(sc, 0xa436, 0x80c9);
1846 	rge_write_phy_ocp(sc, 0xa438, 0x8009);
1847 	rge_write_phy_ocp(sc, 0xa436, 0x80d1);
1848 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1849 	rge_write_phy_ocp(sc, 0xa438, val | 0x8000);
1850 	rge_write_phy_ocp(sc, 0xa436, 0x80a3);
1851 	rge_write_phy_ocp(sc, 0xa438, 0x200a);
1852 	rge_write_phy_ocp(sc, 0xa436, 0x80a5);
1853 	rge_write_phy_ocp(sc, 0xa438, 0xf0ad);
1854 	rge_write_phy_ocp(sc, 0xa436, 0x809f);
1855 	rge_write_phy_ocp(sc, 0xa438, 0x6073);
1856 	rge_write_phy_ocp(sc, 0xa436, 0x80a1);
1857 	rge_write_phy_ocp(sc, 0xa438, 0x000b);
1858 	rge_write_phy_ocp(sc, 0xa436, 0x80a9);
1859 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1860 	rge_write_phy_ocp(sc, 0xa438, val | 0xc000);
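	/* Hold the PHY MCU patch request around the 0xb88e/0xb890 writes. */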
1861 	rge_patch_phy_mcu(sc, 1);
1862 	RGE_PHY_CLRBIT(sc, 0xb896, 0x0001);
1863 	RGE_PHY_CLRBIT(sc, 0xb892, 0xff00);
1864 	rge_write_phy_ocp(sc, 0xb88e, 0xc23e);
1865 	rge_write_phy_ocp(sc, 0xb890, 0x0000);
1866 	rge_write_phy_ocp(sc, 0xb88e, 0xc240);
1867 	rge_write_phy_ocp(sc, 0xb890, 0x0103);
1868 	rge_write_phy_ocp(sc, 0xb88e, 0xc242);
1869 	rge_write_phy_ocp(sc, 0xb890, 0x0507);
1870 	rge_write_phy_ocp(sc, 0xb88e, 0xc244);
1871 	rge_write_phy_ocp(sc, 0xb890, 0x090b);
1872 	rge_write_phy_ocp(sc, 0xb88e, 0xc246);
1873 	rge_write_phy_ocp(sc, 0xb890, 0x0c0e);
1874 	rge_write_phy_ocp(sc, 0xb88e, 0xc248);
1875 	rge_write_phy_ocp(sc, 0xb890, 0x1012);
1876 	rge_write_phy_ocp(sc, 0xb88e, 0xc24a);
1877 	rge_write_phy_ocp(sc, 0xb890, 0x1416);
1878 	RGE_PHY_SETBIT(sc, 0xb896, 0x0001);
1879 	rge_patch_phy_mcu(sc, 0);
1880 	RGE_PHY_SETBIT(sc, 0xa86a, 0x0001);
1881 	RGE_PHY_SETBIT(sc, 0xa6f0, 0x0001);
1882 	rge_write_phy_ocp(sc, 0xbfa0, 0xd70d);
1883 	rge_write_phy_ocp(sc, 0xbfa2, 0x4100);
1884 	rge_write_phy_ocp(sc, 0xbfa4, 0xe868);
1885 	rge_write_phy_ocp(sc, 0xbfa6, 0xdc59);
1886 	rge_write_phy_ocp(sc, 0xb54c, 0x3c18);
1887 	RGE_PHY_CLRBIT(sc, 0xbfa4, 0x0020);
1888 	rge_write_phy_ocp(sc, 0xa436, 0x817d);
1889 	RGE_PHY_SETBIT(sc, 0xa438, 0x1000);
1890 }
1891 
1892 void
1893 rge_phy_config_mac_cfg5(struct rge_softc *sc)
1894 {
1895 	uint16_t val;
1896 	int i;
1897 
1898 	for (i = 0; i < nitems(rtl8125_mac_cfg5_ephy); i++)
1899 		rge_write_ephy(sc, rtl8125_mac_cfg5_ephy[i].reg,
1900 		    rtl8125_mac_cfg5_ephy[i].val);
1901 
1902 	val = rge_read_ephy(sc, 0x0022) & ~0x0030;
1903 	rge_write_ephy(sc, 0x0022, val | 0x0020);
1904 	val = rge_read_ephy(sc, 0x0062) & ~0x0030;
1905 	rge_write_ephy(sc, 0x0062, val | 0x0020);
1906 
1907 	rge_phy_config_mcu(sc, RGE_MAC_CFG5_MCODE_VER);
1908 
1909 	RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
1910 	val = rge_read_phy_ocp(sc, 0xac46) & ~0x00f0;
1911 	rge_write_phy_ocp(sc, 0xac46, val | 0x0090);
1912 	val = rge_read_phy_ocp(sc, 0xad30) & ~0x0003;
1913 	rge_write_phy_ocp(sc, 0xad30, val | 0x0001);
1914 	RGE_WRITE_2(sc, RGE_EEE_TXIDLE_TIMER, RGE_JUMBO_MTU + ETHER_HDR_LEN +
1915 	    32);
1916 	rge_write_phy_ocp(sc, 0xb87c, 0x80f5);
1917 	rge_write_phy_ocp(sc, 0xb87e, 0x760e);
1918 	rge_write_phy_ocp(sc, 0xb87c, 0x8107);
1919 	rge_write_phy_ocp(sc, 0xb87e, 0x360e);
1920 	rge_write_phy_ocp(sc, 0xb87c, 0x8551);
1921 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
1922 	rge_write_phy_ocp(sc, 0xb87e, val | 0x0800);
1923 	val = rge_read_phy_ocp(sc, 0xbf00) & ~0xe000;
1924 	rge_write_phy_ocp(sc, 0xbf00, val | 0xa000);
1925 	val = rge_read_phy_ocp(sc, 0xbf46) & ~0x0f00;
1926 	rge_write_phy_ocp(sc, 0xbf46, val | 0x0300);
1927 	for (i = 0; i < 10; i++) {
1928 		rge_write_phy_ocp(sc, 0xa436, 0x8044 + i * 6);
1929 		rge_write_phy_ocp(sc, 0xa438, 0x2417);
1930 	}
1931 	RGE_PHY_SETBIT(sc, 0xa4ca, 0x0040);
1932 	val = rge_read_phy_ocp(sc, 0xbf84) & ~0xe000;
1933 	rge_write_phy_ocp(sc, 0xbf84, val | 0xa000);
1934 }
1935 
1936 void
1937 rge_phy_config_mcu(struct rge_softc *sc, uint16_t mcode_version)
1938 {
1939 	if (sc->rge_mcodever != mcode_version) {
1940 		int i;
1941 
1942 		rge_patch_phy_mcu(sc, 1);
1943 
1944 		if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3) {
1945 			rge_write_phy_ocp(sc, 0xa436, 0x8024);
1946 			if (sc->rge_type == MAC_CFG2)
1947 				rge_write_phy_ocp(sc, 0xa438, 0x8600);
1948 			else
1949 				rge_write_phy_ocp(sc, 0xa438, 0x8601);
1950 			rge_write_phy_ocp(sc, 0xa436, 0xb82e);
1951 			rge_write_phy_ocp(sc, 0xa438, 0x0001);
1952 
1953 			RGE_PHY_SETBIT(sc, 0xb820, 0x0080);
1954 		}
1955 
1956 		if (sc->rge_type == MAC_CFG2) {
1957 			for (i = 0; i < nitems(rtl8125_mac_cfg2_mcu); i++) {
1958 				rge_write_phy_ocp(sc,
1959 				    rtl8125_mac_cfg2_mcu[i].reg,
1960 				    rtl8125_mac_cfg2_mcu[i].val);
1961 			}
1962 		} else if (sc->rge_type == MAC_CFG3) {
1963 			for (i = 0; i < nitems(rtl8125_mac_cfg3_mcu); i++) {
1964 				rge_write_phy_ocp(sc,
1965 				    rtl8125_mac_cfg3_mcu[i].reg,
1966 				    rtl8125_mac_cfg3_mcu[i].val);
1967 			}
1968 		} else if (sc->rge_type == MAC_CFG4) {
1969 			for (i = 0; i < nitems(rtl8125_mac_cfg4_mcu); i++) {
1970 				rge_write_phy_ocp(sc,
1971 				    rtl8125_mac_cfg4_mcu[i].reg,
1972 				    rtl8125_mac_cfg4_mcu[i].val);
1973 			}
1974 		} else if (sc->rge_type == MAC_CFG5) {
1975 			for (i = 0; i < nitems(rtl8125_mac_cfg5_mcu); i++) {
1976 				rge_write_phy_ocp(sc,
1977 				    rtl8125_mac_cfg5_mcu[i].reg,
1978 				    rtl8125_mac_cfg5_mcu[i].val);
1979 			}
1980 		}
1981 
1982 		if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3) {
1983 			RGE_PHY_CLRBIT(sc, 0xb820, 0x0080);
1984 
1985 			rge_write_phy_ocp(sc, 0xa436, 0);
1986 			rge_write_phy_ocp(sc, 0xa438, 0);
1987 			RGE_PHY_CLRBIT(sc, 0xb82e, 0x0001);
1988 			rge_write_phy_ocp(sc, 0xa436, 0x8024);
1989 			rge_write_phy_ocp(sc, 0xa438, 0);
1990 		}
1991 
1992 		rge_patch_phy_mcu(sc, 0);
1993 
1994 		/* Write microcode version. */
1995 		rge_write_phy_ocp(sc, 0xa436, 0x801e);
1996 		rge_write_phy_ocp(sc, 0xa438, mcode_version);
1997 	}
1998 }
1999 
2000 void
2001 rge_set_macaddr(struct rge_softc *sc, const uint8_t *addr)
2002 {
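	/* Unlock the config registers while rewriting the station address. */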
2003 	RGE_SETBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
2004 	RGE_WRITE_4(sc, RGE_MAC0,
2005 	    addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
2006 	RGE_WRITE_4(sc, RGE_MAC4,
2007 	    addr[5] <<  8 | addr[4]);
2008 	RGE_CLRBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
2009 }
2010 
2011 void
2012 rge_get_macaddr(struct rge_softc *sc, uint8_t *addr)
2013 {
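	/* Note: assumes addr is aligned well enough for 32-bit stores. */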
2014 	*(uint32_t *)&addr[0] = RGE_READ_4(sc, RGE_ADDR0);
2015 	*(uint16_t *)&addr[4] = RGE_READ_2(sc, RGE_ADDR1);
2016 }
2017 
2018 void
2019 rge_hw_init(struct rge_softc *sc)
2020 {
2021 	int i;
2022 
2023 	RGE_SETBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
2024 	RGE_CLRBIT_1(sc, RGE_CFG5, RGE_CFG5_PME_STS);
2025 	RGE_CLRBIT_1(sc, RGE_CFG2, RGE_CFG2_CLKREQ_EN);
2026 	RGE_CLRBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
2027 	RGE_CLRBIT_1(sc, 0xf1, 0x80);
2028 
2029 	/* Disable UPS. */
2030 	RGE_MAC_CLRBIT(sc, 0xd40a, 0x0010);
2031 
2032 	/* Configure MAC MCU. */
2033 	rge_write_mac_ocp(sc, 0xfc38, 0);
2034 
2035 	for (i = 0xfc28; i < 0xfc38; i += 2)
2036 		rge_write_mac_ocp(sc, i, 0);
2037 
2038 	DELAY(3000);
2039 	rge_write_mac_ocp(sc, 0xfc26, 0);
2040 
2041 	if (sc->rge_type == MAC_CFG3) {
2042 		for (i = 0; i < nitems(rtl8125_mac_bps); i++) {
2043 			rge_write_mac_ocp(sc, rtl8125_mac_bps[i].reg,
2044 			    rtl8125_mac_bps[i].val);
2045 		}
2046 	} else if (sc->rge_type == MAC_CFG5) {
2047 		for (i = 0; i < nitems(rtl8125b_mac_bps); i++) {
2048 			rge_write_mac_ocp(sc, rtl8125b_mac_bps[i].reg,
2049 			    rtl8125b_mac_bps[i].val);
2050 		}
2051 	}
2052 
2053 	/* Disable PHY power saving. */
2054 	rge_disable_phy_ocp_pwrsave(sc);
2055 
	/* Mask PCIe Unsupported Request errors (AER uncorrectable mask). */
	rge_write_csi(sc, 0x108, rge_read_csi(sc, 0x108) | 0x00100000);
}
2061 
2062 void
2063 rge_disable_phy_ocp_pwrsave(struct rge_softc *sc)
2064 {
2065 	if (rge_read_phy_ocp(sc, 0xc416) != 0x0500) {
2066 		rge_patch_phy_mcu(sc, 1);
2067 		rge_write_phy_ocp(sc, 0xc416, 0);
2068 		rge_write_phy_ocp(sc, 0xc416, 0x0500);
2069 		rge_patch_phy_mcu(sc, 0);
2070 	}
2071 }
2072 
2073 void
2074 rge_patch_phy_mcu(struct rge_softc *sc, int set)
2075 {
2076 	int i;
2077 
2078 	if (set)
2079 		RGE_PHY_SETBIT(sc, 0xb820, 0x0010);
2080 	else
2081 		RGE_PHY_CLRBIT(sc, 0xb820, 0x0010);
2082 
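	/* Wait for the PHY MCU to acknowledge the request (0xb800 bit 6). */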
2083 	for (i = 0; i < 1000; i++) {
2084 		if ((rge_read_phy_ocp(sc, 0xb800) & 0x0040) == 0x0040)
2085 			break;
2086 		DELAY(100);
2087 	}
	if (i == 1000)
		DPRINTF(("timeout waiting to patch phy mcu\n"));
2092 }
2093 
2094 void
2095 rge_add_media_types(struct rge_softc *sc)
2096 {
2097 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_10_T, 0, NULL);
2098 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_10_T | IFM_FDX, 0, NULL);
2099 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_100_TX, 0, NULL);
2100 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_100_TX | IFM_FDX, 0, NULL);
2101 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_1000_T, 0, NULL);
2102 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
2103 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_2500_T, 0, NULL);
2104 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_2500_T | IFM_FDX, 0, NULL);
2105 }
2106 
2107 void
2108 rge_config_imtype(struct rge_softc *sc, int imtype)
2109 {
2110 	switch (imtype) {
2111 	case RGE_IMTYPE_NONE:
2112 		sc->rge_intrs = RGE_INTRS;
2113 		sc->rge_rx_ack = RGE_ISR_RX_OK | RGE_ISR_RX_DESC_UNAVAIL |
2114 		    RGE_ISR_RX_FIFO_OFLOW;
2115 		sc->rge_tx_ack = RGE_ISR_TX_OK;
2116 		break;
2117 	case RGE_IMTYPE_SIM:
2118 		sc->rge_intrs = RGE_INTRS_TIMER;
2119 		sc->rge_rx_ack = RGE_ISR_PCS_TIMEOUT;
2120 		sc->rge_tx_ack = RGE_ISR_PCS_TIMEOUT;
2121 		break;
2122 	default:
2123 		panic("%s: unknown imtype %d", sc->sc_dev.dv_xname, imtype);
2124 	}
2125 }
2126 
2127 void
2128 rge_disable_hw_im(struct rge_softc *sc)
2129 {
2130 	RGE_WRITE_2(sc, RGE_IM, 0);
2131 }
2132 
2133 void
2134 rge_disable_sim_im(struct rge_softc *sc)
2135 {
2136 	RGE_WRITE_4(sc, RGE_TIMERINT0, 0);
2137 	sc->rge_timerintr = 0;
2138 }
2139 
2140 void
2141 rge_setup_sim_im(struct rge_softc *sc)
2142 {
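	/* Arm the interrupt timer; its expiry raises RGE_ISR_PCS_TIMEOUT. */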
2143 	RGE_WRITE_4(sc, RGE_TIMERINT0, 0x2600);
2144 	RGE_WRITE_4(sc, RGE_TIMERCNT, 1);
2145 	sc->rge_timerintr = 1;
2146 }
2147 
2148 void
2149 rge_setup_intr(struct rge_softc *sc, int imtype)
2150 {
2151 	rge_config_imtype(sc, imtype);
2152 
2153 	/* Enable interrupts. */
2154 	RGE_WRITE_4(sc, RGE_IMR, sc->rge_intrs);
2155 
2156 	switch (imtype) {
2157 	case RGE_IMTYPE_NONE:
2158 		rge_disable_sim_im(sc);
2159 		rge_disable_hw_im(sc);
2160 		break;
2161 	case RGE_IMTYPE_SIM:
2162 		rge_disable_hw_im(sc);
2163 		rge_setup_sim_im(sc);
2164 		break;
2165 	default:
2166 		panic("%s: unknown imtype %d", sc->sc_dev.dv_xname, imtype);
2167 	}
2168 }
2169 
2170 void
2171 rge_exit_oob(struct rge_softc *sc)
2172 {
2173 	int i;
2174 
2175 	RGE_CLRBIT_4(sc, RGE_RXCFG, RGE_RXCFG_ALLPHYS | RGE_RXCFG_INDIV |
2176 	    RGE_RXCFG_MULTI | RGE_RXCFG_BROAD | RGE_RXCFG_RUNT |
2177 	    RGE_RXCFG_ERRPKT);
2178 
2179 	/* Disable RealWoW. */
2180 	rge_write_mac_ocp(sc, 0xc0bc, 0x00ff);
2181 
2182 	rge_reset(sc);
2183 
2184 	/* Disable OOB. */
2185 	RGE_CLRBIT_1(sc, RGE_MCUCMD, RGE_MCUCMD_IS_OOB);
2186 
2187 	RGE_MAC_CLRBIT(sc, 0xe8de, 0x4000);
2188 
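	/* Presumably gives the management firmware time to become ready. */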
2189 	for (i = 0; i < 10; i++) {
2190 		DELAY(100);
2191 		if (RGE_READ_2(sc, RGE_TWICMD) & 0x0200)
2192 			break;
2193 	}
2194 
2195 	rge_write_mac_ocp(sc, 0xc0aa, 0x07d0);
2196 	rge_write_mac_ocp(sc, 0xc0a6, 0x01b5);
2197 	rge_write_mac_ocp(sc, 0xc01e, 0x5555);
2198 
2199 	for (i = 0; i < 10; i++) {
2200 		DELAY(100);
2201 		if (RGE_READ_2(sc, RGE_TWICMD) & 0x0200)
2202 			break;
2203 	}
2204 
2205 	if (rge_read_mac_ocp(sc, 0xd42c) & 0x0100) {
		printf("%s: detected UPS resume, waiting for PHY\n",
		    sc->sc_dev.dv_xname);
2208 		for (i = 0; i < RGE_TIMEOUT; i++) {
2209 			if ((rge_read_phy_ocp(sc, 0xa420) & 0x0007) == 2)
2210 				break;
2211 			DELAY(1000);
2212 		}
2213 		RGE_MAC_CLRBIT(sc, 0xd408, 0x0100);
2214 		if (sc->rge_type == MAC_CFG4 || sc->rge_type == MAC_CFG5)
2215 			RGE_PHY_CLRBIT(sc, 0xa466, 0x0001);
2216 		RGE_PHY_CLRBIT(sc, 0xa468, 0x000a);
2217 	}
2218 }
2219 
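/*
 * CSI access: indirect read/write access to the chip's PCIe
 * configuration space.
 */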
2220 void
2221 rge_write_csi(struct rge_softc *sc, uint32_t reg, uint32_t val)
2222 {
2223 	int i;
2224 
2225 	RGE_WRITE_4(sc, RGE_CSIDR, val);
2226 	RGE_WRITE_4(sc, RGE_CSIAR, (reg & RGE_CSIAR_ADDR_MASK) |
2227 	    (RGE_CSIAR_BYTE_EN << RGE_CSIAR_BYTE_EN_SHIFT) | RGE_CSIAR_BUSY);
2228 
	for (i = 0; i < 10; i++) {
		DELAY(100);
		if (!(RGE_READ_4(sc, RGE_CSIAR) & RGE_CSIAR_BUSY))
			break;
	}
2234 
2235 	DELAY(20);
2236 }
2237 
2238 uint32_t
2239 rge_read_csi(struct rge_softc *sc, uint32_t reg)
2240 {
2241 	int i;
2242 
2243 	RGE_WRITE_4(sc, RGE_CSIAR, (reg & RGE_CSIAR_ADDR_MASK) |
2244 	    (RGE_CSIAR_BYTE_EN << RGE_CSIAR_BYTE_EN_SHIFT));
2245 
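	/* For reads, the busy bit is set once the returned data is valid. */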
	for (i = 0; i < 10; i++) {
		DELAY(100);
		if (RGE_READ_4(sc, RGE_CSIAR) & RGE_CSIAR_BUSY)
			break;
	}
2251 
2252 	DELAY(20);
2253 
2254 	return (RGE_READ_4(sc, RGE_CSIDR));
2255 }
2256 
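/*
 * MAC OCP registers are 16 bits wide at even offsets; the halved
 * offset forms the address field of RGE_MACOCP.
 */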
2257 void
2258 rge_write_mac_ocp(struct rge_softc *sc, uint16_t reg, uint16_t val)
2259 {
2260 	uint32_t tmp;
2261 
2262 	tmp = (reg >> 1) << RGE_MACOCP_ADDR_SHIFT;
2263 	tmp += val;
2264 	tmp |= RGE_MACOCP_BUSY;
2265 	RGE_WRITE_4(sc, RGE_MACOCP, tmp);
2266 }
2267 
2268 uint16_t
2269 rge_read_mac_ocp(struct rge_softc *sc, uint16_t reg)
2270 {
2271 	uint32_t val;
2272 
2273 	val = (reg >> 1) << RGE_MACOCP_ADDR_SHIFT;
2274 	RGE_WRITE_4(sc, RGE_MACOCP, val);
2275 
2276 	return (RGE_READ_4(sc, RGE_MACOCP) & RGE_MACOCP_DATA_MASK);
2277 }
2278 
2279 void
2280 rge_write_ephy(struct rge_softc *sc, uint16_t reg, uint16_t val)
2281 {
2282 	uint32_t tmp;
2283 	int i;
2284 
2285 	tmp = (reg & RGE_EPHYAR_ADDR_MASK) << RGE_EPHYAR_ADDR_SHIFT;
2286 	tmp |= RGE_EPHYAR_BUSY | (val & RGE_EPHYAR_DATA_MASK);
2287 	RGE_WRITE_4(sc, RGE_EPHYAR, tmp);
2288 
2289 	for (i = 0; i < 10; i++) {
2290 		DELAY(100);
2291 		if (!(RGE_READ_4(sc, RGE_EPHYAR) & RGE_EPHYAR_BUSY))
2292 			break;
2293 	}
2294 
2295 	DELAY(20);
2296 }
2297 
2298 uint16_t
2299 rge_read_ephy(struct rge_softc *sc, uint16_t reg)
2300 {
2301 	uint32_t val;
2302 	int i;
2303 
2304 	val = (reg & RGE_EPHYAR_ADDR_MASK) << RGE_EPHYAR_ADDR_SHIFT;
2305 	RGE_WRITE_4(sc, RGE_EPHYAR, val);
2306 
2307 	for (i = 0; i < 10; i++) {
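	/* As with CSI reads, the flag bit signals completion when set. */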
2308 		DELAY(100);
2309 		val = RGE_READ_4(sc, RGE_EPHYAR);
2310 		if (val & RGE_EPHYAR_BUSY)
2311 			break;
2312 	}
2313 
2314 	DELAY(20);
2315 
2316 	return (val & RGE_EPHYAR_DATA_MASK);
2317 }
2318 
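/*
 * Classic MII PHY registers are reached through the OCP window.  With
 * addr == 0, register N of the base page maps to OCP address
 * ((RGE_PHYBASE + N / 8) << 4) + (N % 8) * 2.
 */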
2319 void
2320 rge_write_phy(struct rge_softc *sc, uint16_t addr, uint16_t reg, uint16_t val)
2321 {
2322 	uint16_t off, phyaddr;
2323 
2324 	phyaddr = addr ? addr : RGE_PHYBASE + (reg / 8);
2325 	phyaddr <<= 4;
2326 
2327 	off = addr ? reg : 0x10 + (reg % 8);
2328 
2329 	phyaddr += (off - 16) << 1;
2330 
2331 	rge_write_phy_ocp(sc, phyaddr, val);
2332 }
2333 
2334 uint16_t
2335 rge_read_phy(struct rge_softc *sc, uint16_t addr, uint16_t reg)
2336 {
2337 	uint16_t off, phyaddr;
2338 
2339 	phyaddr = addr ? addr : RGE_PHYBASE + (reg / 8);
2340 	phyaddr <<= 4;
2341 
2342 	off = addr ? reg : 0x10 + (reg % 8);
2343 
2344 	phyaddr += (off - 16) << 1;
2345 
2346 	return (rge_read_phy_ocp(sc, phyaddr));
2347 }
2348 
2349 void
2350 rge_write_phy_ocp(struct rge_softc *sc, uint16_t reg, uint16_t val)
2351 {
2352 	uint32_t tmp;
2353 	int i;
2354 
2355 	tmp = (reg >> 1) << RGE_PHYOCP_ADDR_SHIFT;
2356 	tmp |= RGE_PHYOCP_BUSY | val;
2357 	RGE_WRITE_4(sc, RGE_PHYOCP, tmp);
2358 
2359 	for (i = 0; i < RGE_TIMEOUT; i++) {
2360 		DELAY(1);
2361 		if (!(RGE_READ_4(sc, RGE_PHYOCP) & RGE_PHYOCP_BUSY))
2362 			break;
2363 	}
2364 }
2365 
2366 uint16_t
2367 rge_read_phy_ocp(struct rge_softc *sc, uint16_t reg)
2368 {
2369 	uint32_t val;
2370 	int i;
2371 
2372 	val = (reg >> 1) << RGE_PHYOCP_ADDR_SHIFT;
2373 	RGE_WRITE_4(sc, RGE_PHYOCP, val);
2374 
2375 	for (i = 0; i < RGE_TIMEOUT; i++) {
2376 		DELAY(1);
2377 		val = RGE_READ_4(sc, RGE_PHYOCP);
2378 		if (val & RGE_PHYOCP_BUSY)
2379 			break;
2380 	}
2381 
2382 	return (val & RGE_PHYOCP_DATA_MASK);
2383 }
2384 
2385 int
2386 rge_get_link_status(struct rge_softc *sc)
2387 {
2388 	return ((RGE_READ_2(sc, RGE_PHYSTAT) & RGE_PHYSTAT_LINK) ? 1 : 0);
2389 }
2390 
2391 void
2392 rge_txstart(void *arg)
2393 {
2394 	struct rge_softc *sc = arg;
2395 
2396 	RGE_WRITE_2(sc, RGE_TXSTART, RGE_TXSTART_START);
2397 }
2398 
2399 void
2400 rge_tick(void *arg)
2401 {
2402 	struct rge_softc *sc = arg;
2403 	int s;
2404 
2405 	s = splnet();
2406 	rge_link_state(sc);
2407 	splx(s);
2408 
2409 	timeout_add_sec(&sc->sc_timeout, 1);
2410 }
2411 
2412 void
2413 rge_link_state(struct rge_softc *sc)
2414 {
2415 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
2416 	int link = LINK_STATE_DOWN;
2417 
2418 	if (rge_get_link_status(sc))
2419 		link = LINK_STATE_UP;
2420 
2421 	if (ifp->if_link_state != link) {
2422 		ifp->if_link_state = link;
2423 		if_link_state_change(ifp);
2424 	}
2425 }
2426 
2427 #ifndef SMALL_KERNEL
2428 int
2429 rge_wol(struct ifnet *ifp, int enable)
2430 {
2431 	struct rge_softc *sc = ifp->if_softc;
2432 
	if (enable && !(RGE_READ_1(sc, RGE_CFG1) & RGE_CFG1_PM_EN)) {
		printf("%s: power management is disabled, "
		    "cannot do WOL\n", sc->sc_dev.dv_xname);
		return (ENOTSUP);
	}
2441 
2442 	rge_iff(sc);
2443 
2444 	if (enable)
2445 		RGE_MAC_SETBIT(sc, 0xc0b6, 0x0001);
2446 	else
2447 		RGE_MAC_CLRBIT(sc, 0xc0b6, 0x0001);
2448 
2449 	RGE_SETBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
2450 	RGE_CLRBIT_1(sc, RGE_CFG5, RGE_CFG5_WOL_LANWAKE | RGE_CFG5_WOL_UCAST |
2451 	    RGE_CFG5_WOL_MCAST | RGE_CFG5_WOL_BCAST);
2452 	RGE_CLRBIT_1(sc, RGE_CFG3, RGE_CFG3_WOL_LINK | RGE_CFG3_WOL_MAGIC);
2453 	if (enable)
2454 		RGE_SETBIT_1(sc, RGE_CFG5, RGE_CFG5_WOL_LANWAKE);
2455 	RGE_CLRBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
2456 
2457 	return (0);
2458 }
2459 
2460 void
2461 rge_wol_power(struct rge_softc *sc)
2462 {
2463 	/* Disable RXDV gate. */
2464 	RGE_CLRBIT_1(sc, RGE_PPSW, 0x08);
2465 	DELAY(2000);
2466 
2467 	RGE_SETBIT_1(sc, RGE_CFG1, RGE_CFG1_PM_EN);
2468 	RGE_SETBIT_1(sc, RGE_CFG2, RGE_CFG2_PMSTS_EN);
2469 }
2470 #endif
2471