/*	$OpenBSD: if_rge.c,v 1.5 2020/07/22 00:48:02 kevlo Exp $	*/

/*
 * Copyright (c) 2019, 2020 Kevin Lo <kevlo@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "bpfilter.h"
#include "vlan.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/device.h>
#include <sys/endian.h>

#include <net/if.h>
#include <net/if_media.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <machine/bus.h>
#include <machine/intr.h>

#include <dev/mii/mii.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_rgereg.h>

#ifdef RGE_DEBUG
#define DPRINTF(x)	do { if (rge_debug > 0) printf x; } while (0)
int rge_debug = 0;
#else
#define DPRINTF(x)
#endif

int		rge_match(struct device *, void *, void *);
void		rge_attach(struct device *, struct device *, void *);
int		rge_intr(void *);
int		rge_encap(struct rge_softc *, struct mbuf *, int);
int		rge_ioctl(struct ifnet *, u_long, caddr_t);
void		rge_start(struct ifqueue *);
void		rge_watchdog(struct ifnet *);
int		rge_init(struct ifnet *);
void		rge_stop(struct ifnet *);
int		rge_ifmedia_upd(struct ifnet *);
void		rge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
int		rge_allocmem(struct rge_softc *);
int		rge_newbuf(struct rge_softc *);
void		rge_discard_rxbuf(struct rge_softc *, int);
void		rge_rx_list_init(struct rge_softc *);
void		rge_tx_list_init(struct rge_softc *);
void		rge_fill_rx_ring(struct rge_softc *);
int		rge_rxeof(struct rge_softc *);
int		rge_txeof(struct rge_softc *);
void		rge_reset(struct rge_softc *);
void		rge_iff(struct rge_softc *);
void		rge_set_phy_power(struct rge_softc *, int);
void		rge_phy_config(struct rge_softc *);
void		rge_phy_config_mac_cfg2(struct rge_softc *);
void		rge_phy_config_mac_cfg3(struct rge_softc *);
void		rge_phy_config_mac_cfg4(struct rge_softc *);
void		rge_phy_config_mac_cfg5(struct rge_softc *);
void		rge_phy_config_mcu(struct rge_softc *, uint16_t);
void		rge_set_macaddr(struct rge_softc *, const uint8_t *);
void		rge_get_macaddr(struct rge_softc *, uint8_t *);
void		rge_hw_init(struct rge_softc *);
void		rge_disable_phy_ocp_pwrsave(struct rge_softc *);
void		rge_patch_phy_mcu(struct rge_softc *, int);
void		rge_add_media_types(struct rge_softc *);
void		rge_config_imtype(struct rge_softc *, int);
void		rge_disable_hw_im(struct rge_softc *);
void		rge_disable_sim_im(struct rge_softc *);
void		rge_setup_sim_im(struct rge_softc *);
void		rge_setup_intr(struct rge_softc *, int);
void		rge_exit_oob(struct rge_softc *);
void		rge_write_csi(struct rge_softc *, uint32_t, uint32_t);
uint32_t	rge_read_csi(struct rge_softc *, uint32_t);
void		rge_write_mac_ocp(struct rge_softc *, uint16_t, uint16_t);
uint16_t	rge_read_mac_ocp(struct rge_softc *, uint16_t);
void		rge_write_ephy(struct rge_softc *, uint16_t, uint16_t);
uint16_t	rge_read_ephy(struct rge_softc *, uint16_t);
void		rge_write_phy(struct rge_softc *, uint16_t, uint16_t, uint16_t);
void		rge_write_phy_ocp(struct rge_softc *, uint16_t, uint16_t);
uint16_t	rge_read_phy_ocp(struct rge_softc *, uint16_t);
int		rge_get_link_status(struct rge_softc *);
void		rge_txstart(void *);
void		rge_tick(void *);
void		rge_link_state(struct rge_softc *);

static const struct {
	uint16_t reg;
	uint16_t val;
} rtl8125_mac_cfg2_mcu[] = {
	RTL8125_MAC_CFG2_MCU
}, rtl8125_mac_cfg3_mcu[] = {
	RTL8125_MAC_CFG3_MCU
}, rtl8125_mac_cfg4_mcu[] = {
	RTL8125_MAC_CFG4_MCU
}, rtl8125_mac_cfg5_mcu[] = {
	RTL8125_MAC_CFG5_MCU
};

struct cfattach rge_ca = {
	sizeof(struct rge_softc), rge_match, rge_attach
};

struct cfdriver rge_cd = {
	NULL, "rge", DV_IFNET
};

const struct pci_matchid rge_devices[] = {
	{ PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_E3000 },
	{ PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_RTL8125 }
};

int
rge_match(struct device *parent, void *match, void *aux)
{
	return (pci_matchbyid((struct pci_attach_args *)aux, rge_devices,
	    nitems(rge_devices)));
}

void
rge_attach(struct device *parent, struct device *self, void *aux)
{
	struct rge_softc *sc = (struct rge_softc *)self;
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	struct ifnet *ifp;
	pcireg_t reg;
	uint32_t hwrev;
	uint8_t eaddr[ETHER_ADDR_LEN];
	int offset;

	pci_set_powerstate(pa->pa_pc, pa->pa_tag, PCI_PMCSR_STATE_D0);

	/*
	 * Map control/status registers.
	 */
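	/*
	 * Prefer the 64-bit memory BAR, then fall back to the 32-bit
	 * memory BAR, and finally to I/O space.
	 */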
	if (pci_mapreg_map(pa, RGE_PCI_BAR2, PCI_MAPREG_TYPE_MEM |
	    PCI_MAPREG_MEM_TYPE_64BIT, 0, &sc->rge_btag, &sc->rge_bhandle,
	    NULL, &sc->rge_bsize, 0)) {
		if (pci_mapreg_map(pa, RGE_PCI_BAR1, PCI_MAPREG_TYPE_MEM |
		    PCI_MAPREG_MEM_TYPE_32BIT, 0, &sc->rge_btag,
		    &sc->rge_bhandle, NULL, &sc->rge_bsize, 0)) {
			if (pci_mapreg_map(pa, RGE_PCI_BAR0, PCI_MAPREG_TYPE_IO,
			    0, &sc->rge_btag, &sc->rge_bhandle, NULL,
			    &sc->rge_bsize, 0)) {
				printf(": can't map mem or i/o space\n");
				return;
			}
		}
	}

	/*
	 * Allocate interrupt.
	 */
	if (pci_intr_map_msi(pa, &ih) == 0)
		sc->rge_flags |= RGE_FLAG_MSI;
	else if (pci_intr_map(pa, &ih) != 0) {
		printf(": couldn't map interrupt\n");
		return;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET | IPL_MPSAFE, rge_intr,
	    sc, sc->sc_dev.dv_xname);
	if (sc->sc_ih == NULL) {
		printf(": couldn't establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return;
	}
	printf(": %s", intrstr);

	sc->sc_dmat = pa->pa_dmat;
	sc->sc_pc = pa->pa_pc;
	sc->sc_tag = pa->pa_tag;

	/* Determine hardware revision. */
	hwrev = RGE_READ_4(sc, RGE_TXCFG) & RGE_TXCFG_HWREV;
	switch (hwrev) {
	case 0x60800000:
		sc->rge_type = MAC_CFG2;
		break;
	case 0x60900000:
		sc->rge_type = MAC_CFG3;
		break;
	case 0x64000000:
		sc->rge_type = MAC_CFG4;
		break;
	case 0x64100000:
		sc->rge_type = MAC_CFG5;
		break;
	default:
		printf(": unknown version 0x%08x\n", hwrev);
		return;
	}

	rge_config_imtype(sc, RGE_IMTYPE_SIM);

	/*
	 * PCI Express check.
	 */
	if (pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_PCIEXPRESS,
	    &offset, NULL)) {
		/* Disable PCIe ASPM and ECPM. */
		reg = pci_conf_read(pa->pa_pc, pa->pa_tag,
		    offset + PCI_PCIE_LCSR);
		reg &= ~(PCI_PCIE_LCSR_ASPM_L0S | PCI_PCIE_LCSR_ASPM_L1 |
		    PCI_PCIE_LCSR_ECPM);
		pci_conf_write(pa->pa_pc, pa->pa_tag, offset + PCI_PCIE_LCSR,
		    reg);
	}

	rge_exit_oob(sc);
	rge_hw_init(sc);

	rge_get_macaddr(sc, eaddr);
	printf(", address %s\n", ether_sprintf(eaddr));

	memcpy(sc->sc_arpcom.ac_enaddr, eaddr, ETHER_ADDR_LEN);

	rge_set_phy_power(sc, 1);
	rge_phy_config(sc);

	if (rge_allocmem(sc))
		return;

	ifp = &sc->sc_arpcom.ac_if;
	ifp->if_softc = sc;
	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_xflags = IFXF_MPSAFE;
	ifp->if_ioctl = rge_ioctl;
	ifp->if_qstart = rge_start;
	ifp->if_watchdog = rge_watchdog;
	ifq_set_maxlen(&ifp->if_snd, RGE_TX_LIST_CNT - 1);
	ifp->if_hardmtu = RGE_JUMBO_MTU;

	ifp->if_capabilities = IFCAP_VLAN_MTU | IFCAP_CSUM_IPv4 |
	    IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;

#if NVLAN > 0
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
#endif

	timeout_set(&sc->sc_timeout, rge_tick, sc);
	task_set(&sc->sc_task, rge_txstart, sc);

	/* Initialize ifmedia structures. */
	ifmedia_init(&sc->sc_media, IFM_IMASK, rge_ifmedia_upd,
	    rge_ifmedia_sts);
	rge_add_media_types(sc);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);
	sc->sc_media.ifm_media = sc->sc_media.ifm_cur->ifm_media;

	if_attach(ifp);
	ether_ifattach(ifp);
}

int
rge_intr(void *arg)
{
	struct rge_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	uint32_t status;
	int claimed = 0, rx, tx;

	if (!(ifp->if_flags & IFF_RUNNING))
		return (0);

	/* Disable interrupts. */
	RGE_WRITE_4(sc, RGE_IMR, 0);

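	/*
	 * With legacy (INTx) interrupts the line may be shared, so
	 * check that one of our interrupt sources is actually pending
	 * before claiming the interrupt.
	 */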
	if (!(sc->rge_flags & RGE_FLAG_MSI)) {
		if ((RGE_READ_4(sc, RGE_ISR) & sc->rge_intrs) == 0)
			return (0);
	}

	status = RGE_READ_4(sc, RGE_ISR);
	if (status)
		RGE_WRITE_4(sc, RGE_ISR, status);

	if (status & RGE_ISR_PCS_TIMEOUT)
		claimed = 1;

	rx = tx = 0;
	if (status & sc->rge_intrs) {
		if (status &
		    (sc->rge_rx_ack | RGE_ISR_RX_ERR | RGE_ISR_RX_FIFO_OFLOW)) {
			rx |= rge_rxeof(sc);
			claimed = 1;
		}

		if (status & (sc->rge_tx_ack | RGE_ISR_TX_ERR)) {
			tx |= rge_txeof(sc);
			claimed = 1;
		}

		if (status & RGE_ISR_SYSTEM_ERR) {
			KERNEL_LOCK();
			rge_init(ifp);
			KERNEL_UNLOCK();
			claimed = 1;
		}
	}

	if (sc->rge_timerintr) {
		if ((tx | rx) == 0) {
			/*
			 * Nothing needs to be processed; fall back
			 * to using TX/RX interrupts.
			 */
			rge_setup_intr(sc, RGE_IMTYPE_NONE);

			/*
			 * Recollect, mainly to avoid the possible
			 * race introduced by changing interrupt
			 * masks.
			 */
			rge_rxeof(sc);
			rge_txeof(sc);
		} else
			RGE_WRITE_4(sc, RGE_TIMERCNT, 1);
	} else if (tx | rx) {
		/*
		 * Assume that using simulated interrupt moderation
		 * (hardware timer based) could reduce the interrupt
		 * rate.
		 */
		rge_setup_intr(sc, RGE_IMTYPE_SIM);
	}

	RGE_WRITE_4(sc, RGE_IMR, sc->rge_intrs);

	return (claimed);
}

int
rge_encap(struct rge_softc *sc, struct mbuf *m, int idx)
{
	struct rge_tx_desc *d = NULL;
	struct rge_txq *txq;
	bus_dmamap_t txmap;
	uint32_t cmdsts, cflags = 0;
	int cur, error, i, last, nsegs;

	/*
	 * Set RGE_TDEXTSTS_IPCSUM if any checksum offloading is requested.
	 * Otherwise, RGE_TDEXTSTS_TCPCSUM / RGE_TDEXTSTS_UDPCSUM do not
	 * take effect.
	 */
	if ((m->m_pkthdr.csum_flags &
	    (M_IPV4_CSUM_OUT | M_TCP_CSUM_OUT | M_UDP_CSUM_OUT)) != 0) {
		cflags |= RGE_TDEXTSTS_IPCSUM;
		if (m->m_pkthdr.csum_flags & M_TCP_CSUM_OUT)
			cflags |= RGE_TDEXTSTS_TCPCSUM;
		if (m->m_pkthdr.csum_flags & M_UDP_CSUM_OUT)
			cflags |= RGE_TDEXTSTS_UDPCSUM;
	}

	txq = &sc->rge_ldata.rge_txq[idx];
	txmap = txq->txq_dmamap;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, txmap, m, BUS_DMA_NOWAIT);
	switch (error) {
	case 0:
		break;
	case EFBIG: /* mbuf chain is too fragmented */
		if (m_defrag(m, M_DONTWAIT) == 0 &&
		    bus_dmamap_load_mbuf(sc->sc_dmat, txmap, m,
		    BUS_DMA_NOWAIT) == 0)
			break;

		/* FALLTHROUGH */
	default:
		return (0);
	}

	bus_dmamap_sync(sc->sc_dmat, txmap, 0, txmap->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	nsegs = txmap->dm_nsegs;

	/* Set up hardware VLAN tagging. */
#if NVLAN > 0
	if (m->m_flags & M_VLANTAG)
		cflags |= swap16(m->m_pkthdr.ether_vtag | RGE_TDEXTSTS_VTAG);
#endif

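	/*
	 * Build the descriptor chain first, handing ownership of every
	 * descriptor except the first to the chip as we go.  The OWN
	 * bit of the first descriptor is set last, below, so the chip
	 * never sees a partially built chain.
	 */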
	cur = idx;
	cmdsts = RGE_TDCMDSTS_SOF;

	for (i = 0; i < txmap->dm_nsegs; i++) {
		d = &sc->rge_ldata.rge_tx_list[cur];

		d->rge_extsts = htole32(cflags);
		d->rge_addrlo = htole32(RGE_ADDR_LO(txmap->dm_segs[i].ds_addr));
		d->rge_addrhi = htole32(RGE_ADDR_HI(txmap->dm_segs[i].ds_addr));

		cmdsts |= txmap->dm_segs[i].ds_len;

		if (cur == RGE_TX_LIST_CNT - 1)
			cmdsts |= RGE_TDCMDSTS_EOR;

		d->rge_cmdsts = htole32(cmdsts);

		last = cur;
		cmdsts = RGE_TDCMDSTS_OWN;
		cur = RGE_NEXT_TX_DESC(cur);
	}

	/* Set EOF on the last descriptor. */
	d->rge_cmdsts |= htole32(RGE_TDCMDSTS_EOF);

	/* Transfer ownership of packet to the chip. */
	d = &sc->rge_ldata.rge_tx_list[idx];

	d->rge_cmdsts |= htole32(RGE_TDCMDSTS_OWN);

	bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map,
	    cur * sizeof(struct rge_tx_desc), sizeof(struct rge_tx_desc),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Update info of TX queue and descriptors. */
	txq->txq_mbuf = m;
	txq->txq_descidx = last;

	return (nsegs);
}

int
rge_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct rge_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			rge_init(ifp);
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				rge_init(ifp);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				rge_stop(ifp);
		}
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
		break;
	case SIOCSIFMTU:
		if (ifr->ifr_mtu > ifp->if_hardmtu) {
			error = EINVAL;
			break;
		}
		ifp->if_mtu = ifr->ifr_mtu;
		break;
	case SIOCGIFRXR:
		error = if_rxr_ioctl((struct if_rxrinfo *)ifr->ifr_data,
		    NULL, RGE_JUMBO_FRAMELEN, &sc->rge_ldata.rge_rx_ring);
		break;
	default:
		error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data);
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			rge_iff(sc);
		error = 0;
	}

	splx(s);
	return (error);
}

void
rge_start(struct ifqueue *ifq)
{
	struct ifnet *ifp = ifq->ifq_if;
	struct rge_softc *sc = ifp->if_softc;
	struct mbuf *m;
	int free, idx, used;
	int queued = 0;

	if (!LINK_STATE_IS_UP(ifp->if_link_state)) {
		ifq_purge(ifq);
		return;
	}

	/* Calculate free space. */
	idx = sc->rge_ldata.rge_txq_prodidx;
	free = sc->rge_ldata.rge_txq_considx;
	if (free <= idx)
		free += RGE_TX_LIST_CNT;
	free -= idx;

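	/*
	 * Only dequeue a packet while enough descriptors remain for a
	 * maximally fragmented mbuf chain (RGE_TX_NSEGS segments).
	 */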
	for (;;) {
		if (RGE_TX_NSEGS >= free + 2) {
			ifq_set_oactive(&ifp->if_snd);
			break;
		}

		m = ifq_dequeue(ifq);
		if (m == NULL)
			break;

		used = rge_encap(sc, m, idx);
		if (used == 0) {
			m_freem(m);
			continue;
		}

		KASSERT(used <= free);
		free -= used;

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif

		idx += used;
		if (idx >= RGE_TX_LIST_CNT)
			idx -= RGE_TX_LIST_CNT;

		queued++;
	}

	if (queued == 0)
		return;

	/* Set a timeout in case the chip goes out to lunch. */
	ifp->if_timer = 5;

	sc->rge_ldata.rge_txq_prodidx = idx;
	ifq_serialize(ifq, &sc->sc_task);
}

void
rge_watchdog(struct ifnet *ifp)
{
	struct rge_softc *sc = ifp->if_softc;

	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);
	ifp->if_oerrors++;

	rge_init(ifp);
}

int
rge_init(struct ifnet *ifp)
{
	struct rge_softc *sc = ifp->if_softc;
	uint32_t val;
	int i;

	rge_stop(ifp);

	/* Set MAC address. */
	rge_set_macaddr(sc, sc->sc_arpcom.ac_enaddr);

	/* Set maximum frame size, but don't let MTU be less than ETHERMTU. */
	if (ifp->if_mtu < ETHERMTU)
		sc->rge_rxbufsz = ETHERMTU;
	else
		sc->rge_rxbufsz = ifp->if_mtu;

	sc->rge_rxbufsz += ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN +
	    ETHER_CRC_LEN + 1;

	if (sc->rge_rxbufsz > RGE_JUMBO_FRAMELEN)
		sc->rge_rxbufsz -= 1;

	RGE_WRITE_2(sc, RGE_RXMAXSIZE, sc->rge_rxbufsz);

	/* Initialize RX and TX descriptor lists. */
	rge_rx_list_init(sc);
	rge_tx_list_init(sc);

	/* Load the addresses of the RX and TX lists into the chip. */
	RGE_WRITE_4(sc, RGE_RXDESC_ADDR_LO,
	    RGE_ADDR_LO(sc->rge_ldata.rge_rx_list_map->dm_segs[0].ds_addr));
	RGE_WRITE_4(sc, RGE_RXDESC_ADDR_HI,
	    RGE_ADDR_HI(sc->rge_ldata.rge_rx_list_map->dm_segs[0].ds_addr));
	RGE_WRITE_4(sc, RGE_TXDESC_ADDR_LO,
	    RGE_ADDR_LO(sc->rge_ldata.rge_tx_list_map->dm_segs[0].ds_addr));
	RGE_WRITE_4(sc, RGE_TXDESC_ADDR_HI,
	    RGE_ADDR_HI(sc->rge_ldata.rge_tx_list_map->dm_segs[0].ds_addr));

	RGE_SETBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);

	RGE_CLRBIT_1(sc, 0xf1, 0x80);
	RGE_CLRBIT_1(sc, RGE_CFG2, RGE_CFG2_CLKREQ_EN);
	RGE_CLRBIT_1(sc, RGE_CFG5, RGE_CFG5_PME_STS);
	RGE_CLRBIT_1(sc, RGE_CFG3, RGE_CFG3_RDY_TO_L23);

	/* Clear interrupt moderation timer. */
	for (i = 0; i < 64; i++)
		RGE_WRITE_4(sc, RGE_INTMITI(i), 0);

	/* Set the initial RX and TX configurations. */
	RGE_WRITE_4(sc, RGE_RXCFG, RGE_RXCFG_CONFIG);
	RGE_WRITE_4(sc, RGE_TXCFG, RGE_TXCFG_CONFIG);

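	/*
	 * The CSI and MAC OCP accesses that follow appear to come from
	 * the vendor's initialization sequence; most of these registers
	 * are undocumented.
	 */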
	val = rge_read_csi(sc, 0x70c) & ~0xff000000;
	rge_write_csi(sc, 0x70c, val | 0x27000000);

	/* Enable hardware optimization function. */
	val = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x78) & ~0x00007000;
	pci_conf_write(sc->sc_pc, sc->sc_tag, 0x78, val | 0x00005000);

	RGE_WRITE_2(sc, 0x0382, 0x221b);
	RGE_WRITE_1(sc, 0x4500, 0);
	RGE_WRITE_2(sc, 0x4800, 0);
	RGE_CLRBIT_1(sc, RGE_CFG1, RGE_CFG1_SPEED_DOWN);

	rge_write_mac_ocp(sc, 0xc140, 0xffff);
	rge_write_mac_ocp(sc, 0xc142, 0xffff);

	val = rge_read_mac_ocp(sc, 0xd3e2) & ~0x0fff;
	rge_write_mac_ocp(sc, 0xd3e2, val | 0x03a9);

	RGE_MAC_CLRBIT(sc, 0xd3e4, 0x00ff);
	RGE_MAC_SETBIT(sc, 0xe860, 0x0080);
	RGE_MAC_SETBIT(sc, 0xeb58, 0x0001);

	val = rge_read_mac_ocp(sc, 0xe614) & ~0x0700;
	if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3)
		rge_write_mac_ocp(sc, 0xe614, val | 0x0400);
	else
		rge_write_mac_ocp(sc, 0xe614, val | 0x0200);

	RGE_MAC_CLRBIT(sc, 0xe63e, 0x0c00);

	if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3) {
		val = rge_read_mac_ocp(sc, 0xe63e) & ~0x0030;
		rge_write_mac_ocp(sc, 0xe63e, val | 0x0020);
	} else
		RGE_MAC_CLRBIT(sc, 0xe63e, 0x0030);

	RGE_MAC_SETBIT(sc, 0xc0b4, 0x000c);

	val = rge_read_mac_ocp(sc, 0xeb6a) & ~0x00ff;
	rge_write_mac_ocp(sc, 0xeb6a, val | 0x0033);

	val = rge_read_mac_ocp(sc, 0xeb50) & ~0x03e0;
	rge_write_mac_ocp(sc, 0xeb50, val | 0x0040);

	val = rge_read_mac_ocp(sc, 0xe056) & ~0x00f0;
	rge_write_mac_ocp(sc, 0xe056, val | 0x0030);

	RGE_WRITE_1(sc, RGE_TDFNR, 0x10);

	RGE_SETBIT_1(sc, RGE_DLLPR, RGE_DLLPR_TX_10M_PS_EN);

	RGE_MAC_CLRBIT(sc, 0xe040, 0x1000);

	val = rge_read_mac_ocp(sc, 0xea1c) & ~0x0003;
	rge_write_mac_ocp(sc, 0xea1c, val | 0x0001);

	val = rge_read_mac_ocp(sc, 0xe0c0) & ~0x4f0f;
	rge_write_mac_ocp(sc, 0xe0c0, val | 0x4403);

	RGE_MAC_SETBIT(sc, 0xe052, 0x0068);
	RGE_MAC_CLRBIT(sc, 0xe052, 0x0080);

	val = rge_read_mac_ocp(sc, 0xc0ac) & ~0x0080;
	rge_write_mac_ocp(sc, 0xc0ac, val | 0x1f00);

	val = rge_read_mac_ocp(sc, 0xd430) & ~0x0fff;
	rge_write_mac_ocp(sc, 0xd430, val | 0x047f);

	val = rge_read_mac_ocp(sc, 0xe84c) & ~0x0040;
	if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3)
		rge_write_mac_ocp(sc, 0xe84c, 0x00c0);
	else
		rge_write_mac_ocp(sc, 0xe84c, 0x0080);

	RGE_SETBIT_1(sc, RGE_DLLPR, RGE_DLLPR_PFM_EN);

	if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3)
		RGE_SETBIT_1(sc, RGE_MCUCMD, 0x01);

	/* Disable EEE plus. */
	RGE_MAC_CLRBIT(sc, 0xe080, 0x0002);

	RGE_MAC_CLRBIT(sc, 0xea1c, 0x0004);

	RGE_MAC_SETBIT(sc, 0xeb54, 0x0001);
	DELAY(1);
	RGE_MAC_CLRBIT(sc, 0xeb54, 0x0001);

	RGE_CLRBIT_4(sc, 0x1880, 0x0030);

	rge_write_mac_ocp(sc, 0xe098, 0xc302);

	if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)
		RGE_SETBIT_4(sc, RGE_RXCFG, RGE_RXCFG_VLANSTRIP);

	RGE_SETBIT_2(sc, RGE_CPLUSCMD, RGE_CPLUSCMD_RXCSUM);

	for (i = 0; i < 10; i++) {
		if (!(rge_read_mac_ocp(sc, 0xe00e) & 0x2000))
			break;
		DELAY(1000);
	}

	/* Disable RXDV gate. */
	RGE_CLRBIT_1(sc, RGE_PPSW, 0x08);
	DELAY(2000);

	rge_ifmedia_upd(ifp);

	/* Enable transmit and receive. */
	RGE_WRITE_1(sc, RGE_CMD, RGE_CMD_TXENB | RGE_CMD_RXENB);

	/* Program promiscuous mode and multicast filters. */
	rge_iff(sc);

	RGE_CLRBIT_1(sc, RGE_CFG2, RGE_CFG2_CLKREQ_EN);
	RGE_CLRBIT_1(sc, RGE_CFG5, RGE_CFG5_PME_STS);

	RGE_CLRBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);

	/* Enable interrupts. */
	rge_setup_intr(sc, RGE_IMTYPE_SIM);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	timeout_add_sec(&sc->sc_timeout, 1);

	return (0);
}

/*
 * Stop the adapter and free any mbufs allocated to the RX and TX lists.
 */
void
rge_stop(struct ifnet *ifp)
{
	struct rge_softc *sc = ifp->if_softc;
	int i;

	timeout_del(&sc->sc_timeout);

	ifp->if_timer = 0;
	ifp->if_flags &= ~IFF_RUNNING;
	sc->rge_timerintr = 0;

	RGE_CLRBIT_4(sc, RGE_RXCFG, RGE_RXCFG_ALLPHYS | RGE_RXCFG_INDIV |
	    RGE_RXCFG_MULTI | RGE_RXCFG_BROAD | RGE_RXCFG_RUNT |
	    RGE_RXCFG_ERRPKT);

	RGE_WRITE_4(sc, RGE_IMR, 0);

	/* Clear timer interrupts. */
	RGE_WRITE_4(sc, RGE_TIMERINT0, 0);
	RGE_WRITE_4(sc, RGE_TIMERINT1, 0);
	RGE_WRITE_4(sc, RGE_TIMERINT2, 0);
	RGE_WRITE_4(sc, RGE_TIMERINT3, 0);

	rge_reset(sc);

	intr_barrier(sc->sc_ih);
	ifq_barrier(&ifp->if_snd);
	ifq_clr_oactive(&ifp->if_snd);

	if (sc->rge_head != NULL) {
		m_freem(sc->rge_head);
		sc->rge_head = sc->rge_tail = NULL;
	}

	/* Free the TX list buffers. */
	for (i = 0; i < RGE_TX_LIST_CNT; i++) {
		if (sc->rge_ldata.rge_txq[i].txq_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat,
			    sc->rge_ldata.rge_txq[i].txq_dmamap);
			m_freem(sc->rge_ldata.rge_txq[i].txq_mbuf);
			sc->rge_ldata.rge_txq[i].txq_mbuf = NULL;
		}
	}

	/* Free the RX list buffers. */
	for (i = 0; i < RGE_RX_LIST_CNT; i++) {
		if (sc->rge_ldata.rge_rxq[i].rxq_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat,
			    sc->rge_ldata.rge_rxq[i].rxq_dmamap);
			m_freem(sc->rge_ldata.rge_rxq[i].rxq_mbuf);
			sc->rge_ldata.rge_rxq[i].rxq_mbuf = NULL;
		}
	}
}

/*
 * Set media options.
 */
int
rge_ifmedia_upd(struct ifnet *ifp)
{
	struct rge_softc *sc = ifp->if_softc;
	struct ifmedia *ifm = &sc->sc_media;
	int anar, gig, val;

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	/* Disable Gigabit Lite. */
	RGE_PHY_CLRBIT(sc, 0xa428, 0x0200);
	RGE_PHY_CLRBIT(sc, 0xa5ea, 0x0001);

	val = rge_read_phy_ocp(sc, 0xa5d4);
	val &= ~RGE_ADV_2500TFDX;

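	/*
	 * 10/100 modes are advertised via ANAR, 1000baseT via
	 * MII_100T2CR (GTCR) and 2500baseT via the vendor-specific
	 * PHY OCP register 0xa5d4.
	 */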
	anar = gig = 0;
	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_AUTO:
		anar |= ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10;
		gig |= GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX;
		val |= RGE_ADV_2500TFDX;
		break;
	case IFM_2500_T:
		anar |= ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10;
		gig |= GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX;
		val |= RGE_ADV_2500TFDX;
		ifp->if_baudrate = IF_Mbps(2500);
		break;
	case IFM_1000_T:
		anar |= ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10;
		gig |= GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX;
		ifp->if_baudrate = IF_Gbps(1);
		break;
	case IFM_100_TX:
		anar |= ANAR_TX | ANAR_TX_FD;
		ifp->if_baudrate = IF_Mbps(100);
		break;
	case IFM_10_T:
		anar |= ANAR_10 | ANAR_10_FD;
		ifp->if_baudrate = IF_Mbps(10);
		break;
	default:
		printf("%s: unsupported media type\n", sc->sc_dev.dv_xname);
		return (EINVAL);
	}

	rge_write_phy(sc, 0, MII_ANAR, anar | ANAR_PAUSE_ASYM | ANAR_FC);
	rge_write_phy(sc, 0, MII_100T2CR, gig);
	rge_write_phy_ocp(sc, 0xa5d4, val);
	rge_write_phy(sc, 0, MII_BMCR, BMCR_AUTOEN | BMCR_STARTNEG);

	return (0);
}

/*
 * Report current media status.
 */
void
rge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct rge_softc *sc = ifp->if_softc;
	uint16_t status = 0;

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (rge_get_link_status(sc)) {
		ifmr->ifm_status |= IFM_ACTIVE;

		status = RGE_READ_2(sc, RGE_PHYSTAT);
		if ((status & RGE_PHYSTAT_FDX) ||
		    (status & RGE_PHYSTAT_2500MBPS))
			ifmr->ifm_active |= IFM_FDX;
		else
			ifmr->ifm_active |= IFM_HDX;

		if (status & RGE_PHYSTAT_10MBPS)
			ifmr->ifm_active |= IFM_10_T;
		else if (status & RGE_PHYSTAT_100MBPS)
			ifmr->ifm_active |= IFM_100_TX;
		else if (status & RGE_PHYSTAT_1000MBPS)
			ifmr->ifm_active |= IFM_1000_T;
		else if (status & RGE_PHYSTAT_2500MBPS)
			ifmr->ifm_active |= IFM_2500_T;
	}
}

/*
 * Allocate memory for RX/TX rings.
 */
int
rge_allocmem(struct rge_softc *sc)
{
	int error, i;

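	/*
	 * Each ring is set up in four steps: create a DMA map, allocate
	 * DMA'able memory, map that memory into kernel virtual address
	 * space, and load it into the map.
	 */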
	/* Allocate DMA'able memory for the TX ring. */
	error = bus_dmamap_create(sc->sc_dmat, RGE_TX_LIST_SZ, 1,
	    RGE_TX_LIST_SZ, 0, BUS_DMA_NOWAIT, &sc->rge_ldata.rge_tx_list_map);
	if (error) {
		printf("%s: can't create TX list map\n", sc->sc_dev.dv_xname);
		return (error);
	}
	error = bus_dmamem_alloc(sc->sc_dmat, RGE_TX_LIST_SZ, RGE_ALIGN, 0,
	    &sc->rge_ldata.rge_tx_listseg, 1, &sc->rge_ldata.rge_tx_listnseg,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO);
	if (error) {
		printf("%s: can't alloc TX list\n", sc->sc_dev.dv_xname);
		return (error);
	}

	/* Load the map for the TX ring. */
	error = bus_dmamem_map(sc->sc_dmat, &sc->rge_ldata.rge_tx_listseg,
	    sc->rge_ldata.rge_tx_listnseg, RGE_TX_LIST_SZ,
	    (caddr_t *)&sc->rge_ldata.rge_tx_list,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
	if (error) {
		printf("%s: can't map TX dma buffers\n", sc->sc_dev.dv_xname);
		bus_dmamem_free(sc->sc_dmat, &sc->rge_ldata.rge_tx_listseg,
		    sc->rge_ldata.rge_tx_listnseg);
		return (error);
	}
	error = bus_dmamap_load(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map,
	    sc->rge_ldata.rge_tx_list, RGE_TX_LIST_SZ, NULL, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: can't load TX dma map\n", sc->sc_dev.dv_xname);
		bus_dmamap_destroy(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map);
		bus_dmamem_unmap(sc->sc_dmat,
		    (caddr_t)sc->rge_ldata.rge_tx_list, RGE_TX_LIST_SZ);
		bus_dmamem_free(sc->sc_dmat, &sc->rge_ldata.rge_tx_listseg,
		    sc->rge_ldata.rge_tx_listnseg);
		return (error);
	}

	/* Create DMA maps for TX buffers. */
	for (i = 0; i < RGE_TX_LIST_CNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, RGE_JUMBO_FRAMELEN,
		    RGE_TX_NSEGS, RGE_JUMBO_FRAMELEN, 0, 0,
		    &sc->rge_ldata.rge_txq[i].txq_dmamap);
		if (error) {
			printf("%s: can't create DMA map for TX\n",
			    sc->sc_dev.dv_xname);
			return (error);
		}
	}

	/* Allocate DMA'able memory for the RX ring. */
	error = bus_dmamap_create(sc->sc_dmat, RGE_RX_LIST_SZ, 1,
	    RGE_RX_LIST_SZ, 0, 0, &sc->rge_ldata.rge_rx_list_map);
	if (error) {
		printf("%s: can't create RX list map\n", sc->sc_dev.dv_xname);
		return (error);
	}
	error = bus_dmamem_alloc(sc->sc_dmat, RGE_RX_LIST_SZ, RGE_ALIGN, 0,
	    &sc->rge_ldata.rge_rx_listseg, 1, &sc->rge_ldata.rge_rx_listnseg,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO);
	if (error) {
		printf("%s: can't alloc RX list\n", sc->sc_dev.dv_xname);
		return (error);
	}

	/* Load the map for the RX ring. */
	error = bus_dmamem_map(sc->sc_dmat, &sc->rge_ldata.rge_rx_listseg,
	    sc->rge_ldata.rge_rx_listnseg, RGE_RX_LIST_SZ,
	    (caddr_t *)&sc->rge_ldata.rge_rx_list,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
	if (error) {
		printf("%s: can't map RX dma buffers\n", sc->sc_dev.dv_xname);
		bus_dmamem_free(sc->sc_dmat, &sc->rge_ldata.rge_rx_listseg,
		    sc->rge_ldata.rge_rx_listnseg);
		return (error);
	}
	error = bus_dmamap_load(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map,
	    sc->rge_ldata.rge_rx_list, RGE_RX_LIST_SZ, NULL, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: can't load RX dma map\n", sc->sc_dev.dv_xname);
		bus_dmamap_destroy(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map);
		bus_dmamem_unmap(sc->sc_dmat,
		    (caddr_t)sc->rge_ldata.rge_rx_list, RGE_RX_LIST_SZ);
		bus_dmamem_free(sc->sc_dmat, &sc->rge_ldata.rge_rx_listseg,
		    sc->rge_ldata.rge_rx_listnseg);
		return (error);
	}

	/* Create DMA maps for RX buffers. */
	for (i = 0; i < RGE_RX_LIST_CNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, RGE_JUMBO_FRAMELEN, 1,
		    RGE_JUMBO_FRAMELEN, 0, 0,
		    &sc->rge_ldata.rge_rxq[i].rxq_dmamap);
		if (error) {
			printf("%s: can't create DMA map for RX\n",
			    sc->sc_dev.dv_xname);
			return (error);
		}
	}

	return (error);
}

/*
 * Initialize the RX descriptor and attach an mbuf cluster.
 */
int
rge_newbuf(struct rge_softc *sc)
{
	struct mbuf *m;
	struct rge_rx_desc *r;
	struct rge_rxq *rxq;
	bus_dmamap_t rxmap;
	int idx;

	m = MCLGETI(NULL, M_DONTWAIT, NULL, sc->rge_rxbufsz);
	if (m == NULL)
		return (ENOBUFS);

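	/*
	 * The cluster may be larger than the requested RX buffer size;
	 * advance m_data so that exactly rge_rxbufsz bytes remain for
	 * the chip to fill.
	 */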
	m->m_data += (m->m_ext.ext_size - sc->rge_rxbufsz);
	m->m_len = m->m_pkthdr.len = sc->rge_rxbufsz;

	idx = sc->rge_ldata.rge_rxq_prodidx;
	rxq = &sc->rge_ldata.rge_rxq[idx];
	rxmap = rxq->rxq_dmamap;

	if (bus_dmamap_load_mbuf(sc->sc_dmat, rxmap, m, BUS_DMA_NOWAIT)) {
		m_freem(m);
		return (ENOBUFS);
	}

	bus_dmamap_sync(sc->sc_dmat, rxmap, 0, rxmap->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	/* Map the segments into RX descriptors. */
	r = &sc->rge_ldata.rge_rx_list[idx];

	if (RGE_OWN(r)) {
		printf("%s: tried to map busy RX descriptor\n",
		    sc->sc_dev.dv_xname);
		m_freem(m);
		return (ENOBUFS);
	}

	rxq->rxq_mbuf = m;

	r->rge_extsts = 0;
	r->rge_addrlo = htole32(RGE_ADDR_LO(rxmap->dm_segs[0].ds_addr));
	r->rge_addrhi = htole32(RGE_ADDR_HI(rxmap->dm_segs[0].ds_addr));

	r->rge_cmdsts = htole32(rxmap->dm_segs[0].ds_len);
	if (idx == RGE_RX_LIST_CNT - 1)
		r->rge_cmdsts |= htole32(RGE_RDCMDSTS_EOR);

	r->rge_cmdsts |= htole32(RGE_RDCMDSTS_OWN);

	bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map,
	    idx * sizeof(struct rge_rx_desc), sizeof(struct rge_rx_desc),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	sc->rge_ldata.rge_rxq_prodidx = RGE_NEXT_RX_DESC(idx);

	return (0);
}

void
rge_discard_rxbuf(struct rge_softc *sc, int idx)
{
	struct rge_rx_desc *r;

	r = &sc->rge_ldata.rge_rx_list[idx];

	r->rge_cmdsts = htole32(RGE_JUMBO_FRAMELEN);
	r->rge_extsts = 0;
	if (idx == RGE_RX_LIST_CNT - 1)
		r->rge_cmdsts |= htole32(RGE_RDCMDSTS_EOR);
	r->rge_cmdsts |= htole32(RGE_RDCMDSTS_OWN);

	bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map,
	    idx * sizeof(struct rge_rx_desc), sizeof(struct rge_rx_desc),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

void
rge_rx_list_init(struct rge_softc *sc)
{
	memset(sc->rge_ldata.rge_rx_list, 0, RGE_RX_LIST_SZ);

	sc->rge_ldata.rge_rxq_prodidx = sc->rge_ldata.rge_rxq_considx = 0;
	sc->rge_head = sc->rge_tail = NULL;

	if_rxr_init(&sc->rge_ldata.rge_rx_ring, 2, RGE_RX_LIST_CNT - 1);
	rge_fill_rx_ring(sc);
}

void
rge_fill_rx_ring(struct rge_softc *sc)
{
	struct if_rxring *rxr = &sc->rge_ldata.rge_rx_ring;
	int slots;

	for (slots = if_rxr_get(rxr, RGE_RX_LIST_CNT); slots > 0; slots--) {
		if (rge_newbuf(sc) == ENOBUFS)
			break;
	}
	if_rxr_put(rxr, slots);
}

void
rge_tx_list_init(struct rge_softc *sc)
{
	int i;

	memset(sc->rge_ldata.rge_tx_list, 0, RGE_TX_LIST_SZ);

	for (i = 0; i < RGE_TX_LIST_CNT; i++)
		sc->rge_ldata.rge_txq[i].txq_mbuf = NULL;

	bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map, 0,
	    sc->rge_ldata.rge_tx_list_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	sc->rge_ldata.rge_txq_prodidx = sc->rge_ldata.rge_txq_considx = 0;
}

int
rge_rxeof(struct rge_softc *sc)
{
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct mbuf *m;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct if_rxring *rxr = &sc->rge_ldata.rge_rx_ring;
	struct rge_rx_desc *cur_rx;
	struct rge_rxq *rxq;
	uint32_t rxstat, extsts;
	int i, total_len, rx = 0;

	for (i = sc->rge_ldata.rge_rxq_considx; if_rxr_inuse(rxr) > 0;
	    i = RGE_NEXT_RX_DESC(i)) {
		/* Invalidate the descriptor memory. */
		bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map,
		    i * sizeof(struct rge_rx_desc), sizeof(struct rge_rx_desc),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		cur_rx = &sc->rge_ldata.rge_rx_list[i];

		if (RGE_OWN(cur_rx))
			break;

		rxstat = letoh32(cur_rx->rge_cmdsts);
		extsts = letoh32(cur_rx->rge_extsts);

		total_len = RGE_RXBYTES(cur_rx);
		rxq = &sc->rge_ldata.rge_rxq[i];
		m = rxq->rxq_mbuf;
		rxq->rxq_mbuf = NULL;
		if_rxr_put(rxr, 1);
		rx = 1;

		/* Invalidate the RX mbuf and unload its map. */
		bus_dmamap_sync(sc->sc_dmat, rxq->rxq_dmamap, 0,
		    rxq->rxq_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, rxq->rxq_dmamap);

		if ((rxstat & (RGE_RDCMDSTS_SOF | RGE_RDCMDSTS_EOF)) !=
		    (RGE_RDCMDSTS_SOF | RGE_RDCMDSTS_EOF)) {
			rge_discard_rxbuf(sc, i);
			continue;
		}

		if (rxstat & RGE_RDCMDSTS_RXERRSUM) {
			ifp->if_ierrors++;
			/*
			 * If this is part of a multi-fragment packet,
			 * discard all the pieces.
			 */
			if (sc->rge_head != NULL) {
				m_freem(sc->rge_head);
				sc->rge_head = sc->rge_tail = NULL;
			}
			rge_discard_rxbuf(sc, i);
			continue;
		}

		if (sc->rge_head != NULL) {
			m->m_len = total_len;
			/*
			 * Special case: if there are 4 bytes or less
			 * in this buffer, the mbuf can be discarded:
			 * the last 4 bytes are the CRC, which we don't
			 * care about anyway.
			 */
			if (m->m_len <= ETHER_CRC_LEN) {
				sc->rge_tail->m_len -=
				    (ETHER_CRC_LEN - m->m_len);
				m_freem(m);
			} else {
				m->m_len -= ETHER_CRC_LEN;
				m->m_flags &= ~M_PKTHDR;
				sc->rge_tail->m_next = m;
			}
			m = sc->rge_head;
			sc->rge_head = sc->rge_tail = NULL;
			m->m_pkthdr.len = total_len - ETHER_CRC_LEN;
		} else
			m->m_pkthdr.len = m->m_len =
			    (total_len - ETHER_CRC_LEN);

		/* Check IP header checksum. */
		if (!(rxstat & RGE_RDCMDSTS_IPCSUMERR) &&
		    (extsts & RGE_RDEXTSTS_IPV4))
			m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;

		/* Check TCP/UDP checksum. */
		if ((extsts & (RGE_RDEXTSTS_IPV4 | RGE_RDEXTSTS_IPV6)) &&
		    (((rxstat & RGE_RDCMDSTS_TCPPKT) &&
		    !(rxstat & RGE_RDCMDSTS_TCPCSUMERR)) ||
		    ((rxstat & RGE_RDCMDSTS_UDPPKT) &&
		    !(rxstat & RGE_RDCMDSTS_UDPCSUMERR))))
			m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK |
			    M_UDP_CSUM_IN_OK;

#if NVLAN > 0
		if (extsts & RGE_RDEXTSTS_VTAG) {
			m->m_pkthdr.ether_vtag =
			    ntohs(extsts & RGE_RDEXTSTS_VLAN_MASK);
			m->m_flags |= M_VLANTAG;
		}
#endif

		ml_enqueue(&ml, m);
	}

	sc->rge_ldata.rge_rxq_considx = i;
	rge_fill_rx_ring(sc);

	if_input(ifp, &ml);

	return (rx);
}

int
rge_txeof(struct rge_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct rge_txq *txq;
	uint32_t txstat;
	int cons, idx, prod;
	int free = 0;

	prod = sc->rge_ldata.rge_txq_prodidx;
	cons = sc->rge_ldata.rge_txq_considx;

	while (prod != cons) {
		txq = &sc->rge_ldata.rge_txq[cons];
		idx = txq->txq_descidx;

		bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map,
		    idx * sizeof(struct rge_tx_desc),
		    sizeof(struct rge_tx_desc),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		txstat = letoh32(sc->rge_ldata.rge_tx_list[idx].rge_cmdsts);

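		/*
		 * The chip still owns this descriptor, so transmission
		 * is in progress; remember this (free = 2) so that the
		 * transmit task can be rescheduled below.
		 */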
		if (txstat & RGE_TDCMDSTS_OWN) {
			free = 2;
			break;
		}

		bus_dmamap_sync(sc->sc_dmat, txq->txq_dmamap, 0,
		    txq->txq_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, txq->txq_dmamap);
		m_freem(txq->txq_mbuf);
		txq->txq_mbuf = NULL;

		if (txstat & (RGE_TDCMDSTS_EXCESSCOLL | RGE_TDCMDSTS_COLL))
			ifp->if_collisions++;
		if (txstat & RGE_TDCMDSTS_TXERR)
			ifp->if_oerrors++;

		bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map,
		    idx * sizeof(struct rge_tx_desc),
		    sizeof(struct rge_tx_desc),
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		cons = RGE_NEXT_TX_DESC(idx);
		free = 1;
	}

	if (free == 0)
		return (0);

	sc->rge_ldata.rge_txq_considx = cons;

	if (ifq_is_oactive(&ifp->if_snd))
		ifq_restart(&ifp->if_snd);
	else if (free == 2)
		ifq_serialize(&ifp->if_snd, &sc->sc_task);
	else
		ifp->if_timer = 0;

	return (1);
}

void
rge_reset(struct rge_softc *sc)
{
	int i;

	/* Enable RXDV gate. */
	RGE_SETBIT_1(sc, RGE_PPSW, 0x08);
	DELAY(2000);

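	/* Wait for the RX and TX FIFOs to drain. */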
	for (i = 0; i < 3000; i++) {
		DELAY(50);
		if ((RGE_READ_1(sc, RGE_MCUCMD) & (RGE_MCUCMD_RXFIFO_EMPTY |
		    RGE_MCUCMD_TXFIFO_EMPTY)) == (RGE_MCUCMD_RXFIFO_EMPTY |
		    RGE_MCUCMD_TXFIFO_EMPTY))
			break;
	}
	if (sc->rge_type == MAC_CFG4 || sc->rge_type == MAC_CFG5) {
		for (i = 0; i < 3000; i++) {
			DELAY(50);
			if ((RGE_READ_2(sc, RGE_IM) & 0x0103) == 0x0103)
				break;
		}
	}

	DELAY(2000);

	/* Soft reset. */
	RGE_WRITE_1(sc, RGE_CMD, RGE_CMD_RESET);

	for (i = 0; i < RGE_TIMEOUT; i++) {
		DELAY(100);
		if (!(RGE_READ_1(sc, RGE_CMD) & RGE_CMD_RESET))
			break;
	}
	if (i == RGE_TIMEOUT)
		printf("%s: reset never completed!\n", sc->sc_dev.dv_xname);
}

void
rge_iff(struct rge_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct arpcom *ac = &sc->sc_arpcom;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint32_t hashes[2];
	uint32_t rxfilt;
	int h = 0;

	rxfilt = RGE_READ_4(sc, RGE_RXCFG);
	rxfilt &= ~(RGE_RXCFG_ALLPHYS | RGE_RXCFG_MULTI);
	ifp->if_flags &= ~IFF_ALLMULTI;

	/*
	 * Always accept frames destined to our station address.
	 * Always accept broadcast frames.
	 */
	rxfilt |= RGE_RXCFG_INDIV | RGE_RXCFG_BROAD;

	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
		ifp->if_flags |= IFF_ALLMULTI;
		rxfilt |= RGE_RXCFG_MULTI;
		if (ifp->if_flags & IFF_PROMISC)
			rxfilt |= RGE_RXCFG_ALLPHYS;
		hashes[0] = hashes[1] = 0xffffffff;
	} else {
		rxfilt |= RGE_RXCFG_MULTI;
		/* Program new filter. */
		memset(hashes, 0, sizeof(hashes));

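		/*
		 * The upper 6 bits of the big-endian CRC32 of each
		 * multicast address select one of the 64 bits in the
		 * hash filter.
		 */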
		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			h = ether_crc32_be(enm->enm_addrlo,
			    ETHER_ADDR_LEN) >> 26;

			if (h < 32)
				hashes[0] |= (1 << h);
			else
				hashes[1] |= (1 << (h - 32));

			ETHER_NEXT_MULTI(step, enm);
		}
	}

	RGE_WRITE_4(sc, RGE_RXCFG, rxfilt);
	RGE_WRITE_4(sc, RGE_MAR0, swap32(hashes[1]));
	RGE_WRITE_4(sc, RGE_MAR4, swap32(hashes[0]));
}

void
rge_set_phy_power(struct rge_softc *sc, int on)
{
	int i;

	if (on) {
		RGE_SETBIT_1(sc, RGE_PMCH, 0xc0);

		rge_write_phy(sc, 0, MII_BMCR, BMCR_AUTOEN);

		for (i = 0; i < RGE_TIMEOUT; i++) {
			if ((rge_read_phy_ocp(sc, 0xa420) & 0x0007) == 3)
				break;
			DELAY(1000);
		}
	} else {
		rge_write_phy(sc, 0, MII_BMCR, BMCR_AUTOEN | BMCR_PDOWN);
		RGE_CLRBIT_1(sc, RGE_PMCH, 0x80);
		RGE_CLRBIT_1(sc, RGE_PPSW, 0x40);
	}
}

void
rge_phy_config(struct rge_softc *sc)
{
	/* Read microcode version. */
	rge_write_phy_ocp(sc, 0xa436, 0x801e);
	sc->rge_mcodever = rge_read_phy_ocp(sc, 0xa438);

	switch (sc->rge_type) {
	case MAC_CFG2:
		rge_phy_config_mac_cfg2(sc);
		break;
	case MAC_CFG3:
		rge_phy_config_mac_cfg3(sc);
		break;
	case MAC_CFG4:
		rge_phy_config_mac_cfg4(sc);
		break;
	case MAC_CFG5:
		rge_phy_config_mac_cfg5(sc);
		break;
	default:
		break;	/* Can't happen. */
	}

	/* Disable EEE. */
	RGE_MAC_CLRBIT(sc, 0xe040, 0x0003);
	if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3) {
		RGE_MAC_CLRBIT(sc, 0xeb62, 0x0006);
		RGE_PHY_CLRBIT(sc, 0xa432, 0x0010);
	}
	RGE_PHY_CLRBIT(sc, 0xa5d0, 0x0006);
	RGE_PHY_CLRBIT(sc, 0xa6d4, 0x0001);
	RGE_PHY_CLRBIT(sc, 0xa6d8, 0x0010);
	RGE_PHY_CLRBIT(sc, 0xa428, 0x0080);
	RGE_PHY_CLRBIT(sc, 0xa4a2, 0x0200);

	rge_patch_phy_mcu(sc, 1);
	RGE_MAC_CLRBIT(sc, 0xe052, 0x0001);
	RGE_PHY_CLRBIT(sc, 0xa442, 0x3000);
	RGE_PHY_CLRBIT(sc, 0xa430, 0x8000);
	rge_patch_phy_mcu(sc, 0);
}

void
rge_phy_config_mac_cfg2(struct rge_softc *sc)
{
	uint16_t val;
	int i;

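	/*
	 * The EPHY and PHY OCP writes below appear to follow the
	 * vendor's reference initialization sequence; the individual
	 * register bits are undocumented.
	 */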
1497 	for (i = 0; i < nitems(rtl8125_mac_cfg2_ephy); i++)
1498 		rge_write_ephy(sc, rtl8125_mac_cfg2_ephy[i].reg,
1499 		    rtl8125_mac_cfg2_ephy[i].val);
1500 
1501 	rge_phy_config_mcu(sc, RGE_MAC_CFG2_MCODE_VER);
1502 
1503 	val = rge_read_phy_ocp(sc, 0xad40) & ~0x03ff;
1504 	rge_write_phy_ocp(sc, 0xad40, val | 0x0084);
1505 	RGE_PHY_SETBIT(sc, 0xad4e, 0x0010);
1506 	val = rge_read_phy_ocp(sc, 0xad16) & ~0x03ff;
1507 	rge_write_phy_ocp(sc, 0xad16, val | 0x0006);
1508 	val = rge_read_phy_ocp(sc, 0xad32) & ~0x03ff;
1509 	rge_write_phy_ocp(sc, 0xad32, val | 0x0006);
1510 	RGE_PHY_CLRBIT(sc, 0xac08, 0x1100);
1511 	val = rge_read_phy_ocp(sc, 0xac8a) & ~0xf000;
1512 	rge_write_phy_ocp(sc, 0xac8a, val | 0x7000);
1513 	RGE_PHY_SETBIT(sc, 0xad18, 0x0400);
1514 	RGE_PHY_SETBIT(sc, 0xad1a, 0x03ff);
1515 	RGE_PHY_SETBIT(sc, 0xad1c, 0x03ff);
1516 
1517 	rge_write_phy_ocp(sc, 0xa436, 0x80ea);
1518 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1519 	rge_write_phy_ocp(sc, 0xa438, val | 0xc400);
1520 	rge_write_phy_ocp(sc, 0xa436, 0x80eb);
1521 	val = rge_read_phy_ocp(sc, 0xa438) & ~0x0700;
1522 	rge_write_phy_ocp(sc, 0xa438, val | 0x0300);
1523 	rge_write_phy_ocp(sc, 0xa436, 0x80f8);
1524 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1525 	rge_write_phy_ocp(sc, 0xa438, val | 0x1c00);
1526 	rge_write_phy_ocp(sc, 0xa436, 0x80f1);
1527 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1528 	rge_write_phy_ocp(sc, 0xa438, val | 0x3000);
1529 	rge_write_phy_ocp(sc, 0xa436, 0x80fe);
1530 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1531 	rge_write_phy_ocp(sc, 0xa438, val | 0xa500);
1532 	rge_write_phy_ocp(sc, 0xa436, 0x8102);
1533 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1534 	rge_write_phy_ocp(sc, 0xa438, val | 0x5000);
1535 	rge_write_phy_ocp(sc, 0xa436, 0x8105);
1536 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1537 	rge_write_phy_ocp(sc, 0xa438, val | 0x3300);
1538 	rge_write_phy_ocp(sc, 0xa436, 0x8100);
1539 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1540 	rge_write_phy_ocp(sc, 0xa438, val | 0x7000);
1541 	rge_write_phy_ocp(sc, 0xa436, 0x8104);
1542 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1543 	rge_write_phy_ocp(sc, 0xa438, val | 0xf000);
1544 	rge_write_phy_ocp(sc, 0xa436, 0x8106);
1545 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1546 	rge_write_phy_ocp(sc, 0xa438, val | 0x6500);
1547 	rge_write_phy_ocp(sc, 0xa436, 0x80dc);
1548 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1549 	rge_write_phy_ocp(sc, 0xa438, val | 0xed00);
1550 	rge_write_phy_ocp(sc, 0xa436, 0x80df);
1551 	RGE_PHY_SETBIT(sc, 0xa438, 0x0100);
1552 	rge_write_phy_ocp(sc, 0xa436, 0x80e1);
1553 	RGE_PHY_CLRBIT(sc, 0xa438, 0x0100);
1554 	val = rge_read_phy_ocp(sc, 0xbf06) & ~0x003f;
1555 	rge_write_phy_ocp(sc, 0xbf06, val | 0x0038);
1556 	rge_write_phy_ocp(sc, 0xa436, 0x819f);
1557 	rge_write_phy_ocp(sc, 0xa438, 0xd0b6);
1558 	rge_write_phy_ocp(sc, 0xbc34, 0x5555);
1559 	val = rge_read_phy_ocp(sc, 0xbf0a) & ~0x0e00;
1560 	rge_write_phy_ocp(sc, 0xbf0a, val | 0x0a00);
1561 	RGE_PHY_CLRBIT(sc, 0xa5c0, 0x0400);
1562 	RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
1563 }
1564 
1565 void
1566 rge_phy_config_mac_cfg3(struct rge_softc *sc)
1567 {
1568 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
1569 	uint16_t val;
1570 	int i;
1571 	static const uint16_t mac_cfg3_a438_value[] =
1572 	    { 0x0043, 0x00a7, 0x00d6, 0x00ec, 0x00f6, 0x00fb, 0x00fd, 0x00ff,
1573 	      0x00bb, 0x0058, 0x0029, 0x0013, 0x0009, 0x0004, 0x0002 };
1574 
1575 	static const uint16_t mac_cfg3_b88e_value[] =
1576 	    { 0xc091, 0x6e12, 0xc092, 0x1214, 0xc094, 0x1516, 0xc096, 0x171b,
1577 	      0xc098, 0x1b1c, 0xc09a, 0x1f1f, 0xc09c, 0x2021, 0xc09e, 0x2224,
1578 	      0xc0a0, 0x2424, 0xc0a2, 0x2424, 0xc0a4, 0x2424, 0xc018, 0x0af2,
1579 	      0xc01a, 0x0d4a, 0xc01c, 0x0f26, 0xc01e, 0x118d, 0xc020, 0x14f3,
1580 	      0xc022, 0x175a, 0xc024, 0x19c0, 0xc026, 0x1c26, 0xc089, 0x6050,
1581 	      0xc08a, 0x5f6e, 0xc08c, 0x6e6e, 0xc08e, 0x6e6e, 0xc090, 0x6e12 };
1582 
1583 	for (i = 0; i < nitems(rtl8125_mac_cfg3_ephy); i++)
1584 		rge_write_ephy(sc, rtl8125_mac_cfg3_ephy[i].reg,
1585 		    rtl8125_mac_cfg3_ephy[i].val);
1586 
1587 	val = rge_read_ephy(sc, 0x002a) & ~0x7000;
1588 	rge_write_ephy(sc, 0x002a, val | 0x3000);
1589 	RGE_EPHY_CLRBIT(sc, 0x0019, 0x0040);
1590 	RGE_EPHY_SETBIT(sc, 0x001b, 0x0e00);
1591 	RGE_EPHY_CLRBIT(sc, 0x001b, 0x7000);
1592 	rge_write_ephy(sc, 0x0002, 0x6042);
1593 	rge_write_ephy(sc, 0x0006, 0x0014);
1594 	val = rge_read_ephy(sc, 0x006a) & ~0x7000;
1595 	rge_write_ephy(sc, 0x006a, val | 0x3000);
1596 	RGE_EPHY_CLRBIT(sc, 0x0059, 0x0040);
1597 	RGE_EPHY_SETBIT(sc, 0x005b, 0x0e00);
1598 	RGE_EPHY_CLRBIT(sc, 0x005b, 0x7000);
1599 	rge_write_ephy(sc, 0x0042, 0x6042);
1600 	rge_write_ephy(sc, 0x0046, 0x0014);
1601 
1602 	rge_phy_config_mcu(sc, RGE_MAC_CFG3_MCODE_VER);
1603 
1604 	RGE_PHY_SETBIT(sc, 0xad4e, 0x0010);
1605 	val = rge_read_phy_ocp(sc, 0xad16) & ~0x03ff;
1606 	rge_write_phy_ocp(sc, 0xad16, val | 0x03ff);
1607 	val = rge_read_phy_ocp(sc, 0xad32) & ~0x003f;
1608 	rge_write_phy_ocp(sc, 0xad32, val | 0x0006);
1609 	RGE_PHY_CLRBIT(sc, 0xac08, 0x1000);
1610 	RGE_PHY_CLRBIT(sc, 0xac08, 0x0100);
1611 	val = rge_read_phy_ocp(sc, 0xacc0) & ~0x0003;
1612 	rge_write_phy_ocp(sc, 0xacc0, val | 0x0002);
1613 	val = rge_read_phy_ocp(sc, 0xad40) & ~0x00e0;
1614 	rge_write_phy_ocp(sc, 0xad40, val | 0x0040);
1615 	val = rge_read_phy_ocp(sc, 0xad40) & ~0x0007;
1616 	rge_write_phy_ocp(sc, 0xad40, val | 0x0004);
1617 	RGE_PHY_CLRBIT(sc, 0xac14, 0x0080);
1618 	RGE_PHY_CLRBIT(sc, 0xac80, 0x0300);
1619 	val = rge_read_phy_ocp(sc, 0xac5e) & ~0x0007;
1620 	rge_write_phy_ocp(sc, 0xac5e, val | 0x0002);
1621 	rge_write_phy_ocp(sc, 0xad4c, 0x00a8);
1622 	rge_write_phy_ocp(sc, 0xac5c, 0x01ff);
1623 	val = rge_read_phy_ocp(sc, 0xac8a) & ~0x00f0;
1624 	rge_write_phy_ocp(sc, 0xac8a, val | 0x0030);
1625 	rge_write_phy_ocp(sc, 0xb87c, 0x8157);
1626 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
1627 	rge_write_phy_ocp(sc, 0xb87e, val | 0x0500);
1628 	rge_write_phy_ocp(sc, 0xb87c, 0x8159);
1629 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
1630 	rge_write_phy_ocp(sc, 0xb87e, val | 0x0700);
1631 	RGE_WRITE_2(sc, RGE_EEE_TXIDLE_TIMER, ifp->if_mtu + ETHER_HDR_LEN + 32);
1632 	rge_write_phy_ocp(sc, 0xb87c, 0x80a2);
1633 	rge_write_phy_ocp(sc, 0xb87e, 0x0153);
1634 	rge_write_phy_ocp(sc, 0xb87c, 0x809c);
1635 	rge_write_phy_ocp(sc, 0xb87e, 0x0153);
1636 
1637 	rge_write_phy_ocp(sc, 0xa436, 0x81b3);
1638 	for (i = 0; i < nitems(mac_cfg3_a438_value); i++)
1639 		rge_write_phy_ocp(sc, 0xa438, mac_cfg3_a438_value[i]);
1640 	for (i = 0; i < 26; i++)
1641 		rge_write_phy_ocp(sc, 0xa438, 0);
1642 	rge_write_phy_ocp(sc, 0xa436, 0x8257);
1643 	rge_write_phy_ocp(sc, 0xa438, 0x020f);
1644 	rge_write_phy_ocp(sc, 0xa436, 0x80ea);
1645 	rge_write_phy_ocp(sc, 0xa438, 0x7843);
1646 
1647 	rge_patch_phy_mcu(sc, 1);
1648 	RGE_PHY_CLRBIT(sc, 0xb896, 0x0001);
1649 	RGE_PHY_CLRBIT(sc, 0xb892, 0xff00);
1650 	for (i = 0; i < nitems(mac_cfg3_b88e_value); i += 2) {
1651 		rge_write_phy_ocp(sc, 0xb88e, mac_cfg3_b88e_value[i]);
1652 		rge_write_phy_ocp(sc, 0xb890, mac_cfg3_b88e_value[i + 1]);
1653 	}
1654 	RGE_PHY_SETBIT(sc, 0xb896, 0x0001);
1655 	rge_patch_phy_mcu(sc, 0);
1656 
1657 	RGE_PHY_SETBIT(sc, 0xd068, 0x2000);
1658 	rge_write_phy_ocp(sc, 0xa436, 0x81a2);
1659 	RGE_PHY_SETBIT(sc, 0xa438, 0x0100);
1660 	val = rge_read_phy_ocp(sc, 0xb54c) & ~0xff00;
1661 	rge_write_phy_ocp(sc, 0xb54c, val | 0xdb00);
1662 	RGE_PHY_CLRBIT(sc, 0xa454, 0x0001);
1663 	RGE_PHY_SETBIT(sc, 0xa5d4, 0x0020);
1664 	RGE_PHY_CLRBIT(sc, 0xad4e, 0x0010);
1665 	RGE_PHY_CLRBIT(sc, 0xa86a, 0x0001);
1666 	RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
1667 }
1668 
1669 void
1670 rge_phy_config_mac_cfg4(struct rge_softc *sc)
1671 {
1672 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
1673 	uint16_t val;
1674 	int i;
1675 	static const uint16_t mac_cfg4_b87c_value[] =
1676 	    { 0x8013, 0x0700, 0x8fb9, 0x2801, 0x8fba, 0x0100, 0x8fbc, 0x1900,
1677 	      0x8fbe, 0xe100, 0x8fc0, 0x0800, 0x8fc2, 0xe500, 0x8fc4, 0x0f00,
1678 	      0x8fc6, 0xf100, 0x8fc8, 0x0400, 0x8fca, 0xf300, 0x8fcc, 0xfd00,
1679 	      0x8fce, 0xff00, 0x8fd0, 0xfb00, 0x8fd2, 0x0100, 0x8fd4, 0xf400,
1680 	      0x8fd6, 0xff00, 0x8fd8, 0xf600, 0x813d, 0x390e, 0x814f, 0x790e,
1681 	      0x80b0, 0x0f31 };
1682 
1683 	for (i = 0; i < nitems(rtl8125_mac_cfg4_ephy); i++)
1684 		rge_write_ephy(sc, rtl8125_mac_cfg4_ephy[i].reg,
1685 		    rtl8125_mac_cfg4_ephy[i].val);
1686 
1687 	rge_write_phy_ocp(sc, 0xbf86, 0x9000);
1688 	RGE_PHY_SETBIT(sc, 0xc402, 0x0400);
1689 	RGE_PHY_CLRBIT(sc, 0xc402, 0x0400);
1690 	rge_write_phy_ocp(sc, 0xbd86, 0x1010);
1691 	rge_write_phy_ocp(sc, 0xbd88, 0x1010);
1692 	val = rge_read_phy_ocp(sc, 0xbd4e) & ~0x0c00;
1693 	rge_write_phy_ocp(sc, 0xbd4e, val | 0x0800);
1694 	val = rge_read_phy_ocp(sc, 0xbf46) & ~0x0f00;
1695 	rge_write_phy_ocp(sc, 0xbf46, val | 0x0700);
1696 
1697 	rge_phy_config_mcu(sc, RGE_MAC_CFG4_MCODE_VER);
1698 
1699 	RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
1700 	RGE_PHY_SETBIT(sc, 0xbc08, 0x000c);
1701 	rge_write_phy_ocp(sc, 0xa436, 0x8fff);
1702 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1703 	rge_write_phy_ocp(sc, 0xa438, val | 0x0400);
1704 	for (i = 0; i < 6; i++) {
1705 		rge_write_phy_ocp(sc, 0xb87c, 0x8560 + i * 2);
1706 		if (i < 3)
1707 			rge_write_phy_ocp(sc, 0xb87e, 0x19cc);
1708 		else
1709 			rge_write_phy_ocp(sc, 0xb87e, 0x147d);
1710 	}
1711 	rge_write_phy_ocp(sc, 0xb87c, 0x8ffe);
1712 	rge_write_phy_ocp(sc, 0xb87e, 0x0907);
1713 	val = rge_read_phy_ocp(sc, 0xacda) & ~0xff00;
1714 	rge_write_phy_ocp(sc, 0xacda, val | 0xff00);
1715 	val = rge_read_phy_ocp(sc, 0xacde) & ~0xf000;
1716 	rge_write_phy_ocp(sc, 0xacde, val | 0xf000);
1717 	rge_write_phy_ocp(sc, 0xb87c, 0x80d6);
1718 	rge_write_phy_ocp(sc, 0xb87e, 0x2801);
1719 	rge_write_phy_ocp(sc, 0xb87c, 0x80F2);
1720 	rge_write_phy_ocp(sc, 0xb87e, 0x2801);
1721 	rge_write_phy_ocp(sc, 0xb87c, 0x80f4);
1722 	rge_write_phy_ocp(sc, 0xb87e, 0x6077);
1723 	rge_write_phy_ocp(sc, 0xb506, 0x01e7);
1724 	rge_write_phy_ocp(sc, 0xac8c, 0x0ffc);
1725 	rge_write_phy_ocp(sc, 0xac46, 0xb7b4);
1726 	rge_write_phy_ocp(sc, 0xac50, 0x0fbc);
1727 	rge_write_phy_ocp(sc, 0xac3c, 0x9240);
1728 	rge_write_phy_ocp(sc, 0xac4E, 0x0db4);
1729 	rge_write_phy_ocp(sc, 0xacc6, 0x0707);
1730 	rge_write_phy_ocp(sc, 0xacc8, 0xa0d3);
1731 	rge_write_phy_ocp(sc, 0xad08, 0x0007);
1732 	for (i = 0; i < nitems(mac_cfg4_b87c_value); i += 2) {
1733 		rge_write_phy_ocp(sc, 0xb87c, mac_cfg4_b87c_value[i]);
1734 		rge_write_phy_ocp(sc, 0xb87e, mac_cfg4_b87c_value[i + 1]);
1735 	}
1736 	RGE_PHY_SETBIT(sc, 0xbf4c, 0x0002);
1737 	RGE_PHY_SETBIT(sc, 0xbcca, 0x0300);
1738 	rge_write_phy_ocp(sc, 0xb87c, 0x8141);
1739 	rge_write_phy_ocp(sc, 0xb87e, 0x320e);
1740 	rge_write_phy_ocp(sc, 0xb87c, 0x8153);
1741 	rge_write_phy_ocp(sc, 0xb87e, 0x720e);
1742 	RGE_PHY_CLRBIT(sc, 0xa432, 0x0040);
1743 	rge_write_phy_ocp(sc, 0xb87c, 0x8529);
1744 	rge_write_phy_ocp(sc, 0xb87e, 0x050e);
1745 	RGE_WRITE_2(sc, RGE_EEE_TXIDLE_TIMER, ifp->if_mtu + ETHER_HDR_LEN + 32);
1746 	rge_write_phy_ocp(sc, 0xa436, 0x816c);
1747 	rge_write_phy_ocp(sc, 0xa438, 0xc4a0);
1748 	rge_write_phy_ocp(sc, 0xa436, 0x8170);
1749 	rge_write_phy_ocp(sc, 0xa438, 0xc4a0);
1750 	rge_write_phy_ocp(sc, 0xa436, 0x8174);
1751 	rge_write_phy_ocp(sc, 0xa438, 0x04a0);
1752 	rge_write_phy_ocp(sc, 0xa436, 0x8178);
1753 	rge_write_phy_ocp(sc, 0xa438, 0x04a0);
1754 	rge_write_phy_ocp(sc, 0xa436, 0x817c);
1755 	rge_write_phy_ocp(sc, 0xa438, 0x0719);
1756 	rge_write_phy_ocp(sc, 0xa436, 0x8ff4);
1757 	rge_write_phy_ocp(sc, 0xa438, 0x0400);
1758 	rge_write_phy_ocp(sc, 0xa436, 0x8ff1);
1759 	rge_write_phy_ocp(sc, 0xa438, 0x0404);
1760 	rge_write_phy_ocp(sc, 0xbf4a, 0x001b);
1761 	for (i = 0; i < 6; i++) {
1762 		rge_write_phy_ocp(sc, 0xb87c, 0x8033 + i * 4);
1763 		if (i == 2)
1764 			rge_write_phy_ocp(sc, 0xb87e, 0xfc32);
1765 		else
1766 			rge_write_phy_ocp(sc, 0xb87e, 0x7c13);
1767 	}
1768 	rge_write_phy_ocp(sc, 0xb87c, 0x8145);
1769 	rge_write_phy_ocp(sc, 0xb87e, 0x370e);
1770 	rge_write_phy_ocp(sc, 0xb87c, 0x8157);
1771 	rge_write_phy_ocp(sc, 0xb87e, 0x770e);
1772 	rge_write_phy_ocp(sc, 0xb87c, 0x8169);
1773 	rge_write_phy_ocp(sc, 0xb87e, 0x0d0a);
1774 	rge_write_phy_ocp(sc, 0xb87c, 0x817b);
1775 	rge_write_phy_ocp(sc, 0xb87e, 0x1d0a);
1776 	rge_write_phy_ocp(sc, 0xa436, 0x8217);
1777 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1778 	rge_write_phy_ocp(sc, 0xa438, val | 0x5000);
1779 	rge_write_phy_ocp(sc, 0xa436, 0x821a);
1780 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1781 	rge_write_phy_ocp(sc, 0xa438, val | 0x5000);
1782 	rge_write_phy_ocp(sc, 0xa436, 0x80da);
1783 	rge_write_phy_ocp(sc, 0xa438, 0x0403);
1784 	rge_write_phy_ocp(sc, 0xa436, 0x80dc);
1785 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1786 	rge_write_phy_ocp(sc, 0xa438, val | 0x1000);
1787 	rge_write_phy_ocp(sc, 0xa436, 0x80b3);
1788 	rge_write_phy_ocp(sc, 0xa438, 0x0384);
1789 	rge_write_phy_ocp(sc, 0xa436, 0x80b7);
1790 	rge_write_phy_ocp(sc, 0xa438, 0x2007);
1791 	rge_write_phy_ocp(sc, 0xa436, 0x80ba);
1792 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1793 	rge_write_phy_ocp(sc, 0xa438, val | 0x6c00);
1794 	rge_write_phy_ocp(sc, 0xa436, 0x80b5);
1795 	rge_write_phy_ocp(sc, 0xa438, 0xf009);
1796 	rge_write_phy_ocp(sc, 0xa436, 0x80bd);
1797 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1798 	rge_write_phy_ocp(sc, 0xa438, val | 0x9f00);
1799 	rge_write_phy_ocp(sc, 0xa436, 0x80c7);
1800 	rge_write_phy_ocp(sc, 0xa438, 0xf083);
1801 	rge_write_phy_ocp(sc, 0xa436, 0x80dd);
1802 	rge_write_phy_ocp(sc, 0xa438, 0x03f0);
1803 	rge_write_phy_ocp(sc, 0xa436, 0x80df);
1804 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1805 	rge_write_phy_ocp(sc, 0xa438, val | 0x1000);
1806 	rge_write_phy_ocp(sc, 0xa436, 0x80cb);
1807 	rge_write_phy_ocp(sc, 0xa438, 0x2007);
1808 	rge_write_phy_ocp(sc, 0xa436, 0x80ce);
1809 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1810 	rge_write_phy_ocp(sc, 0xa438, val | 0x6c00);
1811 	rge_write_phy_ocp(sc, 0xa436, 0x80c9);
1812 	rge_write_phy_ocp(sc, 0xa438, 0x8009);
1813 	rge_write_phy_ocp(sc, 0xa436, 0x80d1);
1814 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1815 	rge_write_phy_ocp(sc, 0xa438, val | 0x8000);
1816 	rge_write_phy_ocp(sc, 0xa436, 0x80a3);
1817 	rge_write_phy_ocp(sc, 0xa438, 0x200a);
1818 	rge_write_phy_ocp(sc, 0xa436, 0x80a5);
1819 	rge_write_phy_ocp(sc, 0xa438, 0xf0ad);
1820 	rge_write_phy_ocp(sc, 0xa436, 0x809f);
1821 	rge_write_phy_ocp(sc, 0xa438, 0x6073);
1822 	rge_write_phy_ocp(sc, 0xa436, 0x80a1);
1823 	rge_write_phy_ocp(sc, 0xa438, 0x000b);
1824 	rge_write_phy_ocp(sc, 0xa436, 0x80a9);
1825 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1826 	rge_write_phy_ocp(sc, 0xa438, val | 0xc000);
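	/* Program the 0xb88e/0xb890 data pairs under an MCU patch request. */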
1827 	rge_patch_phy_mcu(sc, 1);
1828 	RGE_PHY_CLRBIT(sc, 0xb896, 0x0001);
1829 	RGE_PHY_CLRBIT(sc, 0xb892, 0xff00);
1830 	rge_write_phy_ocp(sc, 0xb88e, 0xc23e);
1831 	rge_write_phy_ocp(sc, 0xb890, 0x0000);
1832 	rge_write_phy_ocp(sc, 0xb88e, 0xc240);
1833 	rge_write_phy_ocp(sc, 0xb890, 0x0103);
1834 	rge_write_phy_ocp(sc, 0xb88e, 0xc242);
1835 	rge_write_phy_ocp(sc, 0xb890, 0x0507);
1836 	rge_write_phy_ocp(sc, 0xb88e, 0xc244);
1837 	rge_write_phy_ocp(sc, 0xb890, 0x090b);
1838 	rge_write_phy_ocp(sc, 0xb88e, 0xc246);
1839 	rge_write_phy_ocp(sc, 0xb890, 0x0c0e);
1840 	rge_write_phy_ocp(sc, 0xb88e, 0xc248);
1841 	rge_write_phy_ocp(sc, 0xb890, 0x1012);
1842 	rge_write_phy_ocp(sc, 0xb88e, 0xc24a);
1843 	rge_write_phy_ocp(sc, 0xb890, 0x1416);
1844 	RGE_PHY_SETBIT(sc, 0xb896, 0x0001);
1845 	rge_patch_phy_mcu(sc, 0);
1846 	RGE_PHY_SETBIT(sc, 0xa86a, 0x0001);
1847 	RGE_PHY_SETBIT(sc, 0xa6f0, 0x0001);
1848 	rge_write_phy_ocp(sc, 0xbfa0, 0xd70d);
1849 	rge_write_phy_ocp(sc, 0xbfa2, 0x4100);
1850 	rge_write_phy_ocp(sc, 0xbfa4, 0xe868);
1851 	rge_write_phy_ocp(sc, 0xbfa6, 0xdc59);
1852 	rge_write_phy_ocp(sc, 0xb54c, 0x3c18);
1853 	RGE_PHY_CLRBIT(sc, 0xbfa4, 0x0020);
1854 	rge_write_phy_ocp(sc, 0xa436, 0x817d);
1855 	RGE_PHY_SETBIT(sc, 0xa438, 0x1000);
1856 }
1857 
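/*
 * PHY setup for MAC_CFG5 chips.  As in the other rge_phy_config_mac_*
 * routines, most of the register/value pairs below are undocumented
 * vendor magic that mirrors Realtek reference code.
 */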
1858 void
1859 rge_phy_config_mac_cfg5(struct rge_softc *sc)
1860 {
1861 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
1862 	uint16_t val;
1863 	int i;
1864 
1865 	for (i = 0; i < nitems(rtl8125_mac_cfg5_ephy); i++)
1866 		rge_write_ephy(sc, rtl8125_mac_cfg5_ephy[i].reg,
1867 		    rtl8125_mac_cfg5_ephy[i].val);
1868 
1869 	val = rge_read_ephy(sc, 0x0022) & ~0x0030;
1870 	rge_write_ephy(sc, 0x0022, val | 0x0020);
1871 	val = rge_read_ephy(sc, 0x0062) & ~0x0030;
1872 	rge_write_ephy(sc, 0x0062, val | 0x0020);
1873 
1874 	rge_write_phy_ocp(sc, 0xbf86, 0x9000);
1875 	RGE_PHY_SETBIT(sc, 0xc402, 0x0400);
1876 	RGE_PHY_CLRBIT(sc, 0xc402, 0x0400);
1877 	rge_write_phy_ocp(sc, 0xbd86, 0x1010);
1878 	rge_write_phy_ocp(sc, 0xbd88, 0x1010);
1879 	val = rge_read_phy_ocp(sc, 0xbd4e) & ~0x0c00;
1880 	rge_write_phy_ocp(sc, 0xbd4e, val | 0x0800);
1881 	val = rge_read_phy_ocp(sc, 0xbf46) & ~0x0f00;
1882 	rge_write_phy_ocp(sc, 0xbf46, val | 0x0700);
1883 
1884 	rge_phy_config_mcu(sc, RGE_MAC_CFG5_MCODE_VER);
1885 
1886 	RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
1887 	val = rge_read_phy_ocp(sc, 0xac46) & ~0x00f0;
1888 	rge_write_phy_ocp(sc, 0xac46, val | 0x0090);
1889 	val = rge_read_phy_ocp(sc, 0xad30) & ~0x0003;
1890 	rge_write_phy_ocp(sc, 0xad30, val | 0x0001);
1891 	RGE_WRITE_2(sc, RGE_EEE_TXIDLE_TIMER, ifp->if_mtu + ETHER_HDR_LEN + 32);
1892 	rge_write_phy_ocp(sc, 0xb87c, 0x80f5);
1893 	rge_write_phy_ocp(sc, 0xb87e, 0x760e);
1894 	rge_write_phy_ocp(sc, 0xb87c, 0x8107);
1895 	rge_write_phy_ocp(sc, 0xb87e, 0x360e);
1896 	rge_write_phy_ocp(sc, 0xb87c, 0x8551);
1897 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
1898 	rge_write_phy_ocp(sc, 0xb87e, val | 0x0800);
1899 	val = rge_read_phy_ocp(sc, 0xbf00) & ~0xe000;
1900 	rge_write_phy_ocp(sc, 0xbf00, val | 0xa000);
1901 	val = rge_read_phy_ocp(sc, 0xbf46) & ~0x0f00;
1902 	rge_write_phy_ocp(sc, 0xbf46, val | 0x0300);
1903 	for (i = 0; i < 10; i++) {
1904 		rge_write_phy_ocp(sc, 0xa436, 0x8044 + i * 6);
1905 		rge_write_phy_ocp(sc, 0xa438, 0x2417);
1906 	}
1907 	RGE_PHY_SETBIT(sc, 0xa4ca, 0x0040);
1908 }
1909 
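/*
 * Load the per-chip PHY MCU patch (a table of raw OCP register writes)
 * if the cached microcode version does not match mcode_version, then
 * store the new version in the PHY at 0x801e.
 */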
1910 void
1911 rge_phy_config_mcu(struct rge_softc *sc, uint16_t mcode_version)
1912 {
1913 	if (sc->rge_mcodever != mcode_version) {
1914 		int i;
1915 
1916 		rge_patch_phy_mcu(sc, 1);
1917 
1918 		if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3) {
1919 			rge_write_phy_ocp(sc, 0xa436, 0x8024);
1920 			if (sc->rge_type == MAC_CFG2)
1921 				rge_write_phy_ocp(sc, 0xa438, 0x8600);
1922 			else
1923 				rge_write_phy_ocp(sc, 0xa438, 0x8601);
1924 			rge_write_phy_ocp(sc, 0xa436, 0xb82e);
1925 			rge_write_phy_ocp(sc, 0xa438, 0x0001);
1926 
1927 			RGE_PHY_SETBIT(sc, 0xb820, 0x0080);
1928 		}
1929 
1930 		if (sc->rge_type == MAC_CFG2) {
1931 			for (i = 0; i < nitems(rtl8125_mac_cfg2_mcu); i++) {
1932 				rge_write_phy_ocp(sc,
1933 				    rtl8125_mac_cfg2_mcu[i].reg,
1934 				    rtl8125_mac_cfg2_mcu[i].val);
1935 			}
1936 		} else if (sc->rge_type == MAC_CFG3) {
1937 			for (i = 0; i < nitems(rtl8125_mac_cfg3_mcu); i++) {
1938 				rge_write_phy_ocp(sc,
1939 				    rtl8125_mac_cfg3_mcu[i].reg,
1940 				    rtl8125_mac_cfg3_mcu[i].val);
1941 			}
1942 		} else if (sc->rge_type == MAC_CFG4) {
1943 			for (i = 0; i < nitems(rtl8125_mac_cfg4_mcu); i++) {
1944 				rge_write_phy_ocp(sc,
1945 				    rtl8125_mac_cfg4_mcu[i].reg,
1946 				    rtl8125_mac_cfg4_mcu[i].val);
1947 			}
1948 		} else if (sc->rge_type == MAC_CFG5) {
1949 			for (i = 0; i < nitems(rtl8125_mac_cfg5_mcu); i++) {
1950 				rge_write_phy_ocp(sc,
1951 				    rtl8125_mac_cfg5_mcu[i].reg,
1952 				    rtl8125_mac_cfg5_mcu[i].val);
1953 			}
1954 		}
1955 
1956 		if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3) {
1957 			RGE_PHY_CLRBIT(sc, 0xb820, 0x0080);
1958 
1959 			rge_write_phy_ocp(sc, 0xa436, 0);
1960 			rge_write_phy_ocp(sc, 0xa438, 0);
1961 			RGE_PHY_CLRBIT(sc, 0xb82e, 0x0001);
1962 			rge_write_phy_ocp(sc, 0xa436, 0x8024);
1963 			rge_write_phy_ocp(sc, 0xa438, 0);
1964 		}
1965 
1966 		rge_patch_phy_mcu(sc, 0);
1967 
1968 		/* Write microcode version. */
1969 		rge_write_phy_ocp(sc, 0xa436, 0x801e);
1970 		rge_write_phy_ocp(sc, 0xa438, mcode_version);
1971 	}
1972 }
1973 
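/*
 * Set the station address.  The ID registers only accept writes while
 * config-write access is enabled through RGE_EECMD.
 */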
1974 void
1975 rge_set_macaddr(struct rge_softc *sc, const uint8_t *addr)
1976 {
1977 	RGE_SETBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
1978 	RGE_WRITE_4(sc, RGE_MAC0,
1979 	    addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
1980 	RGE_WRITE_4(sc, RGE_MAC4,
1981 	    addr[5] <<  8 | addr[4]);
1982 	RGE_CLRBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
1983 }
1984 
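/* Read the programmed station address back from the ID registers. */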
1985 void
1986 rge_get_macaddr(struct rge_softc *sc, uint8_t *addr)
1987 {
	uint32_t lo;
	uint16_t hi;

	/* Extract byte-wise: addr may be unaligned and the registers are LE. */
	lo = htole32(RGE_READ_4(sc, RGE_ADDR0));
	hi = htole16(RGE_READ_2(sc, RGE_ADDR1));
	memcpy(addr, &lo, sizeof(lo));
	memcpy(addr + 4, &hi, sizeof(hi));
1990 }
1991 
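/*
 * One-time MAC setup: clear PME status, disable UPS, zero the MAC MCU
 * patch registers, install the per-chip MCU break points, and turn off
 * PHY OCP power saving.
 */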
1992 void
1993 rge_hw_init(struct rge_softc *sc)
1994 {
1995 	int i;
1996 
1997 	RGE_SETBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
1998 	RGE_CLRBIT_1(sc, RGE_CFG5, RGE_CFG5_PME_STS);
1999 	RGE_CLRBIT_1(sc, RGE_CFG2, RGE_CFG2_CLKREQ_EN);
2000 	RGE_CLRBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
2001 	RGE_CLRBIT_1(sc, 0xf1, 0x80);
2002 
2003 	/* Disable UPS. */
2004 	RGE_MAC_CLRBIT(sc, 0xd40a, 0x0010);
2005 
2006 	/* Configure MAC MCU. */
2007 	rge_write_mac_ocp(sc, 0xfc38, 0);
2008 
2009 	for (i = 0xfc28; i < 0xfc38; i += 2)
2010 		rge_write_mac_ocp(sc, i, 0);
2011 
2012 	DELAY(3000);
2013 	rge_write_mac_ocp(sc, 0xfc26, 0);
2014 
2015 	if (sc->rge_type == MAC_CFG3) {
2016 		for (i = 0; i < nitems(rtl8125_mac_bps); i++) {
2017 			rge_write_mac_ocp(sc, rtl8125_mac_bps[i].reg,
2018 			    rtl8125_mac_bps[i].val);
2019 		}
2020 	} else if (sc->rge_type == MAC_CFG5) {
2021 		for (i = 0; i < nitems(rtl8125b_mac_bps); i++) {
2022 			rge_write_mac_ocp(sc, rtl8125b_mac_bps[i].reg,
2023 			    rtl8125b_mac_bps[i].val);
2024 		}
2025 	}
2026 
2027 	/* Disable PHY power saving. */
2028 	rge_disable_phy_ocp_pwrsave(sc);
2029 
2030 	/* Set PCIe uncorrectable error status. */
2031 	rge_write_csi(sc, 0x108,
2032 	    rge_read_csi(sc, 0x108) | 0x00100000);
2033 }
2034 
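/*
 * Make sure PHY OCP power saving is off: 0xc416 must read 0x0500, and
 * rewriting it requires holding an MCU patch request.
 */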
2035 void
2036 rge_disable_phy_ocp_pwrsave(struct rge_softc *sc)
2037 {
2038 	if (rge_read_phy_ocp(sc, 0xc416) != 0x0500) {
2039 		rge_patch_phy_mcu(sc, 1);
2040 		rge_write_phy_ocp(sc, 0xc416, 0);
2041 		rge_write_phy_ocp(sc, 0xc416, 0x0500);
2042 		rge_patch_phy_mcu(sc, 0);
2043 	}
2044 }
2045 
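/*
 * Raise (set != 0) or release the PHY MCU patch request, bit 4 of OCP
 * register 0xb820, then wait for the PHY to acknowledge.
 */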
2046 void
2047 rge_patch_phy_mcu(struct rge_softc *sc, int set)
2048 {
2049 	int i;
2050 
2051 	if (set)
2052 		RGE_PHY_SETBIT(sc, 0xb820, 0x0010);
2053 	else
2054 		RGE_PHY_CLRBIT(sc, 0xb820, 0x0010);
2055 
	/*
	 * The PHY acknowledges the request through bit 6 of OCP register
	 * 0xb800: the bit rises once patch mode is entered and drops
	 * again when the request is released (per Realtek reference
	 * code).
	 */
	for (i = 0; i < 1000; i++) {
		if (set) {
			if (rge_read_phy_ocp(sc, 0xb800) & 0x0040)
				break;
		} else {
			if (!(rge_read_phy_ocp(sc, 0xb800) & 0x0040))
				break;
		}
		DELAY(100);
	}
	if (i == 1000)
		DPRINTF(("timeout waiting to patch phy mcu\n"));
2065 }
2066 
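/* Register all media types the hardware can negotiate, 10BaseT to 2500BaseT. */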
2067 void
2068 rge_add_media_types(struct rge_softc *sc)
2069 {
2070 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_10_T, 0, NULL);
2071 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_10_T | IFM_FDX, 0, NULL);
2072 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_100_TX, 0, NULL);
2073 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_100_TX | IFM_FDX, 0, NULL);
2074 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_1000_T, 0, NULL);
2075 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
2076 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_2500_T, 0, NULL);
2077 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_2500_T | IFM_FDX, 0, NULL);
2078 }
2079 
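/*
 * Select the interrupt moderation scheme: none (every event interrupts
 * and is acked individually) or simulated moderation, where the chip's
 * timer interrupt stands in for RX/TX completions.
 */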
2080 void
2081 rge_config_imtype(struct rge_softc *sc, int imtype)
2082 {
2083 	switch (imtype) {
2084 	case RGE_IMTYPE_NONE:
2085 		sc->rge_intrs = RGE_INTRS;
2086 		sc->rge_rx_ack = RGE_ISR_RX_OK | RGE_ISR_RX_DESC_UNAVAIL |
2087 		    RGE_ISR_RX_FIFO_OFLOW;
2088 		sc->rge_tx_ack = RGE_ISR_TX_OK;
2089 		break;
2090 	case RGE_IMTYPE_SIM:
2091 		sc->rge_intrs = RGE_INTRS_TIMER;
2092 		sc->rge_rx_ack = RGE_ISR_PCS_TIMEOUT;
2093 		sc->rge_tx_ack = RGE_ISR_PCS_TIMEOUT;
2094 		break;
2095 	default:
2096 		panic("%s: unknown imtype %d", sc->sc_dev.dv_xname, imtype);
2097 	}
2098 }
2099 
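/*
 * Hardware interrupt moderation is driven by RGE_IM; simulated
 * moderation is driven by the TIMERINT0 countdown timer.
 */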
2100 void
2101 rge_disable_hw_im(struct rge_softc *sc)
2102 {
2103 	RGE_WRITE_2(sc, RGE_IM, 0);
2104 }
2105 
2106 void
2107 rge_disable_sim_im(struct rge_softc *sc)
2108 {
2109 	RGE_WRITE_4(sc, RGE_TIMERINT0, 0);
2110 	sc->rge_timerintr = 0;
2111 }
2112 
2113 void
2114 rge_setup_sim_im(struct rge_softc *sc)
2115 {
2116 	RGE_WRITE_4(sc, RGE_TIMERINT0, 0x2600);
2117 	RGE_WRITE_4(sc, RGE_TIMERCNT, 1);
2118 	sc->rge_timerintr = 1;
2119 }
2120 
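/*
 * Program the interrupt mask for the chosen moderation type and bring
 * the hardware/simulated moderation machinery into a matching state.
 */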
2121 void
2122 rge_setup_intr(struct rge_softc *sc, int imtype)
2123 {
2124 	rge_config_imtype(sc, imtype);
2125 
2126 	/* Enable interrupts. */
2127 	RGE_WRITE_4(sc, RGE_IMR, sc->rge_intrs);
2128 
2129 	switch (imtype) {
2130 	case RGE_IMTYPE_NONE:
2131 		rge_disable_sim_im(sc);
2132 		rge_disable_hw_im(sc);
2133 		break;
2134 	case RGE_IMTYPE_SIM:
2135 		rge_disable_hw_im(sc);
2136 		rge_setup_sim_im(sc);
2137 		break;
2138 	default:
2139 		panic("%s: unknown imtype %d", sc->sc_dev.dv_xname, imtype);
2140 	}
2141 }
2142 
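/*
 * Take the chip out of out-of-band (management firmware) mode so the
 * driver owns the hardware: drop all receive filtering, disable
 * RealWoW, reset, hand control over from the firmware, and finish a
 * pending UPS resume if one is in progress.
 */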
2143 void
2144 rge_exit_oob(struct rge_softc *sc)
2145 {
2146 	int i;
2147 
2148 	RGE_CLRBIT_4(sc, RGE_RXCFG, RGE_RXCFG_ALLPHYS | RGE_RXCFG_INDIV |
2149 	    RGE_RXCFG_MULTI | RGE_RXCFG_BROAD | RGE_RXCFG_RUNT |
2150 	    RGE_RXCFG_ERRPKT);
2151 
2152 	/* Disable RealWoW. */
2153 	rge_write_mac_ocp(sc, 0xc0bc, 0x00ff);
2154 
2155 	rge_reset(sc);
2156 
2157 	/* Disable OOB. */
2158 	RGE_CLRBIT_1(sc, RGE_MCUCMD, RGE_MCUCMD_IS_OOB);
2159 
2160 	RGE_MAC_CLRBIT(sc, 0xe8de, 0x4000);
2161 
2162 	for (i = 0; i < 10; i++) {
2163 		DELAY(100);
2164 		if (RGE_READ_2(sc, RGE_TWICMD) & 0x0200)
2165 			break;
2166 	}
2167 
2168 	rge_write_mac_ocp(sc, 0xc0aa, 0x07d0);
2169 	rge_write_mac_ocp(sc, 0xc0a6, 0x01b5);
2170 	rge_write_mac_ocp(sc, 0xc01e, 0x5555);
2171 
2172 	for (i = 0; i < 10; i++) {
2173 		DELAY(100);
2174 		if (RGE_READ_2(sc, RGE_TWICMD) & 0x0200)
2175 			break;
2176 	}
2177 
2178 	if (rge_read_mac_ocp(sc, 0xd42c) & 0x0100) {
2179 		printf("%s: rge_exit_oob(): rtl8125_is_ups_resume!!\n",
2180 		    sc->sc_dev.dv_xname);
2181 		for (i = 0; i < RGE_TIMEOUT; i++) {
2182 			if ((rge_read_phy_ocp(sc, 0xa420) & 0x0007) == 2)
2183 				break;
2184 			DELAY(1000);
2185 		}
2186 		RGE_MAC_CLRBIT(sc, 0xd408, 0x0100);
2187 		if (sc->rge_type == MAC_CFG4 || sc->rge_type == MAC_CFG5)
2188 			RGE_PHY_CLRBIT(sc, 0xa466, 0x0001);
2189 		RGE_PHY_CLRBIT(sc, 0xa468, 0x000a);
2190 	}
2191 }
2192 
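/*
 * CSI access: RGE_CSIAR/RGE_CSIDR form an indirect window into the
 * device's PCIe configuration space, one aligned dword at a time.
 */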
2193 void
2194 rge_write_csi(struct rge_softc *sc, uint32_t reg, uint32_t val)
2195 {
2196 	int i;
2197 
2198 	RGE_WRITE_4(sc, RGE_CSIDR, val);
2199 	RGE_WRITE_4(sc, RGE_CSIAR, (reg & RGE_CSIAR_ADDR_MASK) |
2200 	    (RGE_CSIAR_BYTE_EN << RGE_CSIAR_BYTE_EN_SHIFT) | RGE_CSIAR_BUSY);
2201 
2202 	for (i = 0; i < 10; i++) {
2203 		 DELAY(100);
2204 		 if (!(RGE_READ_4(sc, RGE_CSIAR) & RGE_CSIAR_BUSY))
2205 			break;
2206 	}
2207 
2208 	DELAY(20);
2209 }
2210 
2211 uint32_t
2212 rge_read_csi(struct rge_softc *sc, uint32_t reg)
2213 {
2214 	int i;
2215 
2216 	RGE_WRITE_4(sc, RGE_CSIAR, (reg & RGE_CSIAR_ADDR_MASK) |
2217 	    (RGE_CSIAR_BYTE_EN << RGE_CSIAR_BYTE_EN_SHIFT));
2218 
2219 	for (i = 0; i < 10; i++) {
2220 		 DELAY(100);
2221 		 if (RGE_READ_4(sc, RGE_CSIAR) & RGE_CSIAR_BUSY)
2222 			break;
2223 	}
2224 
2225 	DELAY(20);
2226 
2227 	return (RGE_READ_4(sc, RGE_CSIDR));
2228 }
2229 
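/*
 * MAC OCP access: 16-bit registers reached through the RGE_MACOCP
 * window.  Register addresses are word-aligned, hence the reg >> 1.
 */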
2230 void
2231 rge_write_mac_ocp(struct rge_softc *sc, uint16_t reg, uint16_t val)
2232 {
2233 	uint32_t tmp;
2234 
2235 	tmp = (reg >> 1) << RGE_MACOCP_ADDR_SHIFT;
2236 	tmp += val;
2237 	tmp |= RGE_MACOCP_BUSY;
2238 	RGE_WRITE_4(sc, RGE_MACOCP, tmp);
2239 }
2240 
2241 uint16_t
2242 rge_read_mac_ocp(struct rge_softc *sc, uint16_t reg)
2243 {
2244 	uint32_t val;
2245 
2246 	val = (reg >> 1) << RGE_MACOCP_ADDR_SHIFT;
2247 	RGE_WRITE_4(sc, RGE_MACOCP, val);
2248 
2249 	return (RGE_READ_4(sc, RGE_MACOCP) & RGE_MACOCP_DATA_MASK);
2250 }
2251 
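/* PCIe EPHY register access through the RGE_EPHYAR window. */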
2252 void
2253 rge_write_ephy(struct rge_softc *sc, uint16_t reg, uint16_t val)
2254 {
2255 	uint32_t tmp;
2256 	int i;
2257 
2258 	tmp = (reg & RGE_EPHYAR_ADDR_MASK) << RGE_EPHYAR_ADDR_SHIFT;
2259 	tmp |= RGE_EPHYAR_BUSY | (val & RGE_EPHYAR_DATA_MASK);
2260 	RGE_WRITE_4(sc, RGE_EPHYAR, tmp);
2261 
2262 	for (i = 0; i < 10; i++) {
2263 		DELAY(100);
2264 		if (!(RGE_READ_4(sc, RGE_EPHYAR) & RGE_EPHYAR_BUSY))
2265 			break;
2266 	}
2267 
2268 	DELAY(20);
2269 }
2270 
2271 uint16_t
2272 rge_read_ephy(struct rge_softc *sc, uint16_t reg)
2273 {
2274 	uint32_t val;
2275 	int i;
2276 
2277 	val = (reg & RGE_EPHYAR_ADDR_MASK) << RGE_EPHYAR_ADDR_SHIFT;
2278 	RGE_WRITE_4(sc, RGE_EPHYAR, val);
2279 
2280 	for (i = 0; i < 10; i++) {
2281 		DELAY(100);
2282 		val = RGE_READ_4(sc, RGE_EPHYAR);
2283 		if (val & RGE_EPHYAR_BUSY)
2284 			break;
2285 	}
2286 
2287 	DELAY(20);
2288 
2289 	return (val & RGE_EPHYAR_DATA_MASK);
2290 }
2291 
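/*
 * Write a MII PHY register by folding it into OCP space.  With no
 * explicit PHY address, register r maps to OCP address
 * ((RGE_PHYBASE + r / 8) << 4) + (r % 8) * 2.
 */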
2292 void
2293 rge_write_phy(struct rge_softc *sc, uint16_t addr, uint16_t reg, uint16_t val)
2294 {
2295 	uint16_t off, phyaddr;
2296 
2297 	phyaddr = addr ? addr : RGE_PHYBASE + (reg / 8);
2298 	phyaddr <<= 4;
2299 
2300 	off = addr ? reg : 0x10 + (reg % 8);
2301 
2302 	phyaddr += (off - 16) << 1;
2303 
2304 	rge_write_phy_ocp(sc, phyaddr, val);
2305 }
2306 
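/*
 * Raw PHY OCP access.  On writes, BUSY clears once the cycle completes;
 * on reads, it is set when the data is valid.
 */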
2307 void
2308 rge_write_phy_ocp(struct rge_softc *sc, uint16_t reg, uint16_t val)
2309 {
2310 	uint32_t tmp;
2311 	int i;
2312 
2313 	tmp = (reg >> 1) << RGE_PHYOCP_ADDR_SHIFT;
2314 	tmp |= RGE_PHYOCP_BUSY | val;
2315 	//printf("%s: data32 = %x\n", sc->sc_dev.dv_xname, tmp);
2316 	RGE_WRITE_4(sc, RGE_PHYOCP, tmp);
2317 
2318 	for (i = 0; i < RGE_TIMEOUT; i++) {
2319 		DELAY(1);
2320 		if (!(RGE_READ_4(sc, RGE_PHYOCP) & RGE_PHYOCP_BUSY))
2321 			break;
2322 	}
2323 }
2324 
2325 uint16_t
2326 rge_read_phy_ocp(struct rge_softc *sc, uint16_t reg)
2327 {
2328 	uint32_t val;
2329 	int i;
2330 
2331 	val = (reg >> 1) << RGE_PHYOCP_ADDR_SHIFT;
2332 	RGE_WRITE_4(sc, RGE_PHYOCP, val);
2333 
2334 	for (i = 0; i < RGE_TIMEOUT; i++) {
2335 		DELAY(1);
2336 		val = RGE_READ_4(sc, RGE_PHYOCP);
2337 		if (val & RGE_PHYOCP_BUSY)
2338 			break;
2339 	}
2340 
2341 	return (val & RGE_PHYOCP_DATA_MASK);
2342 }
2343 
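/* Report link state straight from the PHY status register. */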
2344 int
2345 rge_get_link_status(struct rge_softc *sc)
2346 {
2347 	return ((RGE_READ_2(sc, RGE_PHYSTAT) & RGE_PHYSTAT_LINK) ? 1 : 0);
2348 }
2349 
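/* Kick the chip to start transmitting. */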
2350 void
2351 rge_txstart(void *arg)
2352 {
2353 	struct rge_softc *sc = arg;
2354 
2355 	RGE_WRITE_2(sc, RGE_TXSTART, RGE_TXSTART_START);
2356 }
2357 
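/* Once-a-second timer: refresh link state and reschedule. */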
2358 void
2359 rge_tick(void *arg)
2360 {
2361 	struct rge_softc *sc = arg;
2362 	int s;
2363 
2364 	s = splnet();
2365 	rge_link_state(sc);
2366 	splx(s);
2367 
2368 	timeout_add_sec(&sc->sc_timeout, 1);
2369 }
2370 
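/* Propagate the PHY's link status to the interface. */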
2371 void
2372 rge_link_state(struct rge_softc *sc)
2373 {
2374 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
2375 	int link = LINK_STATE_DOWN;
2376 
2377 	if (rge_get_link_status(sc))
2378 		link = LINK_STATE_UP;
2379 
2380 	if (ifp->if_link_state != link) {
2381 		ifp->if_link_state = link;
2382 		if_link_state_change(ifp);
2383 	}
2384 }
2385