/*	$OpenBSD: if_rge.c,v 1.22 2023/11/10 15:51:20 bluhm Exp $	*/

/*
 * Copyright (c) 2019, 2020 Kevin Lo <kevlo@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "bpfilter.h"
#include "vlan.h"
#include "kstat.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/device.h>
#include <sys/endian.h>

#include <net/if.h>
#include <net/if_media.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#if NKSTAT > 0
#include <sys/kstat.h>
#endif

#include <machine/bus.h>
#include <machine/intr.h>

#include <dev/mii/mii.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_rgereg.h>

#ifdef RGE_DEBUG
#define DPRINTF(x)	do { if (rge_debug > 0) printf x; } while (0)
int rge_debug = 0;
#else
#define DPRINTF(x)
#endif

int		rge_match(struct device *, void *, void *);
void		rge_attach(struct device *, struct device *, void *);
int		rge_activate(struct device *, int);
int		rge_intr(void *);
int		rge_encap(struct rge_queues *, struct mbuf *, int);
int		rge_ioctl(struct ifnet *, u_long, caddr_t);
void		rge_start(struct ifqueue *);
void		rge_watchdog(struct ifnet *);
int		rge_init(struct ifnet *);
void		rge_stop(struct ifnet *);
int		rge_ifmedia_upd(struct ifnet *);
void		rge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
int		rge_allocmem(struct rge_softc *);
int		rge_newbuf(struct rge_queues *);
void		rge_discard_rxbuf(struct rge_queues *, int);
void		rge_rx_list_init(struct rge_queues *);
void		rge_tx_list_init(struct rge_queues *);
void		rge_fill_rx_ring(struct rge_queues *);
int		rge_rxeof(struct rge_queues *);
int		rge_txeof(struct rge_queues *);
void		rge_reset(struct rge_softc *);
void		rge_iff(struct rge_softc *);
void		rge_set_phy_power(struct rge_softc *, int);
void		rge_phy_config(struct rge_softc *);
void		rge_phy_config_mac_cfg2(struct rge_softc *);
void		rge_phy_config_mac_cfg3(struct rge_softc *);
void		rge_phy_config_mac_cfg4(struct rge_softc *);
void		rge_phy_config_mac_cfg5(struct rge_softc *);
void		rge_phy_config_mcu(struct rge_softc *, uint16_t);
void		rge_set_macaddr(struct rge_softc *, const uint8_t *);
void		rge_get_macaddr(struct rge_softc *, uint8_t *);
void		rge_hw_init(struct rge_softc *);
void		rge_disable_phy_ocp_pwrsave(struct rge_softc *);
void		rge_patch_phy_mcu(struct rge_softc *, int);
void		rge_add_media_types(struct rge_softc *);
void		rge_config_imtype(struct rge_softc *, int);
void		rge_disable_hw_im(struct rge_softc *);
void		rge_disable_sim_im(struct rge_softc *);
void		rge_setup_sim_im(struct rge_softc *);
void		rge_setup_intr(struct rge_softc *, int);
void		rge_exit_oob(struct rge_softc *);
void		rge_write_csi(struct rge_softc *, uint32_t, uint32_t);
uint32_t	rge_read_csi(struct rge_softc *, uint32_t);
void		rge_write_mac_ocp(struct rge_softc *, uint16_t, uint16_t);
uint16_t	rge_read_mac_ocp(struct rge_softc *, uint16_t);
void		rge_write_ephy(struct rge_softc *, uint16_t, uint16_t);
uint16_t	rge_read_ephy(struct rge_softc *, uint16_t);
void		rge_write_phy(struct rge_softc *, uint16_t, uint16_t, uint16_t);
uint16_t	rge_read_phy(struct rge_softc *, uint16_t, uint16_t);
void		rge_write_phy_ocp(struct rge_softc *, uint16_t, uint16_t);
uint16_t	rge_read_phy_ocp(struct rge_softc *, uint16_t);
int		rge_get_link_status(struct rge_softc *);
void		rge_txstart(void *);
void		rge_tick(void *);
void		rge_link_state(struct rge_softc *);
#ifndef SMALL_KERNEL
int		rge_wol(struct ifnet *, int);
void		rge_wol_power(struct rge_softc *);
#endif

#if NKSTAT > 0
void		rge_kstat_attach(struct rge_softc *);
#endif

static const struct {
	uint16_t reg;
	uint16_t val;
} rtl8125_mac_cfg2_mcu[] = {
	RTL8125_MAC_CFG2_MCU
}, rtl8125_mac_cfg3_mcu[] = {
	RTL8125_MAC_CFG3_MCU
}, rtl8125_mac_cfg4_mcu[] = {
	RTL8125_MAC_CFG4_MCU
}, rtl8125_mac_cfg5_mcu[] = {
	RTL8125_MAC_CFG5_MCU
};

const struct cfattach rge_ca = {
	sizeof(struct rge_softc), rge_match, rge_attach, NULL, rge_activate
};

struct cfdriver rge_cd = {
	NULL, "rge", DV_IFNET
};

const struct pci_matchid rge_devices[] = {
	{ PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_E3000 },
	{ PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_RTL8125 }
};

int
rge_match(struct device *parent, void *match, void *aux)
{
	return (pci_matchbyid((struct pci_attach_args *)aux, rge_devices,
	    nitems(rge_devices)));
}

void
rge_attach(struct device *parent, struct device *self, void *aux)
{
	struct rge_softc *sc = (struct rge_softc *)self;
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	struct ifnet *ifp;
	struct rge_queues *q;
	pcireg_t reg;
	uint32_t hwrev;
	uint8_t eaddr[ETHER_ADDR_LEN];
	int offset;

	pci_set_powerstate(pa->pa_pc, pa->pa_tag, PCI_PMCSR_STATE_D0);

	/*
	 * Map control/status registers.
	 */
	if (pci_mapreg_map(pa, RGE_PCI_BAR2, PCI_MAPREG_TYPE_MEM |
	    PCI_MAPREG_MEM_TYPE_64BIT, 0, &sc->rge_btag, &sc->rge_bhandle,
	    NULL, &sc->rge_bsize, 0)) {
		if (pci_mapreg_map(pa, RGE_PCI_BAR1, PCI_MAPREG_TYPE_MEM |
		    PCI_MAPREG_MEM_TYPE_32BIT, 0, &sc->rge_btag,
		    &sc->rge_bhandle, NULL, &sc->rge_bsize, 0)) {
			if (pci_mapreg_map(pa, RGE_PCI_BAR0, PCI_MAPREG_TYPE_IO,
			    0, &sc->rge_btag, &sc->rge_bhandle, NULL,
			    &sc->rge_bsize, 0)) {
				printf(": can't map mem or i/o space\n");
				return;
			}
		}
	}

	q = malloc(sizeof(struct rge_queues), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (q == NULL) {
		printf(": unable to allocate queue memory\n");
		return;
	}
	q->q_sc = sc;
	q->q_index = 0;

	sc->sc_queues = q;
	sc->sc_nqueues = 1;

	/*
	 * Allocate interrupt.
	 */
	if (pci_intr_map_msi(pa, &ih) == 0)
		sc->rge_flags |= RGE_FLAG_MSI;
	else if (pci_intr_map(pa, &ih) != 0) {
		printf(": couldn't map interrupt\n");
		return;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET | IPL_MPSAFE, rge_intr,
	    sc, sc->sc_dev.dv_xname);
	if (sc->sc_ih == NULL) {
		printf(": couldn't establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return;
	}
	printf(": %s", intrstr);

	sc->sc_dmat = pa->pa_dmat;
	sc->sc_pc = pa->pa_pc;
	sc->sc_tag = pa->pa_tag;

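	/*
	 * MAC_CFG2/MAC_CFG3 appear on RTL8125A-class silicon,
	 * MAC_CFG4/MAC_CFG5 on later RTL8125B-class parts.
	 */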
	/* Determine hardware revision */
	hwrev = RGE_READ_4(sc, RGE_TXCFG) & RGE_TXCFG_HWREV;
	switch (hwrev) {
	case 0x60800000:
		sc->rge_type = MAC_CFG2;
		break;
	case 0x60900000:
		sc->rge_type = MAC_CFG3;
		break;
	case 0x64000000:
		sc->rge_type = MAC_CFG4;
		break;
	case 0x64100000:
		sc->rge_type = MAC_CFG5;
		break;
	default:
		printf(": unknown version 0x%08x\n", hwrev);
		return;
	}

	rge_config_imtype(sc, RGE_IMTYPE_SIM);

	/*
	 * PCI Express check.
	 */
	if (pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_PCIEXPRESS,
	    &offset, NULL)) {
		/* Disable PCIe ASPM and ECPM. */
		reg = pci_conf_read(pa->pa_pc, pa->pa_tag,
		    offset + PCI_PCIE_LCSR);
		reg &= ~(PCI_PCIE_LCSR_ASPM_L0S | PCI_PCIE_LCSR_ASPM_L1 |
		    PCI_PCIE_LCSR_ECPM);
		pci_conf_write(pa->pa_pc, pa->pa_tag, offset + PCI_PCIE_LCSR,
		    reg);
	}

	rge_exit_oob(sc);
	rge_hw_init(sc);

	rge_get_macaddr(sc, eaddr);
	printf(", address %s\n", ether_sprintf(eaddr));

	memcpy(sc->sc_arpcom.ac_enaddr, eaddr, ETHER_ADDR_LEN);

	rge_set_phy_power(sc, 1);
	rge_phy_config(sc);

	if (rge_allocmem(sc))
		return;

	ifp = &sc->sc_arpcom.ac_if;
	ifp->if_softc = sc;
	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_xflags = IFXF_MPSAFE;
	ifp->if_ioctl = rge_ioctl;
	ifp->if_qstart = rge_start;
	ifp->if_watchdog = rge_watchdog;
	ifq_init_maxlen(&ifp->if_snd, RGE_TX_LIST_CNT - 1);
	ifp->if_hardmtu = RGE_JUMBO_MTU;

	ifp->if_capabilities = IFCAP_VLAN_MTU | IFCAP_CSUM_IPv4 |
	    IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;

#if NVLAN > 0
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
#endif

#ifndef SMALL_KERNEL
	ifp->if_capabilities |= IFCAP_WOL;
	ifp->if_wol = rge_wol;
	rge_wol(ifp, 0);
#endif
	timeout_set(&sc->sc_timeout, rge_tick, sc);
	task_set(&sc->sc_task, rge_txstart, sc);

	/* Initialize ifmedia structures. */
	ifmedia_init(&sc->sc_media, IFM_IMASK, rge_ifmedia_upd,
	    rge_ifmedia_sts);
	rge_add_media_types(sc);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);
	sc->sc_media.ifm_media = sc->sc_media.ifm_cur->ifm_media;

	if_attach(ifp);
	ether_ifattach(ifp);

#if NKSTAT > 0
	rge_kstat_attach(sc);
#endif
}

int
rge_activate(struct device *self, int act)
{
#ifndef SMALL_KERNEL
	struct rge_softc *sc = (struct rge_softc *)self;
#endif
	int rv = 0;

	switch (act) {
	case DVACT_POWERDOWN:
		rv = config_activate_children(self, act);
#ifndef SMALL_KERNEL
		rge_wol_power(sc);
#endif
		break;
	default:
		rv = config_activate_children(self, act);
		break;
	}
	return (rv);
}

int
rge_intr(void *arg)
{
	struct rge_softc *sc = arg;
	struct rge_queues *q = sc->sc_queues;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	uint32_t status;
	int claimed = 0, rv;

	if (!(ifp->if_flags & IFF_RUNNING))
		return (0);

	/* Disable interrupts. */
	RGE_WRITE_4(sc, RGE_IMR, 0);

	if (!(sc->rge_flags & RGE_FLAG_MSI)) {
		if ((RGE_READ_4(sc, RGE_ISR) & sc->rge_intrs) == 0)
			return (0);
	}

	status = RGE_READ_4(sc, RGE_ISR);
	if (status)
		RGE_WRITE_4(sc, RGE_ISR, status);

	if (status & RGE_ISR_PCS_TIMEOUT)
		claimed = 1;

	rv = 0;
	if (status & sc->rge_intrs) {
		rv |= rge_rxeof(q);
		rv |= rge_txeof(q);

		if (status & RGE_ISR_SYSTEM_ERR) {
			KERNEL_LOCK();
			rge_init(ifp);
			KERNEL_UNLOCK();
		}
		claimed = 1;
	}

	if (sc->rge_timerintr) {
		if (!rv) {
			/*
			 * Nothing needs to be processed; fall back
			 * to using TX/RX interrupts.
			 */
			rge_setup_intr(sc, RGE_IMTYPE_NONE);

			/*
			 * Recollect, mainly to avoid the possible
			 * race introduced by changing interrupt
			 * masks.
			 */
			rge_rxeof(q);
			rge_txeof(q);
		} else
			RGE_WRITE_4(sc, RGE_TIMERCNT, 1);
	} else if (rv) {
		/*
		 * Assume that using simulated interrupt moderation
		 * (hardware timer based) could reduce the interrupt
		 * rate.
		 */
		rge_setup_intr(sc, RGE_IMTYPE_SIM);
	}

	RGE_WRITE_4(sc, RGE_IMR, sc->rge_intrs);

	return (claimed);
}

int
rge_encap(struct rge_queues *q, struct mbuf *m, int idx)
{
	struct rge_softc *sc = q->q_sc;
	struct rge_tx_desc *d = NULL;
	struct rge_txq *txq;
	bus_dmamap_t txmap;
	uint32_t cmdsts, cflags = 0;
	int cur, error, i, last, nsegs;

	/*
	 * Set RGE_TDEXTSTS_IPCSUM if any checksum offloading is requested.
	 * Otherwise, RGE_TDEXTSTS_TCPCSUM / RGE_TDEXTSTS_UDPCSUM does not
	 * take effect.
	 */
	if ((m->m_pkthdr.csum_flags &
	    (M_IPV4_CSUM_OUT | M_TCP_CSUM_OUT | M_UDP_CSUM_OUT)) != 0) {
		cflags |= RGE_TDEXTSTS_IPCSUM;
		if (m->m_pkthdr.csum_flags & M_TCP_CSUM_OUT)
			cflags |= RGE_TDEXTSTS_TCPCSUM;
		if (m->m_pkthdr.csum_flags & M_UDP_CSUM_OUT)
			cflags |= RGE_TDEXTSTS_UDPCSUM;
	}

	txq = &q->q_tx.rge_txq[idx];
	txmap = txq->txq_dmamap;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, txmap, m, BUS_DMA_NOWAIT);
	switch (error) {
	case 0:
		break;
	case EFBIG: /* mbuf chain is too fragmented */
		if (m_defrag(m, M_DONTWAIT) == 0 &&
		    bus_dmamap_load_mbuf(sc->sc_dmat, txmap, m,
		    BUS_DMA_NOWAIT) == 0)
			break;

		/* FALLTHROUGH */
	default:
		return (0);
	}

	bus_dmamap_sync(sc->sc_dmat, txmap, 0, txmap->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	nsegs = txmap->dm_nsegs;

	/* Set up hardware VLAN tagging. */
#if NVLAN > 0
	if (m->m_flags & M_VLANTAG)
		cflags |= swap16(m->m_pkthdr.ether_vtag) | RGE_TDEXTSTS_VTAG;
#endif

	cur = idx;
	cmdsts = RGE_TDCMDSTS_SOF;

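	/*
	 * The OWN bit is withheld from the first descriptor and only
	 * set once the rest of the chain is built, so the chip never
	 * sees a half-constructed packet.
	 */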
	for (i = 0; i < txmap->dm_nsegs; i++) {
		d = &q->q_tx.rge_tx_list[cur];

		d->rge_extsts = htole32(cflags);
		d->rge_addrlo = htole32(RGE_ADDR_LO(txmap->dm_segs[i].ds_addr));
		d->rge_addrhi = htole32(RGE_ADDR_HI(txmap->dm_segs[i].ds_addr));

		cmdsts |= txmap->dm_segs[i].ds_len;

		if (cur == RGE_TX_LIST_CNT - 1)
			cmdsts |= RGE_TDCMDSTS_EOR;

		d->rge_cmdsts = htole32(cmdsts);

		last = cur;
		cmdsts = RGE_TDCMDSTS_OWN;
		cur = RGE_NEXT_TX_DESC(cur);
	}

	/* Set EOF on the last descriptor. */
	d->rge_cmdsts |= htole32(RGE_TDCMDSTS_EOF);

	/* Transfer ownership of packet to the chip. */
	d = &q->q_tx.rge_tx_list[idx];

	d->rge_cmdsts |= htole32(RGE_TDCMDSTS_OWN);

	bus_dmamap_sync(sc->sc_dmat, q->q_tx.rge_tx_list_map,
	    cur * sizeof(struct rge_tx_desc), sizeof(struct rge_tx_desc),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Update info of TX queue and descriptors. */
	txq->txq_mbuf = m;
	txq->txq_descidx = last;

	return (nsegs);
}

int
rge_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct rge_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			rge_init(ifp);
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				rge_init(ifp);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				rge_stop(ifp);
		}
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
		break;
	case SIOCGIFRXR:
		error = if_rxr_ioctl((struct if_rxrinfo *)ifr->ifr_data,
		    NULL, RGE_JUMBO_FRAMELEN, &sc->sc_queues->q_rx.rge_rx_ring);
		break;
	default:
		error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data);
	}

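	/* ENETRESET: only the RX filter needs to be reprogrammed. */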
	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			rge_iff(sc);
		error = 0;
	}

	splx(s);
	return (error);
}

void
rge_start(struct ifqueue *ifq)
{
	struct ifnet *ifp = ifq->ifq_if;
	struct rge_softc *sc = ifp->if_softc;
	struct rge_queues *q = sc->sc_queues;
	struct mbuf *m;
	int free, idx, used;
	int queued = 0;

	if (!LINK_STATE_IS_UP(ifp->if_link_state)) {
		ifq_purge(ifq);
		return;
	}

	/* Calculate free space. */
	idx = q->q_tx.rge_txq_prodidx;
	free = q->q_tx.rge_txq_considx;
	if (free <= idx)
		free += RGE_TX_LIST_CNT;
	free -= idx;

	for (;;) {
		if (RGE_TX_NSEGS >= free + 2) {
			ifq_set_oactive(&ifp->if_snd);
			break;
		}

		m = ifq_dequeue(ifq);
		if (m == NULL)
			break;

		used = rge_encap(q, m, idx);
		if (used == 0) {
			m_freem(m);
			continue;
		}

		KASSERT(used <= free);
		free -= used;

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif

		idx += used;
		if (idx >= RGE_TX_LIST_CNT)
			idx -= RGE_TX_LIST_CNT;

		queued++;
	}

	if (queued == 0)
		return;

	/* Set a timeout in case the chip goes out to lunch. */
	ifp->if_timer = 5;

	q->q_tx.rge_txq_prodidx = idx;
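	/* rge_txstart() pokes the hardware, serialized on the ifq. */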
	ifq_serialize(ifq, &sc->sc_task);
}

void
rge_watchdog(struct ifnet *ifp)
{
	struct rge_softc *sc = ifp->if_softc;

	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);
	ifp->if_oerrors++;

	rge_init(ifp);
}

int
rge_init(struct ifnet *ifp)
{
	struct rge_softc *sc = ifp->if_softc;
	struct rge_queues *q = sc->sc_queues;
	uint32_t val;
	int i;

	rge_stop(ifp);

	/* Set MAC address. */
	rge_set_macaddr(sc, sc->sc_arpcom.ac_enaddr);

	/* Set maximum frame size. */
	RGE_WRITE_2(sc, RGE_RXMAXSIZE, RGE_JUMBO_FRAMELEN);

	/* Initialize RX and TX descriptor lists. */
	rge_rx_list_init(q);
	rge_tx_list_init(q);

	/* Load the addresses of the RX and TX lists into the chip. */
	RGE_WRITE_4(sc, RGE_RXDESC_ADDR_LO,
	    RGE_ADDR_LO(q->q_rx.rge_rx_list_map->dm_segs[0].ds_addr));
	RGE_WRITE_4(sc, RGE_RXDESC_ADDR_HI,
	    RGE_ADDR_HI(q->q_rx.rge_rx_list_map->dm_segs[0].ds_addr));
	RGE_WRITE_4(sc, RGE_TXDESC_ADDR_LO,
	    RGE_ADDR_LO(q->q_tx.rge_tx_list_map->dm_segs[0].ds_addr));
	RGE_WRITE_4(sc, RGE_TXDESC_ADDR_HI,
	    RGE_ADDR_HI(q->q_tx.rge_tx_list_map->dm_segs[0].ds_addr));

	RGE_SETBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);

	RGE_CLRBIT_1(sc, 0xf1, 0x80);
	RGE_CLRBIT_1(sc, RGE_CFG2, RGE_CFG2_CLKREQ_EN);
	RGE_CLRBIT_1(sc, RGE_CFG5, RGE_CFG5_PME_STS);
	RGE_CLRBIT_1(sc, RGE_CFG3, RGE_CFG3_RDY_TO_L23);

	/* Clear interrupt moderation timer. */
	for (i = 0; i < 64; i++)
		RGE_WRITE_4(sc, RGE_INTMITI(i), 0);

	/* Set the initial RX and TX configurations. */
	RGE_WRITE_4(sc, RGE_RXCFG, RGE_RXCFG_CONFIG);
	RGE_WRITE_4(sc, RGE_TXCFG, RGE_TXCFG_CONFIG);

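	/*
	 * The CSI and MAC OCP writes below follow Realtek's reference
	 * driver; most of these registers are not publicly documented.
	 */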
	val = rge_read_csi(sc, 0x70c) & ~0xff000000;
	rge_write_csi(sc, 0x70c, val | 0x27000000);

	/* Enable hardware optimization function. */
	val = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x78) & ~0x00007000;
	pci_conf_write(sc->sc_pc, sc->sc_tag, 0x78, val | 0x00005000);

	RGE_WRITE_2(sc, 0x0382, 0x221b);

	RGE_WRITE_1(sc, RGE_RSS_CTRL, 0);

	val = RGE_READ_2(sc, RGE_RXQUEUE_CTRL) & ~0x001c;
	RGE_WRITE_2(sc, RGE_RXQUEUE_CTRL, val | (fls(sc->sc_nqueues) - 1) << 2);

	RGE_CLRBIT_1(sc, RGE_CFG1, RGE_CFG1_SPEED_DOWN);

	rge_write_mac_ocp(sc, 0xc140, 0xffff);
	rge_write_mac_ocp(sc, 0xc142, 0xffff);

	RGE_MAC_SETBIT(sc, 0xeb58, 0x0001);

	val = rge_read_mac_ocp(sc, 0xe614) & ~0x0700;
	if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3)
		rge_write_mac_ocp(sc, 0xe614, val | 0x0400);
	else
		rge_write_mac_ocp(sc, 0xe614, val | 0x0200);

	val = rge_read_mac_ocp(sc, 0xe63e) & ~0x0c00;
	rge_write_mac_ocp(sc, 0xe63e, val |
	    ((fls(sc->sc_nqueues) - 1) & 0x03) << 10);

	RGE_MAC_CLRBIT(sc, 0xe63e, 0x0030);
	if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3)
		RGE_MAC_SETBIT(sc, 0xe63e, 0x0020);

	RGE_MAC_CLRBIT(sc, 0xc0b4, 0x0001);
	RGE_MAC_SETBIT(sc, 0xc0b4, 0x0001);
	RGE_MAC_SETBIT(sc, 0xc0b4, 0x000c);

	val = rge_read_mac_ocp(sc, 0xeb6a) & ~0x00ff;
	rge_write_mac_ocp(sc, 0xeb6a, val | 0x0033);

	val = rge_read_mac_ocp(sc, 0xeb50) & ~0x03e0;
	rge_write_mac_ocp(sc, 0xeb50, val | 0x0040);

	val = rge_read_mac_ocp(sc, 0xe056) & ~0x00f0;
	rge_write_mac_ocp(sc, 0xe056, val | 0x0030);

	RGE_WRITE_1(sc, RGE_TDFNR, 0x10);

	RGE_SETBIT_1(sc, RGE_DLLPR, RGE_DLLPR_TX_10M_PS_EN);

	RGE_MAC_CLRBIT(sc, 0xe040, 0x1000);

	val = rge_read_mac_ocp(sc, 0xea1c) & ~0x0003;
	rge_write_mac_ocp(sc, 0xea1c, val | 0x0001);

	val = rge_read_mac_ocp(sc, 0xe0c0) & ~0x4f0f;
	rge_write_mac_ocp(sc, 0xe0c0, val | 0x4403);

	RGE_MAC_SETBIT(sc, 0xe052, 0x0068);
	RGE_MAC_CLRBIT(sc, 0xe052, 0x0080);

	val = rge_read_mac_ocp(sc, 0xd430) & ~0x0fff;
	rge_write_mac_ocp(sc, 0xd430, val | 0x047f);

	RGE_SETBIT_1(sc, RGE_DLLPR, RGE_DLLPR_PFM_EN | RGE_DLLPR_TX_10M_PS_EN);

	if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3)
		RGE_SETBIT_1(sc, RGE_MCUCMD, 0x01);

	/* Disable EEE plus. */
	RGE_MAC_CLRBIT(sc, 0xe080, 0x0002);

	RGE_MAC_CLRBIT(sc, 0xea1c, 0x0004);

	RGE_MAC_SETBIT(sc, 0xeb54, 0x0001);
	DELAY(1);
	RGE_MAC_CLRBIT(sc, 0xeb54, 0x0001);

	RGE_CLRBIT_4(sc, 0x1880, 0x0030);

	rge_write_mac_ocp(sc, 0xe098, 0xc302);

	if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)
		RGE_SETBIT_4(sc, RGE_RXCFG, RGE_RXCFG_VLANSTRIP);

	RGE_SETBIT_2(sc, RGE_CPLUSCMD, RGE_CPLUSCMD_RXCSUM);

	for (i = 0; i < 10; i++) {
		if (!(rge_read_mac_ocp(sc, 0xe00e) & 0x2000))
			break;
		DELAY(1000);
	}

	/* Disable RXDV gate. */
	RGE_CLRBIT_1(sc, RGE_PPSW, 0x08);
	DELAY(2000);

	rge_ifmedia_upd(ifp);

	/* Enable transmit and receive. */
	RGE_WRITE_1(sc, RGE_CMD, RGE_CMD_TXENB | RGE_CMD_RXENB);

	/* Program promiscuous mode and multicast filters. */
	rge_iff(sc);

	RGE_CLRBIT_1(sc, RGE_CFG2, RGE_CFG2_CLKREQ_EN);
	RGE_CLRBIT_1(sc, RGE_CFG5, RGE_CFG5_PME_STS);

	RGE_CLRBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);

	/* Enable interrupts. */
	rge_setup_intr(sc, RGE_IMTYPE_SIM);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	timeout_add_sec(&sc->sc_timeout, 1);

	return (0);
}

/*
 * Stop the adapter and free any mbufs allocated to the RX and TX lists.
 */
void
rge_stop(struct ifnet *ifp)
{
	struct rge_softc *sc = ifp->if_softc;
	struct rge_queues *q = sc->sc_queues;
	int i;

	timeout_del(&sc->sc_timeout);

	ifp->if_timer = 0;
	ifp->if_flags &= ~IFF_RUNNING;
	sc->rge_timerintr = 0;

	RGE_CLRBIT_4(sc, RGE_RXCFG, RGE_RXCFG_ALLPHYS | RGE_RXCFG_INDIV |
	    RGE_RXCFG_MULTI | RGE_RXCFG_BROAD | RGE_RXCFG_RUNT |
	    RGE_RXCFG_ERRPKT);

	RGE_WRITE_4(sc, RGE_IMR, 0);
	RGE_WRITE_4(sc, RGE_ISR, 0);

	/* Clear timer interrupts. */
	RGE_WRITE_4(sc, RGE_TIMERINT0, 0);
	RGE_WRITE_4(sc, RGE_TIMERINT1, 0);
	RGE_WRITE_4(sc, RGE_TIMERINT2, 0);
	RGE_WRITE_4(sc, RGE_TIMERINT3, 0);

	rge_reset(sc);

	intr_barrier(sc->sc_ih);
	ifq_barrier(&ifp->if_snd);
	ifq_clr_oactive(&ifp->if_snd);

	if (q->q_rx.rge_head != NULL) {
		m_freem(q->q_rx.rge_head);
		q->q_rx.rge_head = q->q_rx.rge_tail = NULL;
	}

	/* Free the TX list buffers. */
	for (i = 0; i < RGE_TX_LIST_CNT; i++) {
		if (q->q_tx.rge_txq[i].txq_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat,
			    q->q_tx.rge_txq[i].txq_dmamap);
			m_freem(q->q_tx.rge_txq[i].txq_mbuf);
			q->q_tx.rge_txq[i].txq_mbuf = NULL;
		}
	}

	/* Free the RX list buffers. */
	for (i = 0; i < RGE_RX_LIST_CNT; i++) {
		if (q->q_rx.rge_rxq[i].rxq_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat,
			    q->q_rx.rge_rxq[i].rxq_dmamap);
			m_freem(q->q_rx.rge_rxq[i].rxq_mbuf);
			q->q_rx.rge_rxq[i].rxq_mbuf = NULL;
		}
	}
}

/*
 * Set media options.
 */
int
rge_ifmedia_upd(struct ifnet *ifp)
{
	struct rge_softc *sc = ifp->if_softc;
	struct ifmedia *ifm = &sc->sc_media;
	int anar, gig, val;

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	/* Disable Gigabit Lite. */
	RGE_PHY_CLRBIT(sc, 0xa428, 0x0200);
	RGE_PHY_CLRBIT(sc, 0xa5ea, 0x0001);

	val = rge_read_phy_ocp(sc, 0xa5d4);
	val &= ~RGE_ADV_2500TFDX;

	anar = gig = 0;
	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_AUTO:
		anar = ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10;
		gig = GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX;
		val |= RGE_ADV_2500TFDX;
		break;
	case IFM_2500_T:
		anar = ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10;
		gig = GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX;
		val |= RGE_ADV_2500TFDX;
		ifp->if_baudrate = IF_Mbps(2500);
		break;
	case IFM_1000_T:
		anar = ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10;
		gig = GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX;
		ifp->if_baudrate = IF_Gbps(1);
		break;
	case IFM_100_TX:
		gig = rge_read_phy(sc, 0, MII_100T2CR) &
		    ~(GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX);
		anar = ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) ?
		    ANAR_TX | ANAR_TX_FD | ANAR_10_FD | ANAR_10 :
		    ANAR_TX | ANAR_10_FD | ANAR_10;
		ifp->if_baudrate = IF_Mbps(100);
		break;
	case IFM_10_T:
		gig = rge_read_phy(sc, 0, MII_100T2CR) &
		    ~(GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX);
		anar = ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) ?
		    ANAR_10_FD | ANAR_10 : ANAR_10;
		ifp->if_baudrate = IF_Mbps(10);
		break;
	default:
		printf("%s: unsupported media type\n", sc->sc_dev.dv_xname);
		return (EINVAL);
	}

	rge_write_phy(sc, 0, MII_ANAR, anar | ANAR_PAUSE_ASYM | ANAR_FC);
	rge_write_phy(sc, 0, MII_100T2CR, gig);
	rge_write_phy_ocp(sc, 0xa5d4, val);
	rge_write_phy(sc, 0, MII_BMCR, BMCR_RESET | BMCR_AUTOEN |
	    BMCR_STARTNEG);

	return (0);
}

/*
 * Report current media status.
 */
void
rge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct rge_softc *sc = ifp->if_softc;
	uint16_t status = 0;

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (rge_get_link_status(sc)) {
		ifmr->ifm_status |= IFM_ACTIVE;

		status = RGE_READ_2(sc, RGE_PHYSTAT);
		if ((status & RGE_PHYSTAT_FDX) ||
		    (status & RGE_PHYSTAT_2500MBPS))
			ifmr->ifm_active |= IFM_FDX;
		else
			ifmr->ifm_active |= IFM_HDX;

		if (status & RGE_PHYSTAT_10MBPS)
			ifmr->ifm_active |= IFM_10_T;
		else if (status & RGE_PHYSTAT_100MBPS)
			ifmr->ifm_active |= IFM_100_TX;
		else if (status & RGE_PHYSTAT_1000MBPS)
			ifmr->ifm_active |= IFM_1000_T;
		else if (status & RGE_PHYSTAT_2500MBPS)
			ifmr->ifm_active |= IFM_2500_T;
	}
}

/*
 * Allocate memory for RX/TX rings.
 */
int
rge_allocmem(struct rge_softc *sc)
{
	struct rge_queues *q = sc->sc_queues;
	int error, i;

	/* Allocate DMA'able memory for the TX ring. */
	error = bus_dmamap_create(sc->sc_dmat, RGE_TX_LIST_SZ, 1,
	    RGE_TX_LIST_SZ, 0, BUS_DMA_NOWAIT, &q->q_tx.rge_tx_list_map);
	if (error) {
		printf("%s: can't create TX list map\n", sc->sc_dev.dv_xname);
		return (error);
	}
	error = bus_dmamem_alloc(sc->sc_dmat, RGE_TX_LIST_SZ, RGE_ALIGN, 0,
	    &q->q_tx.rge_tx_listseg, 1, &q->q_tx.rge_tx_listnseg,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO);
	if (error) {
		printf("%s: can't alloc TX list\n", sc->sc_dev.dv_xname);
		return (error);
	}

	/* Load the map for the TX ring. */
	error = bus_dmamem_map(sc->sc_dmat, &q->q_tx.rge_tx_listseg,
	    q->q_tx.rge_tx_listnseg, RGE_TX_LIST_SZ,
	    (caddr_t *)&q->q_tx.rge_tx_list, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
	if (error) {
		printf("%s: can't map TX dma buffers\n", sc->sc_dev.dv_xname);
		bus_dmamem_free(sc->sc_dmat, &q->q_tx.rge_tx_listseg,
		    q->q_tx.rge_tx_listnseg);
		return (error);
	}
	error = bus_dmamap_load(sc->sc_dmat, q->q_tx.rge_tx_list_map,
	    q->q_tx.rge_tx_list, RGE_TX_LIST_SZ, NULL, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: can't load TX dma map\n", sc->sc_dev.dv_xname);
		bus_dmamap_destroy(sc->sc_dmat, q->q_tx.rge_tx_list_map);
		bus_dmamem_unmap(sc->sc_dmat,
		    (caddr_t)q->q_tx.rge_tx_list, RGE_TX_LIST_SZ);
		bus_dmamem_free(sc->sc_dmat, &q->q_tx.rge_tx_listseg,
		    q->q_tx.rge_tx_listnseg);
		return (error);
	}

	/* Create DMA maps for TX buffers. */
	for (i = 0; i < RGE_TX_LIST_CNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, RGE_JUMBO_FRAMELEN,
		    RGE_TX_NSEGS, RGE_JUMBO_FRAMELEN, 0, BUS_DMA_NOWAIT,
		    &q->q_tx.rge_txq[i].txq_dmamap);
		if (error) {
			printf("%s: can't create DMA map for TX\n",
			    sc->sc_dev.dv_xname);
			return (error);
		}
	}

	/* Allocate DMA'able memory for the RX ring. */
	error = bus_dmamap_create(sc->sc_dmat, RGE_RX_LIST_SZ, 1,
	    RGE_RX_LIST_SZ, 0, BUS_DMA_NOWAIT, &q->q_rx.rge_rx_list_map);
	if (error) {
		printf("%s: can't create RX list map\n", sc->sc_dev.dv_xname);
		return (error);
	}
	error = bus_dmamem_alloc(sc->sc_dmat, RGE_RX_LIST_SZ, RGE_ALIGN, 0,
	    &q->q_rx.rge_rx_listseg, 1, &q->q_rx.rge_rx_listnseg,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO);
	if (error) {
		printf("%s: can't alloc RX list\n", sc->sc_dev.dv_xname);
		return (error);
	}

	/* Load the map for the RX ring. */
	error = bus_dmamem_map(sc->sc_dmat, &q->q_rx.rge_rx_listseg,
	    q->q_rx.rge_rx_listnseg, RGE_RX_LIST_SZ,
	    (caddr_t *)&q->q_rx.rge_rx_list, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
	if (error) {
		printf("%s: can't map RX dma buffers\n", sc->sc_dev.dv_xname);
		bus_dmamem_free(sc->sc_dmat, &q->q_rx.rge_rx_listseg,
		    q->q_rx.rge_rx_listnseg);
		return (error);
	}
	error = bus_dmamap_load(sc->sc_dmat, q->q_rx.rge_rx_list_map,
	    q->q_rx.rge_rx_list, RGE_RX_LIST_SZ, NULL, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: can't load RX dma map\n", sc->sc_dev.dv_xname);
		bus_dmamap_destroy(sc->sc_dmat, q->q_rx.rge_rx_list_map);
		bus_dmamem_unmap(sc->sc_dmat,
		    (caddr_t)q->q_rx.rge_rx_list, RGE_RX_LIST_SZ);
		bus_dmamem_free(sc->sc_dmat, &q->q_rx.rge_rx_listseg,
		    q->q_rx.rge_rx_listnseg);
		return (error);
	}

	/* Create DMA maps for RX buffers. */
	for (i = 0; i < RGE_RX_LIST_CNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, RGE_JUMBO_FRAMELEN, 1,
		    RGE_JUMBO_FRAMELEN, 0, BUS_DMA_NOWAIT,
		    &q->q_rx.rge_rxq[i].rxq_dmamap);
		if (error) {
			printf("%s: can't create DMA map for RX\n",
			    sc->sc_dev.dv_xname);
			return (error);
		}
	}

	return (error);
}

/*
 * Initialize the RX descriptor and attach an mbuf cluster.
 */
int
rge_newbuf(struct rge_queues *q)
{
	struct rge_softc *sc = q->q_sc;
	struct mbuf *m;
	struct rge_rx_desc *r;
	struct rge_rxq *rxq;
	bus_dmamap_t rxmap;
	int idx;

	m = MCLGETL(NULL, M_DONTWAIT, RGE_JUMBO_FRAMELEN);
	if (m == NULL)
		return (ENOBUFS);

	m->m_len = m->m_pkthdr.len = RGE_JUMBO_FRAMELEN;

	idx = q->q_rx.rge_rxq_prodidx;
	rxq = &q->q_rx.rge_rxq[idx];
	rxmap = rxq->rxq_dmamap;

	if (bus_dmamap_load_mbuf(sc->sc_dmat, rxmap, m, BUS_DMA_NOWAIT)) {
		m_freem(m);
		return (ENOBUFS);
	}

	bus_dmamap_sync(sc->sc_dmat, rxmap, 0, rxmap->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	/* Map the segments into RX descriptors. */
	r = &q->q_rx.rge_rx_list[idx];

	rxq->rxq_mbuf = m;

	r->hi_qword1.rx_qword4.rge_extsts = 0;
	r->hi_qword0.rge_addr = htole64(rxmap->dm_segs[0].ds_addr);

	r->hi_qword1.rx_qword4.rge_cmdsts = htole32(rxmap->dm_segs[0].ds_len);
	if (idx == RGE_RX_LIST_CNT - 1)
		r->hi_qword1.rx_qword4.rge_cmdsts |= htole32(RGE_RDCMDSTS_EOR);

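	/* Hand the descriptor to the chip only after it is fully set up. */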
	r->hi_qword1.rx_qword4.rge_cmdsts |= htole32(RGE_RDCMDSTS_OWN);

	bus_dmamap_sync(sc->sc_dmat, q->q_rx.rge_rx_list_map,
	    idx * sizeof(struct rge_rx_desc), sizeof(struct rge_rx_desc),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	q->q_rx.rge_rxq_prodidx = RGE_NEXT_RX_DESC(idx);

	return (0);
}

void
rge_discard_rxbuf(struct rge_queues *q, int idx)
{
	struct rge_softc *sc = q->q_sc;
	struct rge_rx_desc *r;

	r = &q->q_rx.rge_rx_list[idx];

	r->hi_qword1.rx_qword4.rge_cmdsts = htole32(RGE_JUMBO_FRAMELEN);
	r->hi_qword1.rx_qword4.rge_extsts = 0;
	if (idx == RGE_RX_LIST_CNT - 1)
		r->hi_qword1.rx_qword4.rge_cmdsts |= htole32(RGE_RDCMDSTS_EOR);
	r->hi_qword1.rx_qword4.rge_cmdsts |= htole32(RGE_RDCMDSTS_OWN);

	bus_dmamap_sync(sc->sc_dmat, q->q_rx.rge_rx_list_map,
	    idx * sizeof(struct rge_rx_desc), sizeof(struct rge_rx_desc),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

void
rge_rx_list_init(struct rge_queues *q)
{
	memset(q->q_rx.rge_rx_list, 0, RGE_RX_LIST_SZ);

	q->q_rx.rge_rxq_prodidx = q->q_rx.rge_rxq_considx = 0;
	q->q_rx.rge_head = q->q_rx.rge_tail = NULL;

	if_rxr_init(&q->q_rx.rge_rx_ring, 2, RGE_RX_LIST_CNT - 1);
	rge_fill_rx_ring(q);
}

void
rge_fill_rx_ring(struct rge_queues *q)
{
	struct if_rxring *rxr = &q->q_rx.rge_rx_ring;
	int slots;

	for (slots = if_rxr_get(rxr, RGE_RX_LIST_CNT); slots > 0; slots--) {
		if (rge_newbuf(q) == ENOBUFS)
			break;
	}
	if_rxr_put(rxr, slots);
}

void
rge_tx_list_init(struct rge_queues *q)
{
	struct rge_softc *sc = q->q_sc;
	int i;

	memset(q->q_tx.rge_tx_list, 0, RGE_TX_LIST_SZ);

	for (i = 0; i < RGE_TX_LIST_CNT; i++)
		q->q_tx.rge_txq[i].txq_mbuf = NULL;

	bus_dmamap_sync(sc->sc_dmat, q->q_tx.rge_tx_list_map, 0,
	    q->q_tx.rge_tx_list_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	q->q_tx.rge_txq_prodidx = q->q_tx.rge_txq_considx = 0;
}

int
rge_rxeof(struct rge_queues *q)
{
	struct rge_softc *sc = q->q_sc;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct mbuf *m;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct if_rxring *rxr = &q->q_rx.rge_rx_ring;
	struct rge_rx_desc *cur_rx;
	struct rge_rxq *rxq;
	uint32_t rxstat, extsts;
	int i, total_len, rx = 0;

	for (i = q->q_rx.rge_rxq_considx; if_rxr_inuse(rxr) > 0;
	    i = RGE_NEXT_RX_DESC(i)) {
		/* Invalidate the descriptor memory. */
		bus_dmamap_sync(sc->sc_dmat, q->q_rx.rge_rx_list_map,
		    i * sizeof(struct rge_rx_desc), sizeof(struct rge_rx_desc),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		cur_rx = &q->q_rx.rge_rx_list[i];

		if (RGE_OWN(cur_rx))
			break;

		rxstat = letoh32(cur_rx->hi_qword1.rx_qword4.rge_cmdsts);
		extsts = letoh32(cur_rx->hi_qword1.rx_qword4.rge_extsts);

		total_len = RGE_RXBYTES(cur_rx);
		rxq = &q->q_rx.rge_rxq[i];
		m = rxq->rxq_mbuf;
		rxq->rxq_mbuf = NULL;
		if_rxr_put(rxr, 1);
		rx = 1;

		/* Invalidate the RX mbuf and unload its map. */
		bus_dmamap_sync(sc->sc_dmat, rxq->rxq_dmamap, 0,
		    rxq->rxq_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, rxq->rxq_dmamap);

		if ((rxstat & (RGE_RDCMDSTS_SOF | RGE_RDCMDSTS_EOF)) !=
		    (RGE_RDCMDSTS_SOF | RGE_RDCMDSTS_EOF)) {
			ifp->if_ierrors++;
			m_freem(m);
			rge_discard_rxbuf(q, i);
			continue;
		}

		if (rxstat & RGE_RDCMDSTS_RXERRSUM) {
			ifp->if_ierrors++;
			/*
			 * If this is part of a multi-fragment packet,
			 * discard all the pieces.
			 */
			if (q->q_rx.rge_head != NULL) {
				m_freem(q->q_rx.rge_head);
				q->q_rx.rge_head = q->q_rx.rge_tail = NULL;
			}
			m_freem(m);
			rge_discard_rxbuf(q, i);
			continue;
		}

		if (q->q_rx.rge_head != NULL) {
			m->m_len = total_len;
			/*
			 * Special case: if there are 4 bytes or less
			 * in this buffer, the mbuf can be discarded:
			 * the last 4 bytes are the CRC, which we don't
			 * care about anyway.
			 */
			if (m->m_len <= ETHER_CRC_LEN) {
				q->q_rx.rge_tail->m_len -=
				    (ETHER_CRC_LEN - m->m_len);
				m_freem(m);
			} else {
				m->m_len -= ETHER_CRC_LEN;
				m->m_flags &= ~M_PKTHDR;
				q->q_rx.rge_tail->m_next = m;
			}
			m = q->q_rx.rge_head;
			q->q_rx.rge_head = q->q_rx.rge_tail = NULL;
			m->m_pkthdr.len = total_len - ETHER_CRC_LEN;
		} else
			m->m_pkthdr.len = m->m_len =
			    (total_len - ETHER_CRC_LEN);

		/* Check IP header checksum. */
		if (!(extsts & RGE_RDEXTSTS_IPCSUMERR) &&
		    (extsts & RGE_RDEXTSTS_IPV4))
			m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;

		/* Check TCP/UDP checksum. */
		if ((extsts & (RGE_RDEXTSTS_IPV4 | RGE_RDEXTSTS_IPV6)) &&
		    (((extsts & RGE_RDEXTSTS_TCPPKT) &&
		    !(extsts & RGE_RDEXTSTS_TCPCSUMERR)) ||
		    ((extsts & RGE_RDEXTSTS_UDPPKT) &&
		    !(extsts & RGE_RDEXTSTS_UDPCSUMERR))))
			m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK |
			    M_UDP_CSUM_IN_OK;

#if NVLAN > 0
		if (extsts & RGE_RDEXTSTS_VTAG) {
			m->m_pkthdr.ether_vtag =
			    ntohs(extsts & RGE_RDEXTSTS_VLAN_MASK);
			m->m_flags |= M_VLANTAG;
		}
#endif

		ml_enqueue(&ml, m);
	}

	if (ifiq_input(&ifp->if_rcv, &ml))
		if_rxr_livelocked(rxr);

	q->q_rx.rge_rxq_considx = i;
	rge_fill_rx_ring(q);

	return (rx);
}

int
rge_txeof(struct rge_queues *q)
{
	struct rge_softc *sc = q->q_sc;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct rge_txq *txq;
	uint32_t txstat;
	int cons, idx, prod;
	int free = 0;

	prod = q->q_tx.rge_txq_prodidx;
	cons = q->q_tx.rge_txq_considx;

	while (prod != cons) {
		txq = &q->q_tx.rge_txq[cons];
		idx = txq->txq_descidx;

		bus_dmamap_sync(sc->sc_dmat, q->q_tx.rge_tx_list_map,
		    idx * sizeof(struct rge_tx_desc),
		    sizeof(struct rge_tx_desc),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		txstat = letoh32(q->q_tx.rge_tx_list[idx].rge_cmdsts);

		if (txstat & RGE_TDCMDSTS_OWN) {
			free = 2;
			break;
		}

		bus_dmamap_sync(sc->sc_dmat, txq->txq_dmamap, 0,
		    txq->txq_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, txq->txq_dmamap);
		m_freem(txq->txq_mbuf);
		txq->txq_mbuf = NULL;

		if (txstat & (RGE_TDCMDSTS_EXCESSCOLL | RGE_TDCMDSTS_COLL))
			ifp->if_collisions++;
		if (txstat & RGE_TDCMDSTS_TXERR)
			ifp->if_oerrors++;

		bus_dmamap_sync(sc->sc_dmat, q->q_tx.rge_tx_list_map,
		    idx * sizeof(struct rge_tx_desc),
		    sizeof(struct rge_tx_desc),
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		cons = RGE_NEXT_TX_DESC(idx);
		free = 1;
	}

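	/*
	 * free == 1: at least one descriptor was reclaimed.
	 * free == 2: the chip still owns the next descriptor;
	 * transmission is still in progress.
	 */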
	if (free == 0)
		return (0);

	q->q_tx.rge_txq_considx = cons;

	if (ifq_is_oactive(&ifp->if_snd))
		ifq_restart(&ifp->if_snd);
	else if (free == 2)
		ifq_serialize(&ifp->if_snd, &sc->sc_task);
	else
		ifp->if_timer = 0;

	return (1);
}

void
rge_reset(struct rge_softc *sc)
{
	int i;

	/* Enable RXDV gate. */
	RGE_SETBIT_1(sc, RGE_PPSW, 0x08);
	DELAY(2000);

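	/* Wait up to 150ms (3000 * 50us) for the TX/RX FIFOs to empty. */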
	for (i = 0; i < 3000; i++) {
		DELAY(50);
		if ((RGE_READ_1(sc, RGE_MCUCMD) & (RGE_MCUCMD_RXFIFO_EMPTY |
		    RGE_MCUCMD_TXFIFO_EMPTY)) == (RGE_MCUCMD_RXFIFO_EMPTY |
		    RGE_MCUCMD_TXFIFO_EMPTY))
			break;
	}
	if (sc->rge_type == MAC_CFG4 || sc->rge_type == MAC_CFG5) {
		for (i = 0; i < 3000; i++) {
			DELAY(50);
			if ((RGE_READ_2(sc, RGE_IM) & 0x0103) == 0x0103)
				break;
		}
	}

	DELAY(2000);

	/* Soft reset. */
	RGE_WRITE_1(sc, RGE_CMD, RGE_CMD_RESET);

	for (i = 0; i < RGE_TIMEOUT; i++) {
		DELAY(100);
		if (!(RGE_READ_1(sc, RGE_CMD) & RGE_CMD_RESET))
			break;
	}
	if (i == RGE_TIMEOUT)
		printf("%s: reset never completed!\n", sc->sc_dev.dv_xname);
}

void
rge_iff(struct rge_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct arpcom *ac = &sc->sc_arpcom;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint32_t hashes[2];
	uint32_t rxfilt;
	int h = 0;

	rxfilt = RGE_READ_4(sc, RGE_RXCFG);
	rxfilt &= ~(RGE_RXCFG_ALLPHYS | RGE_RXCFG_MULTI);
	ifp->if_flags &= ~IFF_ALLMULTI;

	/*
	 * Always accept frames destined to our station address.
	 * Always accept broadcast frames.
	 */
	rxfilt |= RGE_RXCFG_INDIV | RGE_RXCFG_BROAD;

	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
		ifp->if_flags |= IFF_ALLMULTI;
		rxfilt |= RGE_RXCFG_MULTI;
		if (ifp->if_flags & IFF_PROMISC)
			rxfilt |= RGE_RXCFG_ALLPHYS;
		hashes[0] = hashes[1] = 0xffffffff;
	} else {
		rxfilt |= RGE_RXCFG_MULTI;
		/* Program new filter. */
		memset(hashes, 0, sizeof(hashes));

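		/* The top 6 bits of the BE CRC32 pick one of 64 hash bits. */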
		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			h = ether_crc32_be(enm->enm_addrlo,
			    ETHER_ADDR_LEN) >> 26;

			if (h < 32)
				hashes[0] |= (1 << h);
			else
				hashes[1] |= (1 << (h - 32));

			ETHER_NEXT_MULTI(step, enm);
		}
	}

	RGE_WRITE_4(sc, RGE_RXCFG, rxfilt);
	RGE_WRITE_4(sc, RGE_MAR0, swap32(hashes[1]));
	RGE_WRITE_4(sc, RGE_MAR4, swap32(hashes[0]));
}

void
rge_set_phy_power(struct rge_softc *sc, int on)
{
	int i;

	if (on) {
		RGE_SETBIT_1(sc, RGE_PMCH, 0xc0);

		rge_write_phy(sc, 0, MII_BMCR, BMCR_AUTOEN);

		for (i = 0; i < RGE_TIMEOUT; i++) {
			if ((rge_read_phy_ocp(sc, 0xa420) & 0x0007) == 3)
				break;
			DELAY(1000);
		}
	} else {
		rge_write_phy(sc, 0, MII_BMCR, BMCR_AUTOEN | BMCR_PDOWN);
		RGE_CLRBIT_1(sc, RGE_PMCH, 0x80);
		RGE_CLRBIT_1(sc, RGE_PPSW, 0x40);
	}
}

void
rge_phy_config(struct rge_softc *sc)
{
	/* Read microcode version. */
	rge_write_phy_ocp(sc, 0xa436, 0x801e);
	sc->rge_mcodever = rge_read_phy_ocp(sc, 0xa438);

	switch (sc->rge_type) {
	case MAC_CFG2:
		rge_phy_config_mac_cfg2(sc);
		break;
	case MAC_CFG3:
		rge_phy_config_mac_cfg3(sc);
		break;
	case MAC_CFG4:
		rge_phy_config_mac_cfg4(sc);
		break;
	case MAC_CFG5:
		rge_phy_config_mac_cfg5(sc);
		break;
	default:
		break;	/* Can't happen. */
	}

	rge_write_phy(sc, 0x0a5b, 0x12,
	    rge_read_phy(sc, 0x0a5b, 0x12) & ~0x8000);

	/* Disable EEE. */
	RGE_MAC_CLRBIT(sc, 0xe040, 0x0003);
	if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3) {
		RGE_MAC_CLRBIT(sc, 0xeb62, 0x0006);
		RGE_PHY_CLRBIT(sc, 0xa432, 0x0010);
	}
	RGE_PHY_CLRBIT(sc, 0xa5d0, 0x0006);
	RGE_PHY_CLRBIT(sc, 0xa6d4, 0x0001);
	RGE_PHY_CLRBIT(sc, 0xa6d8, 0x0010);
	RGE_PHY_CLRBIT(sc, 0xa428, 0x0080);
	RGE_PHY_CLRBIT(sc, 0xa4a2, 0x0200);

	rge_patch_phy_mcu(sc, 1);
	RGE_MAC_CLRBIT(sc, 0xe052, 0x0001);
	RGE_PHY_CLRBIT(sc, 0xa442, 0x3000);
	RGE_PHY_CLRBIT(sc, 0xa430, 0x8000);
	rge_patch_phy_mcu(sc, 0);
}

void
rge_phy_config_mac_cfg2(struct rge_softc *sc)
{
	uint16_t val;
	int i;

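	/* The EPHY/PHY OCP values below mirror Realtek's reference driver. */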
	for (i = 0; i < nitems(rtl8125_mac_cfg2_ephy); i++)
		rge_write_ephy(sc, rtl8125_mac_cfg2_ephy[i].reg,
		    rtl8125_mac_cfg2_ephy[i].val);

	rge_phy_config_mcu(sc, RGE_MAC_CFG2_MCODE_VER);

	val = rge_read_phy_ocp(sc, 0xad40) & ~0x03ff;
	rge_write_phy_ocp(sc, 0xad40, val | 0x0084);
	RGE_PHY_SETBIT(sc, 0xad4e, 0x0010);
	val = rge_read_phy_ocp(sc, 0xad16) & ~0x03ff;
	rge_write_phy_ocp(sc, 0xad16, val | 0x0006);
	val = rge_read_phy_ocp(sc, 0xad32) & ~0x03ff;
	rge_write_phy_ocp(sc, 0xad32, val | 0x0006);
	RGE_PHY_CLRBIT(sc, 0xac08, 0x1100);
	val = rge_read_phy_ocp(sc, 0xac8a) & ~0xf000;
	rge_write_phy_ocp(sc, 0xac8a, val | 0x7000);
	RGE_PHY_SETBIT(sc, 0xad18, 0x0400);
	RGE_PHY_SETBIT(sc, 0xad1a, 0x03ff);
	RGE_PHY_SETBIT(sc, 0xad1c, 0x03ff);

	rge_write_phy_ocp(sc, 0xa436, 0x80ea);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0xc400);
	rge_write_phy_ocp(sc, 0xa436, 0x80eb);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0x0700;
	rge_write_phy_ocp(sc, 0xa438, val | 0x0300);
	rge_write_phy_ocp(sc, 0xa436, 0x80f8);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x1c00);
	rge_write_phy_ocp(sc, 0xa436, 0x80f1);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x3000);
	rge_write_phy_ocp(sc, 0xa436, 0x80fe);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0xa500);
	rge_write_phy_ocp(sc, 0xa436, 0x8102);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x5000);
	rge_write_phy_ocp(sc, 0xa436, 0x8105);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x3300);
	rge_write_phy_ocp(sc, 0xa436, 0x8100);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x7000);
	rge_write_phy_ocp(sc, 0xa436, 0x8104);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0xf000);
	rge_write_phy_ocp(sc, 0xa436, 0x8106);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x6500);
	rge_write_phy_ocp(sc, 0xa436, 0x80dc);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0xed00);
	rge_write_phy_ocp(sc, 0xa436, 0x80df);
	RGE_PHY_SETBIT(sc, 0xa438, 0x0100);
	rge_write_phy_ocp(sc, 0xa436, 0x80e1);
	RGE_PHY_CLRBIT(sc, 0xa438, 0x0100);
	val = rge_read_phy_ocp(sc, 0xbf06) & ~0x003f;
	rge_write_phy_ocp(sc, 0xbf06, val | 0x0038);
	rge_write_phy_ocp(sc, 0xa436, 0x819f);
	rge_write_phy_ocp(sc, 0xa438, 0xd0b6);
	rge_write_phy_ocp(sc, 0xbc34, 0x5555);
	val = rge_read_phy_ocp(sc, 0xbf0a) & ~0x0e00;
	rge_write_phy_ocp(sc, 0xbf0a, val | 0x0a00);
	RGE_PHY_CLRBIT(sc, 0xa5c0, 0x0400);
	RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
}

void
rge_phy_config_mac_cfg3(struct rge_softc *sc)
{
	uint16_t val;
	int i;
	static const uint16_t mac_cfg3_a438_value[] =
	    { 0x0043, 0x00a7, 0x00d6, 0x00ec, 0x00f6, 0x00fb, 0x00fd, 0x00ff,
	      0x00bb, 0x0058, 0x0029, 0x0013, 0x0009, 0x0004, 0x0002 };

	static const uint16_t mac_cfg3_b88e_value[] =
	    { 0xc091, 0x6e12, 0xc092, 0x1214, 0xc094, 0x1516, 0xc096, 0x171b,
	      0xc098, 0x1b1c, 0xc09a, 0x1f1f, 0xc09c, 0x2021, 0xc09e, 0x2224,
	      0xc0a0, 0x2424, 0xc0a2, 0x2424, 0xc0a4, 0x2424, 0xc018, 0x0af2,
	      0xc01a, 0x0d4a, 0xc01c, 0x0f26, 0xc01e, 0x118d, 0xc020, 0x14f3,
	      0xc022, 0x175a, 0xc024, 0x19c0, 0xc026, 0x1c26, 0xc089, 0x6050,
	      0xc08a, 0x5f6e, 0xc08c, 0x6e6e, 0xc08e, 0x6e6e, 0xc090, 0x6e12 };

	for (i = 0; i < nitems(rtl8125_mac_cfg3_ephy); i++)
		rge_write_ephy(sc, rtl8125_mac_cfg3_ephy[i].reg,
		    rtl8125_mac_cfg3_ephy[i].val);

	val = rge_read_ephy(sc, 0x002a) & ~0x7000;
	rge_write_ephy(sc, 0x002a, val | 0x3000);
	RGE_EPHY_CLRBIT(sc, 0x0019, 0x0040);
	RGE_EPHY_SETBIT(sc, 0x001b, 0x0e00);
	RGE_EPHY_CLRBIT(sc, 0x001b, 0x7000);
	rge_write_ephy(sc, 0x0002, 0x6042);
	rge_write_ephy(sc, 0x0006, 0x0014);
	val = rge_read_ephy(sc, 0x006a) & ~0x7000;
	rge_write_ephy(sc, 0x006a, val | 0x3000);
	RGE_EPHY_CLRBIT(sc, 0x0059, 0x0040);
	RGE_EPHY_SETBIT(sc, 0x005b, 0x0e00);
	RGE_EPHY_CLRBIT(sc, 0x005b, 0x7000);
	rge_write_ephy(sc, 0x0042, 0x6042);
	rge_write_ephy(sc, 0x0046, 0x0014);

	rge_phy_config_mcu(sc, RGE_MAC_CFG3_MCODE_VER);

	RGE_PHY_SETBIT(sc, 0xad4e, 0x0010);
	val = rge_read_phy_ocp(sc, 0xad16) & ~0x03ff;
	rge_write_phy_ocp(sc, 0xad16, val | 0x03ff);
	val = rge_read_phy_ocp(sc, 0xad32) & ~0x003f;
	rge_write_phy_ocp(sc, 0xad32, val | 0x0006);
	RGE_PHY_CLRBIT(sc, 0xac08, 0x1000);
	RGE_PHY_CLRBIT(sc, 0xac08, 0x0100);
	val = rge_read_phy_ocp(sc, 0xacc0) & ~0x0003;
	rge_write_phy_ocp(sc, 0xacc0, val | 0x0002);
	val = rge_read_phy_ocp(sc, 0xad40) & ~0x00e0;
	rge_write_phy_ocp(sc, 0xad40, val | 0x0040);
	val = rge_read_phy_ocp(sc, 0xad40) & ~0x0007;
	rge_write_phy_ocp(sc, 0xad40, val | 0x0004);
	RGE_PHY_CLRBIT(sc, 0xac14, 0x0080);
	RGE_PHY_CLRBIT(sc, 0xac80, 0x0300);
	val = rge_read_phy_ocp(sc, 0xac5e) & ~0x0007;
	rge_write_phy_ocp(sc, 0xac5e, val | 0x0002);
	rge_write_phy_ocp(sc, 0xad4c, 0x00a8);
	rge_write_phy_ocp(sc, 0xac5c, 0x01ff);
	val = rge_read_phy_ocp(sc, 0xac8a) & ~0x00f0;
	rge_write_phy_ocp(sc, 0xac8a, val | 0x0030);
	rge_write_phy_ocp(sc, 0xb87c, 0x8157);
	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
	rge_write_phy_ocp(sc, 0xb87e, val | 0x0500);
	rge_write_phy_ocp(sc, 0xb87c, 0x8159);
	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
	rge_write_phy_ocp(sc, 0xb87e, val | 0x0700);
	RGE_WRITE_2(sc, RGE_EEE_TXIDLE_TIMER, RGE_JUMBO_MTU + ETHER_HDR_LEN +
	    32);
	rge_write_phy_ocp(sc, 0xb87c, 0x80a2);
	rge_write_phy_ocp(sc, 0xb87e, 0x0153);
	rge_write_phy_ocp(sc, 0xb87c, 0x809c);
	rge_write_phy_ocp(sc, 0xb87e, 0x0153);

	rge_write_phy_ocp(sc, 0xa436, 0x81b3);
	for (i = 0; i < nitems(mac_cfg3_a438_value); i++)
		rge_write_phy_ocp(sc, 0xa438, mac_cfg3_a438_value[i]);
	for (i = 0; i < 26; i++)
		rge_write_phy_ocp(sc, 0xa438, 0);
	rge_write_phy_ocp(sc, 0xa436, 0x8257);
	rge_write_phy_ocp(sc, 0xa438, 0x020f);
	rge_write_phy_ocp(sc, 0xa436, 0x80ea);
	rge_write_phy_ocp(sc, 0xa438, 0x7843);

	rge_patch_phy_mcu(sc, 1);
	RGE_PHY_CLRBIT(sc, 0xb896, 0x0001);
	RGE_PHY_CLRBIT(sc, 0xb892, 0xff00);
	for (i = 0; i < nitems(mac_cfg3_b88e_value); i += 2) {
		rge_write_phy_ocp(sc, 0xb88e, mac_cfg3_b88e_value[i]);
		rge_write_phy_ocp(sc, 0xb890, mac_cfg3_b88e_value[i + 1]);
	}
	RGE_PHY_SETBIT(sc, 0xb896, 0x0001);
	rge_patch_phy_mcu(sc, 0);

	RGE_PHY_SETBIT(sc, 0xd068, 0x2000);
	rge_write_phy_ocp(sc, 0xa436, 0x81a2);
	RGE_PHY_SETBIT(sc, 0xa438, 0x0100);
	val = rge_read_phy_ocp(sc, 0xb54c) & ~0xff00;
	rge_write_phy_ocp(sc, 0xb54c, val | 0xdb00);
	RGE_PHY_CLRBIT(sc, 0xa454, 0x0001);
	RGE_PHY_SETBIT(sc, 0xa5d4, 0x0020);
	RGE_PHY_CLRBIT(sc, 0xad4e, 0x0010);
	RGE_PHY_CLRBIT(sc, 0xa86a, 0x0001);
	RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
}

void
rge_phy_config_mac_cfg4(struct rge_softc *sc)
{
	uint16_t val;
	int i;
	static const uint16_t mac_cfg4_b87c_value[] =
	    { 0x8013, 0x0700, 0x8fb9, 0x2801, 0x8fba, 0x0100, 0x8fbc, 0x1900,
	      0x8fbe, 0xe100, 0x8fc0, 0x0800, 0x8fc2, 0xe500, 0x8fc4, 0x0f00,
	      0x8fc6, 0xf100, 0x8fc8, 0x0400, 0x8fca, 0xf300, 0x8fcc, 0xfd00,
	      0x8fce, 0xff00, 0x8fd0, 0xfb00, 0x8fd2, 0x0100, 0x8fd4, 0xf400,
	      0x8fd6, 0xff00, 0x8fd8, 0xf600, 0x813d, 0x390e, 0x814f, 0x790e,
	      0x80b0, 0x0f31 };

	for (i = 0; i < nitems(rtl8125_mac_cfg4_ephy); i++)
		rge_write_ephy(sc, rtl8125_mac_cfg4_ephy[i].reg,
		    rtl8125_mac_cfg4_ephy[i].val);

	rge_write_phy_ocp(sc, 0xbf86, 0x9000);
	RGE_PHY_SETBIT(sc, 0xc402, 0x0400);
	RGE_PHY_CLRBIT(sc, 0xc402, 0x0400);
	rge_write_phy_ocp(sc, 0xbd86, 0x1010);
	rge_write_phy_ocp(sc, 0xbd88, 0x1010);
	val = rge_read_phy_ocp(sc, 0xbd4e) & ~0x0c00;
	rge_write_phy_ocp(sc, 0xbd4e, val | 0x0800);
	val = rge_read_phy_ocp(sc, 0xbf46) & ~0x0f00;
	rge_write_phy_ocp(sc, 0xbf46, val | 0x0700);

	rge_phy_config_mcu(sc, RGE_MAC_CFG4_MCODE_VER);

	RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
	RGE_PHY_SETBIT(sc, 0xbc08, 0x000c);
	rge_write_phy_ocp(sc, 0xa436, 0x8fff);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x0400);
	for (i = 0; i < 6; i++) {
		rge_write_phy_ocp(sc, 0xb87c, 0x8560 + i * 2);
		if (i < 3)
			rge_write_phy_ocp(sc, 0xb87e, 0x19cc);
		else
			rge_write_phy_ocp(sc, 0xb87e, 0x147d);
	}
	rge_write_phy_ocp(sc, 0xb87c, 0x8ffe);
	rge_write_phy_ocp(sc, 0xb87e, 0x0907);
	val = rge_read_phy_ocp(sc, 0xacda) & ~0xff00;
	rge_write_phy_ocp(sc, 0xacda, val | 0xff00);
	val = rge_read_phy_ocp(sc, 0xacde) & ~0xf000;
	rge_write_phy_ocp(sc, 0xacde, val | 0xf000);
	rge_write_phy_ocp(sc, 0xb87c, 0x80d6);
	rge_write_phy_ocp(sc, 0xb87e, 0x2801);
	rge_write_phy_ocp(sc, 0xb87c, 0x80f2);
	rge_write_phy_ocp(sc, 0xb87e, 0x2801);
	rge_write_phy_ocp(sc, 0xb87c, 0x80f4);
	rge_write_phy_ocp(sc, 0xb87e, 0x6077);
	rge_write_phy_ocp(sc, 0xb506, 0x01e7);
	rge_write_phy_ocp(sc, 0xac8c, 0x0ffc);
	rge_write_phy_ocp(sc, 0xac46, 0xb7b4);
	rge_write_phy_ocp(sc, 0xac50, 0x0fbc);
	rge_write_phy_ocp(sc, 0xac3c, 0x9240);
	rge_write_phy_ocp(sc, 0xac4e, 0x0db4);
	rge_write_phy_ocp(sc, 0xacc6, 0x0707);
	rge_write_phy_ocp(sc, 0xacc8, 0xa0d3);
	rge_write_phy_ocp(sc, 0xad08, 0x0007);
	for (i = 0; i < nitems(mac_cfg4_b87c_value); i += 2) {
		rge_write_phy_ocp(sc, 0xb87c, mac_cfg4_b87c_value[i]);
		rge_write_phy_ocp(sc, 0xb87e, mac_cfg4_b87c_value[i + 1]);
	}
	RGE_PHY_SETBIT(sc, 0xbf4c, 0x0002);
	RGE_PHY_SETBIT(sc, 0xbcca, 0x0300);
	rge_write_phy_ocp(sc, 0xb87c, 0x8141);
	rge_write_phy_ocp(sc, 0xb87e, 0x320e);
	rge_write_phy_ocp(sc, 0xb87c, 0x8153);
	rge_write_phy_ocp(sc, 0xb87e, 0x720e);
	RGE_PHY_CLRBIT(sc, 0xa432, 0x0040);
	rge_write_phy_ocp(sc, 0xb87c, 0x8529);
	rge_write_phy_ocp(sc, 0xb87e, 0x050e);
	RGE_WRITE_2(sc, RGE_EEE_TXIDLE_TIMER, RGE_JUMBO_MTU + ETHER_HDR_LEN +
	    32);
	rge_write_phy_ocp(sc, 0xa436, 0x816c);
	rge_write_phy_ocp(sc, 0xa438, 0xc4a0);
	rge_write_phy_ocp(sc, 0xa436, 0x8170);
	rge_write_phy_ocp(sc, 0xa438, 0xc4a0);
	rge_write_phy_ocp(sc, 0xa436, 0x8174);
	rge_write_phy_ocp(sc, 0xa438, 0x04a0);
	rge_write_phy_ocp(sc, 0xa436, 0x8178);
	rge_write_phy_ocp(sc, 0xa438, 0x04a0);
	rge_write_phy_ocp(sc, 0xa436, 0x817c);
	rge_write_phy_ocp(sc, 0xa438, 0x0719);
	rge_write_phy_ocp(sc, 0xa436, 0x8ff4);
	rge_write_phy_ocp(sc, 0xa438, 0x0400);
	rge_write_phy_ocp(sc, 0xa436, 0x8ff1);
	rge_write_phy_ocp(sc, 0xa438, 0x0404);
	rge_write_phy_ocp(sc, 0xbf4a, 0x001b);
	for (i = 0; i < 6; i++) {
		rge_write_phy_ocp(sc, 0xb87c, 0x8033 + i * 4);
		if (i == 2)
			rge_write_phy_ocp(sc, 0xb87e, 0xfc32);
		else
			rge_write_phy_ocp(sc, 0xb87e, 0x7c13);
	}
	rge_write_phy_ocp(sc, 0xb87c, 0x8145);
	rge_write_phy_ocp(sc, 0xb87e, 0x370e);
	rge_write_phy_ocp(sc, 0xb87c, 0x8157);
	rge_write_phy_ocp(sc, 0xb87e, 0x770e);
	rge_write_phy_ocp(sc, 0xb87c, 0x8169);
	rge_write_phy_ocp(sc, 0xb87e, 0x0d0a);
	rge_write_phy_ocp(sc, 0xb87c, 0x817b);
	rge_write_phy_ocp(sc, 0xb87e, 0x1d0a);
	rge_write_phy_ocp(sc, 0xa436, 0x8217);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x5000);
	rge_write_phy_ocp(sc, 0xa436, 0x821a);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x5000);
	rge_write_phy_ocp(sc, 0xa436, 0x80da);
	rge_write_phy_ocp(sc, 0xa438, 0x0403);
	rge_write_phy_ocp(sc, 0xa436, 0x80dc);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x1000);
	rge_write_phy_ocp(sc, 0xa436, 0x80b3);
	rge_write_phy_ocp(sc, 0xa438, 0x0384);
	rge_write_phy_ocp(sc, 0xa436, 0x80b7);
	rge_write_phy_ocp(sc, 0xa438, 0x2007);
	rge_write_phy_ocp(sc, 0xa436, 0x80ba);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x6c00);
	rge_write_phy_ocp(sc, 0xa436, 0x80b5);
	rge_write_phy_ocp(sc, 0xa438, 0xf009);
	rge_write_phy_ocp(sc, 0xa436, 0x80bd);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x9f00);
	rge_write_phy_ocp(sc, 0xa436, 0x80c7);
	rge_write_phy_ocp(sc, 0xa438, 0xf083);
	rge_write_phy_ocp(sc, 0xa436, 0x80dd);
	rge_write_phy_ocp(sc, 0xa438, 0x03f0);
	rge_write_phy_ocp(sc, 0xa436, 0x80df);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x1000);
	rge_write_phy_ocp(sc, 0xa436, 0x80cb);
	rge_write_phy_ocp(sc, 0xa438, 0x2007);
	rge_write_phy_ocp(sc, 0xa436, 0x80ce);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x6c00);
	rge_write_phy_ocp(sc, 0xa436, 0x80c9);
	rge_write_phy_ocp(sc, 0xa438, 0x8009);
	rge_write_phy_ocp(sc, 0xa436, 0x80d1);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x8000);
	rge_write_phy_ocp(sc, 0xa436, 0x80a3);
	rge_write_phy_ocp(sc, 0xa438, 0x200a);
	rge_write_phy_ocp(sc, 0xa436, 0x80a5);
	rge_write_phy_ocp(sc, 0xa438, 0xf0ad);
	rge_write_phy_ocp(sc, 0xa436, 0x809f);
	rge_write_phy_ocp(sc, 0xa438, 0x6073);
	rge_write_phy_ocp(sc, 0xa436, 0x80a1);
	rge_write_phy_ocp(sc, 0xa438, 0x000b);
	rge_write_phy_ocp(sc, 0xa436, 0x80a9);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0xc000);
	rge_patch_phy_mcu(sc, 1);
	RGE_PHY_CLRBIT(sc, 0xb896, 0x0001);
	RGE_PHY_CLRBIT(sc, 0xb892, 0xff00);
1870 	rge_write_phy_ocp(sc, 0xb88e, 0xc23e);
1871 	rge_write_phy_ocp(sc, 0xb890, 0x0000);
1872 	rge_write_phy_ocp(sc, 0xb88e, 0xc240);
1873 	rge_write_phy_ocp(sc, 0xb890, 0x0103);
1874 	rge_write_phy_ocp(sc, 0xb88e, 0xc242);
1875 	rge_write_phy_ocp(sc, 0xb890, 0x0507);
1876 	rge_write_phy_ocp(sc, 0xb88e, 0xc244);
1877 	rge_write_phy_ocp(sc, 0xb890, 0x090b);
1878 	rge_write_phy_ocp(sc, 0xb88e, 0xc246);
1879 	rge_write_phy_ocp(sc, 0xb890, 0x0c0e);
1880 	rge_write_phy_ocp(sc, 0xb88e, 0xc248);
1881 	rge_write_phy_ocp(sc, 0xb890, 0x1012);
1882 	rge_write_phy_ocp(sc, 0xb88e, 0xc24a);
1883 	rge_write_phy_ocp(sc, 0xb890, 0x1416);
1884 	RGE_PHY_SETBIT(sc, 0xb896, 0x0001);
1885 	rge_patch_phy_mcu(sc, 0);
1886 	RGE_PHY_SETBIT(sc, 0xa86a, 0x0001);
1887 	RGE_PHY_SETBIT(sc, 0xa6f0, 0x0001);
1888 	rge_write_phy_ocp(sc, 0xbfa0, 0xd70d);
1889 	rge_write_phy_ocp(sc, 0xbfa2, 0x4100);
1890 	rge_write_phy_ocp(sc, 0xbfa4, 0xe868);
1891 	rge_write_phy_ocp(sc, 0xbfa6, 0xdc59);
1892 	rge_write_phy_ocp(sc, 0xb54c, 0x3c18);
1893 	RGE_PHY_CLRBIT(sc, 0xbfa4, 0x0020);
1894 	rge_write_phy_ocp(sc, 0xa436, 0x817d);
1895 	RGE_PHY_SETBIT(sc, 0xa438, 0x1000);
1896 }
1897 
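/*
 * PHY setup for chips identified as MAC_CFG5; same structure as the
 * MAC_CFG4 variant above (EPHY fixups, PHY MCU patch, then vendor
 * register pokes), only with a much shorter poke list.
 */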
1898 void
1899 rge_phy_config_mac_cfg5(struct rge_softc *sc)
1900 {
1901 	uint16_t val;
1902 	int i;
1903 
1904 	for (i = 0; i < nitems(rtl8125_mac_cfg5_ephy); i++)
1905 		rge_write_ephy(sc, rtl8125_mac_cfg5_ephy[i].reg,
1906 		    rtl8125_mac_cfg5_ephy[i].val);
1907 
1908 	rge_phy_config_mcu(sc, RGE_MAC_CFG5_MCODE_VER);
1909 
1910 	RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
1911 	val = rge_read_phy_ocp(sc, 0xac46) & ~0x00f0;
1912 	rge_write_phy_ocp(sc, 0xac46, val | 0x0090);
1913 	val = rge_read_phy_ocp(sc, 0xad30) & ~0x0003;
1914 	rge_write_phy_ocp(sc, 0xad30, val | 0x0001);
1915 	RGE_WRITE_2(sc, RGE_EEE_TXIDLE_TIMER, RGE_JUMBO_MTU + ETHER_HDR_LEN +
1916 	    32);
1917 	rge_write_phy_ocp(sc, 0xb87c, 0x80f5);
1918 	rge_write_phy_ocp(sc, 0xb87e, 0x760e);
1919 	rge_write_phy_ocp(sc, 0xb87c, 0x8107);
1920 	rge_write_phy_ocp(sc, 0xb87e, 0x360e);
1921 	rge_write_phy_ocp(sc, 0xb87c, 0x8551);
1922 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
1923 	rge_write_phy_ocp(sc, 0xb87e, val | 0x0800);
1924 	val = rge_read_phy_ocp(sc, 0xbf00) & ~0xe000;
1925 	rge_write_phy_ocp(sc, 0xbf00, val | 0xa000);
1926 	val = rge_read_phy_ocp(sc, 0xbf46) & ~0x0f00;
1927 	rge_write_phy_ocp(sc, 0xbf46, val | 0x0300);
1928 	for (i = 0; i < 10; i++) {
1929 		rge_write_phy_ocp(sc, 0xa436, 0x8044 + i * 6);
1930 		rge_write_phy_ocp(sc, 0xa438, 0x2417);
1931 	}
1932 	RGE_PHY_SETBIT(sc, 0xa4ca, 0x0040);
1933 	val = rge_read_phy_ocp(sc, 0xbf84) & ~0xe000;
1934 	rge_write_phy_ocp(sc, 0xbf84, val | 0xa000);
1935 	rge_write_phy_ocp(sc, 0xa436, 0x8170);
1936 	val = rge_read_phy_ocp(sc, 0xa438) & ~0x2700;
1937 	rge_write_phy_ocp(sc, 0xa438, val | 0xd800);
1938 }
1939 
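/*
 * Load the PHY MCU patch ("microcode") unless the version cached in the
 * softc already matches.  The sequence is: raise the patch request, arm
 * patch mode on MAC_CFG2/3 (the 0x8024 key and 0xb820 bit 0x0080,
 * apparently), stream the per-chip register/value table into the PHY,
 * drop the patch request, and record the new version at address 0x801e.
 * For reference, the version already programmed could be read back
 * through the same window, e.g. (sketch, not used here):
 *
 *	rge_write_phy_ocp(sc, 0xa436, 0x801e);
 *	ver = rge_read_phy_ocp(sc, 0xa438);
 */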
1940 void
1941 rge_phy_config_mcu(struct rge_softc *sc, uint16_t mcode_version)
1942 {
1943 	if (sc->rge_mcodever != mcode_version) {
1944 		int i;
1945 
1946 		rge_patch_phy_mcu(sc, 1);
1947 
1948 		if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3) {
1949 			rge_write_phy_ocp(sc, 0xa436, 0x8024);
1950 			if (sc->rge_type == MAC_CFG2)
1951 				rge_write_phy_ocp(sc, 0xa438, 0x8600);
1952 			else
1953 				rge_write_phy_ocp(sc, 0xa438, 0x8601);
1954 			rge_write_phy_ocp(sc, 0xa436, 0xb82e);
1955 			rge_write_phy_ocp(sc, 0xa438, 0x0001);
1956 
1957 			RGE_PHY_SETBIT(sc, 0xb820, 0x0080);
1958 		}
1959 
1960 		if (sc->rge_type == MAC_CFG2) {
1961 			for (i = 0; i < nitems(rtl8125_mac_cfg2_mcu); i++) {
1962 				rge_write_phy_ocp(sc,
1963 				    rtl8125_mac_cfg2_mcu[i].reg,
1964 				    rtl8125_mac_cfg2_mcu[i].val);
1965 			}
1966 		} else if (sc->rge_type == MAC_CFG3) {
1967 			for (i = 0; i < nitems(rtl8125_mac_cfg3_mcu); i++) {
1968 				rge_write_phy_ocp(sc,
1969 				    rtl8125_mac_cfg3_mcu[i].reg,
1970 				    rtl8125_mac_cfg3_mcu[i].val);
1971 			}
1972 		} else if (sc->rge_type == MAC_CFG4) {
1973 			for (i = 0; i < nitems(rtl8125_mac_cfg4_mcu); i++) {
1974 				rge_write_phy_ocp(sc,
1975 				    rtl8125_mac_cfg4_mcu[i].reg,
1976 				    rtl8125_mac_cfg4_mcu[i].val);
1977 			}
1978 		} else if (sc->rge_type == MAC_CFG5) {
1979 			for (i = 0; i < nitems(rtl8125_mac_cfg5_mcu); i++) {
1980 				rge_write_phy_ocp(sc,
1981 				    rtl8125_mac_cfg5_mcu[i].reg,
1982 				    rtl8125_mac_cfg5_mcu[i].val);
1983 			}
1984 		}
1985 
1986 		if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3) {
1987 			RGE_PHY_CLRBIT(sc, 0xb820, 0x0080);
1988 
1989 			rge_write_phy_ocp(sc, 0xa436, 0);
1990 			rge_write_phy_ocp(sc, 0xa438, 0);
1991 			RGE_PHY_CLRBIT(sc, 0xb82e, 0x0001);
1992 			rge_write_phy_ocp(sc, 0xa436, 0x8024);
1993 			rge_write_phy_ocp(sc, 0xa438, 0);
1994 		}
1995 
1996 		rge_patch_phy_mcu(sc, 0);
1997 
1998 		/* Write microcode version. */
1999 		rge_write_phy_ocp(sc, 0xa436, 0x801e);
2000 		rge_write_phy_ocp(sc, 0xa438, mcode_version);
2001 	}
2002 }
2003 
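/*
 * Program the station address.  The six bytes are packed little-endian
 * into two 32-bit writes, so 00:11:22:33:44:55 is written as
 * RGE_MAC0 = 0x33221100 and RGE_MAC4 = 0x00005544; the RGE_EECMD
 * unlock/lock around it is needed to make the otherwise read-only ID
 * registers writable.
 */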
2004 void
2005 rge_set_macaddr(struct rge_softc *sc, const uint8_t *addr)
2006 {
2007 	RGE_SETBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
2008 	RGE_WRITE_4(sc, RGE_MAC0,
2009 	    addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
2010 	RGE_WRITE_4(sc, RGE_MAC4,
2011 	    addr[5] <<  8 | addr[4]);
2012 	RGE_CLRBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
2013 }
2014 
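/*
 * Read the station address back with one 32-bit and one 16-bit access;
 * note the casts assume a sufficiently aligned destination buffer.
 */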
2015 void
2016 rge_get_macaddr(struct rge_softc *sc, uint8_t *addr)
2017 {
2018 	*(uint32_t *)&addr[0] = RGE_READ_4(sc, RGE_ADDR0);
2019 	*(uint16_t *)&addr[4] = RGE_READ_2(sc, RGE_ADDR1);
2020 }
2021 
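/*
 * One-time MAC initialisation: clear the PME status and CLKREQ bits
 * under the config-write unlock, disable UPS (ultra power saving, per
 * the vendor driver), quiesce the MAC MCU by clearing what appear to be
 * its breakpoint registers at 0xfc28-0xfc38, and, on MAC_CFG3/MAC_CFG5,
 * load the vendor "bps" patch table before touching the PHY and PCIe
 * config space.
 */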
2022 void
2023 rge_hw_init(struct rge_softc *sc)
2024 {
2025 	int i;
2026 
2027 	RGE_SETBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
2028 	RGE_CLRBIT_1(sc, RGE_CFG5, RGE_CFG5_PME_STS);
2029 	RGE_CLRBIT_1(sc, RGE_CFG2, RGE_CFG2_CLKREQ_EN);
2030 	RGE_CLRBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
2031 	RGE_CLRBIT_1(sc, 0xf1, 0x80);
2032 
2033 	/* Disable UPS. */
2034 	RGE_MAC_CLRBIT(sc, 0xd40a, 0x0010);
2035 
2036 	/* Configure MAC MCU. */
2037 	rge_write_mac_ocp(sc, 0xfc38, 0);
2038 
2039 	for (i = 0xfc28; i < 0xfc38; i += 2)
2040 		rge_write_mac_ocp(sc, i, 0);
2041 
2042 	DELAY(3000);
2043 	rge_write_mac_ocp(sc, 0xfc26, 0);
2044 
2045 	if (sc->rge_type == MAC_CFG3) {
2046 		for (i = 0; i < nitems(rtl8125_mac_bps); i++) {
2047 			rge_write_mac_ocp(sc, rtl8125_mac_bps[i].reg,
2048 			    rtl8125_mac_bps[i].val);
2049 		}
2050 	} else if (sc->rge_type == MAC_CFG5) {
2051 		for (i = 0; i < nitems(rtl8125b_mac_bps); i++) {
2052 			rge_write_mac_ocp(sc, rtl8125b_mac_bps[i].reg,
2053 			    rtl8125b_mac_bps[i].val);
2054 		}
2055 	}
2056 
2057 	/* Disable PHY power saving. */
2058 	rge_disable_phy_ocp_pwrsave(sc);
2059 
2060 	/* Set PCIe uncorrectable error status. */
2061 	rge_write_csi(sc, 0x108,
2062 	    rge_read_csi(sc, 0x108) | 0x00100000);
2064 }
2065 
2066 void
2067 rge_disable_phy_ocp_pwrsave(struct rge_softc *sc)
2068 {
2069 	if (rge_read_phy_ocp(sc, 0xc416) != 0x0500) {
2070 		rge_patch_phy_mcu(sc, 1);
2071 		rge_write_phy_ocp(sc, 0xc416, 0);
2072 		rge_write_phy_ocp(sc, 0xc416, 0x0500);
2073 		rge_patch_phy_mcu(sc, 0);
2074 	}
2075 }
2076 
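/*
 * Raise or drop the PHY MCU patch request (bit 0x0010 in 0xb820) and
 * wait up to 100ms for the patch-ready flag (bit 0x0040 in 0xb800).
 * The same "set" state of the flag is polled in both directions.
 */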
2077 void
2078 rge_patch_phy_mcu(struct rge_softc *sc, int set)
2079 {
2080 	int i;
2081 
2082 	if (set)
2083 		RGE_PHY_SETBIT(sc, 0xb820, 0x0010);
2084 	else
2085 		RGE_PHY_CLRBIT(sc, 0xb820, 0x0010);
2086 
2087 	for (i = 0; i < 1000; i++) {
2088 		if ((rge_read_phy_ocp(sc, 0xb800) & 0x0040) == 0x0040)
2089 			break;
2090 		DELAY(100);
2091 	}
2092 	if (i == 1000) {
2093 		DPRINTF(("timeout waiting to patch phy mcu\n"));
2094 		return;
2095 	}
2096 }
2097 
2098 void
2099 rge_add_media_types(struct rge_softc *sc)
2100 {
2101 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_10_T, 0, NULL);
2102 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_10_T | IFM_FDX, 0, NULL);
2103 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_100_TX, 0, NULL);
2104 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_100_TX | IFM_FDX, 0, NULL);
2105 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_1000_T, 0, NULL);
2106 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
2107 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_2500_T, 0, NULL);
2108 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_2500_T | IFM_FDX, 0, NULL);
2109 }
2110 
2111 void
2112 rge_config_imtype(struct rge_softc *sc, int imtype)
2113 {
2114 	switch (imtype) {
2115 	case RGE_IMTYPE_NONE:
2116 		sc->rge_intrs = RGE_INTRS;
2117 		break;
2118 	case RGE_IMTYPE_SIM:
2119 		sc->rge_intrs = RGE_INTRS_TIMER;
2120 		break;
2121 	default:
2122 		panic("%s: unknown imtype %d", sc->sc_dev.dv_xname, imtype);
2123 	}
2124 }
2125 
2126 void
2127 rge_disable_hw_im(struct rge_softc *sc)
2128 {
2129 	RGE_WRITE_2(sc, RGE_IM, 0);
2130 }
2131 
2132 void
2133 rge_disable_sim_im(struct rge_softc *sc)
2134 {
2135 	RGE_WRITE_4(sc, RGE_TIMERINT0, 0);
2136 	sc->rge_timerintr = 0;
2137 }
2138 
2139 void
2140 rge_setup_sim_im(struct rge_softc *sc)
2141 {
2142 	RGE_WRITE_4(sc, RGE_TIMERINT0, 0x2600);
2143 	RGE_WRITE_4(sc, RGE_TIMERCNT, 1);
2144 	sc->rge_timerintr = 1;
2145 }
2146 
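/*
 * Select between plain per-packet interrupts (RGE_IMTYPE_NONE) and
 * simulated interrupt moderation (RGE_IMTYPE_SIM), where the chip's
 * TIMERINT0 timer batches completions; writing RGE_TIMERCNT apparently
 * reloads the timer, and 0x2600 is the vendor-chosen period.
 */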
2147 void
2148 rge_setup_intr(struct rge_softc *sc, int imtype)
2149 {
2150 	rge_config_imtype(sc, imtype);
2151 
2152 	/* Enable interrupts. */
2153 	RGE_WRITE_4(sc, RGE_IMR, sc->rge_intrs);
2154 
2155 	switch (imtype) {
2156 	case RGE_IMTYPE_NONE:
2157 		rge_disable_sim_im(sc);
2158 		rge_disable_hw_im(sc);
2159 		break;
2160 	case RGE_IMTYPE_SIM:
2161 		rge_disable_hw_im(sc);
2162 		rge_setup_sim_im(sc);
2163 		break;
2164 	default:
2165 		panic("%s: unknown imtype %d", sc->sc_dev.dv_xname, imtype);
2166 	}
2167 }
2168 
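/*
 * Bring the chip out of its out-of-band (firmware/WoL) state: quiesce
 * the RX filter, disable RealWoW, reset, clear the OOB flag, and
 * handshake with the firmware via bit 0x0200 of RGE_TWICMD.  If the
 * UPS-resume flag is set, wait for the PHY to report state 2 (up to
 * RGE_TIMEOUT ms) before clearing the resume bookkeeping.
 */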
2169 void
2170 rge_exit_oob(struct rge_softc *sc)
2171 {
2172 	int i;
2173 
2174 	RGE_CLRBIT_4(sc, RGE_RXCFG, RGE_RXCFG_ALLPHYS | RGE_RXCFG_INDIV |
2175 	    RGE_RXCFG_MULTI | RGE_RXCFG_BROAD | RGE_RXCFG_RUNT |
2176 	    RGE_RXCFG_ERRPKT);
2177 
2178 	/* Disable RealWoW. */
2179 	rge_write_mac_ocp(sc, 0xc0bc, 0x00ff);
2180 
2181 	rge_reset(sc);
2182 
2183 	/* Disable OOB. */
2184 	RGE_CLRBIT_1(sc, RGE_MCUCMD, RGE_MCUCMD_IS_OOB);
2185 
2186 	RGE_MAC_CLRBIT(sc, 0xe8de, 0x4000);
2187 
2188 	for (i = 0; i < 10; i++) {
2189 		DELAY(100);
2190 		if (RGE_READ_2(sc, RGE_TWICMD) & 0x0200)
2191 			break;
2192 	}
2193 
2194 	rge_write_mac_ocp(sc, 0xc0aa, 0x07d0);
2195 	rge_write_mac_ocp(sc, 0xc0a6, 0x01b5);
2196 	rge_write_mac_ocp(sc, 0xc01e, 0x5555);
2197 
2198 	for (i = 0; i < 10; i++) {
2199 		DELAY(100);
2200 		if (RGE_READ_2(sc, RGE_TWICMD) & 0x0200)
2201 			break;
2202 	}
2203 
2204 	if (rge_read_mac_ocp(sc, 0xd42c) & 0x0100) {
2205 		printf("%s: rge_exit_oob(): rtl8125_is_ups_resume!!\n",
2206 		    sc->sc_dev.dv_xname);
2207 		for (i = 0; i < RGE_TIMEOUT; i++) {
2208 			if ((rge_read_phy_ocp(sc, 0xa420) & 0x0007) == 2)
2209 				break;
2210 			DELAY(1000);
2211 		}
2212 		RGE_MAC_CLRBIT(sc, 0xd408, 0x0100);
2213 		if (sc->rge_type == MAC_CFG4 || sc->rge_type == MAC_CFG5)
2214 			RGE_PHY_CLRBIT(sc, 0xa466, 0x0001);
2215 		RGE_PHY_CLRBIT(sc, 0xa468, 0x000a);
2216 	}
2217 }
2218 
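/*
 * CSI access: an indirect window into PCIe configuration space (used
 * above to reach offset 0x108).  Writes put the data in RGE_CSIDR and
 * poll RGE_CSIAR_BUSY until the chip clears it; reads post the address
 * and poll until the chip raises the same bit to signal that RGE_CSIDR
 * holds valid data.  Note the inverted flag sense between the two
 * directions.
 */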
2219 void
2220 rge_write_csi(struct rge_softc *sc, uint32_t reg, uint32_t val)
2221 {
2222 	int i;
2223 
2224 	RGE_WRITE_4(sc, RGE_CSIDR, val);
2225 	RGE_WRITE_4(sc, RGE_CSIAR, (reg & RGE_CSIAR_ADDR_MASK) |
2226 	    (RGE_CSIAR_BYTE_EN << RGE_CSIAR_BYTE_EN_SHIFT) | RGE_CSIAR_BUSY);
2227 
2228 	for (i = 0; i < 10; i++) {
2229 		DELAY(100);
2230 		if (!(RGE_READ_4(sc, RGE_CSIAR) & RGE_CSIAR_BUSY))
2231 			break;
2232 	}
2233 
2234 	DELAY(20);
2235 }
2236 
2237 uint32_t
2238 rge_read_csi(struct rge_softc *sc, uint32_t reg)
2239 {
2240 	int i;
2241 
2242 	RGE_WRITE_4(sc, RGE_CSIAR, (reg & RGE_CSIAR_ADDR_MASK) |
2243 	    (RGE_CSIAR_BYTE_EN << RGE_CSIAR_BYTE_EN_SHIFT));
2244 
2245 	for (i = 0; i < 10; i++) {
2246 		DELAY(100);
2247 		if (RGE_READ_4(sc, RGE_CSIAR) & RGE_CSIAR_BUSY)
2248 			break;
2249 	}
2250 
2251 	DELAY(20);
2252 
2253 	return (RGE_READ_4(sc, RGE_CSIDR));
2254 }
2255 
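/*
 * MAC OCP registers are reached through the single RGE_MACOCP dword:
 * the word-aligned register number goes in the upper half, the data in
 * the lower half, and the top bit marks a write.  Assuming
 * RGE_MACOCP_ADDR_SHIFT is 16 and RGE_MACOCP_BUSY is bit 31 (as in
 * if_rgereg.h), writing 0x1234 to OCP register 0xfc26 becomes
 * RGE_WRITE_4(sc, RGE_MACOCP, 0xfe131234).
 */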
2256 void
2257 rge_write_mac_ocp(struct rge_softc *sc, uint16_t reg, uint16_t val)
2258 {
2259 	uint32_t tmp;
2260 
2261 	tmp = (reg >> 1) << RGE_MACOCP_ADDR_SHIFT;
2262 	tmp += val;
2263 	tmp |= RGE_MACOCP_BUSY;
2264 	RGE_WRITE_4(sc, RGE_MACOCP, tmp);
2265 }
2266 
2267 uint16_t
2268 rge_read_mac_ocp(struct rge_softc *sc, uint16_t reg)
2269 {
2270 	uint32_t val;
2271 
2272 	val = (reg >> 1) << RGE_MACOCP_ADDR_SHIFT;
2273 	RGE_WRITE_4(sc, RGE_MACOCP, val);
2274 
2275 	return (RGE_READ_4(sc, RGE_MACOCP) & RGE_MACOCP_DATA_MASK);
2276 }
2277 
2278 void
2279 rge_write_ephy(struct rge_softc *sc, uint16_t reg, uint16_t val)
2280 {
2281 	uint32_t tmp;
2282 	int i;
2283 
2284 	tmp = (reg & RGE_EPHYAR_ADDR_MASK) << RGE_EPHYAR_ADDR_SHIFT;
2285 	tmp |= RGE_EPHYAR_BUSY | (val & RGE_EPHYAR_DATA_MASK);
2286 	RGE_WRITE_4(sc, RGE_EPHYAR, tmp);
2287 
2288 	for (i = 0; i < 10; i++) {
2289 		DELAY(100);
2290 		if (!(RGE_READ_4(sc, RGE_EPHYAR) & RGE_EPHYAR_BUSY))
2291 			break;
2292 	}
2293 
2294 	DELAY(20);
2295 }
2296 
2297 uint16_t
2298 rge_read_ephy(struct rge_softc *sc, uint16_t reg)
2299 {
2300 	uint32_t val;
2301 	int i;
2302 
2303 	val = (reg & RGE_EPHYAR_ADDR_MASK) << RGE_EPHYAR_ADDR_SHIFT;
2304 	RGE_WRITE_4(sc, RGE_EPHYAR, val);
2305 
2306 	for (i = 0; i < 10; i++) {
2307 		DELAY(100);
2308 		val = RGE_READ_4(sc, RGE_EPHYAR);
2309 		if (val & RGE_EPHYAR_BUSY)
2310 			break;
2311 	}
2312 
2313 	DELAY(20);
2314 
2315 	return (val & RGE_EPHYAR_DATA_MASK);
2316 }
2317 
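/*
 * Classic MII register accesses are translated onto the PHY OCP space.
 * With addr == 0 the mapping below works out to
 *
 *	ocp = (RGE_PHYBASE + reg / 8) * 16 + (reg % 8) * 2
 *
 * so, assuming RGE_PHYBASE is 0xa40, MII register 9 lands at OCP
 * address 0xa412; a non-zero addr instead selects an explicit OCP page
 * via (addr << 4) + (reg - 16) * 2.
 */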
2318 void
2319 rge_write_phy(struct rge_softc *sc, uint16_t addr, uint16_t reg, uint16_t val)
2320 {
2321 	uint16_t off, phyaddr;
2322 
2323 	phyaddr = addr ? addr : RGE_PHYBASE + (reg / 8);
2324 	phyaddr <<= 4;
2325 
2326 	off = addr ? reg : 0x10 + (reg % 8);
2327 
2328 	phyaddr += (off - 16) << 1;
2329 
2330 	rge_write_phy_ocp(sc, phyaddr, val);
2331 }
2332 
2333 uint16_t
2334 rge_read_phy(struct rge_softc *sc, uint16_t addr, uint16_t reg)
2335 {
2336 	uint16_t off, phyaddr;
2337 
2338 	phyaddr = addr ? addr : RGE_PHYBASE + (reg / 8);
2339 	phyaddr <<= 4;
2340 
2341 	off = addr ? reg : 0x10 + (reg % 8);
2342 
2343 	phyaddr += (off - 16) << 1;
2344 
2345 	return (rge_read_phy_ocp(sc, phyaddr));
2346 }
2347 
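/*
 * Raw PHY OCP access through RGE_PHYOCP, same busy-flag convention as
 * the CSI helpers above: writes set RGE_PHYOCP_BUSY and poll for it to
 * clear, reads poll for the chip to raise it, with the result in the
 * low 16 bits.
 */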
2348 void
2349 rge_write_phy_ocp(struct rge_softc *sc, uint16_t reg, uint16_t val)
2350 {
2351 	uint32_t tmp;
2352 	int i;
2353 
2354 	tmp = (reg >> 1) << RGE_PHYOCP_ADDR_SHIFT;
2355 	tmp |= RGE_PHYOCP_BUSY | val;
2356 	RGE_WRITE_4(sc, RGE_PHYOCP, tmp);
2357 
2358 	for (i = 0; i < RGE_TIMEOUT; i++) {
2359 		DELAY(1);
2360 		if (!(RGE_READ_4(sc, RGE_PHYOCP) & RGE_PHYOCP_BUSY))
2361 			break;
2362 	}
2363 }
2364 
2365 uint16_t
2366 rge_read_phy_ocp(struct rge_softc *sc, uint16_t reg)
2367 {
2368 	uint32_t val;
2369 	int i;
2370 
2371 	val = (reg >> 1) << RGE_PHYOCP_ADDR_SHIFT;
2372 	RGE_WRITE_4(sc, RGE_PHYOCP, val);
2373 
2374 	for (i = 0; i < RGE_TIMEOUT; i++) {
2375 		DELAY(1);
2376 		val = RGE_READ_4(sc, RGE_PHYOCP);
2377 		if (val & RGE_PHYOCP_BUSY)
2378 			break;
2379 	}
2380 
2381 	return (val & RGE_PHYOCP_DATA_MASK);
2382 }
2383 
2384 int
2385 rge_get_link_status(struct rge_softc *sc)
2386 {
2387 	return ((RGE_READ_2(sc, RGE_PHYSTAT) & RGE_PHYSTAT_LINK) ? 1 : 0);
2388 }
2389 
2390 void
2391 rge_txstart(void *arg)
2392 {
2393 	struct rge_softc *sc = arg;
2394 
2395 	RGE_WRITE_2(sc, RGE_TXSTART, RGE_TXSTART_START);
2396 }
2397 
2398 void
2399 rge_tick(void *arg)
2400 {
2401 	struct rge_softc *sc = arg;
2402 	int s;
2403 
2404 	s = splnet();
2405 	rge_link_state(sc);
2406 	splx(s);
2407 
2408 	timeout_add_sec(&sc->sc_timeout, 1);
2409 }
2410 
2411 void
2412 rge_link_state(struct rge_softc *sc)
2413 {
2414 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
2415 	int link = LINK_STATE_DOWN;
2416 
2417 	if (rge_get_link_status(sc))
2418 		link = LINK_STATE_UP;
2419 
2420 	if (ifp->if_link_state != link) {
2421 		ifp->if_link_state = link;
2422 		if_link_state_change(ifp);
2423 	}
2424 }
2425 
2426 #ifndef SMALL_KERNEL
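/*
 * Wake-on-LAN: refuse to arm WoL when power management is disabled in
 * the chip's configuration, reprogram the RX filter, toggle what is
 * presumably the magic-packet enable (MAC OCP 0xc0b6 bit 0, per the
 * vendor driver), and gate the whole thing with RGE_CFG5_WOL_LANWAKE
 * under the config-write unlock.
 */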
2427 int
2428 rge_wol(struct ifnet *ifp, int enable)
2429 {
2430 	struct rge_softc *sc = ifp->if_softc;
2431 
2432 	if (enable) {
2433 		if (!(RGE_READ_1(sc, RGE_CFG1) & RGE_CFG1_PM_EN)) {
2434 			printf("%s: power management is disabled, "
2435 			    "cannot do WOL\n", sc->sc_dev.dv_xname);
2436 			return (ENOTSUP);
2437 		}
2439 	}
2440 
2441 	rge_iff(sc);
2442 
2443 	if (enable)
2444 		RGE_MAC_SETBIT(sc, 0xc0b6, 0x0001);
2445 	else
2446 		RGE_MAC_CLRBIT(sc, 0xc0b6, 0x0001);
2447 
2448 	RGE_SETBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
2449 	RGE_CLRBIT_1(sc, RGE_CFG5, RGE_CFG5_WOL_LANWAKE | RGE_CFG5_WOL_UCAST |
2450 	    RGE_CFG5_WOL_MCAST | RGE_CFG5_WOL_BCAST);
2451 	RGE_CLRBIT_1(sc, RGE_CFG3, RGE_CFG3_WOL_LINK | RGE_CFG3_WOL_MAGIC);
2452 	if (enable)
2453 		RGE_SETBIT_1(sc, RGE_CFG5, RGE_CFG5_WOL_LANWAKE);
2454 	RGE_CLRBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
2455 
2456 	return (0);
2457 }
2458 
2459 void
2460 rge_wol_power(struct rge_softc *sc)
2461 {
2462 	/* Disable RXDV gate. */
2463 	RGE_CLRBIT_1(sc, RGE_PPSW, 0x08);
2464 	DELAY(2000);
2465 
2466 	RGE_SETBIT_1(sc, RGE_CFG1, RGE_CFG1_PM_EN);
2467 	RGE_SETBIT_1(sc, RGE_CFG2, RGE_CFG2_PMSTS_EN);
2468 }
2469 #endif
2470 
2471 #if NKSTAT > 0
2472 
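/*
 * "Dump tally" counter collection: the 64-bit DMA address of a
 * struct rge_stats buffer is written to the DTCCR register pair with
 * RGE_DTCCR_CMD set in the low word, and the chip clears that bit once
 * it has DMAed the counters out.
 */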
2473 #define RGE_DTCCR_CMD		(1U << 3)
2474 #define RGE_DTCCR_LO		0x10
2475 #define RGE_DTCCR_HI		0x14
2476 
2477 struct rge_kstats {
2478 	struct kstat_kv		tx_ok;
2479 	struct kstat_kv		rx_ok;
2480 	struct kstat_kv		tx_er;
2481 	struct kstat_kv		rx_er;
2482 	struct kstat_kv		miss_pkt;
2483 	struct kstat_kv		fae;
2484 	struct kstat_kv		tx_1col;
2485 	struct kstat_kv		tx_mcol;
2486 	struct kstat_kv		rx_ok_phy;
2487 	struct kstat_kv		rx_ok_brd;
2488 	struct kstat_kv		rx_ok_mul;
2489 	struct kstat_kv		tx_abt;
2490 	struct kstat_kv		tx_undrn;
2491 };
2492 
2493 static const struct rge_kstats rge_kstats_tpl = {
2494 	.tx_ok =	KSTAT_KV_UNIT_INITIALIZER("TxOk",
2495 			    KSTAT_KV_T_COUNTER64, KSTAT_KV_U_PACKETS),
2496 	.rx_ok =	KSTAT_KV_UNIT_INITIALIZER("RxOk",
2497 			    KSTAT_KV_T_COUNTER64, KSTAT_KV_U_PACKETS),
2498 	.tx_er =	KSTAT_KV_UNIT_INITIALIZER("TxEr",
2499 			    KSTAT_KV_T_COUNTER64, KSTAT_KV_U_PACKETS),
2500 	.rx_er =	KSTAT_KV_UNIT_INITIALIZER("RxEr",
2501 			    KSTAT_KV_T_COUNTER32, KSTAT_KV_U_PACKETS),
2502 	.miss_pkt =	KSTAT_KV_UNIT_INITIALIZER("MissPkt",
2503 			    KSTAT_KV_T_COUNTER16, KSTAT_KV_U_PACKETS),
2504 	.fae =		KSTAT_KV_UNIT_INITIALIZER("FAE",
2505 			    KSTAT_KV_T_COUNTER16, KSTAT_KV_U_PACKETS),
2506 	.tx_1col =	KSTAT_KV_UNIT_INITIALIZER("Tx1Col",
2507 			    KSTAT_KV_T_COUNTER32, KSTAT_KV_U_PACKETS),
2508 	.tx_mcol =	KSTAT_KV_UNIT_INITIALIZER("TxMCol",
2509 			    KSTAT_KV_T_COUNTER32, KSTAT_KV_U_PACKETS),
2510 	.rx_ok_phy =	KSTAT_KV_UNIT_INITIALIZER("RxOkPhy",
2511 			    KSTAT_KV_T_COUNTER64, KSTAT_KV_U_PACKETS),
2512 	.rx_ok_brd =	KSTAT_KV_UNIT_INITIALIZER("RxOkBrd",
2513 			    KSTAT_KV_T_COUNTER64, KSTAT_KV_U_PACKETS),
2514 	.rx_ok_mul =	KSTAT_KV_UNIT_INITIALIZER("RxOkMul",
2515 			    KSTAT_KV_T_COUNTER32, KSTAT_KV_U_PACKETS),
2516 	.tx_abt =	KSTAT_KV_UNIT_INITIALIZER("TxAbt",
2517 			    KSTAT_KV_T_COUNTER16, KSTAT_KV_U_PACKETS),
2518 	.tx_undrn =	KSTAT_KV_UNIT_INITIALIZER("TxUndrn",
2519 			    KSTAT_KV_T_COUNTER16, KSTAT_KV_U_PACKETS),
2520 };
2521 
2522 struct rge_kstat_softc {
2523 	struct rge_stats	*rge_ks_sc_stats;
2524 
2525 	bus_dmamap_t		 rge_ks_sc_map;
2526 	bus_dma_segment_t	 rge_ks_sc_seg;
2527 	int			 rge_ks_sc_nsegs;
2528 
2529 	struct rwlock		 rge_ks_sc_rwl;
2530 };
2531 
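/*
 * Kick a dump-tally cycle and wait (up to roughly 10ms) for the chip to
 * clear RGE_DTCCR_CMD, syncing the DMA buffer around the hardware
 * access; the actual copy-out happens in rge_kstat_copy().
 */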
2532 static int
2533 rge_kstat_read(struct kstat *ks)
2534 {
2535 	struct rge_softc *sc = ks->ks_softc;
2536 	struct rge_kstat_softc *rge_ks_sc = ks->ks_ptr;
2537 	bus_dmamap_t map;
2538 	uint64_t cmd;
2539 	uint32_t reg;
2540 	uint8_t command;
2541 	int tmo;
2542 
2543 	command = RGE_READ_1(sc, RGE_CMD);
2544 	if (!ISSET(command, RGE_CMD_RXENB) || command == 0xff)
2545 		return (ENETDOWN);
2546 
2547 	map = rge_ks_sc->rge_ks_sc_map;
2548 	cmd = map->dm_segs[0].ds_addr | RGE_DTCCR_CMD;
2549 
2550 	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
2551 	    BUS_DMASYNC_PREREAD);
2552 
2553 	RGE_WRITE_4(sc, RGE_DTCCR_HI, cmd >> 32);
2554 	bus_space_barrier(sc->rge_btag, sc->rge_bhandle, RGE_DTCCR_HI, 8,
2555 	    BUS_SPACE_BARRIER_WRITE);
2556 	RGE_WRITE_4(sc, RGE_DTCCR_LO, cmd);
2557 	bus_space_barrier(sc->rge_btag, sc->rge_bhandle, RGE_DTCCR_LO, 4,
2558 	    BUS_SPACE_BARRIER_READ|BUS_SPACE_BARRIER_WRITE);
2559 
2560 	tmo = 1000;
2561 	do {
2562 		reg = RGE_READ_4(sc, RGE_DTCCR_LO);
2563 		if (!ISSET(reg, RGE_DTCCR_CMD))
2564 			break;
2565 
2566 		delay(10);
2567 		bus_space_barrier(sc->rge_btag, sc->rge_bhandle,
2568 		    RGE_DTCCR_LO, 4, BUS_SPACE_BARRIER_READ);
2569 	} while (--tmo);
2570 
2571 	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
2572 	    BUS_DMASYNC_POSTREAD);
2573 
2574 	if (ISSET(reg, RGE_DTCCR_CMD))
2575 		return (EIO);
2576 
2577 	nanouptime(&ks->ks_updated);
2578 
2579 	return (0);
2580 }
2581 
2582 static int
2583 rge_kstat_copy(struct kstat *ks, void *dst)
2584 {
2585 	struct rge_kstat_softc *rge_ks_sc = ks->ks_ptr;
2586 	struct rge_stats *rs = rge_ks_sc->rge_ks_sc_stats;
2587 	struct rge_kstats *kvs = dst;
2588 
2589 	*kvs = rge_kstats_tpl;
2590 	kstat_kv_u64(&kvs->tx_ok) = lemtoh64(&rs->rge_tx_ok);
2591 	kstat_kv_u64(&kvs->rx_ok) = lemtoh64(&rs->rge_rx_ok);
2592 	kstat_kv_u64(&kvs->tx_er) = lemtoh64(&rs->rge_tx_er);
2593 	kstat_kv_u32(&kvs->rx_er) = lemtoh32(&rs->rge_rx_er);
2594 	kstat_kv_u16(&kvs->miss_pkt) = lemtoh16(&rs->rge_miss_pkt);
2595 	kstat_kv_u16(&kvs->fae) = lemtoh16(&rs->rge_fae);
2596 	kstat_kv_u32(&kvs->tx_1col) = lemtoh32(&rs->rge_tx_1col);
2597 	kstat_kv_u32(&kvs->tx_mcol) = lemtoh32(&rs->rge_tx_mcol);
2598 	kstat_kv_u64(&kvs->rx_ok_phy) = lemtoh64(&rs->rge_rx_ok_phy);
2599 	kstat_kv_u64(&kvs->rx_ok_brd) = lemtoh64(&rs->rge_rx_ok_brd);
2600 	kstat_kv_u32(&kvs->rx_ok_mul) = lemtoh32(&rs->rge_rx_ok_mul);
2601 	kstat_kv_u16(&kvs->tx_abt) = lemtoh16(&rs->rge_tx_abt);
2602 	kstat_kv_u16(&kvs->tx_undrn) = lemtoh16(&rs->rge_tx_undrn);
2603 
2604 	return (0);
2605 }
2606 
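/*
 * Attach the "re-stats" kstat: the usual bus_dma four-step
 * (create/alloc/map/load) sets up the counter buffer, unwound in
 * reverse through the labels below if any step or kstat_create()
 * fails.
 */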
2607 void
2608 rge_kstat_attach(struct rge_softc *sc)
2609 {
2610 	struct rge_kstat_softc *rge_ks_sc;
2611 	struct kstat *ks;
2612 
2613 	rge_ks_sc = malloc(sizeof(*rge_ks_sc), M_DEVBUF, M_NOWAIT);
2614 	if (rge_ks_sc == NULL) {
2615 		printf("%s: cannot allocate kstat softc\n",
2616 		    sc->sc_dev.dv_xname);
2617 		return;
2618 	}
2619 
2620 	if (bus_dmamap_create(sc->sc_dmat,
2621 	    sizeof(struct rge_stats), 1, sizeof(struct rge_stats), 0,
2622 	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT,
2623 	    &rge_ks_sc->rge_ks_sc_map) != 0) {
2624 		printf("%s: cannot create counter dma memory map\n",
2625 		    sc->sc_dev.dv_xname);
2626 		goto free;
2627 	}
2628 
2629 	if (bus_dmamem_alloc(sc->sc_dmat,
2630 	    sizeof(struct rge_stats), RGE_STATS_ALIGNMENT, 0,
2631 	    &rge_ks_sc->rge_ks_sc_seg, 1, &rge_ks_sc->rge_ks_sc_nsegs,
2632 	    BUS_DMA_NOWAIT | BUS_DMA_ZERO) != 0) {
2633 		printf("%s: cannot allocate counter dma memory\n",
2634 		    sc->sc_dev.dv_xname);
2635 		goto destroy;
2636 	}
2637 
2638 	if (bus_dmamem_map(sc->sc_dmat,
2639 	    &rge_ks_sc->rge_ks_sc_seg, rge_ks_sc->rge_ks_sc_nsegs,
2640 	    sizeof(struct rge_stats), (caddr_t *)&rge_ks_sc->rge_ks_sc_stats,
2641 	    BUS_DMA_NOWAIT) != 0) {
2642 		printf("%s: cannot map counter dma memory\n",
2643 		    sc->sc_dev.dv_xname);
2644 		goto freedma;
2645 	}
2646 
2647 	if (bus_dmamap_load(sc->sc_dmat, rge_ks_sc->rge_ks_sc_map,
2648 	    (caddr_t)rge_ks_sc->rge_ks_sc_stats, sizeof(struct rge_stats),
2649 	    NULL, BUS_DMA_NOWAIT) != 0) {
2650 		printf("%s: cannot load counter dma memory\n",
2651 		    sc->sc_dev.dv_xname);
2652 		goto unmap;
2653 	}
2654 
2655 	ks = kstat_create(sc->sc_dev.dv_xname, 0, "re-stats", 0,
2656 	    KSTAT_T_KV, 0);
2657 	if (ks == NULL) {
2658 		printf("%s: cannot create re-stats kstat\n",
2659 		    sc->sc_dev.dv_xname);
2660 		goto unload;
2661 	}
2662 
2663 	ks->ks_datalen = sizeof(rge_kstats_tpl);
2664 
2665 	rw_init(&rge_ks_sc->rge_ks_sc_rwl, "rgestats");
2666 	kstat_set_wlock(ks, &rge_ks_sc->rge_ks_sc_rwl);
2667 	ks->ks_softc = sc;
2668 	ks->ks_ptr = rge_ks_sc;
2669 	ks->ks_read = rge_kstat_read;
2670 	ks->ks_copy = rge_kstat_copy;
2671 
2672 	kstat_install(ks);
2673 
2674 	sc->sc_kstat = ks;
2675 
2676 	return;
2677 
2678 unload:
2679 	bus_dmamap_unload(sc->sc_dmat, rge_ks_sc->rge_ks_sc_map);
2680 unmap:
2681 	bus_dmamem_unmap(sc->sc_dmat,
2682 	    (caddr_t)rge_ks_sc->rge_ks_sc_stats, sizeof(struct rge_stats));
2683 freedma:
2684 	bus_dmamem_free(sc->sc_dmat, &rge_ks_sc->rge_ks_sc_seg, 1);
2685 destroy:
2686 	bus_dmamap_destroy(sc->sc_dmat, rge_ks_sc->rge_ks_sc_map);
2687 free:
2688 	free(rge_ks_sc, M_DEVBUF, sizeof(*rge_ks_sc));
2689 }
2690 #endif /* NKSTAT > 0 */
2691