1 /*	$OpenBSD: if_rge.c,v 1.35 2024/08/31 16:23:09 deraadt Exp $	*/
2 
3 /*
4  * Copyright (c) 2019, 2020, 2023, 2024
5  *	Kevin Lo <kevlo@openbsd.org>
6  *
7  * Permission to use, copy, modify, and distribute this software for any
8  * purpose with or without fee is hereby granted, provided that the above
9  * copyright notice and this permission notice appear in all copies.
10  *
11  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include "bpfilter.h"
21 #include "vlan.h"
22 #include "kstat.h"
23 
24 #include <sys/param.h>
25 #include <sys/systm.h>
26 #include <sys/sockio.h>
27 #include <sys/mbuf.h>
28 #include <sys/malloc.h>
29 #include <sys/device.h>
30 #include <sys/endian.h>
31 
32 #include <net/if.h>
33 #include <net/if_media.h>
34 
35 #include <netinet/in.h>
36 #include <netinet/if_ether.h>
37 
38 #if NBPFILTER > 0
39 #include <net/bpf.h>
40 #endif
41 
42 #if NKSTAT > 0
43 #include <sys/kstat.h>
44 #endif
45 
46 #include <machine/bus.h>
47 #include <machine/intr.h>
48 
49 #include <dev/mii/mii.h>
50 
51 #include <dev/pci/pcivar.h>
52 #include <dev/pci/pcireg.h>
53 #include <dev/pci/pcidevs.h>
54 
55 #include <dev/pci/if_rgereg.h>
56 
57 #ifdef RGE_DEBUG
58 #define DPRINTF(x)	do { if (rge_debug > 0) printf x; } while (0)
59 int rge_debug = 0;
60 #else
61 #define DPRINTF(x)
62 #endif
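/*
 * Note: DPRINTF() expands its argument list into a bare printf() call,
 * so call sites need double parentheses, e.g. (a made-up example):
 *	DPRINTF(("%s: link state changed\n", sc->sc_dev.dv_xname));
 */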
63 
64 int		rge_match(struct device *, void *, void *);
65 void		rge_attach(struct device *, struct device *, void *);
66 int		rge_activate(struct device *, int);
67 int		rge_intr(void *);
68 int		rge_ioctl(struct ifnet *, u_long, caddr_t);
69 void		rge_start(struct ifqueue *);
70 void		rge_watchdog(struct ifnet *);
71 void		rge_init(struct ifnet *);
72 void		rge_stop(struct ifnet *);
73 int		rge_ifmedia_upd(struct ifnet *);
74 void		rge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
75 int		rge_allocmem(struct rge_softc *);
76 int		rge_newbuf(struct rge_queues *);
77 void		rge_rx_list_init(struct rge_queues *);
78 void		rge_tx_list_init(struct rge_queues *);
79 void		rge_fill_rx_ring(struct rge_queues *);
80 int		rge_rxeof(struct rge_queues *);
81 int		rge_txeof(struct rge_queues *);
82 void		rge_reset(struct rge_softc *);
83 void		rge_iff(struct rge_softc *);
84 void		rge_chipinit(struct rge_softc *);
85 void		rge_set_phy_power(struct rge_softc *, int);
86 void		rge_ephy_config(struct rge_softc *);
87 void		rge_ephy_config_mac_cfg3(struct rge_softc *);
88 void		rge_ephy_config_mac_cfg5(struct rge_softc *);
89 int		rge_phy_config(struct rge_softc *);
90 void		rge_phy_config_mac_cfg2_8126(struct rge_softc *);
91 void		rge_phy_config_mac_cfg3(struct rge_softc *);
92 void		rge_phy_config_mac_cfg5(struct rge_softc *);
93 void		rge_phy_config_mcu(struct rge_softc *, uint16_t);
94 void		rge_set_macaddr(struct rge_softc *, const uint8_t *);
95 void		rge_get_macaddr(struct rge_softc *, uint8_t *);
96 void		rge_hw_init(struct rge_softc *);
97 void		rge_hw_reset(struct rge_softc *);
98 void		rge_disable_phy_ocp_pwrsave(struct rge_softc *);
99 void		rge_patch_phy_mcu(struct rge_softc *, int);
100 void		rge_add_media_types(struct rge_softc *);
101 void		rge_config_imtype(struct rge_softc *, int);
102 void		rge_disable_aspm_clkreq(struct rge_softc *);
103 void		rge_disable_hw_im(struct rge_softc *);
104 void		rge_disable_sim_im(struct rge_softc *);
105 void		rge_setup_sim_im(struct rge_softc *);
106 void		rge_setup_intr(struct rge_softc *, int);
107 void		rge_switch_mcu_ram_page(struct rge_softc *, int);
108 void		rge_exit_oob(struct rge_softc *);
109 void		rge_write_csi(struct rge_softc *, uint32_t, uint32_t);
110 uint32_t	rge_read_csi(struct rge_softc *, uint32_t);
111 void		rge_write_mac_ocp(struct rge_softc *, uint16_t, uint16_t);
112 uint16_t	rge_read_mac_ocp(struct rge_softc *, uint16_t);
113 void		rge_write_ephy(struct rge_softc *, uint16_t, uint16_t);
114 uint16_t	rge_read_ephy(struct rge_softc *, uint16_t);
115 void		rge_write_phy(struct rge_softc *, uint16_t, uint16_t, uint16_t);
116 uint16_t	rge_read_phy(struct rge_softc *, uint16_t, uint16_t);
117 void		rge_write_phy_ocp(struct rge_softc *, uint16_t, uint16_t);
118 uint16_t	rge_read_phy_ocp(struct rge_softc *, uint16_t);
119 int		rge_get_link_status(struct rge_softc *);
120 void		rge_txstart(void *);
121 void		rge_tick(void *);
122 void		rge_link_state(struct rge_softc *);
123 #ifndef SMALL_KERNEL
124 int		rge_wol(struct ifnet *, int);
125 void		rge_wol_power(struct rge_softc *);
126 #endif
127 
128 #if NKSTAT > 0
129 void		rge_kstat_attach(struct rge_softc *);
130 #endif
131 
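/*
 * MCU patch tables: flat { reg, val } lists, expanded from macros in
 * if_rgereg.h, that are replayed into the MAC's OCP register space to
 * patch the on-chip microcontroller for each supported MAC variant.
 */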
132 static const struct {
133 	uint16_t reg;
134 	uint16_t val;
135 }  rtl8125_mac_cfg3_mcu[] = {
136 	RTL8125_MAC_CFG3_MCU
137 }, rtl8125_mac_cfg5_mcu[] = {
138 	RTL8125_MAC_CFG5_MCU
139 }, rtl8126_mac_cfg2_mcu[] = {
140 	RTL8126_MAC_CFG2_MCU
141 };
142 
143 const struct cfattach rge_ca = {
144 	sizeof(struct rge_softc), rge_match, rge_attach, NULL, rge_activate
145 };
146 
147 struct cfdriver rge_cd = {
148 	NULL, "rge", DV_IFNET
149 };
150 
151 const struct pci_matchid rge_devices[] = {
152 	{ PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_E3000 },
153 	{ PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_RTL8125 },
154 	{ PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_RTL8126 }
155 };
156 
157 int
158 rge_match(struct device *parent, void *match, void *aux)
159 {
160 	return (pci_matchbyid((struct pci_attach_args *)aux, rge_devices,
161 	    nitems(rge_devices)));
162 }
163 
164 void
165 rge_attach(struct device *parent, struct device *self, void *aux)
166 {
167 	struct rge_softc *sc = (struct rge_softc *)self;
168 	struct pci_attach_args *pa = aux;
169 	pci_chipset_tag_t pc = pa->pa_pc;
170 	pci_intr_handle_t ih;
171 	const char *intrstr = NULL;
172 	struct ifnet *ifp;
173 	struct rge_queues *q;
174 	pcireg_t reg;
175 	uint32_t hwrev;
176 	uint8_t eaddr[ETHER_ADDR_LEN];
177 	int offset;
178 
179 	pci_set_powerstate(pa->pa_pc, pa->pa_tag, PCI_PMCSR_STATE_D0);
180 
181 	/*
182 	 * Map control/status registers.
183 	 */
184 	if (pci_mapreg_map(pa, RGE_PCI_BAR2, PCI_MAPREG_TYPE_MEM |
185 	    PCI_MAPREG_MEM_TYPE_64BIT, 0, &sc->rge_btag, &sc->rge_bhandle,
186 	    NULL, &sc->rge_bsize, 0)) {
187 		if (pci_mapreg_map(pa, RGE_PCI_BAR1, PCI_MAPREG_TYPE_MEM |
188 		    PCI_MAPREG_MEM_TYPE_32BIT, 0, &sc->rge_btag,
189 		    &sc->rge_bhandle, NULL, &sc->rge_bsize, 0)) {
190 			if (pci_mapreg_map(pa, RGE_PCI_BAR0, PCI_MAPREG_TYPE_IO,
191 			    0, &sc->rge_btag, &sc->rge_bhandle, NULL,
192 			    &sc->rge_bsize, 0)) {
193 				printf(": can't map mem or i/o space\n");
194 				return;
195 			}
196 		}
197 	}
198 
199 	q = malloc(sizeof(struct rge_queues), M_DEVBUF, M_NOWAIT | M_ZERO);
200 	if (q == NULL) {
201 		printf(": unable to allocate queue memory\n");
202 		return;
203 	}
204 	q->q_sc = sc;
205 	q->q_index = 0;
206 
207 	sc->sc_queues = q;
208 	sc->sc_nqueues = 1;
209 
210 	/*
211 	 * Allocate interrupt.
212 	 */
213 	if (pci_intr_map_msix(pa, 0, &ih) == 0 ||
214 	    pci_intr_map_msi(pa, &ih) == 0)
215 		sc->rge_flags |= RGE_FLAG_MSI;
216 	else if (pci_intr_map(pa, &ih) != 0) {
217 		printf(": couldn't map interrupt\n");
218 		return;
219 	}
220 	intrstr = pci_intr_string(pc, ih);
221 	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET | IPL_MPSAFE, rge_intr,
222 	    sc, sc->sc_dev.dv_xname);
223 	if (sc->sc_ih == NULL) {
224 		printf(": couldn't establish interrupt");
225 		if (intrstr != NULL)
226 			printf(" at %s", intrstr);
227 		printf("\n");
228 		return;
229 	}
230 	printf(": %s", intrstr);
231 
232 	sc->sc_dmat = pa->pa_dmat;
233 	sc->sc_pc = pa->pa_pc;
234 	sc->sc_tag = pa->pa_tag;
235 
236 	/* Determine hardware revision */
237 	hwrev = RGE_READ_4(sc, RGE_TXCFG) & RGE_TXCFG_HWREV;
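	/*
	 * The revision field in TXCFG identifies the chip variant:
	 * MAC_CFG3 corresponds to the RTL8125, MAC_CFG5 to the RTL8125B
	 * and MAC_CFG2_8126 to the RTL8126 (cf. the RTL8125B/RTL8126
	 * comment in rge_init()).
	 */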
238 	switch (hwrev) {
239 	case 0x60900000:
240 		sc->rge_type = MAC_CFG3;
241 		break;
242 	case 0x64100000:
243 		sc->rge_type = MAC_CFG5;
244 		break;
245 	case 0x64900000:
246 		sc->rge_type = MAC_CFG2_8126;
247 		break;
248 	default:
249 		printf(": unknown version 0x%08x\n", hwrev);
250 		return;
251 	}
252 
253 	rge_config_imtype(sc, RGE_IMTYPE_SIM);
254 
255 	/*
256 	 * PCI Express check.
257 	 */
258 	if (pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_PCIEXPRESS,
259 	    &offset, NULL)) {
260 		/* Disable PCIe ASPM and ECPM. */
261 		reg = pci_conf_read(pa->pa_pc, pa->pa_tag,
262 		    offset + PCI_PCIE_LCSR);
263 		reg &= ~(PCI_PCIE_LCSR_ASPM_L0S | PCI_PCIE_LCSR_ASPM_L1 |
264 		    PCI_PCIE_LCSR_ECPM);
265 		pci_conf_write(pa->pa_pc, pa->pa_tag, offset + PCI_PCIE_LCSR,
266 		    reg);
267 	}
268 
269 	rge_chipinit(sc);
270 
271 	rge_get_macaddr(sc, eaddr);
272 	printf(", address %s\n", ether_sprintf(eaddr));
273 
274 	memcpy(sc->sc_arpcom.ac_enaddr, eaddr, ETHER_ADDR_LEN);
275 
276 	if (rge_allocmem(sc))
277 		return;
278 
279 	ifp = &sc->sc_arpcom.ac_if;
280 	ifp->if_softc = sc;
281 	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);
282 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
283 	ifp->if_xflags = IFXF_MPSAFE;
284 	ifp->if_ioctl = rge_ioctl;
285 	ifp->if_qstart = rge_start;
286 	ifp->if_watchdog = rge_watchdog;
287 	ifq_init_maxlen(&ifp->if_snd, RGE_TX_LIST_CNT - 1);
288 	ifp->if_hardmtu = RGE_JUMBO_MTU;
289 
290 	ifp->if_capabilities = IFCAP_VLAN_MTU | IFCAP_CSUM_IPv4 |
291 	    IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;
292 
293 #if NVLAN > 0
294 	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
295 #endif
296 
297 #ifndef SMALL_KERNEL
298 	ifp->if_capabilities |= IFCAP_WOL;
299 	ifp->if_wol = rge_wol;
300 	rge_wol(ifp, 0);
301 #endif
302 	timeout_set(&sc->sc_timeout, rge_tick, sc);
303 	task_set(&sc->sc_task, rge_txstart, sc);
304 
305 	/* Initialize ifmedia structures. */
306 	ifmedia_init(&sc->sc_media, IFM_IMASK, rge_ifmedia_upd,
307 	    rge_ifmedia_sts);
308 	rge_add_media_types(sc);
309 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
310 	ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);
311 	sc->sc_media.ifm_media = sc->sc_media.ifm_cur->ifm_media;
312 
313 	if_attach(ifp);
314 	ether_ifattach(ifp);
315 
316 #if NKSTAT > 0
317 	rge_kstat_attach(sc);
318 #endif
319 }
320 
321 int
322 rge_activate(struct device *self, int act)
323 {
324 #ifndef SMALL_KERNEL
325 	struct rge_softc *sc = (struct rge_softc *)self;
326 #endif
327 
328 	switch (act) {
329 	case DVACT_POWERDOWN:
330 #ifndef SMALL_KERNEL
331 		rge_wol_power(sc);
332 #endif
333 		break;
334 	}
335 	return (0);
336 }
337 
338 int
339 rge_intr(void *arg)
340 {
341 	struct rge_softc *sc = arg;
342 	struct rge_queues *q = sc->sc_queues;
343 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
344 	uint32_t status;
345 	int claimed = 0, rv;
346 
347 	if (!(ifp->if_flags & IFF_RUNNING))
348 		return (0);
349 
350 	/* Disable interrupts. */
351 	RGE_WRITE_4(sc, RGE_IMR, 0);
352 
353 	if (!(sc->rge_flags & RGE_FLAG_MSI)) {
354 		if ((RGE_READ_4(sc, RGE_ISR) & sc->rge_intrs) == 0)
355 			return (0);
356 	}
357 
358 	status = RGE_READ_4(sc, RGE_ISR);
359 	if (status)
360 		RGE_WRITE_4(sc, RGE_ISR, status);
361 
362 	if (status & RGE_ISR_PCS_TIMEOUT)
363 		claimed = 1;
364 
365 	rv = 0;
366 	if (status & sc->rge_intrs) {
367 		rv |= rge_rxeof(q);
368 		rv |= rge_txeof(q);
369 
370 		if (status & RGE_ISR_SYSTEM_ERR) {
371 			KERNEL_LOCK();
372 			rge_init(ifp);
373 			KERNEL_UNLOCK();
374 		}
375 		claimed = 1;
376 	}
377 
378 	if (sc->rge_timerintr) {
379 		if (!rv) {
380 			/*
381 			 * Nothing needs to be processed; fall back
382 			 * to plain TX/RX interrupts.
383 			 */
384 			rge_setup_intr(sc, RGE_IMTYPE_NONE);
385 
386 			/*
387 			 * Re-collect, mainly to avoid the possible
388 			 * race introduced by changing interrupt
389 			 * masks.
390 			 */
391 			rge_rxeof(q);
392 			rge_txeof(q);
393 		} else
394 			RGE_WRITE_4(sc, RGE_TIMERCNT, 1);
395 	} else if (rv) {
396 		/*
397 		 * Assume that using simulated interrupt moderation
398 		 * (hardware timer based) could reduce the interrupt
399 		 * rate.
400 		 */
401 		rge_setup_intr(sc, RGE_IMTYPE_SIM);
402 	}
403 
404 	RGE_WRITE_4(sc, RGE_IMR, sc->rge_intrs);
405 
406 	return (claimed);
407 }
408 
409 static inline void
410 rge_tx_list_sync(struct rge_softc *sc, struct rge_queues *q,
411     unsigned int idx, unsigned int len, int ops)
412 {
413 	bus_dmamap_sync(sc->sc_dmat, q->q_tx.rge_tx_list_map,
414 	    idx * sizeof(struct rge_tx_desc), len * sizeof(struct rge_tx_desc),
415 	    ops);
416 }
417 
418 static int
419 rge_encap(struct ifnet *ifp, struct rge_queues *q, struct mbuf *m, int idx)
420 {
421 	struct rge_softc *sc = q->q_sc;
422 	struct rge_tx_desc *d = NULL;
423 	struct rge_txq *txq;
424 	bus_dmamap_t txmap;
425 	uint32_t cmdsts, cflags = 0;
426 	int cur, error, i;
427 #if NBPFILTER > 0
428 	caddr_t if_bpf;
429 #endif
430 
431 	txq = &q->q_tx.rge_txq[idx];
432 	txmap = txq->txq_dmamap;
433 
434 	error = bus_dmamap_load_mbuf(sc->sc_dmat, txmap, m, BUS_DMA_NOWAIT);
435 	switch (error) {
436 	case 0:
437 		break;
438 	case EFBIG: /* mbuf chain is too fragmented */
439 		if (m_defrag(m, M_DONTWAIT) == 0 &&
440 		    bus_dmamap_load_mbuf(sc->sc_dmat, txmap, m,
441 		    BUS_DMA_NOWAIT) == 0)
442 			break;
443 
444 		/* FALLTHROUGH */
445 	default:
446 		return (0);
447 	}
448 
449 #if NBPFILTER > 0
450 	if_bpf = READ_ONCE(ifp->if_bpf);
451 	if (if_bpf)
452 		bpf_mtap_ether(if_bpf, m, BPF_DIRECTION_OUT);
453 #endif
454 
455 	bus_dmamap_sync(sc->sc_dmat, txmap, 0, txmap->dm_mapsize,
456 	    BUS_DMASYNC_PREWRITE);
457 
458 	/*
459 	 * Set RGE_TDEXTSTS_IPCSUM if any checksum offloading is requested.
460 	 * Otherwise, RGE_TDEXTSTS_TCPCSUM / RGE_TDEXTSTS_UDPCSUM do not
461 	 * take effect.
462 	 */
463 	if ((m->m_pkthdr.csum_flags &
464 	    (M_IPV4_CSUM_OUT | M_TCP_CSUM_OUT | M_UDP_CSUM_OUT)) != 0) {
465 		cflags |= RGE_TDEXTSTS_IPCSUM;
466 		if (m->m_pkthdr.csum_flags & M_TCP_CSUM_OUT)
467 			cflags |= RGE_TDEXTSTS_TCPCSUM;
468 		if (m->m_pkthdr.csum_flags & M_UDP_CSUM_OUT)
469 			cflags |= RGE_TDEXTSTS_UDPCSUM;
470 	}
471 
472 	/* Set up hardware VLAN tagging. */
473 #if NVLAN > 0
474 	if (m->m_flags & M_VLANTAG)
475 		cflags |= swap16(m->m_pkthdr.ether_vtag) | RGE_TDEXTSTS_VTAG;
476 #endif
477 
478 	cur = idx;
479 	for (i = 1; i < txmap->dm_nsegs; i++) {
480 		cur = RGE_NEXT_TX_DESC(cur);
481 
482 		cmdsts = RGE_TDCMDSTS_OWN;
483 		cmdsts |= txmap->dm_segs[i].ds_len;
484 
485 		if (cur == RGE_TX_LIST_CNT - 1)
486 			cmdsts |= RGE_TDCMDSTS_EOR;
487 		if (i == txmap->dm_nsegs - 1)
488 			cmdsts |= RGE_TDCMDSTS_EOF;
489 
490 		d = &q->q_tx.rge_tx_list[cur];
491 		d->rge_cmdsts = htole32(cmdsts);
492 		d->rge_extsts = htole32(cflags);
493 		d->rge_addr = htole64(txmap->dm_segs[i].ds_addr);
494 	}
495 
496 	/* Update info of TX queue and descriptors. */
497 	txq->txq_mbuf = m;
498 	txq->txq_descidx = cur;
499 
500 	cmdsts = RGE_TDCMDSTS_SOF;
501 	cmdsts |= txmap->dm_segs[0].ds_len;
502 
503 	if (idx == RGE_TX_LIST_CNT - 1)
504 		cmdsts |= RGE_TDCMDSTS_EOR;
505 	if (txmap->dm_nsegs == 1)
506 		cmdsts |= RGE_TDCMDSTS_EOF;
507 
508 	d = &q->q_tx.rge_tx_list[idx];
509 	d->rge_cmdsts = htole32(cmdsts);
510 	d->rge_extsts = htole32(cflags);
511 	d->rge_addr = htole64(txmap->dm_segs[0].ds_addr);
512 
513 	if (cur >= idx) {
514 		rge_tx_list_sync(sc, q, idx, txmap->dm_nsegs,
515 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
516 	} else {
517 		rge_tx_list_sync(sc, q, idx, RGE_TX_LIST_CNT - idx,
518 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
519 		rge_tx_list_sync(sc, q, 0, cur + 1,
520 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
521 	}
522 
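	/*
	 * Only now is OWN set on the first (SOF) descriptor: the rest of
	 * the chain was published above, so the chip can never start on
	 * a half-built chain.  The POSTWRITE/PREWRITE pair below brackets
	 * this final store.
	 */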
523 	/* Transfer ownership of packet to the chip. */
524 	cmdsts |= RGE_TDCMDSTS_OWN;
525 	rge_tx_list_sync(sc, q, idx, 1, BUS_DMASYNC_POSTWRITE);
526 	d->rge_cmdsts = htole32(cmdsts);
527 	rge_tx_list_sync(sc, q, idx, 1, BUS_DMASYNC_PREWRITE);
528 
529 	return (txmap->dm_nsegs);
530 }
531 
532 int
533 rge_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
534 {
535 	struct rge_softc *sc = ifp->if_softc;
536 	struct ifreq *ifr = (struct ifreq *)data;
537 	int s, error = 0;
538 
539 	s = splnet();
540 
541 	switch (cmd) {
542 	case SIOCSIFADDR:
543 		ifp->if_flags |= IFF_UP;
544 		if (!(ifp->if_flags & IFF_RUNNING))
545 			rge_init(ifp);
546 		break;
547 	case SIOCSIFFLAGS:
548 		if (ifp->if_flags & IFF_UP) {
549 			if (ifp->if_flags & IFF_RUNNING)
550 				error = ENETRESET;
551 			else
552 				rge_init(ifp);
553 		} else {
554 			if (ifp->if_flags & IFF_RUNNING)
555 				rge_stop(ifp);
556 		}
557 		break;
558 	case SIOCGIFMEDIA:
559 	case SIOCSIFMEDIA:
560 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
561 		break;
562 	case SIOCGIFRXR:
563 		error = if_rxr_ioctl((struct if_rxrinfo *)ifr->ifr_data,
564 		    NULL, MCLBYTES, &sc->sc_queues->q_rx.rge_rx_ring);
565 		break;
566 	default:
567 		error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data);
568 	}
569 
570 	if (error == ENETRESET) {
571 		if (ifp->if_flags & IFF_RUNNING)
572 			rge_iff(sc);
573 		error = 0;
574 	}
575 
576 	splx(s);
577 	return (error);
578 }
579 
580 void
581 rge_start(struct ifqueue *ifq)
582 {
583 	struct ifnet *ifp = ifq->ifq_if;
584 	struct rge_softc *sc = ifp->if_softc;
585 	struct rge_queues *q = sc->sc_queues;
586 	struct mbuf *m;
587 	int free, idx, used;
588 	int queued = 0;
589 
590 	if (!LINK_STATE_IS_UP(ifp->if_link_state)) {
591 		ifq_purge(ifq);
592 		return;
593 	}
594 
595 	/* Calculate free space. */
596 	idx = q->q_tx.rge_txq_prodidx;
597 	free = q->q_tx.rge_txq_considx;
598 	if (free <= idx)
599 		free += RGE_TX_LIST_CNT;
600 	free -= idx;
601 
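	/*
	 * free is the ring distance from the producer back to the
	 * consumer; when the two indexes are equal the whole ring is
	 * considered free.  The RGE_TX_NSEGS + 2 test below keeps slack
	 * for a maximally fragmented packet so the producer can never
	 * catch up with the consumer.
	 */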
602 	for (;;) {
603 		if (free < RGE_TX_NSEGS + 2) {
604 			ifq_set_oactive(&ifp->if_snd);
605 			break;
606 		}
607 
608 		m = ifq_dequeue(ifq);
609 		if (m == NULL)
610 			break;
611 
612 		used = rge_encap(ifp, q, m, idx);
613 		if (used == 0) {
614 			m_freem(m);
615 			continue;
616 		}
617 
618 		KASSERT(used < free);
619 		free -= used;
620 
621 		idx += used;
622 		if (idx >= RGE_TX_LIST_CNT)
623 			idx -= RGE_TX_LIST_CNT;
624 
625 		queued++;
626 	}
627 
628 	if (queued == 0)
629 		return;
630 
631 	/* Set a timeout in case the chip goes out to lunch. */
632 	ifp->if_timer = 5;
633 
634 	q->q_tx.rge_txq_prodidx = idx;
635 	ifq_serialize(ifq, &sc->sc_task);
636 }
637 
638 void
639 rge_watchdog(struct ifnet *ifp)
640 {
641 	struct rge_softc *sc = ifp->if_softc;
642 
643 	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);
644 	ifp->if_oerrors++;
645 
646 	rge_init(ifp);
647 }
648 
649 void
650 rge_init(struct ifnet *ifp)
651 {
652 	struct rge_softc *sc = ifp->if_softc;
653 	struct rge_queues *q = sc->sc_queues;
654 	uint32_t rxconf, val;
655 	int i, num_miti;
656 
657 	rge_stop(ifp);
658 
659 	/* Set MAC address. */
660 	rge_set_macaddr(sc, sc->sc_arpcom.ac_enaddr);
661 
662 	/* Initialize the RX and TX descriptor lists. */
663 	rge_rx_list_init(q);
664 	rge_tx_list_init(q);
665 
666 	rge_chipinit(sc);
667 
668 	if (rge_phy_config(sc))
669 		return;
670 
671 	RGE_SETBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
672 
673 	RGE_CLRBIT_1(sc, 0xf1, 0x80);
674 	rge_disable_aspm_clkreq(sc);
675 	RGE_WRITE_2(sc, RGE_EEE_TXIDLE_TIMER,
676 	    RGE_JUMBO_MTU + ETHER_HDR_LEN + 32);
677 
678 	/* Load the addresses of the RX and TX lists into the chip. */
679 	RGE_WRITE_4(sc, RGE_RXDESC_ADDR_LO,
680 	    RGE_ADDR_LO(q->q_rx.rge_rx_list_map->dm_segs[0].ds_addr));
681 	RGE_WRITE_4(sc, RGE_RXDESC_ADDR_HI,
682 	    RGE_ADDR_HI(q->q_rx.rge_rx_list_map->dm_segs[0].ds_addr));
683 	RGE_WRITE_4(sc, RGE_TXDESC_ADDR_LO,
684 	    RGE_ADDR_LO(q->q_tx.rge_tx_list_map->dm_segs[0].ds_addr));
685 	RGE_WRITE_4(sc, RGE_TXDESC_ADDR_HI,
686 	    RGE_ADDR_HI(q->q_tx.rge_tx_list_map->dm_segs[0].ds_addr));
687 
688 	/* Set the initial RX and TX configurations. */
689 	if (sc->rge_type == MAC_CFG3)
690 		rxconf = RGE_RXCFG_CONFIG;
691 	else if (sc->rge_type == MAC_CFG5)
692 		rxconf = RGE_RXCFG_CONFIG_8125B;
693 	else
694 		rxconf = RGE_RXCFG_CONFIG_8126;
695 	RGE_WRITE_4(sc, RGE_RXCFG, rxconf);
696 	RGE_WRITE_4(sc, RGE_TXCFG, RGE_TXCFG_CONFIG);
697 
698 	val = rge_read_csi(sc, 0x70c) & ~0xff000000;
699 	rge_write_csi(sc, 0x70c, val | 0x27000000);
700 
701 	if (sc->rge_type == MAC_CFG2_8126) {
702 		/* Disable L1 timeout. */
703 		val = rge_read_csi(sc, 0x890) & ~0x00000001;
704 		rge_write_csi(sc, 0x890, val);
705 	} else
706 		RGE_WRITE_2(sc, 0x0382, 0x221b);
707 
708 	RGE_WRITE_1(sc, RGE_RSS_CTRL, 0);
709 
710 	val = RGE_READ_2(sc, RGE_RXQUEUE_CTRL) & ~0x001c;
711 	RGE_WRITE_2(sc, RGE_RXQUEUE_CTRL, val | (fls(sc->sc_nqueues) - 1) << 2);
712 
713 	RGE_CLRBIT_1(sc, RGE_CFG1, RGE_CFG1_SPEED_DOWN);
714 
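	/*
	 * The MAC OCP and CSI writes below use undocumented registers;
	 * the addresses and values mirror Realtek's reference driver and
	 * are applied verbatim.
	 */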
715 	rge_write_mac_ocp(sc, 0xc140, 0xffff);
716 	rge_write_mac_ocp(sc, 0xc142, 0xffff);
717 
718 	RGE_MAC_SETBIT(sc, 0xeb58, 0x0001);
719 
720 	if (sc->rge_type == MAC_CFG2_8126)
721 		RGE_CLRBIT_1(sc, 0xd8, 0x02);
722 
723 	val = rge_read_mac_ocp(sc, 0xe614) & ~0x0700;
724 	if (sc->rge_type == MAC_CFG3)
725 		rge_write_mac_ocp(sc, 0xe614, val | 0x0300);
726 	else if (sc->rge_type == MAC_CFG5)
727 		rge_write_mac_ocp(sc, 0xe614, val | 0x0200);
728 	else
729 		rge_write_mac_ocp(sc, 0xe614, val | 0x0400);
730 
731 	val = rge_read_mac_ocp(sc, 0xe63e) & ~0x0c00;
732 	rge_write_mac_ocp(sc, 0xe63e, val |
733 	    ((fls(sc->sc_nqueues) - 1) & 0x03) << 10);
734 
735 	RGE_MAC_CLRBIT(sc, 0xe63e, 0x0030);
736 	if (sc->rge_type != MAC_CFG5)
737 		RGE_MAC_SETBIT(sc, 0xe63e, 0x0020);
738 
739 	RGE_MAC_CLRBIT(sc, 0xc0b4, 0x0001);
740 	RGE_MAC_SETBIT(sc, 0xc0b4, 0x0001);
741 
742 	RGE_MAC_SETBIT(sc, 0xc0b4, 0x000c);
743 
744 	val = rge_read_mac_ocp(sc, 0xeb6a) & ~0x00ff;
745 	rge_write_mac_ocp(sc, 0xeb6a, val | 0x0033);
746 
747 	val = rge_read_mac_ocp(sc, 0xeb50) & ~0x03e0;
748 	rge_write_mac_ocp(sc, 0xeb50, val | 0x0040);
749 
750 	RGE_MAC_CLRBIT(sc, 0xe056, 0x00f0);
751 
752 	RGE_WRITE_1(sc, RGE_TDFNR, 0x10);
753 
754 	RGE_MAC_CLRBIT(sc, 0xe040, 0x1000);
755 
756 	val = rge_read_mac_ocp(sc, 0xea1c) & ~0x0003;
757 	rge_write_mac_ocp(sc, 0xea1c, val | 0x0001);
758 
759 	rge_write_mac_ocp(sc, 0xe0c0, 0x4000);
760 
761 	RGE_MAC_SETBIT(sc, 0xe052, 0x0060);
762 	RGE_MAC_CLRBIT(sc, 0xe052, 0x0088);
763 
764 	val = rge_read_mac_ocp(sc, 0xd430) & ~0x0fff;
765 	rge_write_mac_ocp(sc, 0xd430, val | 0x045f);
766 
767 	RGE_SETBIT_1(sc, RGE_DLLPR, RGE_DLLPR_PFM_EN | RGE_DLLPR_TX_10M_PS_EN);
768 
769 	if (sc->rge_type == MAC_CFG3)
770 		RGE_SETBIT_1(sc, RGE_MCUCMD, 0x01);
771 
772 	/* Disable EEE plus. */
773 	RGE_MAC_CLRBIT(sc, 0xe080, 0x0002);
774 
775 	if (sc->rge_type == MAC_CFG2_8126)
776 		RGE_MAC_CLRBIT(sc, 0xea1c, 0x0304);
777 	else
778 		RGE_MAC_CLRBIT(sc, 0xea1c, 0x0004);
779 
780 	RGE_MAC_SETBIT(sc, 0xeb54, 0x0001);
781 	DELAY(1);
782 	RGE_MAC_CLRBIT(sc, 0xeb54, 0x0001);
783 
784 	RGE_CLRBIT_2(sc, 0x1880, 0x0030);
785 
786 	/* Configure the interrupt type for RTL8125B/RTL8126. */
787 	if (sc->rge_type != MAC_CFG3)
788 		RGE_CLRBIT_1(sc, RGE_INT_CFG0, RGE_INT_CFG0_EN);
789 
790 	/* Clear timer interrupts. */
791 	RGE_WRITE_4(sc, RGE_TIMERINT0, 0);
792 	RGE_WRITE_4(sc, RGE_TIMERINT1, 0);
793 	RGE_WRITE_4(sc, RGE_TIMERINT2, 0);
794 	RGE_WRITE_4(sc, RGE_TIMERINT3, 0);
795 
796 	num_miti = (sc->rge_type == MAC_CFG3) ? 64 : 32;
797 	/* Clear the interrupt moderation timers. */
798 	for (i = 0; i < num_miti; i++)
799 		RGE_WRITE_4(sc, RGE_INTMITI(i), 0);
800 
801 	if (sc->rge_type == MAC_CFG5) {
802 		RGE_CLRBIT_1(sc, RGE_INT_CFG0,
803 		    RGE_INT_CFG0_TIMEOUT_BYPASS |
804 		    RGE_INT_CFG0_MITIGATION_BYPASS);
805 		RGE_WRITE_2(sc, RGE_INT_CFG1, 0);
806 	}
807 
808 	RGE_MAC_SETBIT(sc, 0xc0ac, 0x1f80);
809 
810 	rge_write_mac_ocp(sc, 0xe098, 0xc302);
811 
812 	RGE_MAC_CLRBIT(sc, 0xe032, 0x0003);
813 	val = rge_read_csi(sc, 0x98) & ~0x0000ff00;
814 	rge_write_csi(sc, 0x98, val);
815 
816 	val = rge_read_mac_ocp(sc, 0xe092) & ~0x00ff;
817 	rge_write_mac_ocp(sc, 0xe092, val);
818 
819 	if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)
820 		RGE_SETBIT_4(sc, RGE_RXCFG, RGE_RXCFG_VLANSTRIP);
821 
822 	RGE_SETBIT_2(sc, RGE_CPLUSCMD, RGE_CPLUSCMD_RXCSUM);
823 
824 	/* Set the maximum frame size. */
825 	RGE_WRITE_2(sc, RGE_RXMAXSIZE, RGE_JUMBO_FRAMELEN);
826 
827 	/* Disable RXDV gate. */
828 	RGE_CLRBIT_1(sc, RGE_PPSW, 0x08);
829 	DELAY(2000);
830 
831 	/* Program promiscuous mode and multicast filters. */
832 	rge_iff(sc);
833 
834 	rge_disable_aspm_clkreq(sc);
835 
836 	RGE_CLRBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
837 	DELAY(10);
838 
839 	rge_ifmedia_upd(ifp);
840 
841 	/* Enable transmit and receive. */
842 	RGE_WRITE_1(sc, RGE_CMD, RGE_CMD_TXENB | RGE_CMD_RXENB);
843 
844 	/* Enable interrupts. */
845 	rge_setup_intr(sc, RGE_IMTYPE_SIM);
846 
847 	ifp->if_flags |= IFF_RUNNING;
848 	ifq_clr_oactive(&ifp->if_snd);
849 
850 	timeout_add_sec(&sc->sc_timeout, 1);
851 }
852 
853 /*
854  * Stop the adapter and free any mbufs allocated to the RX and TX lists.
855  */
856 void
857 rge_stop(struct ifnet *ifp)
858 {
859 	struct rge_softc *sc = ifp->if_softc;
860 	struct rge_queues *q = sc->sc_queues;
861 	int i;
862 
863 	timeout_del(&sc->sc_timeout);
864 
865 	ifp->if_timer = 0;
866 	ifp->if_flags &= ~IFF_RUNNING;
867 	sc->rge_timerintr = 0;
868 
869 	RGE_CLRBIT_4(sc, RGE_RXCFG, RGE_RXCFG_ALLPHYS | RGE_RXCFG_INDIV |
870 	    RGE_RXCFG_MULTI | RGE_RXCFG_BROAD | RGE_RXCFG_RUNT |
871 	    RGE_RXCFG_ERRPKT);
872 
873 	rge_hw_reset(sc);
874 
875 	RGE_MAC_CLRBIT(sc, 0xc0ac, 0x1f80);
876 
877 	intr_barrier(sc->sc_ih);
878 	ifq_barrier(&ifp->if_snd);
879 	ifq_clr_oactive(&ifp->if_snd);
880 
881 	if (q->q_rx.rge_head != NULL) {
882 		m_freem(q->q_rx.rge_head);
883 		q->q_rx.rge_head = NULL;
884 		q->q_rx.rge_tail = &q->q_rx.rge_head;
885 	}
886 
887 	/* Free the TX list buffers. */
888 	for (i = 0; i < RGE_TX_LIST_CNT; i++) {
889 		if (q->q_tx.rge_txq[i].txq_mbuf != NULL) {
890 			bus_dmamap_unload(sc->sc_dmat,
891 			    q->q_tx.rge_txq[i].txq_dmamap);
892 			m_freem(q->q_tx.rge_txq[i].txq_mbuf);
893 			q->q_tx.rge_txq[i].txq_mbuf = NULL;
894 		}
895 	}
896 
897 	/* Free the RX list buffers. */
898 	for (i = 0; i < RGE_RX_LIST_CNT; i++) {
899 		if (q->q_rx.rge_rxq[i].rxq_mbuf != NULL) {
900 			bus_dmamap_unload(sc->sc_dmat,
901 			    q->q_rx.rge_rxq[i].rxq_dmamap);
902 			m_freem(q->q_rx.rge_rxq[i].rxq_mbuf);
903 			q->q_rx.rge_rxq[i].rxq_mbuf = NULL;
904 		}
905 	}
906 }
907 
908 /*
909  * Set media options.
910  */
911 int
912 rge_ifmedia_upd(struct ifnet *ifp)
913 {
914 	struct rge_softc *sc = ifp->if_softc;
915 	struct ifmedia *ifm = &sc->sc_media;
916 	int anar, gig, val;
917 
918 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
919 		return (EINVAL);
920 
921 	/* Disable Gigabit Lite. */
922 	RGE_PHY_CLRBIT(sc, 0xa428, 0x0200);
923 	RGE_PHY_CLRBIT(sc, 0xa5ea, 0x0001);
924 	if (sc->rge_type == MAC_CFG2_8126)
925 		RGE_PHY_CLRBIT(sc, 0xa5ea, 0x0002);
926 
927 	val = rge_read_phy_ocp(sc, 0xa5d4);
928 	val &= ~RGE_ADV_2500TFDX;
929 	if (sc->rge_type == MAC_CFG2_8126)
930 		val &= ~RGE_ADV_5000TFDX;
931 
932 	anar = ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10;
933 	gig = GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX;
934 
935 	switch (IFM_SUBTYPE(ifm->ifm_media)) {
936 	case IFM_AUTO:
937 		val |= (sc->rge_type != MAC_CFG2_8126) ?
938 		    RGE_ADV_2500TFDX : (RGE_ADV_2500TFDX | RGE_ADV_5000TFDX);
939 		break;
940 	case IFM_5000_T:
941 		val |= RGE_ADV_5000TFDX;
942 		ifp->if_baudrate = IF_Gbps(5);
943 		break;
944 	case IFM_2500_T:
945 		val |= RGE_ADV_2500TFDX;
946 		ifp->if_baudrate = IF_Mbps(2500);
947 		break;
948 	case IFM_1000_T:
949 		ifp->if_baudrate = IF_Gbps(1);
950 		break;
951 	case IFM_100_TX:
952 		gig = rge_read_phy(sc, 0, MII_100T2CR) &
953 		    ~(GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX);
954 		anar = ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) ?
955 		    ANAR_TX | ANAR_TX_FD | ANAR_10_FD | ANAR_10 :
956 		    ANAR_TX | ANAR_10_FD | ANAR_10;
957 		ifp->if_baudrate = IF_Mbps(100);
958 		break;
959 	case IFM_10_T:
960 		gig = rge_read_phy(sc, 0, MII_100T2CR) &
961 		    ~(GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX);
962 		anar = ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) ?
963 		    ANAR_10_FD | ANAR_10 : ANAR_10;
964 		ifp->if_baudrate = IF_Mbps(10);
965 		break;
966 	default:
967 		printf("%s: unsupported media type\n", sc->sc_dev.dv_xname);
968 		return (EINVAL);
969 	}
970 
971 	rge_write_phy(sc, 0, MII_ANAR, anar | ANAR_PAUSE_ASYM | ANAR_FC);
972 	rge_write_phy(sc, 0, MII_100T2CR, gig);
973 	rge_write_phy_ocp(sc, 0xa5d4, val);
974 	rge_write_phy(sc, 0, MII_BMCR, BMCR_RESET | BMCR_AUTOEN |
975 	    BMCR_STARTNEG);
976 
977 	return (0);
978 }
979 
980 /*
981  * Report current media status.
982  */
983 void
984 rge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
985 {
986 	struct rge_softc *sc = ifp->if_softc;
987 	uint16_t status = 0;
988 
989 	ifmr->ifm_status = IFM_AVALID;
990 	ifmr->ifm_active = IFM_ETHER;
991 
992 	if (rge_get_link_status(sc)) {
993 		ifmr->ifm_status |= IFM_ACTIVE;
994 
995 		status = RGE_READ_2(sc, RGE_PHYSTAT);
996 		if ((status & RGE_PHYSTAT_FDX) ||
997 		    (status & (RGE_PHYSTAT_2500MBPS | RGE_PHYSTAT_5000MBPS)))
998 			ifmr->ifm_active |= IFM_FDX;
999 		else
1000 			ifmr->ifm_active |= IFM_HDX;
1001 
1002 		if (status & RGE_PHYSTAT_10MBPS)
1003 			ifmr->ifm_active |= IFM_10_T;
1004 		else if (status & RGE_PHYSTAT_100MBPS)
1005 			ifmr->ifm_active |= IFM_100_TX;
1006 		else if (status & RGE_PHYSTAT_1000MBPS)
1007 			ifmr->ifm_active |= IFM_1000_T;
1008 		else if (status & RGE_PHYSTAT_2500MBPS)
1009 			ifmr->ifm_active |= IFM_2500_T;
1010 		else if (status & RGE_PHYSTAT_5000MBPS)
1011 			ifmr->ifm_active |= IFM_5000_T;
1012 	}
1013 }
1014 
1015 /*
1016  * Allocate memory for RX/TX rings.
1017  */
1018 int
1019 rge_allocmem(struct rge_softc *sc)
1020 {
1021 	struct rge_queues *q = sc->sc_queues;
1022 	int error, i;
1023 
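	/*
	 * Each ring is set up with the usual four-step bus_dma(9)
	 * sequence: bus_dmamap_create(), bus_dmamem_alloc(),
	 * bus_dmamem_map() and bus_dmamap_load(), unwinding in reverse
	 * order on failure.
	 */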
1024 	/* Allocate DMA'able memory for the TX ring. */
1025 	error = bus_dmamap_create(sc->sc_dmat, RGE_TX_LIST_SZ, 1,
1026 	    RGE_TX_LIST_SZ, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1027 	    &q->q_tx.rge_tx_list_map);
1028 	if (error) {
1029 		printf("%s: can't create TX list map\n", sc->sc_dev.dv_xname);
1030 		return (error);
1031 	}
1032 	error = bus_dmamem_alloc(sc->sc_dmat, RGE_TX_LIST_SZ, RGE_ALIGN, 0,
1033 	    &q->q_tx.rge_tx_listseg, 1, &q->q_tx.rge_tx_listnseg,
1034 	    BUS_DMA_NOWAIT | BUS_DMA_ZERO);
1035 	if (error) {
1036 		printf("%s: can't alloc TX list\n", sc->sc_dev.dv_xname);
1037 		return (error);
1038 	}
1039 
1040 	/* Load the map for the TX ring. */
1041 	error = bus_dmamem_map(sc->sc_dmat, &q->q_tx.rge_tx_listseg,
1042 	    q->q_tx.rge_tx_listnseg, RGE_TX_LIST_SZ,
1043 	    (caddr_t *)&q->q_tx.rge_tx_list, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
1044 	if (error) {
1045 		printf("%s: can't map TX dma buffers\n", sc->sc_dev.dv_xname);
1046 		bus_dmamem_free(sc->sc_dmat, &q->q_tx.rge_tx_listseg,
1047 		    q->q_tx.rge_tx_listnseg);
1048 		return (error);
1049 	}
1050 	error = bus_dmamap_load(sc->sc_dmat, q->q_tx.rge_tx_list_map,
1051 	    q->q_tx.rge_tx_list, RGE_TX_LIST_SZ, NULL, BUS_DMA_NOWAIT);
1052 	if (error) {
1053 		printf("%s: can't load TX dma map\n", sc->sc_dev.dv_xname);
1054 		bus_dmamap_destroy(sc->sc_dmat, q->q_tx.rge_tx_list_map);
1055 		bus_dmamem_unmap(sc->sc_dmat,
1056 		    (caddr_t)q->q_tx.rge_tx_list, RGE_TX_LIST_SZ);
1057 		bus_dmamem_free(sc->sc_dmat, &q->q_tx.rge_tx_listseg,
1058 		    q->q_tx.rge_tx_listnseg);
1059 		return (error);
1060 	}
1061 
1062 	/* Create DMA maps for TX buffers. */
1063 	for (i = 0; i < RGE_TX_LIST_CNT; i++) {
1064 		error = bus_dmamap_create(sc->sc_dmat, RGE_JUMBO_FRAMELEN,
1065 		    RGE_TX_NSEGS, RGE_JUMBO_FRAMELEN, 0,
1066 		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1067 		    &q->q_tx.rge_txq[i].txq_dmamap);
1068 		if (error) {
1069 			printf("%s: can't create DMA map for TX\n",
1070 			    sc->sc_dev.dv_xname);
1071 			return (error);
1072 		}
1073 	}
1074 
1075 	/* Allocate DMA'able memory for the RX ring. */
1076 	error = bus_dmamap_create(sc->sc_dmat, RGE_RX_LIST_SZ, 1,
1077 	    RGE_RX_LIST_SZ, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1078 	    &q->q_rx.rge_rx_list_map);
1079 	if (error) {
1080 		printf("%s: can't create RX list map\n", sc->sc_dev.dv_xname);
1081 		return (error);
1082 	}
1083 	error = bus_dmamem_alloc(sc->sc_dmat, RGE_RX_LIST_SZ, RGE_ALIGN, 0,
1084 	    &q->q_rx.rge_rx_listseg, 1, &q->q_rx.rge_rx_listnseg,
1085 	    BUS_DMA_NOWAIT | BUS_DMA_ZERO);
1086 	if (error) {
1087 		printf("%s: can't alloc RX list\n", sc->sc_dev.dv_xname);
1088 		return (error);
1089 	}
1090 
1091 	/* Load the map for the RX ring. */
1092 	error = bus_dmamem_map(sc->sc_dmat, &q->q_rx.rge_rx_listseg,
1093 	    q->q_rx.rge_rx_listnseg, RGE_RX_LIST_SZ,
1094 	    (caddr_t *)&q->q_rx.rge_rx_list, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
1095 	if (error) {
1096 		printf("%s: can't map RX dma buffers\n", sc->sc_dev.dv_xname);
1097 		bus_dmamem_free(sc->sc_dmat, &q->q_rx.rge_rx_listseg,
1098 		    q->q_rx.rge_rx_listnseg);
1099 		return (error);
1100 	}
1101 	error = bus_dmamap_load(sc->sc_dmat, q->q_rx.rge_rx_list_map,
1102 	    q->q_rx.rge_rx_list, RGE_RX_LIST_SZ, NULL, BUS_DMA_NOWAIT);
1103 	if (error) {
1104 		printf("%s: can't load RX dma map\n", sc->sc_dev.dv_xname);
1105 		bus_dmamap_destroy(sc->sc_dmat, q->q_rx.rge_rx_list_map);
1106 		bus_dmamem_unmap(sc->sc_dmat,
1107 		    (caddr_t)q->q_rx.rge_rx_list, RGE_RX_LIST_SZ);
1108 		bus_dmamem_free(sc->sc_dmat, &q->q_rx.rge_rx_listseg,
1109 		    q->q_rx.rge_rx_listnseg);
1110 		return (error);
1111 	}
1112 
1113 	/* Create DMA maps for RX buffers. */
1114 	for (i = 0; i < RGE_RX_LIST_CNT; i++) {
1115 		error = bus_dmamap_create(sc->sc_dmat, RGE_JUMBO_FRAMELEN, 1,
1116 		    RGE_JUMBO_FRAMELEN, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1117 		    &q->q_rx.rge_rxq[i].rxq_dmamap);
1118 		if (error) {
1119 			printf("%s: can't create DMA map for RX\n",
1120 			    sc->sc_dev.dv_xname);
1121 			return (error);
1122 		}
1123 	}
1124 
1125 	return (error);
1126 }
1127 
1128 /*
1129  * Initialize the RX descriptor and attach an mbuf cluster.
1130  */
1131 int
1132 rge_newbuf(struct rge_queues *q)
1133 {
1134 	struct rge_softc *sc = q->q_sc;
1135 	struct mbuf *m;
1136 	struct rge_rx_desc *r;
1137 	struct rge_rxq *rxq;
1138 	bus_dmamap_t rxmap;
1139 	uint32_t cmdsts;
1140 	int idx;
1141 
1142 	m = MCLGETL(NULL, M_DONTWAIT, MCLBYTES);
1143 	if (m == NULL)
1144 		return (ENOBUFS);
1145 
1146 	m->m_len = m->m_pkthdr.len = MCLBYTES;
1147 	m_adj(m, ETHER_ALIGN);
1148 
1149 	idx = q->q_rx.rge_rxq_prodidx;
1150 	rxq = &q->q_rx.rge_rxq[idx];
1151 	rxmap = rxq->rxq_dmamap;
1152 
1153 	if (bus_dmamap_load_mbuf(sc->sc_dmat, rxmap, m, BUS_DMA_NOWAIT)) {
1154 		m_freem(m);
1155 		return (ENOBUFS);
1156 	}
1157 
1158 	bus_dmamap_sync(sc->sc_dmat, rxmap, 0, rxmap->dm_mapsize,
1159 	    BUS_DMASYNC_PREREAD);
1160 
1161 	/* Map the segments into RX descriptors. */
1162 	r = &q->q_rx.rge_rx_list[idx];
1163 
1164 	rxq->rxq_mbuf = m;
1165 
1166 	cmdsts = rxmap->dm_segs[0].ds_len;
1167 	if (idx == RGE_RX_LIST_CNT - 1)
1168 		cmdsts |= RGE_RDCMDSTS_EOR;
1169 
1170 	r->hi_qword1.rx_qword4.rge_cmdsts = htole32(cmdsts);
1171 	r->hi_qword1.rx_qword4.rge_extsts = htole32(0);
1172 	r->hi_qword0.rge_addr = htole64(rxmap->dm_segs[0].ds_addr);
1173 
1174 	bus_dmamap_sync(sc->sc_dmat, q->q_rx.rge_rx_list_map,
1175 	    idx * sizeof(struct rge_rx_desc), sizeof(struct rge_rx_desc),
1176 	    BUS_DMASYNC_PREWRITE);
1177 
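	/*
	 * As in rge_encap(), the descriptor is first published without
	 * OWN; a second write sets OWN only after the buffer address and
	 * length are visible to the chip.
	 */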
1178 	bus_dmamap_sync(sc->sc_dmat, q->q_rx.rge_rx_list_map,
1179 	    idx * sizeof(struct rge_rx_desc), sizeof(struct rge_rx_desc),
1180 	    BUS_DMASYNC_POSTWRITE);
1181 	cmdsts |= RGE_RDCMDSTS_OWN;
1182 	r->hi_qword1.rx_qword4.rge_cmdsts = htole32(cmdsts);
1183 	bus_dmamap_sync(sc->sc_dmat, q->q_rx.rge_rx_list_map,
1184 	    idx * sizeof(struct rge_rx_desc), sizeof(struct rge_rx_desc),
1185 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1186 
1187 	q->q_rx.rge_rxq_prodidx = RGE_NEXT_RX_DESC(idx);
1188 
1189 	return (0);
1190 }
1191 
1192 void
1193 rge_rx_list_init(struct rge_queues *q)
1194 {
1195 	memset(q->q_rx.rge_rx_list, 0, RGE_RX_LIST_SZ);
1196 
1197 	q->q_rx.rge_rxq_prodidx = q->q_rx.rge_rxq_considx = 0;
1198 	q->q_rx.rge_head = NULL;
1199 	q->q_rx.rge_tail = &q->q_rx.rge_head;
1200 
1201 	if_rxr_init(&q->q_rx.rge_rx_ring, 32, RGE_RX_LIST_CNT - 1);
1202 	rge_fill_rx_ring(q);
1203 }
1204 
1205 void
1206 rge_fill_rx_ring(struct rge_queues *q)
1207 {
1208 	struct if_rxring *rxr = &q->q_rx.rge_rx_ring;
1209 	int slots;
1210 
1211 	for (slots = if_rxr_get(rxr, RGE_RX_LIST_CNT); slots > 0; slots--) {
1212 		if (rge_newbuf(q))
1213 			break;
1214 	}
1215 	if_rxr_put(rxr, slots);
1216 }
1217 
1218 void
1219 rge_tx_list_init(struct rge_queues *q)
1220 {
1221 	struct rge_softc *sc = q->q_sc;
1222 	struct rge_tx_desc *d;
1223 	int i;
1224 
1225 	memset(q->q_tx.rge_tx_list, 0, RGE_TX_LIST_SZ);
1226 
1227 	for (i = 0; i < RGE_TX_LIST_CNT; i++)
1228 		q->q_tx.rge_txq[i].txq_mbuf = NULL;
1229 
1230 	d = &q->q_tx.rge_tx_list[RGE_TX_LIST_CNT - 1];
1231 	d->rge_cmdsts = htole32(RGE_TDCMDSTS_EOR);
1232 
1233 	bus_dmamap_sync(sc->sc_dmat, q->q_tx.rge_tx_list_map, 0,
1234 	    q->q_tx.rge_tx_list_map->dm_mapsize,
1235 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1236 
1237 	q->q_tx.rge_txq_prodidx = q->q_tx.rge_txq_considx = 0;
1238 }
1239 
1240 int
1241 rge_rxeof(struct rge_queues *q)
1242 {
1243 	struct rge_softc *sc = q->q_sc;
1244 	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
1245 	struct mbuf *m;
1246 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
1247 	struct if_rxring *rxr = &q->q_rx.rge_rx_ring;
1248 	struct rge_rx_desc *cur_rx;
1249 	struct rge_rxq *rxq;
1250 	uint32_t rxstat, extsts;
1251 	int i, mlen, rx = 0;
1252 	int cons;
1253 
1254 	i = cons = q->q_rx.rge_rxq_considx;
1255 
1256 	while (if_rxr_inuse(rxr) > 0) {
1257 		cur_rx = &q->q_rx.rge_rx_list[i];
1258 
1259 		bus_dmamap_sync(sc->sc_dmat, q->q_rx.rge_rx_list_map,
1260 		    i * sizeof(*cur_rx), sizeof(*cur_rx),
1261 		    BUS_DMASYNC_POSTREAD);
1262 		rxstat = letoh32(cur_rx->hi_qword1.rx_qword4.rge_cmdsts);
1263 		if (rxstat & RGE_RDCMDSTS_OWN) {
1264 			bus_dmamap_sync(sc->sc_dmat, q->q_rx.rge_rx_list_map,
1265 			    i * sizeof(*cur_rx), sizeof(*cur_rx),
1266 			    BUS_DMASYNC_PREREAD);
1267 			break;
1268 		}
1269 
1270 		rxq = &q->q_rx.rge_rxq[i];
1271 		bus_dmamap_sync(sc->sc_dmat, rxq->rxq_dmamap, 0,
1272 		    rxq->rxq_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
1273 		bus_dmamap_unload(sc->sc_dmat, rxq->rxq_dmamap);
1274 		m = rxq->rxq_mbuf;
1275 		rxq->rxq_mbuf = NULL;
1276 
1277 		i = RGE_NEXT_RX_DESC(i);
1278 		if_rxr_put(rxr, 1);
1279 		rx = 1;
1280 
1281 		if (ISSET(rxstat, RGE_RDCMDSTS_SOF)) {
1282 			if (q->q_rx.rge_head != NULL) {
1283 				ifp->if_ierrors++;
1284 				m_freem(q->q_rx.rge_head);
1285 				q->q_rx.rge_tail = &q->q_rx.rge_head;
1286 			}
1287 
1288 			m->m_pkthdr.len = 0;
1289 		} else if (q->q_rx.rge_head == NULL) {
1290 			m_freem(m);
1291 			continue;
1292 		} else
1293 			CLR(m->m_flags, M_PKTHDR);
1294 
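		/*
		 * Jumbo frames span several descriptors; fragments are
		 * chained through rge_head/rge_tail, and only an EOF
		 * descriptor (below) hands the assembled packet to the
		 * stack.
		 */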
1295 		*q->q_rx.rge_tail = m;
1296 		q->q_rx.rge_tail = &m->m_next;
1297 
1298 		mlen = rxstat & RGE_RDCMDSTS_FRAGLEN;
1299 		m->m_len = mlen;
1300 
1301 		m = q->q_rx.rge_head;
1302 		m->m_pkthdr.len += mlen;
1303 
1304 		if (rxstat & RGE_RDCMDSTS_RXERRSUM) {
1305 			ifp->if_ierrors++;
1306 			m_freem(m);
1307 			q->q_rx.rge_head = NULL;
1308 			q->q_rx.rge_tail = &q->q_rx.rge_head;
1309 			continue;
1310 		}
1311 
1312 		if (!ISSET(rxstat, RGE_RDCMDSTS_EOF))
1313 			continue;
1314 
1315 		q->q_rx.rge_head = NULL;
1316 		q->q_rx.rge_tail = &q->q_rx.rge_head;
1317 
1318 		m_adj(m, -ETHER_CRC_LEN);
1319 
1320 		extsts = letoh32(cur_rx->hi_qword1.rx_qword4.rge_extsts);
1321 
1322 		/* Check IP header checksum. */
1323 		if (!(extsts & RGE_RDEXTSTS_IPCSUMERR) &&
1324 		    (extsts & RGE_RDEXTSTS_IPV4))
1325 			m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;
1326 
1327 		/* Check TCP/UDP checksum. */
1328 		if ((extsts & (RGE_RDEXTSTS_IPV4 | RGE_RDEXTSTS_IPV6)) &&
1329 		    (((extsts & RGE_RDEXTSTS_TCPPKT) &&
1330 		    !(extsts & RGE_RDEXTSTS_TCPCSUMERR)) ||
1331 		    ((extsts & RGE_RDEXTSTS_UDPPKT) &&
1332 		    !(extsts & RGE_RDEXTSTS_UDPCSUMERR))))
1333 			m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK |
1334 			    M_UDP_CSUM_IN_OK;
1335 
1336 #if NVLAN > 0
1337 		if (extsts & RGE_RDEXTSTS_VTAG) {
1338 			m->m_pkthdr.ether_vtag =
1339 			    ntohs(extsts & RGE_RDEXTSTS_VLAN_MASK);
1340 			m->m_flags |= M_VLANTAG;
1341 		}
1342 #endif
1343 
1344 		ml_enqueue(&ml, m);
1345 	}
1346 
1347 	if (!rx)
1348 		return (0);
1349 
1350 	if (i >= cons) {
1351 		bus_dmamap_sync(sc->sc_dmat, q->q_rx.rge_rx_list_map,
1352 		    cons * sizeof(*cur_rx), (i - cons) * sizeof(*cur_rx),
1353 		    BUS_DMASYNC_POSTWRITE);
1354 	} else {
1355 		bus_dmamap_sync(sc->sc_dmat, q->q_rx.rge_rx_list_map,
1356 		    cons * sizeof(*cur_rx),
1357 		    (RGE_RX_LIST_CNT - cons) * sizeof(*cur_rx),
1358 		    BUS_DMASYNC_POSTWRITE);
1359 		if (i > 0) {
1360 			bus_dmamap_sync(sc->sc_dmat, q->q_rx.rge_rx_list_map,
1361 			    0, i * sizeof(*cur_rx),
1362 			    BUS_DMASYNC_POSTWRITE);
1363 		}
1364 	}
1365 
1366 	if (ifiq_input(&ifp->if_rcv, &ml))
1367 		if_rxr_livelocked(rxr);
1368 
1369 	q->q_rx.rge_rxq_considx = i;
1370 	rge_fill_rx_ring(q);
1371 
1372 	return (1);
1373 }
1374 
1375 int
1376 rge_txeof(struct rge_queues *q)
1377 {
1378 	struct rge_softc *sc = q->q_sc;
1379 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
1380 	struct rge_txq *txq;
1381 	uint32_t txstat;
1382 	int cons, prod, cur, idx;
1383 	int free = 0;
1384 
1385 	prod = q->q_tx.rge_txq_prodidx;
1386 	cons = q->q_tx.rge_txq_considx;
1387 
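	/*
	 * free records the scan outcome: 0 means nothing was reclaimed,
	 * 1 means every outstanding packet completed, and 2 means the
	 * scan hit a descriptor the chip still owns, so another txeof
	 * pass is still pending.
	 */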
1388 	idx = cons;
1389 	while (idx != prod) {
1390 		txq = &q->q_tx.rge_txq[idx];
1391 		cur = txq->txq_descidx;
1392 
1393 		rge_tx_list_sync(sc, q, cur, 1, BUS_DMASYNC_POSTREAD);
1394 		txstat = q->q_tx.rge_tx_list[cur].rge_cmdsts;
1395 		rge_tx_list_sync(sc, q, cur, 1, BUS_DMASYNC_PREREAD);
1396 		if (ISSET(txstat, htole32(RGE_TDCMDSTS_OWN))) {
1397 			free = 2;
1398 			break;
1399 		}
1400 
1401 		bus_dmamap_sync(sc->sc_dmat, txq->txq_dmamap, 0,
1402 		    txq->txq_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1403 		bus_dmamap_unload(sc->sc_dmat, txq->txq_dmamap);
1404 		m_freem(txq->txq_mbuf);
1405 		txq->txq_mbuf = NULL;
1406 
1407 		if (ISSET(txstat,
1408 		    htole32(RGE_TDCMDSTS_EXCESSCOLL | RGE_TDCMDSTS_COLL)))
1409 			ifp->if_collisions++;
1410 		if (ISSET(txstat, htole32(RGE_TDCMDSTS_TXERR)))
1411 			ifp->if_oerrors++;
1412 
1413 		idx = RGE_NEXT_TX_DESC(cur);
1414 		free = 1;
1415 	}
1416 
1417 	if (free == 0)
1418 		return (0);
1419 
1420 	if (idx >= cons) {
1421 		rge_tx_list_sync(sc, q, cons, idx - cons,
1422 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1423 	} else {
1424 		rge_tx_list_sync(sc, q, cons, RGE_TX_LIST_CNT - cons,
1425 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1426 		rge_tx_list_sync(sc, q, 0, idx,
1427 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1428 	}
1429 
1430 	q->q_tx.rge_txq_considx = idx;
1431 
1432 	if (ifq_is_oactive(&ifp->if_snd))
1433 		ifq_restart(&ifp->if_snd);
1434 	else if (free == 2)
1435 		ifq_serialize(&ifp->if_snd, &sc->sc_task);
1436 	else
1437 		ifp->if_timer = 0;
1438 
1439 	return (1);
1440 }
1441 
1442 void
1443 rge_reset(struct rge_softc *sc)
1444 {
1445 	int i;
1446 
1447 	RGE_CLRBIT_4(sc, RGE_RXCFG, RGE_RXCFG_ALLPHYS | RGE_RXCFG_INDIV |
1448 	    RGE_RXCFG_MULTI | RGE_RXCFG_BROAD | RGE_RXCFG_RUNT |
1449 	    RGE_RXCFG_ERRPKT);
1450 
1451 	/* Enable RXDV gate. */
1452 	RGE_SETBIT_1(sc, RGE_PPSW, 0x08);
1453 	DELAY(2000);
1454 
1455 	RGE_SETBIT_1(sc, RGE_CMD, RGE_CMD_STOPREQ);
1456 	if (sc->rge_type != MAC_CFG2_8126) {
1457 		for (i = 0; i < 20; i++) {
1458 			DELAY(10);
1459 			if (!(RGE_READ_1(sc, RGE_CMD) & RGE_CMD_STOPREQ))
1460 				break;
1461 		}
1462 	}
1463 
1464 	for (i = 0; i < 3000; i++) {
1465 		DELAY(50);
1466 		if ((RGE_READ_1(sc, RGE_MCUCMD) & (RGE_MCUCMD_RXFIFO_EMPTY |
1467 		    RGE_MCUCMD_TXFIFO_EMPTY)) == (RGE_MCUCMD_RXFIFO_EMPTY |
1468 		    RGE_MCUCMD_TXFIFO_EMPTY))
1469 			break;
1470 	}
1471 	if (sc->rge_type != MAC_CFG3) {
1472 		for (i = 0; i < 3000; i++) {
1473 			DELAY(50);
1474 			if ((RGE_READ_2(sc, RGE_IM) & 0x0103) == 0x0103)
1475 				break;
1476 		}
1477 	}
1478 
1479 	DELAY(2000);
1480 
1481 	/* Soft reset. */
1482 	RGE_WRITE_1(sc, RGE_CMD, RGE_CMD_RESET);
1483 
1484 	for (i = 0; i < RGE_TIMEOUT; i++) {
1485 		DELAY(100);
1486 		if (!(RGE_READ_1(sc, RGE_CMD) & RGE_CMD_RESET))
1487 			break;
1488 	}
1489 	if (i == RGE_TIMEOUT)
1490 		printf("%s: reset never completed!\n", sc->sc_dev.dv_xname);
1491 }
1492 
1493 void
1494 rge_iff(struct rge_softc *sc)
1495 {
1496 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
1497 	struct arpcom *ac = &sc->sc_arpcom;
1498 	struct ether_multi *enm;
1499 	struct ether_multistep step;
1500 	uint32_t hashes[2];
1501 	uint32_t rxfilt;
1502 	int h = 0;
1503 
1504 	rxfilt = RGE_READ_4(sc, RGE_RXCFG);
1505 	rxfilt &= ~(RGE_RXCFG_ALLPHYS | RGE_RXCFG_MULTI);
1506 	ifp->if_flags &= ~IFF_ALLMULTI;
1507 
1508 	/*
1509 	 * Always accept frames destined to our station address.
1510 	 * Always accept broadcast frames.
1511 	 */
1512 	rxfilt |= RGE_RXCFG_INDIV | RGE_RXCFG_BROAD;
1513 
1514 	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
1515 		ifp->if_flags |= IFF_ALLMULTI;
1516 		rxfilt |= RGE_RXCFG_MULTI;
1517 		if (ifp->if_flags & IFF_PROMISC)
1518 			rxfilt |= RGE_RXCFG_ALLPHYS;
1519 		hashes[0] = hashes[1] = 0xffffffff;
1520 	} else {
1521 		rxfilt |= RGE_RXCFG_MULTI;
1522 		/* Program new filter. */
1523 		memset(hashes, 0, sizeof(hashes));
1524 
1525 		ETHER_FIRST_MULTI(step, ac, enm);
1526 		while (enm != NULL) {
1527 			h = ether_crc32_be(enm->enm_addrlo,
1528 			    ETHER_ADDR_LEN) >> 26;
1529 
1530 			if (h < 32)
1531 				hashes[0] |= (1 << h);
1532 			else
1533 				hashes[1] |= (1 << (h - 32));
1534 
1535 			ETHER_NEXT_MULTI(step, enm);
1536 		}
1537 	}
1538 
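	/*
	 * The hash filter is indexed by the top 6 bits of the big-endian
	 * CRC32 of each multicast address; the two 32-bit halves are
	 * written byte-swapped, with hashes[1] landing in RGE_MAR0.
	 */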
1539 	RGE_WRITE_4(sc, RGE_RXCFG, rxfilt);
1540 	RGE_WRITE_4(sc, RGE_MAR0, swap32(hashes[1]));
1541 	RGE_WRITE_4(sc, RGE_MAR4, swap32(hashes[0]));
1542 }
1543 
1544 void
1545 rge_chipinit(struct rge_softc *sc)
1546 {
1547 	rge_exit_oob(sc);
1548 	rge_set_phy_power(sc, 1);
1549 	rge_hw_init(sc);
1550 	rge_hw_reset(sc);
1551 }
1552 
1553 void
1554 rge_set_phy_power(struct rge_softc *sc, int on)
1555 {
1556 	int i;
1557 
1558 	if (on) {
1559 		RGE_SETBIT_1(sc, RGE_PMCH, 0xc0);
1560 
1561 		rge_write_phy(sc, 0, MII_BMCR, BMCR_AUTOEN);
1562 
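		/*
		 * Wait for the PHY power state machine (low bits of OCP
		 * register 0xa420) to report state 3, taken here to mean
		 * the PHY is powered up and ready.
		 */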
1563 		for (i = 0; i < RGE_TIMEOUT; i++) {
1564 			if ((rge_read_phy_ocp(sc, 0xa420) & 0x0007) == 3)
1565 				break;
1566 			DELAY(1000);
1567 		}
1568 	} else {
1569 		rge_write_phy(sc, 0, MII_BMCR, BMCR_AUTOEN | BMCR_PDOWN);
1570 		RGE_CLRBIT_1(sc, RGE_PMCH, 0x80);
1571 		RGE_CLRBIT_1(sc, RGE_PPSW, 0x40);
1572 	}
1573 }
1574 
1575 void
1576 rge_ephy_config(struct rge_softc *sc)
1577 {
1578 	switch (sc->rge_type) {
1579 	case MAC_CFG3:
1580 		rge_ephy_config_mac_cfg3(sc);
1581 		break;
1582 	case MAC_CFG5:
1583 		rge_ephy_config_mac_cfg5(sc);
1584 		break;
1585 	default:
1586 		break;	/* Nothing to do. */
1587 	}
1588 }
1589 
1590 void
1591 rge_ephy_config_mac_cfg3(struct rge_softc *sc)
1592 {
1593 	uint16_t val;
1594 	int i;
1595 
1596 	for (i = 0; i < nitems(rtl8125_mac_cfg3_ephy); i++)
1597 		rge_write_ephy(sc, rtl8125_mac_cfg3_ephy[i].reg,
1598 		    rtl8125_mac_cfg3_ephy[i].val);
1599 
1600 	val = rge_read_ephy(sc, 0x002a) & ~0x7000;
1601 	rge_write_ephy(sc, 0x002a, val | 0x3000);
1602 	RGE_EPHY_CLRBIT(sc, 0x0019, 0x0040);
1603 	RGE_EPHY_SETBIT(sc, 0x001b, 0x0e00);
1604 	RGE_EPHY_CLRBIT(sc, 0x001b, 0x7000);
1605 	rge_write_ephy(sc, 0x0002, 0x6042);
1606 	rge_write_ephy(sc, 0x0006, 0x0014);
1607 	val = rge_read_ephy(sc, 0x006a) & ~0x7000;
1608 	rge_write_ephy(sc, 0x006a, val | 0x3000);
1609 	RGE_EPHY_CLRBIT(sc, 0x0059, 0x0040);
1610 	RGE_EPHY_SETBIT(sc, 0x005b, 0x0e00);
1611 	RGE_EPHY_CLRBIT(sc, 0x005b, 0x7000);
1612 	rge_write_ephy(sc, 0x0042, 0x6042);
1613 	rge_write_ephy(sc, 0x0046, 0x0014);
1614 }
1615 
1616 void
1617 rge_ephy_config_mac_cfg5(struct rge_softc *sc)
1618 {
1619 	int i;
1620 
1621 	for (i = 0; i < nitems(rtl8125_mac_cfg5_ephy); i++)
1622 		rge_write_ephy(sc, rtl8125_mac_cfg5_ephy[i].reg,
1623 		    rtl8125_mac_cfg5_ephy[i].val);
1624 }
1625 
1626 int
1627 rge_phy_config(struct rge_softc *sc)
1628 {
1629 	int i;
1630 
1631 	rge_ephy_config(sc);
1632 
1633 	/* PHY reset. */
1634 	rge_write_phy(sc, 0, MII_ANAR,
1635 	    rge_read_phy(sc, 0, MII_ANAR) &
1636 	    ~(ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10));
1637 	rge_write_phy(sc, 0, MII_100T2CR,
1638 	    rge_read_phy(sc, 0, MII_100T2CR) &
1639 	    ~(GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX));
1640 	if (sc->rge_type == MAC_CFG2_8126)
1641 		RGE_PHY_CLRBIT(sc, 0xa5d4, RGE_ADV_2500TFDX | RGE_ADV_5000TFDX);
1642 	else
1643 		RGE_PHY_CLRBIT(sc, 0xa5d4, RGE_ADV_2500TFDX);
1644 	rge_write_phy(sc, 0, MII_BMCR, BMCR_RESET | BMCR_AUTOEN |
1645 	    BMCR_STARTNEG);
1646 	for (i = 0; i < 2500; i++) {
1647 		if (!(rge_read_phy(sc, 0, MII_BMCR) & BMCR_RESET))
1648 			break;
1649 		DELAY(1000);
1650 	}
1651 	if (i == 2500) {
1652 		printf("%s: PHY reset failed\n", sc->sc_dev.dv_xname);
1653 		return (ETIMEDOUT);
1654 	}
1655 
1656 	/* Read microcode version. */
1657 	rge_write_phy_ocp(sc, 0xa436, 0x801e);
1658 	sc->rge_mcodever = rge_read_phy_ocp(sc, 0xa438);
1659 
1660 	switch (sc->rge_type) {
1661 	case MAC_CFG2_8126:
1662 		rge_phy_config_mac_cfg2_8126(sc);
1663 		break;
1664 	case MAC_CFG3:
1665 		rge_phy_config_mac_cfg3(sc);
1666 		break;
1667 	case MAC_CFG5:
1668 		rge_phy_config_mac_cfg5(sc);
1669 		break;
1670 	default:
1671 		break;	/* Can't happen. */
1672 	}
1673 
1674 	RGE_PHY_CLRBIT(sc, 0xa5b4, 0x8000);
1675 
1676 	/* Disable EEE. */
1677 	RGE_MAC_CLRBIT(sc, 0xe040, 0x0003);
1678 	if (sc->rge_type == MAC_CFG3) {
1679 		RGE_MAC_CLRBIT(sc, 0xeb62, 0x0006);
1680 		RGE_PHY_CLRBIT(sc, 0xa432, 0x0010);
1681 	} else if (sc->rge_type == MAC_CFG5)
1682 		RGE_PHY_SETBIT(sc, 0xa432, 0x0010);
1683 
1684 	RGE_PHY_CLRBIT(sc, 0xa5d0, 0x0006);
1685 	RGE_PHY_CLRBIT(sc, 0xa6d4, 0x0001);
1686 	if (sc->rge_type == MAC_CFG2_8126)
1687 		RGE_PHY_CLRBIT(sc, 0xa6d4, 0x0002);
1688 	RGE_PHY_CLRBIT(sc, 0xa6d8, 0x0010);
1689 	RGE_PHY_CLRBIT(sc, 0xa428, 0x0080);
1690 	RGE_PHY_CLRBIT(sc, 0xa4a2, 0x0200);
1691 
1692 	/* Disable advanced EEE. */
1693 	RGE_MAC_CLRBIT(sc, 0xe052, 0x0001);
1694 	RGE_PHY_CLRBIT(sc, 0xa442, 0x3000);
1695 	RGE_PHY_CLRBIT(sc, 0xa430, 0x8000);
1696 
1697 	return (0);
1698 }
1699 
1700 void
1701 rge_phy_config_mac_cfg2_8126(struct rge_softc *sc)
1702 {
1703 	uint16_t val;
1704 	int i;
1705 	static const uint16_t mac_cfg2_a438_value[] =
1706 	    { 0x0044, 0x00a8, 0x00d6, 0x00ec, 0x00f6, 0x00fc, 0x00fe,
1707 	      0x00fe, 0x00bc, 0x0058, 0x002a, 0x003f, 0x3f02, 0x023c,
1708 	      0x3b0a, 0x1c00, 0x0000, 0x0000, 0x0000, 0x0000 };
1709 
1710 	static const uint16_t mac_cfg2_b87e_value[] =
1711 	    { 0x03ed, 0x03ff, 0x0009, 0x03fe, 0x000b, 0x0021, 0x03f7,
1712 	      0x03b8, 0x03e0, 0x0049, 0x0049, 0x03e0, 0x03b8, 0x03f7,
1713 	      0x0021, 0x000b, 0x03fe, 0x0009, 0x03ff, 0x03ed, 0x000e,
1714 	      0x03fe, 0x03ed, 0x0006, 0x001a, 0x03f1, 0x03d8, 0x0023,
1715 	      0x0054, 0x0322, 0x00dd, 0x03ab, 0x03dc, 0x0027, 0x000e,
1716 	      0x03e5, 0x03f9, 0x0012, 0x0001, 0x03f1 };
1717 
1718 	rge_phy_config_mcu(sc, RGE_MAC_CFG2_8126_MCODE_VER);
1719 
1720 	RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
1721 	rge_write_phy_ocp(sc, 0xa436, 0x80bf);
1722 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1723 	rge_write_phy_ocp(sc, 0xa438, val | 0xed00);
1724 	rge_write_phy_ocp(sc, 0xa436, 0x80cd);
1725 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1726 	rge_write_phy_ocp(sc, 0xa438, val | 0x1000);
1727 	rge_write_phy_ocp(sc, 0xa436, 0x80d1);
1728 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1729 	rge_write_phy_ocp(sc, 0xa438, val | 0xc800);
1730 	rge_write_phy_ocp(sc, 0xa436, 0x80d4);
1731 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1732 	rge_write_phy_ocp(sc, 0xa438, val | 0xc800);
1733 	rge_write_phy_ocp(sc, 0xa436, 0x80e1);
1734 	rge_write_phy_ocp(sc, 0xa438, 0x10cc);
1735 	rge_write_phy_ocp(sc, 0xa436, 0x80e5);
1736 	rge_write_phy_ocp(sc, 0xa438, 0x4f0c);
1737 	rge_write_phy_ocp(sc, 0xa436, 0x8387);
1738 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1739 	rge_write_phy_ocp(sc, 0xa438, val | 0x4700);
1740 	val = rge_read_phy_ocp(sc, 0xa80c) & ~0x00c0;
1741 	rge_write_phy_ocp(sc, 0xa80c, val | 0x0080);
1742 	RGE_PHY_CLRBIT(sc, 0xac90, 0x0010);
1743 	RGE_PHY_CLRBIT(sc, 0xad2c, 0x8000);
1744 	rge_write_phy_ocp(sc, 0xb87c, 0x8321);
1745 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
1746 	rge_write_phy_ocp(sc, 0xb87e, val | 0x1100);
1747 	RGE_PHY_SETBIT(sc, 0xacf8, 0x000c);
1748 	rge_write_phy_ocp(sc, 0xa436, 0x8183);
1749 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1750 	rge_write_phy_ocp(sc, 0xa438, val | 0x5900);
1751 	RGE_PHY_SETBIT(sc, 0xad94, 0x0020);
1752 	RGE_PHY_CLRBIT(sc, 0xa654, 0x0800);
1753 	RGE_PHY_SETBIT(sc, 0xb648, 0x4000);
1754 	rge_write_phy_ocp(sc, 0xb87c, 0x839e);
1755 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
1756 	rge_write_phy_ocp(sc, 0xb87e, val | 0x2f00);
1757 	rge_write_phy_ocp(sc, 0xb87c, 0x83f2);
1758 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
1759 	rge_write_phy_ocp(sc, 0xb87e, val | 0x0800);
1760 	RGE_PHY_SETBIT(sc, 0xada0, 0x0002);
1761 	rge_write_phy_ocp(sc, 0xb87c, 0x80f3);
1762 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
1763 	rge_write_phy_ocp(sc, 0xb87e, val | 0x9900);
1764 	rge_write_phy_ocp(sc, 0xb87c, 0x8126);
1765 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
1766 	rge_write_phy_ocp(sc, 0xb87e, val | 0xc100);
1767 	rge_write_phy_ocp(sc, 0xb87c, 0x893a);
1768 	rge_write_phy_ocp(sc, 0xb87e, 0x8080);
1769 	rge_write_phy_ocp(sc, 0xb87c, 0x8647);
1770 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
1771 	rge_write_phy_ocp(sc, 0xb87e, val | 0xe600);
1772 	rge_write_phy_ocp(sc, 0xb87c, 0x862c);
1773 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
1774 	rge_write_phy_ocp(sc, 0xb87e, val | 0x1200);
1775 	rge_write_phy_ocp(sc, 0xb87c, 0x864a);
1776 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
1777 	rge_write_phy_ocp(sc, 0xb87e, val | 0xe600);
1778 	rge_write_phy_ocp(sc, 0xb87c, 0x80a0);
1779 	rge_write_phy_ocp(sc, 0xb87e, 0xbcbc);
1780 	rge_write_phy_ocp(sc, 0xb87c, 0x805e);
1781 	rge_write_phy_ocp(sc, 0xb87e, 0xbcbc);
1782 	rge_write_phy_ocp(sc, 0xb87c, 0x8056);
1783 	rge_write_phy_ocp(sc, 0xb87e, 0x3077);
1784 	rge_write_phy_ocp(sc, 0xb87c, 0x8058);
1785 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
1786 	rge_write_phy_ocp(sc, 0xb87e, val | 0x5a00);
1787 	rge_write_phy_ocp(sc, 0xb87c, 0x8098);
1788 	rge_write_phy_ocp(sc, 0xb87e, 0x3077);
1789 	rge_write_phy_ocp(sc, 0xb87c, 0x809a);
1790 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
1791 	rge_write_phy_ocp(sc, 0xb87e, val | 0x5a00);
1792 	rge_write_phy_ocp(sc, 0xb87c, 0x8052);
1793 	rge_write_phy_ocp(sc, 0xb87e, 0x3733);
1794 	rge_write_phy_ocp(sc, 0xb87c, 0x8094);
1795 	rge_write_phy_ocp(sc, 0xb87e, 0x3733);
1796 	rge_write_phy_ocp(sc, 0xb87c, 0x807f);
1797 	rge_write_phy_ocp(sc, 0xb87e, 0x7c75);
1798 	rge_write_phy_ocp(sc, 0xb87c, 0x803d);
1799 	rge_write_phy_ocp(sc, 0xb87e, 0x7c75);
1800 	rge_write_phy_ocp(sc, 0xb87c, 0x8036);
1801 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
1802 	rge_write_phy_ocp(sc, 0xb87e, val | 0x3000);
1803 	rge_write_phy_ocp(sc, 0xb87c, 0x8078);
1804 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
1805 	rge_write_phy_ocp(sc, 0xb87e, val | 0x3000);
1806 	rge_write_phy_ocp(sc, 0xb87c, 0x8031);
1807 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
1808 	rge_write_phy_ocp(sc, 0xb87e, val | 0x3300);
1809 	rge_write_phy_ocp(sc, 0xb87c, 0x8073);
1810 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
1811 	rge_write_phy_ocp(sc, 0xb87e, val | 0x3300);
1812 	val = rge_read_phy_ocp(sc, 0xae06) & ~0xfc00;
1813 	rge_write_phy_ocp(sc, 0xae06, val | 0x7c00);
1814 	rge_write_phy_ocp(sc, 0xb87c, 0x89d1);
1815 	rge_write_phy_ocp(sc, 0xb87e, 0x0004);
1816 	rge_write_phy_ocp(sc, 0xa436, 0x8fbd);
1817 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1818 	rge_write_phy_ocp(sc, 0xa438, val | 0x0a00);
1819 	rge_write_phy_ocp(sc, 0xa436, 0x8fbe);
1820 	rge_write_phy_ocp(sc, 0xa438, 0x0d09);
1821 	rge_write_phy_ocp(sc, 0xb87c, 0x89cd);
1822 	rge_write_phy_ocp(sc, 0xb87e, 0x0f0f);
1823 	rge_write_phy_ocp(sc, 0xb87c, 0x89cf);
1824 	rge_write_phy_ocp(sc, 0xb87e, 0x0f0f);
1825 	rge_write_phy_ocp(sc, 0xb87c, 0x83a4);
1826 	rge_write_phy_ocp(sc, 0xb87e, 0x6600);
1827 	rge_write_phy_ocp(sc, 0xb87c, 0x83a6);
1828 	rge_write_phy_ocp(sc, 0xb87e, 0x6601);
1829 	rge_write_phy_ocp(sc, 0xb87c, 0x83c0);
1830 	rge_write_phy_ocp(sc, 0xb87e, 0x6600);
1831 	rge_write_phy_ocp(sc, 0xb87c, 0x83c2);
1832 	rge_write_phy_ocp(sc, 0xb87e, 0x6601);
1833 	rge_write_phy_ocp(sc, 0xb87c, 0x8414);
1834 	rge_write_phy_ocp(sc, 0xb87e, 0x6600);
1835 	rge_write_phy_ocp(sc, 0xb87c, 0x8416);
1836 	rge_write_phy_ocp(sc, 0xb87e, 0x6601);
1837 	rge_write_phy_ocp(sc, 0xb87c, 0x83f8);
1838 	rge_write_phy_ocp(sc, 0xb87e, 0x6600);
1839 	rge_write_phy_ocp(sc, 0xb87c, 0x83fa);
1840 	rge_write_phy_ocp(sc, 0xb87e, 0x6601);
1841 
1842 	rge_patch_phy_mcu(sc, 1);
1843 	val = rge_read_phy_ocp(sc, 0xbd96) & ~0x1f00;
1844 	rge_write_phy_ocp(sc, 0xbd96, val | 0x1000);
1845 	val = rge_read_phy_ocp(sc, 0xbf1c) & ~0x0007;
1846 	rge_write_phy_ocp(sc, 0xbf1c, val | 0x0007);
1847 	RGE_PHY_CLRBIT(sc, 0xbfbe, 0x8000);
1848 	val = rge_read_phy_ocp(sc, 0xbf40) & ~0x0380;
1849 	rge_write_phy_ocp(sc, 0xbf40, val | 0x0280);
1850 	val = rge_read_phy_ocp(sc, 0xbf90) & ~0x0080;
1851 	rge_write_phy_ocp(sc, 0xbf90, val | 0x0060);
1852 	val = rge_read_phy_ocp(sc, 0xbf90) & ~0x0010;
1853 	rge_write_phy_ocp(sc, 0xbf90, val | 0x000c);
1854 	rge_patch_phy_mcu(sc, 0);
1855 
1856 	rge_write_phy_ocp(sc, 0xa436, 0x843b);
1857 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1858 	rge_write_phy_ocp(sc, 0xa438, val | 0x2000);
1859 	rge_write_phy_ocp(sc, 0xa436, 0x843d);
1860 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1861 	rge_write_phy_ocp(sc, 0xa438, val | 0x2000);
1862 	RGE_PHY_CLRBIT(sc, 0xb516, 0x007f);
1863 	RGE_PHY_CLRBIT(sc, 0xbf80, 0x0030);
1864 
1865 	rge_write_phy_ocp(sc, 0xa436, 0x8188);
1866 	for (i = 0; i < 11; i++)
1867 		rge_write_phy_ocp(sc, 0xa438, mac_cfg2_a438_value[i]);
1868 
1869 	rge_write_phy_ocp(sc, 0xb87c, 0x8015);
1870 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
1871 	rge_write_phy_ocp(sc, 0xb87e, val | 0x0800);
1872 	rge_write_phy_ocp(sc, 0xb87c, 0x8ffd);
1873 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
1874 	rge_write_phy_ocp(sc, 0xb87e, val | 0);
1875 	rge_write_phy_ocp(sc, 0xb87c, 0x8fff);
1876 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
1877 	rge_write_phy_ocp(sc, 0xb87e, val | 0x7f00);
1878 	rge_write_phy_ocp(sc, 0xb87c, 0x8ffb);
1879 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
1880 	rge_write_phy_ocp(sc, 0xb87e, val | 0x0100);
1881 	rge_write_phy_ocp(sc, 0xb87c, 0x8fe9);
1882 	rge_write_phy_ocp(sc, 0xb87e, 0x0002);
1883 	rge_write_phy_ocp(sc, 0xb87c, 0x8fef);
1884 	rge_write_phy_ocp(sc, 0xb87e, 0x00a5);
1885 	rge_write_phy_ocp(sc, 0xb87c, 0x8ff1);
1886 	rge_write_phy_ocp(sc, 0xb87e, 0x0106);
1887 	rge_write_phy_ocp(sc, 0xb87c, 0x8fe1);
1888 	rge_write_phy_ocp(sc, 0xb87e, 0x0102);
1889 	rge_write_phy_ocp(sc, 0xb87c, 0x8fe3);
1890 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
1891 	rge_write_phy_ocp(sc, 0xb87e, val | 0x0400);
1892 	RGE_PHY_SETBIT(sc, 0xa654, 0x0800);
1893 	RGE_PHY_CLRBIT(sc, 0xa654, 0x0003);
1894 	rge_write_phy_ocp(sc, 0xac3a, 0x5851);
1895 	val = rge_read_phy_ocp(sc, 0xac3c) & ~0xd000;
1896 	rge_write_phy_ocp(sc, 0xac3c, val | 0x2000);
1897 	val = rge_read_phy_ocp(sc, 0xac42) & ~0x0200;
1898 	rge_write_phy_ocp(sc, 0xac42, val | 0x01c0);
1899 	RGE_PHY_CLRBIT(sc, 0xac3e, 0xe000);
1900 	RGE_PHY_CLRBIT(sc, 0xac42, 0x0038);
1901 	val = rge_read_phy_ocp(sc, 0xac42) & ~0x0002;
1902 	rge_write_phy_ocp(sc, 0xac42, val | 0x0005);
1903 	rge_write_phy_ocp(sc, 0xac1a, 0x00db);
1904 	rge_write_phy_ocp(sc, 0xade4, 0x01b5);
1905 	RGE_PHY_CLRBIT(sc, 0xad9c, 0x0c00);
1906 	rge_write_phy_ocp(sc, 0xb87c, 0x814b);
1907 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
1908 	rge_write_phy_ocp(sc, 0xb87e, val | 0x1100);
1909 	rge_write_phy_ocp(sc, 0xb87c, 0x814d);
1910 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
1911 	rge_write_phy_ocp(sc, 0xb87e, val | 0x1100);
1912 	rge_write_phy_ocp(sc, 0xb87c, 0x814f);
1913 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
1914 	rge_write_phy_ocp(sc, 0xb87e, val | 0x0b00);
1915 	rge_write_phy_ocp(sc, 0xb87c, 0x8142);
1916 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
1917 	rge_write_phy_ocp(sc, 0xb87e, val | 0x0100);
1918 	rge_write_phy_ocp(sc, 0xb87c, 0x8144);
1919 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
1920 	rge_write_phy_ocp(sc, 0xb87e, val | 0x0100);
1921 	rge_write_phy_ocp(sc, 0xb87c, 0x8150);
1922 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
1923 	rge_write_phy_ocp(sc, 0xb87e, val | 0x0100);
1924 	rge_write_phy_ocp(sc, 0xb87c, 0x8118);
1925 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
1926 	rge_write_phy_ocp(sc, 0xb87e, val | 0x0700);
1927 	rge_write_phy_ocp(sc, 0xb87c, 0x811a);
1928 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
1929 	rge_write_phy_ocp(sc, 0xb87e, val | 0x0700);
1930 	rge_write_phy_ocp(sc, 0xb87c, 0x811c);
1931 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
1932 	rge_write_phy_ocp(sc, 0xb87e, val | 0x0500);
1933 	rge_write_phy_ocp(sc, 0xb87c, 0x810f);
1934 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
1935 	rge_write_phy_ocp(sc, 0xb87e, val | 0x0100);
1936 	rge_write_phy_ocp(sc, 0xb87c, 0x8111);
1937 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
1938 	rge_write_phy_ocp(sc, 0xb87e, val | 0x0100);
1939 	rge_write_phy_ocp(sc, 0xb87c, 0x811d);
1940 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
1941 	rge_write_phy_ocp(sc, 0xb87e, val | 0x0100);
1942 	RGE_PHY_SETBIT(sc, 0xac36, 0x1000);
1943 	RGE_PHY_CLRBIT(sc, 0xad1c, 0x0100);
1944 	val = rge_read_phy_ocp(sc, 0xade8) & ~0xffc0;
1945 	rge_write_phy_ocp(sc, 0xade8, val | 0x1400);
1946 	rge_write_phy_ocp(sc, 0xb87c, 0x864b);
1947 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
1948 	rge_write_phy_ocp(sc, 0xb87e, val | 0x9d00);
1949 
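	/*
	 * The rest of mac_cfg2_a438_value is written through a second
	 * window (0x8f97); `i' deliberately carries over from the
	 * 11-entry loop above.
	 */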
1950 	rge_write_phy_ocp(sc, 0xa436, 0x8f97);
1951 	for (; i < nitems(mac_cfg2_a438_value); i++)
1952 		rge_write_phy_ocp(sc, 0xa438, mac_cfg2_a438_value[i]);
1953 
1954 	RGE_PHY_SETBIT(sc, 0xad9c, 0x0020);
1955 	rge_write_phy_ocp(sc, 0xb87c, 0x8122);
1956 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
1957 	rge_write_phy_ocp(sc, 0xb87e, val | 0x0c00);
1958 
1959 	rge_write_phy_ocp(sc, 0xb87c, 0x82c8);
1960 	for (i = 0; i < 20; i++)
1961 		rge_write_phy_ocp(sc, 0xb87e, mac_cfg2_b87e_value[i]);
1962 
1963 	rge_write_phy_ocp(sc, 0xb87c, 0x80ef);
1964 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
1965 	rge_write_phy_ocp(sc, 0xb87e, val | 0x0c00);
1966 
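	/*
	 * Likewise, the rest of mac_cfg2_b87e_value continues at
	 * window 0x82a0, with `i' carried over from the loop above.
	 */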
1967 	rge_write_phy_ocp(sc, 0xb87c, 0x82a0);
1968 	for (; i < nitems(mac_cfg2_b87e_value); i++)
1969 		rge_write_phy_ocp(sc, 0xb87e, mac_cfg2_b87e_value[i]);
1970 
1971 	rge_write_phy_ocp(sc, 0xa436, 0x8018);
1972 	RGE_PHY_SETBIT(sc, 0xa438, 0x2000);
1973 	rge_write_phy_ocp(sc, 0xb87c, 0x8fe4);
1974 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
1975 	rge_write_phy_ocp(sc, 0xb87e, val | 0);
1976 	val = rge_read_phy_ocp(sc, 0xb54c) & ~0xffc0;
1977 	rge_write_phy_ocp(sc, 0xb54c, val | 0x3700);
1978 }
1979 
1980 void
1981 rge_phy_config_mac_cfg3(struct rge_softc *sc)
1982 {
1983 	uint16_t val;
1984 	int i;
1985 	static const uint16_t mac_cfg3_a438_value[] =
1986 	    { 0x0043, 0x00a7, 0x00d6, 0x00ec, 0x00f6, 0x00fb, 0x00fd, 0x00ff,
1987 	      0x00bb, 0x0058, 0x0029, 0x0013, 0x0009, 0x0004, 0x0002 };
1988 
1989 	static const uint16_t mac_cfg3_b88e_value[] =
1990 	    { 0xc091, 0x6e12, 0xc092, 0x1214, 0xc094, 0x1516, 0xc096, 0x171b,
1991 	      0xc098, 0x1b1c, 0xc09a, 0x1f1f, 0xc09c, 0x2021, 0xc09e, 0x2224,
1992 	      0xc0a0, 0x2424, 0xc0a2, 0x2424, 0xc0a4, 0x2424, 0xc018, 0x0af2,
1993 	      0xc01a, 0x0d4a, 0xc01c, 0x0f26, 0xc01e, 0x118d, 0xc020, 0x14f3,
1994 	      0xc022, 0x175a, 0xc024, 0x19c0, 0xc026, 0x1c26, 0xc089, 0x6050,
1995 	      0xc08a, 0x5f6e, 0xc08c, 0x6e6e, 0xc08e, 0x6e6e, 0xc090, 0x6e12 };
1996 
1997 	rge_phy_config_mcu(sc, RGE_MAC_CFG3_MCODE_VER);
1998 
1999 	RGE_PHY_SETBIT(sc, 0xad4e, 0x0010);
2000 	val = rge_read_phy_ocp(sc, 0xad16) & ~0x03ff;
2001 	rge_write_phy_ocp(sc, 0xad16, val | 0x03ff);
2002 	val = rge_read_phy_ocp(sc, 0xad32) & ~0x003f;
2003 	rge_write_phy_ocp(sc, 0xad32, val | 0x0006);
2004 	RGE_PHY_CLRBIT(sc, 0xac08, 0x1000);
2005 	RGE_PHY_CLRBIT(sc, 0xac08, 0x0100);
2006 	val = rge_read_phy_ocp(sc, 0xacc0) & ~0x0003;
2007 	rge_write_phy_ocp(sc, 0xacc0, val | 0x0002);
2008 	val = rge_read_phy_ocp(sc, 0xad40) & ~0x00e0;
2009 	rge_write_phy_ocp(sc, 0xad40, val | 0x0040);
2010 	val = rge_read_phy_ocp(sc, 0xad40) & ~0x0007;
2011 	rge_write_phy_ocp(sc, 0xad40, val | 0x0004);
2012 	RGE_PHY_CLRBIT(sc, 0xac14, 0x0080);
2013 	RGE_PHY_CLRBIT(sc, 0xac80, 0x0300);
2014 	val = rge_read_phy_ocp(sc, 0xac5e) & ~0x0007;
2015 	rge_write_phy_ocp(sc, 0xac5e, val | 0x0002);
2016 	rge_write_phy_ocp(sc, 0xad4c, 0x00a8);
2017 	rge_write_phy_ocp(sc, 0xac5c, 0x01ff);
2018 	val = rge_read_phy_ocp(sc, 0xac8a) & ~0x00f0;
2019 	rge_write_phy_ocp(sc, 0xac8a, val | 0x0030);
2020 	rge_write_phy_ocp(sc, 0xb87c, 0x8157);
2021 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
2022 	rge_write_phy_ocp(sc, 0xb87e, val | 0x0500);
2023 	rge_write_phy_ocp(sc, 0xb87c, 0x8159);
2024 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
2025 	rge_write_phy_ocp(sc, 0xb87e, val | 0x0700);
2026 	rge_write_phy_ocp(sc, 0xb87c, 0x80a2);
2027 	rge_write_phy_ocp(sc, 0xb87e, 0x0153);
2028 	rge_write_phy_ocp(sc, 0xb87c, 0x809c);
2029 	rge_write_phy_ocp(sc, 0xb87e, 0x0153);
2030 
2031 	rge_write_phy_ocp(sc, 0xa436, 0x81b3);
2032 	for (i = 0; i < nitems(mac_cfg3_a438_value); i++)
2033 		rge_write_phy_ocp(sc, 0xa438, mac_cfg3_a438_value[i]);
2034 	for (i = 0; i < 26; i++)
2035 		rge_write_phy_ocp(sc, 0xa438, 0);
2036 	rge_write_phy_ocp(sc, 0xa436, 0x8257);
2037 	rge_write_phy_ocp(sc, 0xa438, 0x020f);
2038 	rge_write_phy_ocp(sc, 0xa436, 0x80ea);
2039 	rge_write_phy_ocp(sc, 0xa438, 0x7843);
2040 
2041 	rge_patch_phy_mcu(sc, 1);
2042 	RGE_PHY_CLRBIT(sc, 0xb896, 0x0001);
2043 	RGE_PHY_CLRBIT(sc, 0xb892, 0xff00);
2044 	for (i = 0; i < nitems(mac_cfg3_b88e_value); i += 2) {
2045 		rge_write_phy_ocp(sc, 0xb88e, mac_cfg3_b88e_value[i]);
2046 		rge_write_phy_ocp(sc, 0xb890, mac_cfg3_b88e_value[i + 1]);
2047 	}
2048 	RGE_PHY_SETBIT(sc, 0xb896, 0x0001);
2049 	rge_patch_phy_mcu(sc, 0);
2050 
2051 	RGE_PHY_SETBIT(sc, 0xd068, 0x2000);
2052 	rge_write_phy_ocp(sc, 0xa436, 0x81a2);
2053 	RGE_PHY_SETBIT(sc, 0xa438, 0x0100);
2054 	val = rge_read_phy_ocp(sc, 0xb54c) & ~0xff00;
2055 	rge_write_phy_ocp(sc, 0xb54c, val | 0xdb00);
2056 	RGE_PHY_CLRBIT(sc, 0xa454, 0x0001);
2057 	RGE_PHY_SETBIT(sc, 0xa5d4, 0x0020);
2058 	RGE_PHY_CLRBIT(sc, 0xad4e, 0x0010);
2059 	RGE_PHY_CLRBIT(sc, 0xa86a, 0x0001);
2060 	RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
2061 	RGE_PHY_SETBIT(sc, 0xa424, 0x0008);
2062 }
2063 
2064 void
2065 rge_phy_config_mac_cfg5(struct rge_softc *sc)
2066 {
2067 	uint16_t val;
2068 	int i;
2069 
2070 	rge_phy_config_mcu(sc, RGE_MAC_CFG5_MCODE_VER);
2071 
2072 	RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
2073 	val = rge_read_phy_ocp(sc, 0xac46) & ~0x00f0;
2074 	rge_write_phy_ocp(sc, 0xac46, val | 0x0090);
2075 	val = rge_read_phy_ocp(sc, 0xad30) & ~0x0003;
2076 	rge_write_phy_ocp(sc, 0xad30, val | 0x0001);
2077 	rge_write_phy_ocp(sc, 0xb87c, 0x80f5);
2078 	rge_write_phy_ocp(sc, 0xb87e, 0x760e);
2079 	rge_write_phy_ocp(sc, 0xb87c, 0x8107);
2080 	rge_write_phy_ocp(sc, 0xb87e, 0x360e);
2081 	rge_write_phy_ocp(sc, 0xb87c, 0x8551);
2082 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
2083 	rge_write_phy_ocp(sc, 0xb87e, val | 0x0800);
2084 	val = rge_read_phy_ocp(sc, 0xbf00) & ~0xe000;
2085 	rge_write_phy_ocp(sc, 0xbf00, val | 0xa000);
2086 	val = rge_read_phy_ocp(sc, 0xbf46) & ~0x0f00;
2087 	rge_write_phy_ocp(sc, 0xbf46, val | 0x0300);
2088 	for (i = 0; i < 10; i++) {
2089 		rge_write_phy_ocp(sc, 0xa436, 0x8044 + i * 6);
2090 		rge_write_phy_ocp(sc, 0xa438, 0x2417);
2091 	}
2092 	RGE_PHY_SETBIT(sc, 0xa4ca, 0x0040);
2093 	val = rge_read_phy_ocp(sc, 0xbf84) & ~0xe000;
2094 	rge_write_phy_ocp(sc, 0xbf84, val | 0xa000);
2095 	rge_write_phy_ocp(sc, 0xa436, 0x8170);
2096 	val = rge_read_phy_ocp(sc, 0xa438) & ~0x2700;
2097 	rge_write_phy_ocp(sc, 0xa438, val | 0xd800);
2098 	RGE_PHY_SETBIT(sc, 0xa424, 0x0008);
2099 }
2100 
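/*
 * Load the PHY MCU patch whenever the microcode version in the PHY
 * differs from the driver's.  0xa436/0xa438 serve as the PHY's
 * index/data pair into parameter space (the usual Realtek OCP
 * convention); the new version is recorded at index 0x801e.
 */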
2101 void
2102 rge_phy_config_mcu(struct rge_softc *sc, uint16_t mcode_version)
2103 {
2104 	if (sc->rge_mcodever != mcode_version) {
2105 		int i;
2106 
2107 		rge_patch_phy_mcu(sc, 1);
2108 
2109 		if (sc->rge_type == MAC_CFG3) {
2110 			rge_write_phy_ocp(sc, 0xa436, 0x8024);
2111 			rge_write_phy_ocp(sc, 0xa438, 0x8601);
2112 			rge_write_phy_ocp(sc, 0xa436, 0xb82e);
2113 			rge_write_phy_ocp(sc, 0xa438, 0x0001);
2114 
2115 			RGE_PHY_SETBIT(sc, 0xb820, 0x0080);
2116 
2117 			for (i = 0; i < nitems(rtl8125_mac_cfg3_mcu); i++) {
2118 				rge_write_phy_ocp(sc,
2119 				    rtl8125_mac_cfg3_mcu[i].reg,
2120 				    rtl8125_mac_cfg3_mcu[i].val);
2121 			}
2122 
2123 			RGE_PHY_CLRBIT(sc, 0xb820, 0x0080);
2124 
2125 			rge_write_phy_ocp(sc, 0xa436, 0);
2126 			rge_write_phy_ocp(sc, 0xa438, 0);
2127 			RGE_PHY_CLRBIT(sc, 0xb82e, 0x0001);
2128 			rge_write_phy_ocp(sc, 0xa436, 0x8024);
2129 			rge_write_phy_ocp(sc, 0xa438, 0);
2130 		} else if (sc->rge_type == MAC_CFG5) {
2131 			for (i = 0; i < nitems(rtl8125_mac_cfg5_mcu); i++) {
2132 				rge_write_phy_ocp(sc,
2133 				    rtl8125_mac_cfg5_mcu[i].reg,
2134 				    rtl8125_mac_cfg5_mcu[i].val);
2135 			}
2136 		} else if (sc->rge_type == MAC_CFG2_8126) {
2137 			for (i = 0; i < nitems(rtl8126_mac_cfg2_mcu); i++) {
2138 				rge_write_phy_ocp(sc,
2139 				    rtl8126_mac_cfg2_mcu[i].reg,
2140 				    rtl8126_mac_cfg2_mcu[i].val);
2141 			}
2142 		}
2143 
2144 		rge_patch_phy_mcu(sc, 0);
2145 
2146 		/* Write microcode version. */
2147 		rge_write_phy_ocp(sc, 0xa436, 0x801e);
2148 		rge_write_phy_ocp(sc, 0xa438, mcode_version);
2149 	}
2150 }
2151 
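/*
 * Program the station address into the MAC0/MAC4 ID registers, which
 * are only writable while config writes are unlocked through
 * RGE_EECMD.
 */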
2152 void
2153 rge_set_macaddr(struct rge_softc *sc, const uint8_t *addr)
2154 {
2155 	RGE_SETBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
2156 	RGE_WRITE_4(sc, RGE_MAC0,
2157 	    addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
2158 	RGE_WRITE_4(sc, RGE_MAC4,
2159 	    addr[5] <<  8 | addr[4]);
2160 	RGE_CLRBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
2161 }
2162 
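/*
 * Read the hardware address.  The byte-wise MAC0 read is overridden
 * by the RGE_ADDR0/RGE_ADDR1 registers, which apparently hold the
 * factory-programmed address; the result is written back via
 * rge_set_macaddr() so the ID registers match.
 */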
2163 void
2164 rge_get_macaddr(struct rge_softc *sc, uint8_t *addr)
2165 {
2166 	int i;
2167 
2168 	for (i = 0; i < ETHER_ADDR_LEN; i++)
2169 		addr[i] = RGE_READ_1(sc, RGE_MAC0 + i);
2170 
2171 	*(uint32_t *)&addr[0] = RGE_READ_4(sc, RGE_ADDR0);
2172 	*(uint16_t *)&addr[4] = RGE_READ_2(sc, RGE_ADDR1);
2173 
2174 	rge_set_macaddr(sc, addr);
2175 }
2176 
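/*
 * One-time MAC initialization: disable UPS, halt the MAC MCU by
 * zeroing its breakpoint registers (0xfc28-0xfc48), load the
 * chip-specific breakpoint tables, and set the PCIe
 * uncorrectable-error status bit via CSI.
 */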
2177 void
2178 rge_hw_init(struct rge_softc *sc)
2179 {
2180 	uint16_t reg;
2181 	int i, npages;
2182 
2183 	rge_disable_aspm_clkreq(sc);
2184 	RGE_CLRBIT_1(sc, 0xf1, 0x80);
2185 
2186 	/* Disable UPS. */
2187 	RGE_MAC_CLRBIT(sc, 0xd40a, 0x0010);
2188 
2189 	/* Disable MAC MCU. */
2190 	rge_disable_aspm_clkreq(sc);
2191 	rge_write_mac_ocp(sc, 0xfc48, 0);
2192 	for (reg = 0xfc28; reg < 0xfc48; reg += 2)
2193 		rge_write_mac_ocp(sc, reg, 0);
2194 	DELAY(3000);
2195 	rge_write_mac_ocp(sc, 0xfc26, 0);
2196 
2197 	if (sc->rge_type == MAC_CFG3) {
2198 		for (npages = 0; npages < 3; npages++) {
2199 			rge_switch_mcu_ram_page(sc, npages);
2200 			for (i = 0; i < nitems(rtl8125_mac_bps); i++) {
2201 				if (npages == 0)
2202 					rge_write_mac_ocp(sc,
2203 					    rtl8125_mac_bps[i].reg,
2204 					    rtl8125_mac_bps[i].val);
2205 				else if (npages == 1)
2206 					rge_write_mac_ocp(sc,
2207 					    rtl8125_mac_bps[i].reg, 0);
2208 				else {
2209 					if (rtl8125_mac_bps[i].reg < 0xf9f8)
2210 						rge_write_mac_ocp(sc,
2211 						    rtl8125_mac_bps[i].reg, 0);
2212 				}
2213 			}
2214 			if (npages == 2) {
2215 				rge_write_mac_ocp(sc, 0xf9f8, 0x6486);
2216 				rge_write_mac_ocp(sc, 0xf9fa, 0x0b15);
2217 				rge_write_mac_ocp(sc, 0xf9fc, 0x090e);
2218 				rge_write_mac_ocp(sc, 0xf9fe, 0x1139);
2219 			}
2220 		}
2221 		rge_write_mac_ocp(sc, 0xfc26, 0x8000);
2222 		rge_write_mac_ocp(sc, 0xfc2a, 0x0540);
2223 		rge_write_mac_ocp(sc, 0xfc2e, 0x0a06);
2224 		rge_write_mac_ocp(sc, 0xfc30, 0x0eb8);
2225 		rge_write_mac_ocp(sc, 0xfc32, 0x3a5c);
2226 		rge_write_mac_ocp(sc, 0xfc34, 0x10a8);
2227 		rge_write_mac_ocp(sc, 0xfc40, 0x0d54);
2228 		rge_write_mac_ocp(sc, 0xfc42, 0x0e24);
2229 		rge_write_mac_ocp(sc, 0xfc48, 0x307a);
2230 	} else if (sc->rge_type == MAC_CFG5) {
2231 		rge_switch_mcu_ram_page(sc, 0);
2232 		for (i = 0; i < nitems(rtl8125b_mac_bps); i++) {
2233 			rge_write_mac_ocp(sc, rtl8125b_mac_bps[i].reg,
2234 			    rtl8125b_mac_bps[i].val);
2235 		}
2236 	}
2237 
2238 	/* Disable PHY power saving. */
2239 	if (sc->rge_type == MAC_CFG3)
2240 		rge_disable_phy_ocp_pwrsave(sc);
2241 
2242 	/* Set PCIe uncorrectable error status. */
2243 	rge_write_csi(sc, 0x108,
2244 	    rge_read_csi(sc, 0x108) | 0x00100000);
2245 }
2246 
2247 void
2248 rge_hw_reset(struct rge_softc *sc)
2249 {
2250 	/* Disable interrupts. */
2251 	RGE_WRITE_4(sc, RGE_IMR, 0);
2252 	RGE_WRITE_4(sc, RGE_ISR, RGE_READ_4(sc, RGE_ISR));
2253 
2254 	/* Clear timer interrupts. */
2255 	RGE_WRITE_4(sc, RGE_TIMERINT0, 0);
2256 	RGE_WRITE_4(sc, RGE_TIMERINT1, 0);
2257 	RGE_WRITE_4(sc, RGE_TIMERINT2, 0);
2258 	RGE_WRITE_4(sc, RGE_TIMERINT3, 0);
2259 
2260 	rge_reset(sc);
2261 }
2262 
2263 void
2264 rge_disable_phy_ocp_pwrsave(struct rge_softc *sc)
2265 {
2266 	if (rge_read_phy_ocp(sc, 0xc416) != 0x0500) {
2267 		rge_patch_phy_mcu(sc, 1);
2268 		rge_write_phy_ocp(sc, 0xc416, 0);
2269 		rge_write_phy_ocp(sc, 0xc416, 0x0500);
2270 		rge_patch_phy_mcu(sc, 0);
2271 	}
2272 }
2273 
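/*
 * Enter (set != 0) or leave PHY MCU patch mode by toggling bit
 * 0x0010 of 0xb820, then poll the ready flag (0xb800 bit 0x0040)
 * for up to 100ms (1000 iterations of 100us).
 */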
2274 void
2275 rge_patch_phy_mcu(struct rge_softc *sc, int set)
2276 {
2277 	int i;
2278 
2279 	if (set)
2280 		RGE_PHY_SETBIT(sc, 0xb820, 0x0010);
2281 	else
2282 		RGE_PHY_CLRBIT(sc, 0xb820, 0x0010);
2283 
2284 	for (i = 0; i < 1000; i++) {
2285 		if (set) {
2286 			if ((rge_read_phy_ocp(sc, 0xb800) & 0x0040) != 0)
2287 				break;
2288 		} else {
2289 			if (!(rge_read_phy_ocp(sc, 0xb800) & 0x0040))
2290 				break;
2291 		}
2292 		DELAY(100);
2293 	}
2294 	if (i == 1000)
2295 		printf("%s: timeout waiting to patch phy mcu\n",
2296 		    sc->sc_dev.dv_xname);
2297 }
2298 
2299 void
2300 rge_add_media_types(struct rge_softc *sc)
2301 {
2302 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_10_T, 0, NULL);
2303 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_10_T | IFM_FDX, 0, NULL);
2304 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_100_TX, 0, NULL);
2305 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_100_TX | IFM_FDX, 0, NULL);
2306 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_1000_T, 0, NULL);
2307 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
2308 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_2500_T, 0, NULL);
2309 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_2500_T | IFM_FDX, 0, NULL);
2310 
2311 	if (sc->rge_type == MAC_CFG2_8126) {
2312 		ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_5000_T, 0, NULL);
2313 		ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_5000_T | IFM_FDX,
2314 		    0, NULL);
2315 	}
2316 }
2317 
2318 void
2319 rge_config_imtype(struct rge_softc *sc, int imtype)
2320 {
2321 	switch (imtype) {
2322 	case RGE_IMTYPE_NONE:
2323 		sc->rge_intrs = RGE_INTRS;
2324 		break;
2325 	case RGE_IMTYPE_SIM:
2326 		sc->rge_intrs = RGE_INTRS_TIMER;
2327 		break;
2328 	default:
2329 		panic("%s: unknown imtype %d", sc->sc_dev.dv_xname, imtype);
2330 	}
2331 }
2332 
2333 void
2334 rge_disable_aspm_clkreq(struct rge_softc *sc)
2335 {
2336 	RGE_SETBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
2337 	if (sc->rge_type == MAC_CFG2_8126)
2338 		RGE_CLRBIT_1(sc, RGE_INT_CFG0, 0x08);
2339 	else
2340 		RGE_CLRBIT_1(sc, RGE_CFG2, RGE_CFG2_CLKREQ_EN);
2341 	RGE_CLRBIT_1(sc, RGE_CFG5, RGE_CFG5_PME_STS);
2342 	RGE_CLRBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
2343 }
2344 
2345 void
2346 rge_disable_hw_im(struct rge_softc *sc)
2347 {
2348 	RGE_WRITE_2(sc, RGE_IM, 0);
2349 }
2350 
2351 void
2352 rge_disable_sim_im(struct rge_softc *sc)
2353 {
2354 	RGE_WRITE_4(sc, RGE_TIMERINT0, 0);
2355 	sc->rge_timerintr = 0;
2356 }
2357 
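/*
 * Simulated interrupt moderation: arm timer 0 with a fixed period
 * and kick the counter, so completions are signalled through timer
 * interrupts rather than per-packet ones.
 */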
2358 void
2359 rge_setup_sim_im(struct rge_softc *sc)
2360 {
2361 	RGE_WRITE_4(sc, RGE_TIMERINT0, 0x2600);
2362 	RGE_WRITE_4(sc, RGE_TIMERCNT, 1);
2363 	sc->rge_timerintr = 1;
2364 }
2365 
2366 void
2367 rge_setup_intr(struct rge_softc *sc, int imtype)
2368 {
2369 	rge_config_imtype(sc, imtype);
2370 
2371 	/* Enable interrupts. */
2372 	RGE_WRITE_4(sc, RGE_IMR, sc->rge_intrs);
2373 
2374 	switch (imtype) {
2375 	case RGE_IMTYPE_NONE:
2376 		rge_disable_sim_im(sc);
2377 		rge_disable_hw_im(sc);
2378 		break;
2379 	case RGE_IMTYPE_SIM:
2380 		rge_disable_hw_im(sc);
2381 		rge_setup_sim_im(sc);
2382 		break;
2383 	default:
2384 		panic("%s: unknown imtype %d", sc->sc_dev.dv_xname, imtype);
2385 	}
2386 }
2387 
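/*
 * Select a MAC MCU RAM page through the low two bits of MAC OCP
 * register 0xe446.
 */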
2388 void
2389 rge_switch_mcu_ram_page(struct rge_softc *sc, int page)
2390 {
2391 	uint16_t val;
2392 
2393 	val = rge_read_mac_ocp(sc, 0xe446) & ~0x0003;
2394 	val |= page;
2395 	rge_write_mac_ocp(sc, 0xe446, val);
2396 }
2397 
2398 void
2399 rge_exit_oob(struct rge_softc *sc)
2400 {
2401 	int i;
2402 
2403 	RGE_CLRBIT_4(sc, RGE_RXCFG, RGE_RXCFG_ALLPHYS | RGE_RXCFG_INDIV |
2404 	    RGE_RXCFG_MULTI | RGE_RXCFG_BROAD | RGE_RXCFG_RUNT |
2405 	    RGE_RXCFG_ERRPKT);
2406 
2407 	/* Disable RealWoW. */
2408 	rge_write_mac_ocp(sc, 0xc0bc, 0x00ff);
2409 
2410 	rge_reset(sc);
2411 
2412 	/* Disable OOB. */
2413 	RGE_CLRBIT_1(sc, RGE_MCUCMD, RGE_MCUCMD_IS_OOB);
2414 
2415 	RGE_MAC_CLRBIT(sc, 0xe8de, 0x4000);
2416 
2417 	for (i = 0; i < 10; i++) {
2418 		DELAY(100);
2419 		if (RGE_READ_2(sc, RGE_TWICMD) & 0x0200)
2420 			break;
2421 	}
2422 
2423 	rge_write_mac_ocp(sc, 0xc0aa, 0x07d0);
2424 	rge_write_mac_ocp(sc, 0xc0a6, 0x01b5);
2425 	rge_write_mac_ocp(sc, 0xc01e, 0x5555);
2426 
2427 	for (i = 0; i < 10; i++) {
2428 		DELAY(100);
2429 		if (RGE_READ_2(sc, RGE_TWICMD) & 0x0200)
2430 			break;
2431 	}
2432 
2433 	if (rge_read_mac_ocp(sc, 0xd42c) & 0x0100) {
2434 		for (i = 0; i < RGE_TIMEOUT; i++) {
2435 			if ((rge_read_phy_ocp(sc, 0xa420) & 0x0007) == 2)
2436 				break;
2437 			DELAY(1000);
2438 		}
2439 		RGE_MAC_CLRBIT(sc, 0xd42c, 0x0100);
2440 		if (sc->rge_type != MAC_CFG3)
2441 			RGE_PHY_CLRBIT(sc, 0xa466, 0x0001);
2442 		RGE_PHY_CLRBIT(sc, 0xa468, 0x000a);
2443 	}
2444 }
2445 
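/*
 * CSI (PCIe config space) accessors: data moves through RGE_CSIDR
 * and the address/command through RGE_CSIAR.  Mind the BUSY
 * polarity: a write has finished once BUSY clears, whereas read
 * data is valid once BUSY becomes set.  The EPHY and PHY OCP
 * accessors below poll with the same inverted-read convention.
 */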
2446 void
2447 rge_write_csi(struct rge_softc *sc, uint32_t reg, uint32_t val)
2448 {
2449 	int i;
2450 
2451 	RGE_WRITE_4(sc, RGE_CSIDR, val);
2452 	RGE_WRITE_4(sc, RGE_CSIAR, (reg & RGE_CSIAR_ADDR_MASK) |
2453 	    (RGE_CSIAR_BYTE_EN << RGE_CSIAR_BYTE_EN_SHIFT) | RGE_CSIAR_BUSY);
2454 
2455 	for (i = 0; i < 20000; i++) {
2456 		DELAY(1);
2457 		if (!(RGE_READ_4(sc, RGE_CSIAR) & RGE_CSIAR_BUSY))
2458 			break;
2459 	}
2460 
2461 	DELAY(20);
2462 }
2463 
2464 uint32_t
2465 rge_read_csi(struct rge_softc *sc, uint32_t reg)
2466 {
2467 	int i;
2468 
2469 	RGE_WRITE_4(sc, RGE_CSIAR, (reg & RGE_CSIAR_ADDR_MASK) |
2470 	    (RGE_CSIAR_BYTE_EN << RGE_CSIAR_BYTE_EN_SHIFT));
2471 
2472 	for (i = 0; i < 20000; i++) {
2473 		DELAY(1);
2474 		if (RGE_READ_4(sc, RGE_CSIAR) & RGE_CSIAR_BUSY)
2475 			break;
2476 	}
2477 
2478 	DELAY(20);
2479 
2480 	return (RGE_READ_4(sc, RGE_CSIDR));
2481 }
2482 
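/*
 * MAC OCP accessors: RGE_MACOCP carries the halfword register index
 * (reg >> 1) in its address field and the 16-bit payload in its data
 * field; setting RGE_MACOCP_BUSY marks the cycle as a write.
 */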
2483 void
2484 rge_write_mac_ocp(struct rge_softc *sc, uint16_t reg, uint16_t val)
2485 {
2486 	uint32_t tmp;
2487 
2488 	tmp = (reg >> 1) << RGE_MACOCP_ADDR_SHIFT;
2489 	tmp += val;
2490 	tmp |= RGE_MACOCP_BUSY;
2491 	RGE_WRITE_4(sc, RGE_MACOCP, tmp);
2492 }
2493 
2494 uint16_t
2495 rge_read_mac_ocp(struct rge_softc *sc, uint16_t reg)
2496 {
2497 	uint32_t val;
2498 
2499 	val = (reg >> 1) << RGE_MACOCP_ADDR_SHIFT;
2500 	RGE_WRITE_4(sc, RGE_MACOCP, val);
2501 
2502 	return (RGE_READ_4(sc, RGE_MACOCP) & RGE_MACOCP_DATA_MASK);
2503 }
2504 
2505 void
2506 rge_write_ephy(struct rge_softc *sc, uint16_t reg, uint16_t val)
2507 {
2508 	uint32_t tmp;
2509 	int i;
2510 
2511 	tmp = (reg & RGE_EPHYAR_ADDR_MASK) << RGE_EPHYAR_ADDR_SHIFT;
2512 	tmp |= RGE_EPHYAR_BUSY | (val & RGE_EPHYAR_DATA_MASK);
2513 	RGE_WRITE_4(sc, RGE_EPHYAR, tmp);
2514 
2515 	for (i = 0; i < 10; i++) {
2516 		DELAY(100);
2517 		if (!(RGE_READ_4(sc, RGE_EPHYAR) & RGE_EPHYAR_BUSY))
2518 			break;
2519 	}
2520 
2521 	DELAY(20);
2522 }
2523 
2524 uint16_t
2525 rge_read_ephy(struct rge_softc *sc, uint16_t reg)
2526 {
2527 	uint32_t val;
2528 	int i;
2529 
2530 	val = (reg & RGE_EPHYAR_ADDR_MASK) << RGE_EPHYAR_ADDR_SHIFT;
2531 	RGE_WRITE_4(sc, RGE_EPHYAR, val);
2532 
2533 	for (i = 0; i < 10; i++) {
2534 		DELAY(100);
2535 		val = RGE_READ_4(sc, RGE_EPHYAR);
2536 		if (val & RGE_EPHYAR_BUSY)
2537 			break;
2538 	}
2539 
2540 	DELAY(20);
2541 
2542 	return (val & RGE_EPHYAR_DATA_MASK);
2543 }
2544 
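/*
 * Translate a conventional (addr, reg) GPHY access into the flat PHY
 * OCP space.  With no explicit address, registers are grouped eight
 * to a 16-byte OCP page starting at RGE_PHYBASE, i.e. the OCP
 * address works out to ((RGE_PHYBASE + reg / 8) << 4) + 2 * (reg % 8).
 */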
2545 void
2546 rge_write_phy(struct rge_softc *sc, uint16_t addr, uint16_t reg, uint16_t val)
2547 {
2548 	uint16_t off, phyaddr;
2549 
2550 	phyaddr = addr ? addr : RGE_PHYBASE + (reg / 8);
2551 	phyaddr <<= 4;
2552 
2553 	off = addr ? reg : 0x10 + (reg % 8);
2554 
2555 	phyaddr += (off - 16) << 1;
2556 
2557 	rge_write_phy_ocp(sc, phyaddr, val);
2558 }
2559 
2560 uint16_t
2561 rge_read_phy(struct rge_softc *sc, uint16_t addr, uint16_t reg)
2562 {
2563 	uint16_t off, phyaddr;
2564 
2565 	phyaddr = addr ? addr : RGE_PHYBASE + (reg / 8);
2566 	phyaddr <<= 4;
2567 
2568 	off = addr ? reg : 0x10 + (reg % 8);
2569 
2570 	phyaddr += (off - 16) << 1;
2571 
2572 	return (rge_read_phy_ocp(sc, phyaddr));
2573 }
2574 
2575 void
2576 rge_write_phy_ocp(struct rge_softc *sc, uint16_t reg, uint16_t val)
2577 {
2578 	uint32_t tmp;
2579 	int i;
2580 
2581 	tmp = (reg >> 1) << RGE_PHYOCP_ADDR_SHIFT;
2582 	tmp |= RGE_PHYOCP_BUSY | val;
2583 	RGE_WRITE_4(sc, RGE_PHYOCP, tmp);
2584 
2585 	for (i = 0; i < RGE_TIMEOUT; i++) {
2586 		DELAY(1);
2587 		if (!(RGE_READ_4(sc, RGE_PHYOCP) & RGE_PHYOCP_BUSY))
2588 			break;
2589 	}
2590 }
2591 
2592 uint16_t
2593 rge_read_phy_ocp(struct rge_softc *sc, uint16_t reg)
2594 {
2595 	uint32_t val;
2596 	int i;
2597 
2598 	val = (reg >> 1) << RGE_PHYOCP_ADDR_SHIFT;
2599 	RGE_WRITE_4(sc, RGE_PHYOCP, val);
2600 
2601 	for (i = 0; i < RGE_TIMEOUT; i++) {
2602 		DELAY(1);
2603 		val = RGE_READ_4(sc, RGE_PHYOCP);
2604 		if (val & RGE_PHYOCP_BUSY)
2605 			break;
2606 	}
2607 
2608 	return (val & RGE_PHYOCP_DATA_MASK);
2609 }
2610 
2611 int
2612 rge_get_link_status(struct rge_softc *sc)
2613 {
2614 	return ((RGE_READ_2(sc, RGE_PHYSTAT) & RGE_PHYSTAT_LINK) ? 1 : 0);
2615 }
2616 
2617 void
2618 rge_txstart(void *arg)
2619 {
2620 	struct rge_softc *sc = arg;
2621 
2622 	RGE_WRITE_2(sc, RGE_TXSTART, RGE_TXSTART_START);
2623 }
2624 
2625 void
2626 rge_tick(void *arg)
2627 {
2628 	struct rge_softc *sc = arg;
2629 	int s;
2630 
2631 	s = splnet();
2632 	rge_link_state(sc);
2633 	splx(s);
2634 
2635 	timeout_add_sec(&sc->sc_timeout, 1);
2636 }
2637 
2638 void
2639 rge_link_state(struct rge_softc *sc)
2640 {
2641 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
2642 	int link = LINK_STATE_DOWN;
2643 
2644 	if (rge_get_link_status(sc))
2645 		link = LINK_STATE_UP;
2646 
2647 	if (ifp->if_link_state != link) {
2648 		ifp->if_link_state = link;
2649 		if_link_state_change(ifp);
2650 	}
2651 }
2652 
2653 #ifndef SMALL_KERNEL
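/*
 * Arm or disarm wake-on-LAN: bail out if power management is
 * unavailable, rebuild the RX filter, toggle the WoL enable in MAC
 * OCP register 0xc0b6, and leave only LANWAKE set among the
 * CFG3/CFG5 wakeup sources when enabling.
 */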
2654 int
2655 rge_wol(struct ifnet *ifp, int enable)
2656 {
2657 	struct rge_softc *sc = ifp->if_softc;
2658 
2659 	if (enable) {
2660 		if (!(RGE_READ_1(sc, RGE_CFG1) & RGE_CFG1_PM_EN)) {
2661 			printf("%s: power management is disabled, "
2662 			    "cannot do WOL\n", sc->sc_dev.dv_xname);
2663 			return (ENOTSUP);
2664 		}
2665 
2666 	}
2667 
2668 	rge_iff(sc);
2669 
2670 	if (enable)
2671 		RGE_MAC_SETBIT(sc, 0xc0b6, 0x0001);
2672 	else
2673 		RGE_MAC_CLRBIT(sc, 0xc0b6, 0x0001);
2674 
2675 	RGE_SETBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
2676 	RGE_CLRBIT_1(sc, RGE_CFG5, RGE_CFG5_WOL_LANWAKE | RGE_CFG5_WOL_UCAST |
2677 	    RGE_CFG5_WOL_MCAST | RGE_CFG5_WOL_BCAST);
2678 	RGE_CLRBIT_1(sc, RGE_CFG3, RGE_CFG3_WOL_LINK | RGE_CFG3_WOL_MAGIC);
2679 	if (enable)
2680 		RGE_SETBIT_1(sc, RGE_CFG5, RGE_CFG5_WOL_LANWAKE);
2681 	RGE_CLRBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
2682 
2683 	return (0);
2684 }
2685 
2686 void
2687 rge_wol_power(struct rge_softc *sc)
2688 {
2689 	/* Disable RXDV gate. */
2690 	RGE_CLRBIT_1(sc, RGE_PPSW, 0x08);
2691 	DELAY(2000);
2692 
2693 	RGE_SETBIT_1(sc, RGE_CFG1, RGE_CFG1_PM_EN);
2694 	RGE_SETBIT_1(sc, RGE_CFG2, RGE_CFG2_PMSTS_EN);
2695 }
2696 #endif
2697 
2698 #if NKSTAT > 0
2699 
2700 #define RGE_DTCCR_CMD		(1U << 3)
2701 #define RGE_DTCCR_LO		0x10
2702 #define RGE_DTCCR_HI		0x14
2703 
2704 struct rge_kstats {
2705 	struct kstat_kv		tx_ok;
2706 	struct kstat_kv		rx_ok;
2707 	struct kstat_kv		tx_er;
2708 	struct kstat_kv		rx_er;
2709 	struct kstat_kv		miss_pkt;
2710 	struct kstat_kv		fae;
2711 	struct kstat_kv		tx_1col;
2712 	struct kstat_kv		tx_mcol;
2713 	struct kstat_kv		rx_ok_phy;
2714 	struct kstat_kv		rx_ok_brd;
2715 	struct kstat_kv		rx_ok_mul;
2716 	struct kstat_kv		tx_abt;
2717 	struct kstat_kv		tx_undrn;
2718 };
2719 
2720 static const struct rge_kstats rge_kstats_tpl = {
2721 	.tx_ok =	KSTAT_KV_UNIT_INITIALIZER("TxOk",
2722 			    KSTAT_KV_T_COUNTER64, KSTAT_KV_U_PACKETS),
2723 	.rx_ok =	KSTAT_KV_UNIT_INITIALIZER("RxOk",
2724 			    KSTAT_KV_T_COUNTER64, KSTAT_KV_U_PACKETS),
2725 	.tx_er =	KSTAT_KV_UNIT_INITIALIZER("TxEr",
2726 			    KSTAT_KV_T_COUNTER64, KSTAT_KV_U_PACKETS),
2727 	.rx_er =	KSTAT_KV_UNIT_INITIALIZER("RxEr",
2728 			    KSTAT_KV_T_COUNTER32, KSTAT_KV_U_PACKETS),
2729 	.miss_pkt =	KSTAT_KV_UNIT_INITIALIZER("MissPkt",
2730 			    KSTAT_KV_T_COUNTER16, KSTAT_KV_U_PACKETS),
2731 	.fae =		KSTAT_KV_UNIT_INITIALIZER("FAE",
2732 			    KSTAT_KV_T_COUNTER16, KSTAT_KV_U_PACKETS),
2733 	.tx_1col =	KSTAT_KV_UNIT_INITIALIZER("Tx1Col",
2734 			    KSTAT_KV_T_COUNTER32, KSTAT_KV_U_PACKETS),
2735 	.tx_mcol =	KSTAT_KV_UNIT_INITIALIZER("TxMCol",
2736 			    KSTAT_KV_T_COUNTER32, KSTAT_KV_U_PACKETS),
2737 	.rx_ok_phy =	KSTAT_KV_UNIT_INITIALIZER("RxOkPhy",
2738 			    KSTAT_KV_T_COUNTER64, KSTAT_KV_U_PACKETS),
2739 	.rx_ok_brd =	KSTAT_KV_UNIT_INITIALIZER("RxOkBrd",
2740 			    KSTAT_KV_T_COUNTER64, KSTAT_KV_U_PACKETS),
2741 	.rx_ok_mul =	KSTAT_KV_UNIT_INITIALIZER("RxOkMul",
2742 			    KSTAT_KV_T_COUNTER32, KSTAT_KV_U_PACKETS),
2743 	.tx_abt =	KSTAT_KV_UNIT_INITIALIZER("TxAbt",
2744 			    KSTAT_KV_T_COUNTER16, KSTAT_KV_U_PACKETS),
2745 	.tx_undrn =	KSTAT_KV_UNIT_INITIALIZER("TxUndrn",
2746 			    KSTAT_KV_T_COUNTER16, KSTAT_KV_U_PACKETS),
2747 };
2748 
2749 struct rge_kstat_softc {
2750 	struct rge_stats	*rge_ks_sc_stats;
2751 
2752 	bus_dmamap_t		 rge_ks_sc_map;
2753 	bus_dma_segment_t	 rge_ks_sc_seg;
2754 	int			 rge_ks_sc_nsegs;
2755 
2756 	struct rwlock		 rge_ks_sc_rwl;
2757 };
2758 
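/*
 * Trigger the chip's dump-tally DMA: write the physical address of
 * the counter buffer, with RGE_DTCCR_CMD set, to the DTCCR register
 * pair, then poll until the hardware clears the command bit (up to
 * roughly 10ms) before reading the freshly synced counters.
 */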
2759 static int
2760 rge_kstat_read(struct kstat *ks)
2761 {
2762 	struct rge_softc *sc = ks->ks_softc;
2763 	struct rge_kstat_softc *rge_ks_sc = ks->ks_ptr;
2764 	bus_dmamap_t map;
2765 	uint64_t cmd;
2766 	uint32_t reg;
2767 	uint8_t command;
2768 	int tmo;
2769 
2770 	command = RGE_READ_1(sc, RGE_CMD);
2771 	if (!ISSET(command, RGE_CMD_RXENB) || command == 0xff)
2772 		return (ENETDOWN);
2773 
2774 	map = rge_ks_sc->rge_ks_sc_map;
2775 	cmd = map->dm_segs[0].ds_addr | RGE_DTCCR_CMD;
2776 
2777 	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
2778 	    BUS_DMASYNC_PREREAD);
2779 
2780 	RGE_WRITE_4(sc, RGE_DTCCR_HI, cmd >> 32);
2781 	bus_space_barrier(sc->rge_btag, sc->rge_bhandle, RGE_DTCCR_HI, 8,
2782 	    BUS_SPACE_BARRIER_WRITE);
2783 	RGE_WRITE_4(sc, RGE_DTCCR_LO, cmd);
2784 	bus_space_barrier(sc->rge_btag, sc->rge_bhandle, RGE_DTCCR_LO, 4,
2785 	    BUS_SPACE_BARRIER_READ|BUS_SPACE_BARRIER_WRITE);
2786 
2787 	tmo = 1000;
2788 	do {
2789 		reg = RGE_READ_4(sc, RGE_DTCCR_LO);
2790 		if (!ISSET(reg, RGE_DTCCR_CMD))
2791 			break;
2792 
2793 		delay(10);
2794 		bus_space_barrier(sc->rge_btag, sc->rge_bhandle,
2795 		    RGE_DTCCR_LO, 4, BUS_SPACE_BARRIER_READ);
2796 	} while (--tmo);
2797 
2798 	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
2799 	    BUS_DMASYNC_POSTREAD);
2800 
2801 	if (ISSET(reg, RGE_DTCCR_CMD))
2802 		return (EIO);
2803 
2804 	nanouptime(&ks->ks_updated);
2805 
2806 	return (0);
2807 }
2808 
2809 static int
2810 rge_kstat_copy(struct kstat *ks, void *dst)
2811 {
2812 	struct rge_kstat_softc *rge_ks_sc = ks->ks_ptr;
2813 	struct rge_stats *rs = rge_ks_sc->rge_ks_sc_stats;
2814 	struct rge_kstats *kvs = dst;
2815 
2816 	*kvs = rge_kstats_tpl;
2817 	kstat_kv_u64(&kvs->tx_ok) = lemtoh64(&rs->rge_tx_ok);
2818 	kstat_kv_u64(&kvs->rx_ok) = lemtoh64(&rs->rge_rx_ok);
2819 	kstat_kv_u64(&kvs->tx_er) = lemtoh64(&rs->rge_tx_er);
2820 	kstat_kv_u32(&kvs->rx_er) = lemtoh32(&rs->rge_rx_er);
2821 	kstat_kv_u16(&kvs->miss_pkt) = lemtoh16(&rs->rge_miss_pkt);
2822 	kstat_kv_u16(&kvs->fae) = lemtoh16(&rs->rge_fae);
2823 	kstat_kv_u32(&kvs->tx_1col) = lemtoh32(&rs->rge_tx_1col);
2824 	kstat_kv_u32(&kvs->tx_mcol) = lemtoh32(&rs->rge_tx_mcol);
2825 	kstat_kv_u64(&kvs->rx_ok_phy) = lemtoh64(&rs->rge_rx_ok_phy);
2826 	kstat_kv_u64(&kvs->rx_ok_brd) = lemtoh64(&rs->rge_rx_ok_brd);
2827 	kstat_kv_u32(&kvs->rx_ok_mul) = lemtoh32(&rs->rge_rx_ok_mul);
2828 	kstat_kv_u16(&kvs->tx_abt) = lemtoh16(&rs->rge_tx_abt);
2829 	kstat_kv_u16(&kvs->tx_undrn) = lemtoh16(&rs->rge_tx_undrn);
2830 
2831 	return (0);
2832 }
2833 
2834 void
2835 rge_kstat_attach(struct rge_softc *sc)
2836 {
2837 	struct rge_kstat_softc *rge_ks_sc;
2838 	struct kstat *ks;
2839 
2840 	rge_ks_sc = malloc(sizeof(*rge_ks_sc), M_DEVBUF, M_NOWAIT);
2841 	if (rge_ks_sc == NULL) {
2842 		printf("%s: cannot allocate kstat softc\n",
2843 		    sc->sc_dev.dv_xname);
2844 		return;
2845 	}
2846 
2847 	if (bus_dmamap_create(sc->sc_dmat,
2848 	    sizeof(struct rge_stats), 1, sizeof(struct rge_stats), 0,
2849 	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT,
2850 	    &rge_ks_sc->rge_ks_sc_map) != 0) {
2851 		printf("%s: cannot create counter dma memory map\n",
2852 		    sc->sc_dev.dv_xname);
2853 		goto free;
2854 	}
2855 
2856 	if (bus_dmamem_alloc(sc->sc_dmat,
2857 	    sizeof(struct rge_stats), RGE_STATS_ALIGNMENT, 0,
2858 	    &rge_ks_sc->rge_ks_sc_seg, 1, &rge_ks_sc->rge_ks_sc_nsegs,
2859 	    BUS_DMA_NOWAIT | BUS_DMA_ZERO) != 0) {
2860 		printf("%s: cannot allocate counter dma memory\n",
2861 		    sc->sc_dev.dv_xname);
2862 		goto destroy;
2863 	}
2864 
2865 	if (bus_dmamem_map(sc->sc_dmat,
2866 	    &rge_ks_sc->rge_ks_sc_seg, rge_ks_sc->rge_ks_sc_nsegs,
2867 	    sizeof(struct rge_stats), (caddr_t *)&rge_ks_sc->rge_ks_sc_stats,
2868 	    BUS_DMA_NOWAIT) != 0) {
2869 		printf("%s: cannot map counter dma memory\n",
2870 		    sc->sc_dev.dv_xname);
2871 		goto freedma;
2872 	}
2873 
2874 	if (bus_dmamap_load(sc->sc_dmat, rge_ks_sc->rge_ks_sc_map,
2875 	    (caddr_t)rge_ks_sc->rge_ks_sc_stats, sizeof(struct rge_stats),
2876 	    NULL, BUS_DMA_NOWAIT) != 0) {
2877 		printf("%s: cannot load counter dma memory\n",
2878 		    sc->sc_dev.dv_xname);
2879 		goto unmap;
2880 	}
2881 
2882 	ks = kstat_create(sc->sc_dev.dv_xname, 0, "re-stats", 0,
2883 	    KSTAT_T_KV, 0);
2884 	if (ks == NULL) {
2885 		printf("%s: cannot create re-stats kstat\n",
2886 		    sc->sc_dev.dv_xname);
2887 		goto unload;
2888 	}
2889 
2890 	ks->ks_datalen = sizeof(rge_kstats_tpl);
2891 
2892 	rw_init(&rge_ks_sc->rge_ks_sc_rwl, "rgestats");
2893 	kstat_set_wlock(ks, &rge_ks_sc->rge_ks_sc_rwl);
2894 	ks->ks_softc = sc;
2895 	ks->ks_ptr = rge_ks_sc;
2896 	ks->ks_read = rge_kstat_read;
2897 	ks->ks_copy = rge_kstat_copy;
2898 
2899 	kstat_install(ks);
2900 
2901 	sc->sc_kstat = ks;
2902 
2903 	return;
2904 
2905 unload:
2906 	bus_dmamap_unload(sc->sc_dmat, rge_ks_sc->rge_ks_sc_map);
2907 unmap:
2908 	bus_dmamem_unmap(sc->sc_dmat,
2909 	    (caddr_t)rge_ks_sc->rge_ks_sc_stats, sizeof(struct rge_stats));
2910 freedma:
2911 	bus_dmamem_free(sc->sc_dmat, &rge_ks_sc->rge_ks_sc_seg, 1);
2912 destroy:
2913 	bus_dmamap_destroy(sc->sc_dmat, rge_ks_sc->rge_ks_sc_map);
2914 free:
2915 	free(rge_ks_sc, M_DEVBUF, sizeof(*rge_ks_sc));
2916 }
2917 #endif /* NKSTAT > 0 */
2918