1 /*	$OpenBSD: if_rge.c,v 1.26 2024/05/24 06:02:56 jsg Exp $	*/
2 
3 /*
4  * Copyright (c) 2019, 2020, 2023 Kevin Lo <kevlo@openbsd.org>
5  *
6  * Permission to use, copy, modify, and distribute this software for any
7  * purpose with or without fee is hereby granted, provided that the above
8  * copyright notice and this permission notice appear in all copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include "bpfilter.h"
20 #include "vlan.h"
21 #include "kstat.h"
22 
23 #include <sys/param.h>
24 #include <sys/systm.h>
25 #include <sys/sockio.h>
26 #include <sys/mbuf.h>
27 #include <sys/malloc.h>
28 #include <sys/device.h>
29 #include <sys/endian.h>
30 
31 #include <net/if.h>
32 #include <net/if_media.h>
33 
34 #include <netinet/in.h>
35 #include <netinet/if_ether.h>
36 
37 #if NBPFILTER > 0
38 #include <net/bpf.h>
39 #endif
40 
41 #if NKSTAT > 0
42 #include <sys/kstat.h>
43 #endif
44 
45 #include <machine/bus.h>
46 #include <machine/intr.h>
47 
48 #include <dev/mii/mii.h>
49 
50 #include <dev/pci/pcivar.h>
51 #include <dev/pci/pcireg.h>
52 #include <dev/pci/pcidevs.h>
53 
54 #include <dev/pci/if_rgereg.h>
55 
56 #ifdef RGE_DEBUG
57 #define DPRINTF(x)	do { if (rge_debug > 0) printf x; } while (0)
58 int rge_debug = 0;
59 #else
60 #define DPRINTF(x)
61 #endif
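
/*
 * Example use of the debug macro above (illustrative only).  The double
 * parentheses are required because DPRINTF() passes its whole argument
 * list to printf(), e.g.:
 *
 *	DPRINTF(("%s: unknown hwrev 0x%08x\n", sc->sc_dev.dv_xname, hwrev));
 */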
62 
63 int		rge_match(struct device *, void *, void *);
64 void		rge_attach(struct device *, struct device *, void *);
65 int		rge_activate(struct device *, int);
66 int		rge_intr(void *);
67 int		rge_encap(struct rge_queues *, struct mbuf *, int);
68 int		rge_ioctl(struct ifnet *, u_long, caddr_t);
69 void		rge_start(struct ifqueue *);
70 void		rge_watchdog(struct ifnet *);
71 void		rge_init(struct ifnet *);
72 void		rge_stop(struct ifnet *);
73 int		rge_ifmedia_upd(struct ifnet *);
74 void		rge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
75 int		rge_allocmem(struct rge_softc *);
76 int		rge_newbuf(struct rge_queues *);
77 void		rge_discard_rxbuf(struct rge_queues *, int);
78 void		rge_rx_list_init(struct rge_queues *);
79 void		rge_tx_list_init(struct rge_queues *);
80 void		rge_fill_rx_ring(struct rge_queues *);
81 int		rge_rxeof(struct rge_queues *);
82 int		rge_txeof(struct rge_queues *);
83 void		rge_reset(struct rge_softc *);
84 void		rge_iff(struct rge_softc *);
85 void		rge_chipinit(struct rge_softc *);
86 void		rge_set_phy_power(struct rge_softc *, int);
87 void		rge_ephy_config(struct rge_softc *);
88 void		rge_ephy_config_mac_cfg3(struct rge_softc *);
89 void		rge_ephy_config_mac_cfg5(struct rge_softc *);
90 int		rge_phy_config(struct rge_softc *);
91 void		rge_phy_config_mac_cfg3(struct rge_softc *);
92 void		rge_phy_config_mac_cfg5(struct rge_softc *);
93 void		rge_phy_config_mcu(struct rge_softc *, uint16_t);
94 void		rge_set_macaddr(struct rge_softc *, const uint8_t *);
95 void		rge_get_macaddr(struct rge_softc *, uint8_t *);
96 void		rge_hw_init(struct rge_softc *);
97 void		rge_hw_reset(struct rge_softc *);
98 void		rge_disable_phy_ocp_pwrsave(struct rge_softc *);
99 void		rge_patch_phy_mcu(struct rge_softc *, int);
100 void		rge_add_media_types(struct rge_softc *);
101 void		rge_config_imtype(struct rge_softc *, int);
102 void		rge_disable_aspm_clkreq(struct rge_softc *);
103 void		rge_disable_hw_im(struct rge_softc *);
104 void		rge_disable_sim_im(struct rge_softc *);
105 void		rge_setup_sim_im(struct rge_softc *);
106 void		rge_setup_intr(struct rge_softc *, int);
107 void		rge_switch_mcu_ram_page(struct rge_softc *, int);
108 void		rge_exit_oob(struct rge_softc *);
109 void		rge_write_csi(struct rge_softc *, uint32_t, uint32_t);
110 uint32_t	rge_read_csi(struct rge_softc *, uint32_t);
111 void		rge_write_mac_ocp(struct rge_softc *, uint16_t, uint16_t);
112 uint16_t	rge_read_mac_ocp(struct rge_softc *, uint16_t);
113 void		rge_write_ephy(struct rge_softc *, uint16_t, uint16_t);
114 uint16_t	rge_read_ephy(struct rge_softc *, uint16_t);
115 void		rge_write_phy(struct rge_softc *, uint16_t, uint16_t, uint16_t);
116 uint16_t	rge_read_phy(struct rge_softc *, uint16_t, uint16_t);
117 void		rge_write_phy_ocp(struct rge_softc *, uint16_t, uint16_t);
118 uint16_t	rge_read_phy_ocp(struct rge_softc *, uint16_t);
119 int		rge_get_link_status(struct rge_softc *);
120 void		rge_txstart(void *);
121 void		rge_tick(void *);
122 void		rge_link_state(struct rge_softc *);
123 #ifndef SMALL_KERNEL
124 int		rge_wol(struct ifnet *, int);
125 void		rge_wol_power(struct rge_softc *);
126 #endif
127 
128 #if NKSTAT > 0
129 void		rge_kstat_attach(struct rge_softc *);
130 #endif
131 
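/*
 * PHY MCU patch tables for the supported MAC revisions, expanded from the
 * RTL8125_MAC_CFG3_MCU and RTL8125_MAC_CFG5_MCU macros.  Each entry is a
 * PHY OCP register/value pair written out by rge_phy_config_mcu().
 */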
132 static const struct {
133 	uint16_t reg;
134 	uint16_t val;
135 }  rtl8125_mac_cfg3_mcu[] = {
136 	RTL8125_MAC_CFG3_MCU
137 }, rtl8125_mac_cfg5_mcu[] = {
138 	RTL8125_MAC_CFG5_MCU
139 };
140 
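/*
 * Standard autoconf(9) glue: softc size, match and attach entry points,
 * no detach handler, and an activate hook (rge_activate) for power state
 * changes such as DVACT_POWERDOWN.
 */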
141 const struct cfattach rge_ca = {
142 	sizeof(struct rge_softc), rge_match, rge_attach, NULL, rge_activate
143 };
144 
145 struct cfdriver rge_cd = {
146 	NULL, "rge", DV_IFNET
147 };
148 
149 const struct pci_matchid rge_devices[] = {
150 	{ PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_E3000 },
151 	{ PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_RTL8125 }
152 };
153 
154 int
155 rge_match(struct device *parent, void *match, void *aux)
156 {
157 	return (pci_matchbyid((struct pci_attach_args *)aux, rge_devices,
158 	    nitems(rge_devices)));
159 }
160 
161 void
162 rge_attach(struct device *parent, struct device *self, void *aux)
163 {
164 	struct rge_softc *sc = (struct rge_softc *)self;
165 	struct pci_attach_args *pa = aux;
166 	pci_chipset_tag_t pc = pa->pa_pc;
167 	pci_intr_handle_t ih;
168 	const char *intrstr = NULL;
169 	struct ifnet *ifp;
170 	struct rge_queues *q;
171 	pcireg_t reg;
172 	uint32_t hwrev;
173 	uint8_t eaddr[ETHER_ADDR_LEN];
174 	int offset;
175 
176 	pci_set_powerstate(pa->pa_pc, pa->pa_tag, PCI_PMCSR_STATE_D0);
177 
178 	/*
179 	 * Map control/status registers.
180 	 */
181 	if (pci_mapreg_map(pa, RGE_PCI_BAR2, PCI_MAPREG_TYPE_MEM |
182 	    PCI_MAPREG_MEM_TYPE_64BIT, 0, &sc->rge_btag, &sc->rge_bhandle,
183 	    NULL, &sc->rge_bsize, 0)) {
184 		if (pci_mapreg_map(pa, RGE_PCI_BAR1, PCI_MAPREG_TYPE_MEM |
185 		    PCI_MAPREG_MEM_TYPE_32BIT, 0, &sc->rge_btag,
186 		    &sc->rge_bhandle, NULL, &sc->rge_bsize, 0)) {
187 			if (pci_mapreg_map(pa, RGE_PCI_BAR0, PCI_MAPREG_TYPE_IO,
188 			    0, &sc->rge_btag, &sc->rge_bhandle, NULL,
189 			    &sc->rge_bsize, 0)) {
190 				printf(": can't map mem or i/o space\n");
191 				return;
192 			}
193 		}
194 	}
195 
196 	q = malloc(sizeof(struct rge_queues), M_DEVBUF, M_NOWAIT | M_ZERO);
197 	if (q == NULL) {
198 		printf(": unable to allocate queue memory\n");
199 		return;
200 	}
201 	q->q_sc = sc;
202 	q->q_index = 0;
203 
204 	sc->sc_queues = q;
205 	sc->sc_nqueues = 1;
206 
207 	/*
208 	 * Allocate interrupt.
209 	 */
210 	if (pci_intr_map_msix(pa, 0, &ih) == 0 ||
211 	    pci_intr_map_msi(pa, &ih) == 0)
212 		sc->rge_flags |= RGE_FLAG_MSI;
213 	else if (pci_intr_map(pa, &ih) != 0) {
214 		printf(": couldn't map interrupt\n");
215 		return;
216 	}
217 	intrstr = pci_intr_string(pc, ih);
218 	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET | IPL_MPSAFE, rge_intr,
219 	    sc, sc->sc_dev.dv_xname);
220 	if (sc->sc_ih == NULL) {
221 		printf(": couldn't establish interrupt");
222 		if (intrstr != NULL)
223 			printf(" at %s", intrstr);
224 		printf("\n");
225 		return;
226 	}
227 	printf(": %s", intrstr);
228 
229 	sc->sc_dmat = pa->pa_dmat;
230 	sc->sc_pc = pa->pa_pc;
231 	sc->sc_tag = pa->pa_tag;
232 
233 	/* Determine hardware revision. */
234 	hwrev = RGE_READ_4(sc, RGE_TXCFG) & RGE_TXCFG_HWREV;
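	/*
	 * MAC_CFG3 appears to cover the original RTL8125 and MAC_CFG5 the
	 * RTL8125B; the latter association follows from the RTL8125B-specific
	 * interrupt configuration done in rge_init().
	 */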
235 	switch (hwrev) {
236 	case 0x60900000:
237 		sc->rge_type = MAC_CFG3;
238 		break;
239 	case 0x64100000:
240 		sc->rge_type = MAC_CFG5;
241 		break;
242 	default:
243 		printf(": unknown version 0x%08x\n", hwrev);
244 		return;
245 	}
246 
247 	rge_config_imtype(sc, RGE_IMTYPE_SIM);
248 
249 	/*
250 	 * PCI Express check.
251 	 */
252 	if (pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_PCIEXPRESS,
253 	    &offset, NULL)) {
254 		/* Disable PCIe ASPM and ECPM. */
255 		reg = pci_conf_read(pa->pa_pc, pa->pa_tag,
256 		    offset + PCI_PCIE_LCSR);
257 		reg &= ~(PCI_PCIE_LCSR_ASPM_L0S | PCI_PCIE_LCSR_ASPM_L1 |
258 		    PCI_PCIE_LCSR_ECPM);
259 		pci_conf_write(pa->pa_pc, pa->pa_tag, offset + PCI_PCIE_LCSR,
260 		    reg);
261 	}
262 
263 	rge_chipinit(sc);
264 
265 	rge_get_macaddr(sc, eaddr);
266 	printf(", address %s\n", ether_sprintf(eaddr));
267 
268 	memcpy(sc->sc_arpcom.ac_enaddr, eaddr, ETHER_ADDR_LEN);
269 
270 	if (rge_allocmem(sc))
271 		return;
272 
273 	ifp = &sc->sc_arpcom.ac_if;
274 	ifp->if_softc = sc;
275 	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);
276 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
277 	ifp->if_xflags = IFXF_MPSAFE;
278 	ifp->if_ioctl = rge_ioctl;
279 	ifp->if_qstart = rge_start;
280 	ifp->if_watchdog = rge_watchdog;
281 	ifq_init_maxlen(&ifp->if_snd, RGE_TX_LIST_CNT - 1);
282 	ifp->if_hardmtu = RGE_JUMBO_MTU;
283 
284 	ifp->if_capabilities = IFCAP_VLAN_MTU | IFCAP_CSUM_IPv4 |
285 	    IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;
286 
287 #if NVLAN > 0
288 	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
289 #endif
290 
291 #ifndef SMALL_KERNEL
292 	ifp->if_capabilities |= IFCAP_WOL;
293 	ifp->if_wol = rge_wol;
294 	rge_wol(ifp, 0);
295 #endif
296 	timeout_set(&sc->sc_timeout, rge_tick, sc);
297 	task_set(&sc->sc_task, rge_txstart, sc);
298 
299 	/* Initialize ifmedia structures. */
300 	ifmedia_init(&sc->sc_media, IFM_IMASK, rge_ifmedia_upd,
301 	    rge_ifmedia_sts);
302 	rge_add_media_types(sc);
303 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
304 	ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);
305 	sc->sc_media.ifm_media = sc->sc_media.ifm_cur->ifm_media;
306 
307 	if_attach(ifp);
308 	ether_ifattach(ifp);
309 
310 #if NKSTAT > 0
311 	rge_kstat_attach(sc);
312 #endif
313 }
314 
315 int
316 rge_activate(struct device *self, int act)
317 {
318 #ifndef SMALL_KERNEL
319 	struct rge_softc *sc = (struct rge_softc *)self;
320 #endif
321 	int rv = 0;
322 
323 	switch (act) {
324 	case DVACT_POWERDOWN:
325 		rv = config_activate_children(self, act);
326 #ifndef SMALL_KERNEL
327 		rge_wol_power(sc);
328 #endif
329 		break;
330 	default:
331 		rv = config_activate_children(self, act);
332 		break;
333 	}
334 	return (rv);
335 }
336 
337 int
338 rge_intr(void *arg)
339 {
340 	struct rge_softc *sc = arg;
341 	struct rge_queues *q = sc->sc_queues;
342 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
343 	uint32_t status;
344 	int claimed = 0, rv;
345 
346 	if (!(ifp->if_flags & IFF_RUNNING))
347 		return (0);
348 
349 	/* Disable interrupts. */
350 	RGE_WRITE_4(sc, RGE_IMR, 0);
351 
352 	if (!(sc->rge_flags & RGE_FLAG_MSI)) {
353 		if ((RGE_READ_4(sc, RGE_ISR) & sc->rge_intrs) == 0)
354 			return (0);
355 	}
356 
357 	status = RGE_READ_4(sc, RGE_ISR);
358 	if (status)
359 		RGE_WRITE_4(sc, RGE_ISR, status);
360 
361 	if (status & RGE_ISR_PCS_TIMEOUT)
362 		claimed = 1;
363 
364 	rv = 0;
365 	if (status & sc->rge_intrs) {
366 		rv |= rge_rxeof(q);
367 		rv |= rge_txeof(q);
368 
369 		if (status & RGE_ISR_SYSTEM_ERR) {
370 			KERNEL_LOCK();
371 			rge_init(ifp);
372 			KERNEL_UNLOCK();
373 		}
374 		claimed = 1;
375 	}
376 
377 	if (sc->rge_timerintr) {
378 		if (!rv) {
379 			/*
380 			 * Nothing needs to be processed; fall back
381 			 * to plain TX/RX interrupts.
382 			 */
383 			rge_setup_intr(sc, RGE_IMTYPE_NONE);
384 
385 			/*
386 			 * Collect completions again, mainly to avoid the
387 			 * possible race introduced by changing interrupt
388 			 * masks.
389 			 */
390 			rge_rxeof(q);
391 			rge_txeof(q);
392 		} else
393 			RGE_WRITE_4(sc, RGE_TIMERCNT, 1);
394 	} else if (rv) {
395 		/*
396 		 * Assume that simulated interrupt moderation (hardware
397 		 * timer based) can reduce the interrupt
398 		 * rate.
399 		 */
400 		rge_setup_intr(sc, RGE_IMTYPE_SIM);
401 	}
402 
403 	RGE_WRITE_4(sc, RGE_IMR, sc->rge_intrs);
404 
405 	return (claimed);
406 }
407 
408 int
409 rge_encap(struct rge_queues *q, struct mbuf *m, int idx)
410 {
411 	struct rge_softc *sc = q->q_sc;
412 	struct rge_tx_desc *d = NULL;
413 	struct rge_txq *txq;
414 	bus_dmamap_t txmap;
415 	uint32_t cmdsts, cflags = 0;
416 	int cur, error, i, last, nsegs;
417 
418 	/*
419 	 * Set RGE_TDEXTSTS_IPCSUM if any checksum offloading is requested.
420 	 * Otherwise, RGE_TDEXTSTS_TCPCSUM / RGE_TDEXTSTS_UDPCSUM do not
421 	 * take effect.
422 	 */
423 	if ((m->m_pkthdr.csum_flags &
424 	    (M_IPV4_CSUM_OUT | M_TCP_CSUM_OUT | M_UDP_CSUM_OUT)) != 0) {
425 		cflags |= RGE_TDEXTSTS_IPCSUM;
426 		if (m->m_pkthdr.csum_flags & M_TCP_CSUM_OUT)
427 			cflags |= RGE_TDEXTSTS_TCPCSUM;
428 		if (m->m_pkthdr.csum_flags & M_UDP_CSUM_OUT)
429 			cflags |= RGE_TDEXTSTS_UDPCSUM;
430 	}
431 
432 	txq = &q->q_tx.rge_txq[idx];
433 	txmap = txq->txq_dmamap;
434 
435 	error = bus_dmamap_load_mbuf(sc->sc_dmat, txmap, m, BUS_DMA_NOWAIT);
436 	switch (error) {
437 	case 0:
438 		break;
439 	case EFBIG: /* mbuf chain is too fragmented */
440 		if (m_defrag(m, M_DONTWAIT) == 0 &&
441 		    bus_dmamap_load_mbuf(sc->sc_dmat, txmap, m,
442 		    BUS_DMA_NOWAIT) == 0)
443 			break;
444 
445 		/* FALLTHROUGH */
446 	default:
447 		return (0);
448 	}
449 
450 	bus_dmamap_sync(sc->sc_dmat, txmap, 0, txmap->dm_mapsize,
451 	    BUS_DMASYNC_PREWRITE);
452 
453 	nsegs = txmap->dm_nsegs;
454 
455 	/* Set up hardware VLAN tagging. */
456 #if NVLAN > 0
457 	if (m->m_flags & M_VLANTAG)
458 		cflags |= swap16(m->m_pkthdr.ether_vtag) | RGE_TDEXTSTS_VTAG;
459 #endif
460 
461 	cur = idx;
462 	cmdsts = RGE_TDCMDSTS_SOF;
463 
464 	for (i = 0; i < txmap->dm_nsegs; i++) {
465 		d = &q->q_tx.rge_tx_list[cur];
466 
467 		d->rge_extsts = htole32(cflags);
468 		d->rge_addrlo = htole32(RGE_ADDR_LO(txmap->dm_segs[i].ds_addr));
469 		d->rge_addrhi = htole32(RGE_ADDR_HI(txmap->dm_segs[i].ds_addr));
470 
471 		cmdsts |= txmap->dm_segs[i].ds_len;
472 
473 		if (cur == RGE_TX_LIST_CNT - 1)
474 			cmdsts |= RGE_TDCMDSTS_EOR;
475 
476 		d->rge_cmdsts = htole32(cmdsts);
477 
478 		last = cur;
479 		cmdsts = RGE_TDCMDSTS_OWN;
480 		cur = RGE_NEXT_TX_DESC(cur);
481 	}
482 
483 	/* Set EOF on the last descriptor. */
484 	d->rge_cmdsts |= htole32(RGE_TDCMDSTS_EOF);
485 
486 	/*
	 * Transfer ownership of the packet to the chip: OWN is set on the
	 * first (SOF) descriptor only after the rest of the chain is ready.
	 */
487 	d = &q->q_tx.rge_tx_list[idx];
488 
489 	d->rge_cmdsts |= htole32(RGE_TDCMDSTS_OWN);
490 
491 	bus_dmamap_sync(sc->sc_dmat, q->q_tx.rge_tx_list_map,
492 	    cur * sizeof(struct rge_tx_desc), sizeof(struct rge_tx_desc),
493 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
494 
495 	/* Update info of TX queue and descriptors. */
496 	txq->txq_mbuf = m;
497 	txq->txq_descidx = last;
498 
499 	return (nsegs);
500 }
501 
502 int
503 rge_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
504 {
505 	struct rge_softc *sc = ifp->if_softc;
506 	struct ifreq *ifr = (struct ifreq *)data;
507 	int s, error = 0;
508 
509 	s = splnet();
510 
511 	switch (cmd) {
512 	case SIOCSIFADDR:
513 		ifp->if_flags |= IFF_UP;
514 		if (!(ifp->if_flags & IFF_RUNNING))
515 			rge_init(ifp);
516 		break;
517 	case SIOCSIFFLAGS:
518 		if (ifp->if_flags & IFF_UP) {
519 			if (ifp->if_flags & IFF_RUNNING)
520 				error = ENETRESET;
521 			else
522 				rge_init(ifp);
523 		} else {
524 			if (ifp->if_flags & IFF_RUNNING)
525 				rge_stop(ifp);
526 		}
527 		break;
528 	case SIOCGIFMEDIA:
529 	case SIOCSIFMEDIA:
530 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
531 		break;
532 	case SIOCGIFRXR:
533 		error = if_rxr_ioctl((struct if_rxrinfo *)ifr->ifr_data,
534 		    NULL, RGE_JUMBO_FRAMELEN, &sc->sc_queues->q_rx.rge_rx_ring);
535 		break;
536 	default:
537 		error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data);
538 	}
539 
540 	if (error == ENETRESET) {
541 		if (ifp->if_flags & IFF_RUNNING)
542 			rge_iff(sc);
543 		error = 0;
544 	}
545 
546 	splx(s);
547 	return (error);
548 }
549 
550 void
551 rge_start(struct ifqueue *ifq)
552 {
553 	struct ifnet *ifp = ifq->ifq_if;
554 	struct rge_softc *sc = ifp->if_softc;
555 	struct rge_queues *q = sc->sc_queues;
556 	struct mbuf *m;
557 	int free, idx, used;
558 	int queued = 0;
559 
560 	if (!LINK_STATE_IS_UP(ifp->if_link_state)) {
561 		ifq_purge(ifq);
562 		return;
563 	}
564 
565 	/* Calculate free space. */
566 	idx = q->q_tx.rge_txq_prodidx;
567 	free = q->q_tx.rge_txq_considx;
568 	if (free <= idx)
569 		free += RGE_TX_LIST_CNT;
570 	free -= idx;
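	/*
	 * Worked example (assuming a 64-entry ring): prodidx 60 and
	 * considx 10 give free = 10 + 64 - 60 = 14 usable descriptors.
	 */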
571 
572 	for (;;) {
573 		if (RGE_TX_NSEGS >= free + 2) {
574 			ifq_set_oactive(&ifp->if_snd);
575 			break;
576 		}
577 
578 		m = ifq_dequeue(ifq);
579 		if (m == NULL)
580 			break;
581 
582 		used = rge_encap(q, m, idx);
583 		if (used == 0) {
584 			m_freem(m);
585 			continue;
586 		}
587 
588 		KASSERT(used <= free);
589 		free -= used;
590 
591 #if NBPFILTER > 0
592 		if (ifp->if_bpf)
593 			bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_OUT);
594 #endif
595 
596 		idx += used;
597 		if (idx >= RGE_TX_LIST_CNT)
598 			idx -= RGE_TX_LIST_CNT;
599 
600 		queued++;
601 	}
602 
603 	if (queued == 0)
604 		return;
605 
606 	/* Set a timeout in case the chip goes out to lunch. */
607 	ifp->if_timer = 5;
608 
609 	q->q_tx.rge_txq_prodidx = idx;
610 	ifq_serialize(ifq, &sc->sc_task);
611 }
612 
613 void
614 rge_watchdog(struct ifnet *ifp)
615 {
616 	struct rge_softc *sc = ifp->if_softc;
617 
618 	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);
619 	ifp->if_oerrors++;
620 
621 	rge_init(ifp);
622 }
623 
624 void
625 rge_init(struct ifnet *ifp)
626 {
627 	struct rge_softc *sc = ifp->if_softc;
628 	struct rge_queues *q = sc->sc_queues;
629 	uint32_t val;
630 	int i, num_miti;
631 
632 	rge_stop(ifp);
633 
634 	/* Set MAC address. */
635 	rge_set_macaddr(sc, sc->sc_arpcom.ac_enaddr);
636 
637 	/* Initialize the RX and TX descriptor lists. */
638 	rge_rx_list_init(q);
639 	rge_tx_list_init(q);
640 
641 	rge_chipinit(sc);
642 
643 	if (rge_phy_config(sc))
644 		return;
645 
646 	RGE_SETBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
647 
648 	RGE_CLRBIT_1(sc, 0xf1, 0x80);
649 	rge_disable_aspm_clkreq(sc);
650 	RGE_WRITE_2(sc, RGE_EEE_TXIDLE_TIMER,
651 	    RGE_JUMBO_MTU + ETHER_HDR_LEN + 32);
652 	RGE_CLRBIT_1(sc, RGE_CFG3, RGE_CFG3_RDY_TO_L23);
653 
654 	/* Load the addresses of the RX and TX lists into the chip. */
655 	RGE_WRITE_4(sc, RGE_RXDESC_ADDR_LO,
656 	    RGE_ADDR_LO(q->q_rx.rge_rx_list_map->dm_segs[0].ds_addr));
657 	RGE_WRITE_4(sc, RGE_RXDESC_ADDR_HI,
658 	    RGE_ADDR_HI(q->q_rx.rge_rx_list_map->dm_segs[0].ds_addr));
659 	RGE_WRITE_4(sc, RGE_TXDESC_ADDR_LO,
660 	    RGE_ADDR_LO(q->q_tx.rge_tx_list_map->dm_segs[0].ds_addr));
661 	RGE_WRITE_4(sc, RGE_TXDESC_ADDR_HI,
662 	    RGE_ADDR_HI(q->q_tx.rge_tx_list_map->dm_segs[0].ds_addr));
663 
664 	/* Set the initial RX and TX configurations. */
665 	RGE_WRITE_4(sc, RGE_RXCFG,
666 	    (sc->rge_type == MAC_CFG3) ? RGE_RXCFG_CONFIG :
667 	    RGE_RXCFG_CONFIG_8125B);
668 	RGE_WRITE_4(sc, RGE_TXCFG, RGE_TXCFG_CONFIG);
669 
670 	val = rge_read_csi(sc, 0x70c) & ~0xff000000;
671 	rge_write_csi(sc, 0x70c, val | 0x27000000);
672 
673 	RGE_WRITE_2(sc, 0x0382, 0x221b);
674 
675 	RGE_WRITE_1(sc, RGE_RSS_CTRL, 0);
676 
677 	val = RGE_READ_2(sc, RGE_RXQUEUE_CTRL) & ~0x001c;
678 	RGE_WRITE_2(sc, RGE_RXQUEUE_CTRL, val | (fls(sc->sc_nqueues) - 1) << 2);
679 
680 	RGE_CLRBIT_1(sc, RGE_CFG1, RGE_CFG1_SPEED_DOWN);
681 
682 	rge_write_mac_ocp(sc, 0xc140, 0xffff);
683 	rge_write_mac_ocp(sc, 0xc142, 0xffff);
684 
685 	RGE_MAC_SETBIT(sc, 0xeb58, 0x0001);
686 
687 	val = rge_read_mac_ocp(sc, 0xe614) & ~0x0700;
688 	if (sc->rge_type == MAC_CFG3)
689 		rge_write_mac_ocp(sc, 0xe614, val | 0x0300);
690 	else
691 		rge_write_mac_ocp(sc, 0xe614, val | 0x0200);
692 
693 	val = rge_read_mac_ocp(sc, 0xe63e) & ~0x0c00;
694 	rge_write_mac_ocp(sc, 0xe63e, val |
695 	    ((fls(sc->sc_nqueues) - 1) & 0x03) << 10);
696 
697 	RGE_MAC_CLRBIT(sc, 0xe63e, 0x0030);
698 	if (sc->rge_type == MAC_CFG3)
699 		RGE_MAC_SETBIT(sc, 0xe63e, 0x0020);
700 
701 	RGE_MAC_CLRBIT(sc, 0xc0b4, 0x0001);
702 	RGE_MAC_SETBIT(sc, 0xc0b4, 0x0001);
703 
704 	RGE_MAC_SETBIT(sc, 0xc0b4, 0x000c);
705 
706 	val = rge_read_mac_ocp(sc, 0xeb6a) & ~0x00ff;
707 	rge_write_mac_ocp(sc, 0xeb6a, val | 0x0033);
708 
709 	val = rge_read_mac_ocp(sc, 0xeb50) & ~0x03e0;
710 	rge_write_mac_ocp(sc, 0xeb50, val | 0x0040);
711 
712 	RGE_MAC_CLRBIT(sc, 0xe056, 0x00f0);
713 
714 	RGE_WRITE_1(sc, RGE_TDFNR, 0x10);
715 
716 	RGE_MAC_CLRBIT(sc, 0xe040, 0x1000);
717 
718 	val = rge_read_mac_ocp(sc, 0xea1c) & ~0x0003;
719 	rge_write_mac_ocp(sc, 0xea1c, val | 0x0001);
720 
721 	rge_write_mac_ocp(sc, 0xe0c0, 0x4000);
722 
723 	RGE_MAC_SETBIT(sc, 0xe052, 0x0060);
724 	RGE_MAC_CLRBIT(sc, 0xe052, 0x0088);
725 
726 	val = rge_read_mac_ocp(sc, 0xd430) & ~0x0fff;
727 	rge_write_mac_ocp(sc, 0xd430, val | 0x045f);
728 
729 	RGE_SETBIT_1(sc, RGE_DLLPR, RGE_DLLPR_PFM_EN | RGE_DLLPR_TX_10M_PS_EN);
730 
731 	if (sc->rge_type == MAC_CFG3)
732 		RGE_SETBIT_1(sc, RGE_MCUCMD, 0x01);
733 
734 	/* Disable EEE plus. */
735 	RGE_MAC_CLRBIT(sc, 0xe080, 0x0002);
736 
737 	RGE_MAC_CLRBIT(sc, 0xea1c, 0x0004);
738 
739 	RGE_MAC_SETBIT(sc, 0xeb54, 0x0001);
740 	DELAY(1);
741 	RGE_MAC_CLRBIT(sc, 0xeb54, 0x0001);
742 
743 	RGE_CLRBIT_2(sc, 0x1880, 0x0030);
744 
745 	/* Config interrupt type for RTL8125B. */
746 	if (sc->rge_type == MAC_CFG5)
747 		RGE_CLRBIT_1(sc, RGE_INT_CFG0, RGE_INT_CFG0_EN);
748 
749 	/* Clear timer interrupts. */
750 	RGE_WRITE_4(sc, RGE_TIMERINT0, 0);
751 	RGE_WRITE_4(sc, RGE_TIMERINT1, 0);
752 	RGE_WRITE_4(sc, RGE_TIMERINT2, 0);
753 	RGE_WRITE_4(sc, RGE_TIMERINT3, 0);
754 
755 	num_miti = (sc->rge_type == MAC_CFG3) ? 64 : 32;
756 	/* Clear interrupt moderation timer. */
757 	for (i = 0; i < num_miti; i++)
758 		RGE_WRITE_4(sc, RGE_INTMITI(i), 0);
759 
760 	if (sc->rge_type == MAC_CFG5) {
761 		RGE_CLRBIT_1(sc, RGE_INT_CFG0,
762 		    RGE_INT_CFG0_TIMEOUT_BYPASS |
763 		    RGE_INT_CFG0_MITIGATION_BYPASS);
764 		RGE_WRITE_2(sc, RGE_INT_CFG1, 0);
765 	}
766 
767 	RGE_MAC_SETBIT(sc, 0xc0ac, 0x1f80);
768 
769 	rge_write_mac_ocp(sc, 0xe098, 0xc302);
770 
771 	RGE_MAC_CLRBIT(sc, 0xe032, 0x0003);
772 	val = rge_read_csi(sc, 0x98) & ~0x0000ff00;
773 	rge_write_csi(sc, 0x98, val);
774 
775 	val = rge_read_mac_ocp(sc, 0xe092) & ~0x00ff;
776 	rge_write_mac_ocp(sc, 0xe092, val);
777 
778 	if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)
779 		RGE_SETBIT_4(sc, RGE_RXCFG, RGE_RXCFG_VLANSTRIP);
780 
781 	RGE_SETBIT_2(sc, RGE_CPLUSCMD, RGE_CPLUSCMD_RXCSUM);
782 
783 	/* Set maximum frame size. */
784 	RGE_WRITE_2(sc, RGE_RXMAXSIZE, RGE_JUMBO_FRAMELEN);
785 
786 	/* Disable RXDV gate. */
787 	RGE_CLRBIT_1(sc, RGE_PPSW, 0x08);
788 	DELAY(2000);
789 
790 	/* Program promiscuous mode and multicast filters. */
791 	rge_iff(sc);
792 
793 	rge_disable_aspm_clkreq(sc);
794 
795 	RGE_CLRBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
796 	DELAY(10);
797 
798 	rge_ifmedia_upd(ifp);
799 
800 	/* Enable transmit and receive. */
801 	RGE_WRITE_1(sc, RGE_CMD, RGE_CMD_TXENB | RGE_CMD_RXENB);
802 
803 	/* Enable interrupts. */
804 	rge_setup_intr(sc, RGE_IMTYPE_SIM);
805 
806 	ifp->if_flags |= IFF_RUNNING;
807 	ifq_clr_oactive(&ifp->if_snd);
808 
809 	timeout_add_sec(&sc->sc_timeout, 1);
810 }
811 
812 /*
813  * Stop the adapter and free any mbufs allocated to the RX and TX lists.
814  */
815 void
816 rge_stop(struct ifnet *ifp)
817 {
818 	struct rge_softc *sc = ifp->if_softc;
819 	struct rge_queues *q = sc->sc_queues;
820 	int i;
821 
822 	timeout_del(&sc->sc_timeout);
823 
824 	ifp->if_timer = 0;
825 	ifp->if_flags &= ~IFF_RUNNING;
826 	sc->rge_timerintr = 0;
827 
828 	RGE_CLRBIT_4(sc, RGE_RXCFG, RGE_RXCFG_ALLPHYS | RGE_RXCFG_INDIV |
829 	    RGE_RXCFG_MULTI | RGE_RXCFG_BROAD | RGE_RXCFG_RUNT |
830 	    RGE_RXCFG_ERRPKT);
831 
832 	rge_hw_reset(sc);
833 
834 	RGE_MAC_CLRBIT(sc, 0xc0ac, 0x1f80);
835 
836 	intr_barrier(sc->sc_ih);
837 	ifq_barrier(&ifp->if_snd);
838 	ifq_clr_oactive(&ifp->if_snd);
839 
840 	if (q->q_rx.rge_head != NULL) {
841 		m_freem(q->q_rx.rge_head);
842 		q->q_rx.rge_head = q->q_rx.rge_tail = NULL;
843 	}
844 
845 	/* Free the TX list buffers. */
846 	for (i = 0; i < RGE_TX_LIST_CNT; i++) {
847 		if (q->q_tx.rge_txq[i].txq_mbuf != NULL) {
848 			bus_dmamap_unload(sc->sc_dmat,
849 			    q->q_tx.rge_txq[i].txq_dmamap);
850 			m_freem(q->q_tx.rge_txq[i].txq_mbuf);
851 			q->q_tx.rge_txq[i].txq_mbuf = NULL;
852 		}
853 	}
854 
855 	/* Free the RX list buffers. */
856 	for (i = 0; i < RGE_RX_LIST_CNT; i++) {
857 		if (q->q_rx.rge_rxq[i].rxq_mbuf != NULL) {
858 			bus_dmamap_unload(sc->sc_dmat,
859 			    q->q_rx.rge_rxq[i].rxq_dmamap);
860 			m_freem(q->q_rx.rge_rxq[i].rxq_mbuf);
861 			q->q_rx.rge_rxq[i].rxq_mbuf = NULL;
862 		}
863 	}
864 }
865 
866 /*
867  * Set media options.
868  */
869 int
870 rge_ifmedia_upd(struct ifnet *ifp)
871 {
872 	struct rge_softc *sc = ifp->if_softc;
873 	struct ifmedia *ifm = &sc->sc_media;
874 	int anar, gig, val;
875 
876 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
877 		return (EINVAL);
878 
879 	/* Disable Gigabit Lite. */
880 	RGE_PHY_CLRBIT(sc, 0xa428, 0x0200);
881 	RGE_PHY_CLRBIT(sc, 0xa5ea, 0x0001);
882 
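	/*
	 * PHY OCP register 0xa5d4 holds the 2.5GBASE-T advertisement bit;
	 * RGE_ADV_2500TFDX is cleared here and re-added below only for the
	 * autoselect and 2500baseT media types.
	 */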
883 	val = rge_read_phy_ocp(sc, 0xa5d4);
884 	val &= ~RGE_ADV_2500TFDX;
885 
886 	anar = gig = 0;
887 	switch (IFM_SUBTYPE(ifm->ifm_media)) {
888 	case IFM_AUTO:
889 		anar = ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10;
890 		gig = GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX;
891 		val |= RGE_ADV_2500TFDX;
892 		break;
893 	case IFM_2500_T:
894 		anar = ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10;
895 		gig = GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX;
896 		val |= RGE_ADV_2500TFDX;
897 		ifp->if_baudrate = IF_Mbps(2500);
898 		break;
899 	case IFM_1000_T:
900 		anar = ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10;
901 		gig = GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX;
902 		ifp->if_baudrate = IF_Gbps(1);
903 		break;
904 	case IFM_100_TX:
905 		gig = rge_read_phy(sc, 0, MII_100T2CR) &
906 		    ~(GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX);
907 		anar = ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) ?
908 		    ANAR_TX | ANAR_TX_FD | ANAR_10_FD | ANAR_10 :
909 		    ANAR_TX | ANAR_10_FD | ANAR_10;
910 		ifp->if_baudrate = IF_Mbps(100);
911 		break;
912 	case IFM_10_T:
913 		gig = rge_read_phy(sc, 0, MII_100T2CR) &
914 		    ~(GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX);
915 		anar = ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) ?
916 		    ANAR_10_FD | ANAR_10 : ANAR_10;
917 		ifp->if_baudrate = IF_Mbps(10);
918 		break;
919 	default:
920 		printf("%s: unsupported media type\n", sc->sc_dev.dv_xname);
921 		return (EINVAL);
922 	}
923 
924 	rge_write_phy(sc, 0, MII_ANAR, anar | ANAR_PAUSE_ASYM | ANAR_FC);
925 	rge_write_phy(sc, 0, MII_100T2CR, gig);
926 	rge_write_phy_ocp(sc, 0xa5d4, val);
927 	rge_write_phy(sc, 0, MII_BMCR, BMCR_RESET | BMCR_AUTOEN |
928 	    BMCR_STARTNEG);
929 
930 	return (0);
931 }
932 
933 /*
934  * Report current media status.
935  */
936 void
937 rge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
938 {
939 	struct rge_softc *sc = ifp->if_softc;
940 	uint16_t status = 0;
941 
942 	ifmr->ifm_status = IFM_AVALID;
943 	ifmr->ifm_active = IFM_ETHER;
944 
945 	if (rge_get_link_status(sc)) {
946 		ifmr->ifm_status |= IFM_ACTIVE;
947 
948 		status = RGE_READ_2(sc, RGE_PHYSTAT);
949 		if ((status & RGE_PHYSTAT_FDX) ||
950 		    (status & RGE_PHYSTAT_2500MBPS))
951 			ifmr->ifm_active |= IFM_FDX;
952 		else
953 			ifmr->ifm_active |= IFM_HDX;
954 
955 		if (status & RGE_PHYSTAT_10MBPS)
956 			ifmr->ifm_active |= IFM_10_T;
957 		else if (status & RGE_PHYSTAT_100MBPS)
958 			ifmr->ifm_active |= IFM_100_TX;
959 		else if (status & RGE_PHYSTAT_1000MBPS)
960 			ifmr->ifm_active |= IFM_1000_T;
961 		else if (status & RGE_PHYSTAT_2500MBPS)
962 			ifmr->ifm_active |= IFM_2500_T;
963 	}
964 }
965 
966 /*
967  * Allocate memory for RX/TX rings.
968  */
969 int
970 rge_allocmem(struct rge_softc *sc)
971 {
972 	struct rge_queues *q = sc->sc_queues;
973 	int error, i;
974 
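	/*
	 * Both rings follow the usual bus_dma(9) sequence: create a map for
	 * the descriptor list, allocate and map its memory, load the map,
	 * and finally create per-buffer maps for the mbufs.
	 */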
975 	/* Allocate DMA'able memory for the TX ring. */
976 	error = bus_dmamap_create(sc->sc_dmat, RGE_TX_LIST_SZ, 1,
977 	    RGE_TX_LIST_SZ, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
978 	    &q->q_tx.rge_tx_list_map);
979 	if (error) {
980 		printf("%s: can't create TX list map\n", sc->sc_dev.dv_xname);
981 		return (error);
982 	}
983 	error = bus_dmamem_alloc(sc->sc_dmat, RGE_TX_LIST_SZ, RGE_ALIGN, 0,
984 	    &q->q_tx.rge_tx_listseg, 1, &q->q_tx.rge_tx_listnseg,
985 	    BUS_DMA_NOWAIT | BUS_DMA_ZERO);
986 	if (error) {
987 		printf("%s: can't alloc TX list\n", sc->sc_dev.dv_xname);
988 		return (error);
989 	}
990 
991 	/* Load the map for the TX ring. */
992 	error = bus_dmamem_map(sc->sc_dmat, &q->q_tx.rge_tx_listseg,
993 	    q->q_tx.rge_tx_listnseg, RGE_TX_LIST_SZ,
994 	    (caddr_t *)&q->q_tx.rge_tx_list, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
995 	if (error) {
996 		printf("%s: can't map TX dma buffers\n", sc->sc_dev.dv_xname);
997 		bus_dmamem_free(sc->sc_dmat, &q->q_tx.rge_tx_listseg,
998 		    q->q_tx.rge_tx_listnseg);
999 		return (error);
1000 	}
1001 	error = bus_dmamap_load(sc->sc_dmat, q->q_tx.rge_tx_list_map,
1002 	    q->q_tx.rge_tx_list, RGE_TX_LIST_SZ, NULL, BUS_DMA_NOWAIT);
1003 	if (error) {
1004 		printf("%s: can't load TX dma map\n", sc->sc_dev.dv_xname);
1005 		bus_dmamap_destroy(sc->sc_dmat, q->q_tx.rge_tx_list_map);
1006 		bus_dmamem_unmap(sc->sc_dmat,
1007 		    (caddr_t)q->q_tx.rge_tx_list, RGE_TX_LIST_SZ);
1008 		bus_dmamem_free(sc->sc_dmat, &q->q_tx.rge_tx_listseg,
1009 		    q->q_tx.rge_tx_listnseg);
1010 		return (error);
1011 	}
1012 
1013 	/* Create DMA maps for TX buffers. */
1014 	for (i = 0; i < RGE_TX_LIST_CNT; i++) {
1015 		error = bus_dmamap_create(sc->sc_dmat, RGE_JUMBO_FRAMELEN,
1016 		    RGE_TX_NSEGS, RGE_JUMBO_FRAMELEN, 0,
1017 		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1018 		    &q->q_tx.rge_txq[i].txq_dmamap);
1019 		if (error) {
1020 			printf("%s: can't create DMA map for TX\n",
1021 			    sc->sc_dev.dv_xname);
1022 			return (error);
1023 		}
1024 	}
1025 
1026 	/* Allocate DMA'able memory for the RX ring. */
1027 	error = bus_dmamap_create(sc->sc_dmat, RGE_RX_LIST_SZ, 1,
1028 	    RGE_RX_LIST_SZ, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1029 	    &q->q_rx.rge_rx_list_map);
1030 	if (error) {
1031 		printf("%s: can't create RX list map\n", sc->sc_dev.dv_xname);
1032 		return (error);
1033 	}
1034 	error = bus_dmamem_alloc(sc->sc_dmat, RGE_RX_LIST_SZ, RGE_ALIGN, 0,
1035 	    &q->q_rx.rge_rx_listseg, 1, &q->q_rx.rge_rx_listnseg,
1036 	    BUS_DMA_NOWAIT | BUS_DMA_ZERO);
1037 	if (error) {
1038 		printf("%s: can't alloc RX list\n", sc->sc_dev.dv_xname);
1039 		return (error);
1040 	}
1041 
1042 	/* Load the map for the RX ring. */
1043 	error = bus_dmamem_map(sc->sc_dmat, &q->q_rx.rge_rx_listseg,
1044 	    q->q_rx.rge_rx_listnseg, RGE_RX_LIST_SZ,
1045 	    (caddr_t *)&q->q_rx.rge_rx_list, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
1046 	if (error) {
1047 		printf("%s: can't map RX dma buffers\n", sc->sc_dev.dv_xname);
1048 		bus_dmamem_free(sc->sc_dmat, &q->q_rx.rge_rx_listseg,
1049 		    q->q_rx.rge_rx_listnseg);
1050 		return (error);
1051 	}
1052 	error = bus_dmamap_load(sc->sc_dmat, q->q_rx.rge_rx_list_map,
1053 	    q->q_rx.rge_rx_list, RGE_RX_LIST_SZ, NULL, BUS_DMA_NOWAIT);
1054 	if (error) {
1055 		printf("%s: can't load RX dma map\n", sc->sc_dev.dv_xname);
1056 		bus_dmamap_destroy(sc->sc_dmat, q->q_rx.rge_rx_list_map);
1057 		bus_dmamem_unmap(sc->sc_dmat,
1058 		    (caddr_t)q->q_rx.rge_rx_list, RGE_RX_LIST_SZ);
1059 		bus_dmamem_free(sc->sc_dmat, &q->q_rx.rge_rx_listseg,
1060 		    q->q_rx.rge_rx_listnseg);
1061 		return (error);
1062 	}
1063 
1064 	/* Create DMA maps for RX buffers. */
1065 	for (i = 0; i < RGE_RX_LIST_CNT; i++) {
1066 		error = bus_dmamap_create(sc->sc_dmat, RGE_JUMBO_FRAMELEN, 1,
1067 		    RGE_JUMBO_FRAMELEN, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1068 		    &q->q_rx.rge_rxq[i].rxq_dmamap);
1069 		if (error) {
1070 			printf("%s: can't create DMA map for RX\n",
1071 			    sc->sc_dev.dv_xname);
1072 			return (error);
1073 		}
1074 	}
1075 
1076 	return (error);
1077 }
1078 
1079 /*
1080  * Initialize the RX descriptor and attach an mbuf cluster.
1081  */
1082 int
1083 rge_newbuf(struct rge_queues *q)
1084 {
1085 	struct rge_softc *sc = q->q_sc;
1086 	struct mbuf *m;
1087 	struct rge_rx_desc *r;
1088 	struct rge_rxq *rxq;
1089 	bus_dmamap_t rxmap;
1090 	int idx;
1091 
1092 	m = MCLGETL(NULL, M_DONTWAIT, RGE_JUMBO_FRAMELEN);
1093 	if (m == NULL)
1094 		return (ENOBUFS);
1095 
1096 	m->m_len = m->m_pkthdr.len = RGE_JUMBO_FRAMELEN;
1097 
1098 	idx = q->q_rx.rge_rxq_prodidx;
1099 	rxq = &q->q_rx.rge_rxq[idx];
1100 	rxmap = rxq->rxq_dmamap;
1101 
1102 	if (bus_dmamap_load_mbuf(sc->sc_dmat, rxmap, m, BUS_DMA_NOWAIT)) {
1103 		m_freem(m);
1104 		return (ENOBUFS);
1105 	}
1106 
1107 	bus_dmamap_sync(sc->sc_dmat, rxmap, 0, rxmap->dm_mapsize,
1108 	    BUS_DMASYNC_PREREAD);
1109 
1110 	/* Map the segments into RX descriptors. */
1111 	r = &q->q_rx.rge_rx_list[idx];
1112 
1113 	rxq->rxq_mbuf = m;
1114 
1115 	r->hi_qword1.rx_qword4.rge_extsts = 0;
1116 	r->hi_qword0.rge_addr = htole64(rxmap->dm_segs[0].ds_addr);
1117 
1118 	r->hi_qword1.rx_qword4.rge_cmdsts = htole32(rxmap->dm_segs[0].ds_len);
1119 	if (idx == RGE_RX_LIST_CNT - 1)
1120 		r->hi_qword1.rx_qword4.rge_cmdsts |= htole32(RGE_RDCMDSTS_EOR);
1121 
1122 	r->hi_qword1.rx_qword4.rge_cmdsts |= htole32(RGE_RDCMDSTS_OWN);
1123 
1124 	bus_dmamap_sync(sc->sc_dmat, q->q_rx.rge_rx_list_map,
1125 	    idx * sizeof(struct rge_rx_desc), sizeof(struct rge_rx_desc),
1126 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1127 
1128 	q->q_rx.rge_rxq_prodidx = RGE_NEXT_RX_DESC(idx);
1129 
1130 	return (0);
1131 }
1132 
1133 void
1134 rge_discard_rxbuf(struct rge_queues *q, int idx)
1135 {
1136 	struct rge_softc *sc = q->q_sc;
1137 	struct rge_rx_desc *r;
1138 
1139 	r = &q->q_rx.rge_rx_list[idx];
1140 
1141 	r->hi_qword1.rx_qword4.rge_cmdsts = htole32(RGE_JUMBO_FRAMELEN);
1142 	r->hi_qword1.rx_qword4.rge_extsts = 0;
1143 	if (idx == RGE_RX_LIST_CNT - 1)
1144 		r->hi_qword1.rx_qword4.rge_cmdsts |= htole32(RGE_RDCMDSTS_EOR);
1145 	r->hi_qword1.rx_qword4.rge_cmdsts |= htole32(RGE_RDCMDSTS_OWN);
1146 
1147 	bus_dmamap_sync(sc->sc_dmat, q->q_rx.rge_rx_list_map,
1148 	    idx * sizeof(struct rge_rx_desc), sizeof(struct rge_rx_desc),
1149 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1150 }
1151 
1152 void
1153 rge_rx_list_init(struct rge_queues *q)
1154 {
1155 	memset(q->q_rx.rge_rx_list, 0, RGE_RX_LIST_SZ);
1156 
1157 	q->q_rx.rge_rxq_prodidx = q->q_rx.rge_rxq_considx = 0;
1158 	q->q_rx.rge_head = q->q_rx.rge_tail = NULL;
1159 
1160 	if_rxr_init(&q->q_rx.rge_rx_ring, 32, RGE_RX_LIST_CNT);
1161 	rge_fill_rx_ring(q);
1162 }
1163 
1164 void
1165 rge_fill_rx_ring(struct rge_queues *q)
1166 {
1167 	struct if_rxring *rxr = &q->q_rx.rge_rx_ring;
1168 	int slots;
1169 
1170 	for (slots = if_rxr_get(rxr, RGE_RX_LIST_CNT); slots > 0; slots--) {
1171 		if (rge_newbuf(q))
1172 			break;
1173 	}
1174 	if_rxr_put(rxr, slots);
1175 }
1176 
1177 void
1178 rge_tx_list_init(struct rge_queues *q)
1179 {
1180 	struct rge_softc *sc = q->q_sc;
1181 	struct rge_tx_desc *d;
1182 	int i;
1183 
1184 	memset(q->q_tx.rge_tx_list, 0, RGE_TX_LIST_SZ);
1185 
1186 	for (i = 0; i < RGE_TX_LIST_CNT; i++)
1187 		q->q_tx.rge_txq[i].txq_mbuf = NULL;
1188 
1189 	d = &q->q_tx.rge_tx_list[RGE_TX_LIST_CNT - 1];
1190 	d->rge_cmdsts = htole32(RGE_TDCMDSTS_EOR);
1191 
1192 	bus_dmamap_sync(sc->sc_dmat, q->q_tx.rge_tx_list_map, 0,
1193 	    q->q_tx.rge_tx_list_map->dm_mapsize,
1194 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1195 
1196 	q->q_tx.rge_txq_prodidx = q->q_tx.rge_txq_considx = 0;
1197 }
1198 
1199 int
1200 rge_rxeof(struct rge_queues *q)
1201 {
1202 	struct rge_softc *sc = q->q_sc;
1203 	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
1204 	struct mbuf *m;
1205 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
1206 	struct if_rxring *rxr = &q->q_rx.rge_rx_ring;
1207 	struct rge_rx_desc *cur_rx;
1208 	struct rge_rxq *rxq;
1209 	uint32_t rxstat, extsts;
1210 	int i, total_len, rx = 0;
1211 
1212 	for (i = q->q_rx.rge_rxq_considx; if_rxr_inuse(rxr) > 0;
1213 	    i = RGE_NEXT_RX_DESC(i)) {
1214 		/* Invalidate the descriptor memory. */
1215 		bus_dmamap_sync(sc->sc_dmat, q->q_rx.rge_rx_list_map,
1216 		    i * sizeof(struct rge_rx_desc), sizeof(struct rge_rx_desc),
1217 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1218 
1219 		cur_rx = &q->q_rx.rge_rx_list[i];
1220 		rxstat = letoh32(cur_rx->hi_qword1.rx_qword4.rge_cmdsts);
1221 		extsts = letoh32(cur_rx->hi_qword1.rx_qword4.rge_extsts);
1222 
1223 		if (rxstat & RGE_RDCMDSTS_OWN)
1224 			break;
1225 
1226 		total_len = rxstat & RGE_RDCMDSTS_FRAGLEN;
1227 		rxq = &q->q_rx.rge_rxq[i];
1228 		m = rxq->rxq_mbuf;
1229 		rxq->rxq_mbuf = NULL;
1230 		if_rxr_put(rxr, 1);
1231 		rx = 1;
1232 
1233 		/* Invalidate the RX mbuf and unload its map. */
1234 		bus_dmamap_sync(sc->sc_dmat, rxq->rxq_dmamap, 0,
1235 		    rxq->rxq_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
1236 		bus_dmamap_unload(sc->sc_dmat, rxq->rxq_dmamap);
1237 
1238 		if ((rxstat & (RGE_RDCMDSTS_SOF | RGE_RDCMDSTS_EOF)) !=
1239 		    (RGE_RDCMDSTS_SOF | RGE_RDCMDSTS_EOF)) {
1240 			ifp->if_ierrors++;
1241 			m_freem(m);
1242 			rge_discard_rxbuf(q, i);
1243 			continue;
1244 		}
1245 
1246 		if (rxstat & RGE_RDCMDSTS_RXERRSUM) {
1247 			ifp->if_ierrors++;
1248 			/*
1249 			 * If this is part of a multi-fragment packet,
1250 			 * discard all the pieces.
1251 			 */
1252 			if (q->q_rx.rge_head != NULL) {
1253 				m_freem(q->q_rx.rge_head);
1254 				q->q_rx.rge_head = q->q_rx.rge_tail = NULL;
1255 			}
1256 			m_freem(m);
1257 			rge_discard_rxbuf(q, i);
1258 			continue;
1259 		}
1260 
1261 		if (q->q_rx.rge_head != NULL) {
1262 			m->m_len = total_len;
1263 			/*
1264 			 * Special case: if there are 4 bytes or less
1265 			 * in this buffer, the mbuf can be discarded:
1266 			 * the last 4 bytes are the CRC, which we don't
1267 			 * care about anyway.
1268 			 */
1269 			if (m->m_len <= ETHER_CRC_LEN) {
1270 				q->q_rx.rge_tail->m_len -=
1271 				    (ETHER_CRC_LEN - m->m_len);
1272 				m_freem(m);
1273 			} else {
1274 				m->m_len -= ETHER_CRC_LEN;
1275 				m->m_flags &= ~M_PKTHDR;
1276 				q->q_rx.rge_tail->m_next = m;
1277 			}
1278 			m = q->q_rx.rge_head;
1279 			q->q_rx.rge_head = q->q_rx.rge_tail = NULL;
1280 			m->m_pkthdr.len = total_len - ETHER_CRC_LEN;
1281 		} else
1282 			m->m_pkthdr.len = m->m_len =
1283 			    (total_len - ETHER_CRC_LEN);
1284 
1285 		/* Check IP header checksum. */
1286 		if (!(extsts & RGE_RDEXTSTS_IPCSUMERR) &&
1287 		    (extsts & RGE_RDEXTSTS_IPV4))
1288 			m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;
1289 
1290 		/* Check TCP/UDP checksum. */
1291 		if ((extsts & (RGE_RDEXTSTS_IPV4 | RGE_RDEXTSTS_IPV6)) &&
1292 		    (((extsts & RGE_RDEXTSTS_TCPPKT) &&
1293 		    !(extsts & RGE_RDEXTSTS_TCPCSUMERR)) ||
1294 		    ((extsts & RGE_RDEXTSTS_UDPPKT) &&
1295 		    !(extsts & RGE_RDEXTSTS_UDPCSUMERR))))
1296 			m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK |
1297 			    M_UDP_CSUM_IN_OK;
1298 
1299 #if NVLAN > 0
1300 		if (extsts & RGE_RDEXTSTS_VTAG) {
1301 			m->m_pkthdr.ether_vtag =
1302 			    ntohs(extsts & RGE_RDEXTSTS_VLAN_MASK);
1303 			m->m_flags |= M_VLANTAG;
1304 		}
1305 #endif
1306 
1307 		ml_enqueue(&ml, m);
1308 	}
1309 
1310 	if (ifiq_input(&ifp->if_rcv, &ml))
1311 		if_rxr_livelocked(rxr);
1312 
1313 	q->q_rx.rge_rxq_considx = i;
1314 	rge_fill_rx_ring(q);
1315 
1316 	return (rx);
1317 }
1318 
1319 int
1320 rge_txeof(struct rge_queues *q)
1321 {
1322 	struct rge_softc *sc = q->q_sc;
1323 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
1324 	struct rge_txq *txq;
1325 	uint32_t txstat;
1326 	int cons, idx, prod;
1327 	int free = 0;
1328 
1329 	prod = q->q_tx.rge_txq_prodidx;
1330 	cons = q->q_tx.rge_txq_considx;
1331 
1332 	while (prod != cons) {
1333 		txq = &q->q_tx.rge_txq[cons];
1334 		idx = txq->txq_descidx;
1335 
1336 		bus_dmamap_sync(sc->sc_dmat, q->q_tx.rge_tx_list_map,
1337 		    idx * sizeof(struct rge_tx_desc),
1338 		    sizeof(struct rge_tx_desc),
1339 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1340 
1341 		txstat = letoh32(q->q_tx.rge_tx_list[idx].rge_cmdsts);
1342 
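		/*
		 * A descriptor the chip still owns has not been transmitted
		 * yet; note that (free = 2) and stop reclaiming here.
		 */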
1343 		if (txstat & RGE_TDCMDSTS_OWN) {
1344 			free = 2;
1345 			break;
1346 		}
1347 
1348 		bus_dmamap_sync(sc->sc_dmat, txq->txq_dmamap, 0,
1349 		    txq->txq_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1350 		bus_dmamap_unload(sc->sc_dmat, txq->txq_dmamap);
1351 		m_freem(txq->txq_mbuf);
1352 		txq->txq_mbuf = NULL;
1353 
1354 		if (txstat & (RGE_TDCMDSTS_EXCESSCOLL | RGE_TDCMDSTS_COLL))
1355 			ifp->if_collisions++;
1356 		if (txstat & RGE_TDCMDSTS_TXERR)
1357 			ifp->if_oerrors++;
1358 
1359 		bus_dmamap_sync(sc->sc_dmat, q->q_tx.rge_tx_list_map,
1360 		    idx * sizeof(struct rge_tx_desc),
1361 		    sizeof(struct rge_tx_desc),
1362 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1363 
1364 		cons = RGE_NEXT_TX_DESC(idx);
1365 		free = 1;
1366 	}
1367 
1368 	if (free == 0)
1369 		return (0);
1370 
1371 	q->q_tx.rge_txq_considx = cons;
1372 
1373 	if (ifq_is_oactive(&ifp->if_snd))
1374 		ifq_restart(&ifp->if_snd);
1375 	else if (free == 2)
1376 		ifq_serialize(&ifp->if_snd, &sc->sc_task);
1377 	else
1378 		ifp->if_timer = 0;
1379 
1380 	return (1);
1381 }
1382 
1383 void
1384 rge_reset(struct rge_softc *sc)
1385 {
1386 	int i;
1387 
1388 	RGE_CLRBIT_4(sc, RGE_RXCFG, RGE_RXCFG_ALLPHYS | RGE_RXCFG_INDIV |
1389 	    RGE_RXCFG_MULTI | RGE_RXCFG_BROAD | RGE_RXCFG_RUNT |
1390 	    RGE_RXCFG_ERRPKT);
1391 
1392 	/* Enable RXDV gate. */
1393 	RGE_SETBIT_1(sc, RGE_PPSW, 0x08);
1394 	DELAY(2000);
1395 
1396 	RGE_SETBIT_1(sc, RGE_CMD, RGE_CMD_STOPREQ);
1397 	for (i = 0; i < 20; i++) {
1398 		DELAY(10);
1399 		if (!(RGE_READ_1(sc, RGE_CMD) & RGE_CMD_STOPREQ))
1400 			break;
1401 	}
1402 
1403 	for (i = 0; i < 3000; i++) {
1404 		DELAY(50);
1405 		if ((RGE_READ_1(sc, RGE_MCUCMD) & (RGE_MCUCMD_RXFIFO_EMPTY |
1406 		    RGE_MCUCMD_TXFIFO_EMPTY)) == (RGE_MCUCMD_RXFIFO_EMPTY |
1407 		    RGE_MCUCMD_TXFIFO_EMPTY))
1408 			break;
1409 	}
1410 	if (sc->rge_type != MAC_CFG3) {
1411 		for (i = 0; i < 3000; i++) {
1412 			DELAY(50);
1413 			if ((RGE_READ_2(sc, RGE_IM) & 0x0103) == 0x0103)
1414 				break;
1415 		}
1416 	}
1417 
1418 	DELAY(2000);
1419 
1420 	/* Soft reset. */
1421 	RGE_WRITE_1(sc, RGE_CMD, RGE_CMD_RESET);
1422 
1423 	for (i = 0; i < RGE_TIMEOUT; i++) {
1424 		DELAY(100);
1425 		if (!(RGE_READ_1(sc, RGE_CMD) & RGE_CMD_RESET))
1426 			break;
1427 	}
1428 	if (i == RGE_TIMEOUT)
1429 		printf("%s: reset never completed!\n", sc->sc_dev.dv_xname);
1430 }
1431 
1432 void
1433 rge_iff(struct rge_softc *sc)
1434 {
1435 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
1436 	struct arpcom *ac = &sc->sc_arpcom;
1437 	struct ether_multi *enm;
1438 	struct ether_multistep step;
1439 	uint32_t hashes[2];
1440 	uint32_t rxfilt;
1441 	int h = 0;
1442 
1443 	rxfilt = RGE_READ_4(sc, RGE_RXCFG);
1444 	rxfilt &= ~(RGE_RXCFG_ALLPHYS | RGE_RXCFG_MULTI);
1445 	ifp->if_flags &= ~IFF_ALLMULTI;
1446 
1447 	/*
1448 	 * Always accept frames destined to our station address.
1449 	 * Always accept broadcast frames.
1450 	 */
1451 	rxfilt |= RGE_RXCFG_INDIV | RGE_RXCFG_BROAD;
1452 
1453 	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
1454 		ifp->if_flags |= IFF_ALLMULTI;
1455 		rxfilt |= RGE_RXCFG_MULTI;
1456 		if (ifp->if_flags & IFF_PROMISC)
1457 			rxfilt |= RGE_RXCFG_ALLPHYS;
1458 		hashes[0] = hashes[1] = 0xffffffff;
1459 	} else {
1460 		rxfilt |= RGE_RXCFG_MULTI;
1461 		/* Program new filter. */
1462 		memset(hashes, 0, sizeof(hashes));
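		/*
		 * Hash each multicast address with the top 6 bits of its
		 * big-endian CRC32; the resulting 64-bit table is split
		 * across the MAR0/MAR4 registers below.
		 */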
1463 
1464 		ETHER_FIRST_MULTI(step, ac, enm);
1465 		while (enm != NULL) {
1466 			h = ether_crc32_be(enm->enm_addrlo,
1467 			    ETHER_ADDR_LEN) >> 26;
1468 
1469 			if (h < 32)
1470 				hashes[0] |= (1 << h);
1471 			else
1472 				hashes[1] |= (1 << (h - 32));
1473 
1474 			ETHER_NEXT_MULTI(step, enm);
1475 		}
1476 	}
1477 
1478 	RGE_WRITE_4(sc, RGE_RXCFG, rxfilt);
1479 	RGE_WRITE_4(sc, RGE_MAR0, swap32(hashes[1]));
1480 	RGE_WRITE_4(sc, RGE_MAR4, swap32(hashes[0]));
1481 }
1482 
1483 void
1484 rge_chipinit(struct rge_softc *sc)
1485 {
1486 	rge_exit_oob(sc);
1487 	rge_set_phy_power(sc, 1);
1488 	rge_hw_init(sc);
1489 	rge_hw_reset(sc);
1490 }
1491 
1492 void
1493 rge_set_phy_power(struct rge_softc *sc, int on)
1494 {
1495 	int i;
1496 
1497 	if (on) {
1498 		RGE_SETBIT_1(sc, RGE_PMCH, 0xc0);
1499 
1500 		rge_write_phy(sc, 0, MII_BMCR, BMCR_AUTOEN);
1501 
1502 		for (i = 0; i < RGE_TIMEOUT; i++) {
1503 			if ((rge_read_phy_ocp(sc, 0xa420) & 0x0007) == 3)
1504 				break;
1505 			DELAY(1000);
1506 		}
1507 	} else {
1508 		rge_write_phy(sc, 0, MII_BMCR, BMCR_AUTOEN | BMCR_PDOWN);
1509 		RGE_CLRBIT_1(sc, RGE_PMCH, 0x80);
1510 		RGE_CLRBIT_1(sc, RGE_PPSW, 0x40);
1511 	}
1512 }
1513 
1514 void
1515 rge_ephy_config(struct rge_softc *sc)
1516 {
1517 	switch (sc->rge_type) {
1518 	case MAC_CFG3:
1519 		rge_ephy_config_mac_cfg3(sc);
1520 		break;
1521 	case MAC_CFG5:
1522 		rge_ephy_config_mac_cfg5(sc);
1523 		break;
1524 	default:
1525 		break;	/* Can't happen. */
1526 	}
1527 }
1528 
1529 void
1530 rge_ephy_config_mac_cfg3(struct rge_softc *sc)
1531 {
1532 	uint16_t val;
1533 	int i;
1534 
1535 	for (i = 0; i < nitems(rtl8125_mac_cfg3_ephy); i++)
1536 		rge_write_ephy(sc, rtl8125_mac_cfg3_ephy[i].reg,
1537 		    rtl8125_mac_cfg3_ephy[i].val);
1538 
1539 	val = rge_read_ephy(sc, 0x002a) & ~0x7000;
1540 	rge_write_ephy(sc, 0x002a, val | 0x3000);
1541 	RGE_EPHY_CLRBIT(sc, 0x0019, 0x0040);
1542 	RGE_EPHY_SETBIT(sc, 0x001b, 0x0e00);
1543 	RGE_EPHY_CLRBIT(sc, 0x001b, 0x7000);
1544 	rge_write_ephy(sc, 0x0002, 0x6042);
1545 	rge_write_ephy(sc, 0x0006, 0x0014);
1546 	val = rge_read_ephy(sc, 0x006a) & ~0x7000;
1547 	rge_write_ephy(sc, 0x006a, val | 0x3000);
1548 	RGE_EPHY_CLRBIT(sc, 0x0059, 0x0040);
1549 	RGE_EPHY_SETBIT(sc, 0x005b, 0x0e00);
1550 	RGE_EPHY_CLRBIT(sc, 0x005b, 0x7000);
1551 	rge_write_ephy(sc, 0x0042, 0x6042);
1552 	rge_write_ephy(sc, 0x0046, 0x0014);
1553 }
1554 
1555 void
1556 rge_ephy_config_mac_cfg5(struct rge_softc *sc)
1557 {
1558 	int i;
1559 
1560 	for (i = 0; i < nitems(rtl8125_mac_cfg5_ephy); i++)
1561 		rge_write_ephy(sc, rtl8125_mac_cfg5_ephy[i].reg,
1562 		    rtl8125_mac_cfg5_ephy[i].val);
1563 }
1564 
1565 int
1566 rge_phy_config(struct rge_softc *sc)
1567 {
1568 	int i;
1569 
1570 	rge_ephy_config(sc);
1571 
1572 	/* PHY reset. */
1573 	rge_write_phy(sc, 0, MII_ANAR,
1574 	    rge_read_phy(sc, 0, MII_ANAR) &
1575 	    ~(ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10));
1576 	rge_write_phy(sc, 0, MII_100T2CR,
1577 	    rge_read_phy(sc, 0, MII_100T2CR) &
1578 	    ~(GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX));
1579 	RGE_PHY_CLRBIT(sc, 0xa5d4, RGE_ADV_2500TFDX);
1580 	rge_write_phy(sc, 0, MII_BMCR, BMCR_RESET | BMCR_AUTOEN |
1581 	    BMCR_STARTNEG);
1582 	for (i = 0; i < 2500; i++) {
1583 		if (!(rge_read_phy(sc, 0, MII_BMCR) & BMCR_RESET))
1584 			break;
1585 		DELAY(1000);
1586 	}
1587 	if (i == 2500) {
1588 		printf("%s: PHY reset failed\n", sc->sc_dev.dv_xname);
1589 		return (ETIMEDOUT);
1590 	}
1591 
1592 	/* Read microcode version. */
1593 	rge_write_phy_ocp(sc, 0xa436, 0x801e);
1594 	sc->rge_mcodever = rge_read_phy_ocp(sc, 0xa438);
1595 
1596 	switch (sc->rge_type) {
1597 	case MAC_CFG3:
1598 		rge_phy_config_mac_cfg3(sc);
1599 		break;
1600 	case MAC_CFG5:
1601 		rge_phy_config_mac_cfg5(sc);
1602 		break;
1603 	default:
1604 		break;	/* Can't happen. */
1605 	}
1606 
1607 	RGE_PHY_CLRBIT(sc, 0xa5b4, 0x8000);
1608 
1609 	/* Disable EEE. */
1610 	RGE_MAC_CLRBIT(sc, 0xe040, 0x0003);
1611 	if (sc->rge_type == MAC_CFG3) {
1612 		RGE_MAC_CLRBIT(sc, 0xeb62, 0x0006);
1613 		RGE_PHY_CLRBIT(sc, 0xa432, 0x0010);
1614 	}
1615 	RGE_PHY_CLRBIT(sc, 0xa5d0, 0x0006);
1616 	RGE_PHY_CLRBIT(sc, 0xa6d4, 0x0001);
1617 	RGE_PHY_CLRBIT(sc, 0xa6d8, 0x0010);
1618 	RGE_PHY_CLRBIT(sc, 0xa428, 0x0080);
1619 	RGE_PHY_CLRBIT(sc, 0xa4a2, 0x0200);
1620 
1621 	/* Advanced EEE. */
1622 	rge_patch_phy_mcu(sc, 1);
1623 	RGE_MAC_CLRBIT(sc, 0xe052, 0x0001);
1624 	RGE_PHY_CLRBIT(sc, 0xa442, 0x3000);
1625 	RGE_PHY_CLRBIT(sc, 0xa430, 0x8000);
1626 	rge_patch_phy_mcu(sc, 0);
1627 
1628 	return (0);
1629 }
1630 
1631 void
1632 rge_phy_config_mac_cfg3(struct rge_softc *sc)
1633 {
1634 	uint16_t val;
1635 	int i;
1636 	static const uint16_t mac_cfg3_a438_value[] =
1637 	    { 0x0043, 0x00a7, 0x00d6, 0x00ec, 0x00f6, 0x00fb, 0x00fd, 0x00ff,
1638 	      0x00bb, 0x0058, 0x0029, 0x0013, 0x0009, 0x0004, 0x0002 };
1639 
1640 	static const uint16_t mac_cfg3_b88e_value[] =
1641 	    { 0xc091, 0x6e12, 0xc092, 0x1214, 0xc094, 0x1516, 0xc096, 0x171b,
1642 	      0xc098, 0x1b1c, 0xc09a, 0x1f1f, 0xc09c, 0x2021, 0xc09e, 0x2224,
1643 	      0xc0a0, 0x2424, 0xc0a2, 0x2424, 0xc0a4, 0x2424, 0xc018, 0x0af2,
1644 	      0xc01a, 0x0d4a, 0xc01c, 0x0f26, 0xc01e, 0x118d, 0xc020, 0x14f3,
1645 	      0xc022, 0x175a, 0xc024, 0x19c0, 0xc026, 0x1c26, 0xc089, 0x6050,
1646 	      0xc08a, 0x5f6e, 0xc08c, 0x6e6e, 0xc08e, 0x6e6e, 0xc090, 0x6e12 };
1647 
1648 	rge_phy_config_mcu(sc, RGE_MAC_CFG3_MCODE_VER);
1649 
1650 	RGE_PHY_SETBIT(sc, 0xad4e, 0x0010);
1651 	val = rge_read_phy_ocp(sc, 0xad16) & ~0x03ff;
1652 	rge_write_phy_ocp(sc, 0xad16, val | 0x03ff);
1653 	val = rge_read_phy_ocp(sc, 0xad32) & ~0x003f;
1654 	rge_write_phy_ocp(sc, 0xad32, val | 0x0006);
1655 	RGE_PHY_CLRBIT(sc, 0xac08, 0x1000);
1656 	RGE_PHY_CLRBIT(sc, 0xac08, 0x0100);
1657 	val = rge_read_phy_ocp(sc, 0xacc0) & ~0x0003;
1658 	rge_write_phy_ocp(sc, 0xacc0, val | 0x0002);
1659 	val = rge_read_phy_ocp(sc, 0xad40) & ~0x00e0;
1660 	rge_write_phy_ocp(sc, 0xad40, val | 0x0040);
1661 	val = rge_read_phy_ocp(sc, 0xad40) & ~0x0007;
1662 	rge_write_phy_ocp(sc, 0xad40, val | 0x0004);
1663 	RGE_PHY_CLRBIT(sc, 0xac14, 0x0080);
1664 	RGE_PHY_CLRBIT(sc, 0xac80, 0x0300);
1665 	val = rge_read_phy_ocp(sc, 0xac5e) & ~0x0007;
1666 	rge_write_phy_ocp(sc, 0xac5e, val | 0x0002);
1667 	rge_write_phy_ocp(sc, 0xad4c, 0x00a8);
1668 	rge_write_phy_ocp(sc, 0xac5c, 0x01ff);
1669 	val = rge_read_phy_ocp(sc, 0xac8a) & ~0x00f0;
1670 	rge_write_phy_ocp(sc, 0xac8a, val | 0x0030);
1671 	rge_write_phy_ocp(sc, 0xb87c, 0x8157);
1672 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
1673 	rge_write_phy_ocp(sc, 0xb87e, val | 0x0500);
1674 	rge_write_phy_ocp(sc, 0xb87c, 0x8159);
1675 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
1676 	rge_write_phy_ocp(sc, 0xb87e, val | 0x0700);
1677 	rge_write_phy_ocp(sc, 0xb87c, 0x80a2);
1678 	rge_write_phy_ocp(sc, 0xb87e, 0x0153);
1679 	rge_write_phy_ocp(sc, 0xb87c, 0x809c);
1680 	rge_write_phy_ocp(sc, 0xb87e, 0x0153);
1681 
1682 	rge_write_phy_ocp(sc, 0xa436, 0x81b3);
1683 	for (i = 0; i < nitems(mac_cfg3_a438_value); i++)
1684 		rge_write_phy_ocp(sc, 0xa438, mac_cfg3_a438_value[i]);
1685 	for (i = 0; i < 26; i++)
1686 		rge_write_phy_ocp(sc, 0xa438, 0);
1687 	rge_write_phy_ocp(sc, 0xa436, 0x8257);
1688 	rge_write_phy_ocp(sc, 0xa438, 0x020f);
1689 	rge_write_phy_ocp(sc, 0xa436, 0x80ea);
1690 	rge_write_phy_ocp(sc, 0xa438, 0x7843);
1691 
1692 	rge_patch_phy_mcu(sc, 1);
1693 	RGE_PHY_CLRBIT(sc, 0xb896, 0x0001);
1694 	RGE_PHY_CLRBIT(sc, 0xb892, 0xff00);
1695 	for (i = 0; i < nitems(mac_cfg3_b88e_value); i += 2) {
1696 		rge_write_phy_ocp(sc, 0xb88e, mac_cfg3_b88e_value[i]);
1697 		rge_write_phy_ocp(sc, 0xb890, mac_cfg3_b88e_value[i + 1]);
1698 	}
1699 	RGE_PHY_SETBIT(sc, 0xb896, 0x0001);
1700 	rge_patch_phy_mcu(sc, 0);
1701 
1702 	RGE_PHY_SETBIT(sc, 0xd068, 0x2000);
1703 	rge_write_phy_ocp(sc, 0xa436, 0x81a2);
1704 	RGE_PHY_SETBIT(sc, 0xa438, 0x0100);
1705 	val = rge_read_phy_ocp(sc, 0xb54c) & ~0xff00;
1706 	rge_write_phy_ocp(sc, 0xb54c, val | 0xdb00);
1707 	RGE_PHY_CLRBIT(sc, 0xa454, 0x0001);
1708 	RGE_PHY_SETBIT(sc, 0xa5d4, 0x0020);
1709 	RGE_PHY_CLRBIT(sc, 0xad4e, 0x0010);
1710 	RGE_PHY_CLRBIT(sc, 0xa86a, 0x0001);
1711 	RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
1712 	RGE_PHY_SETBIT(sc, 0xa424, 0x0008);
1713 }
1714 
1715 void
1716 rge_phy_config_mac_cfg5(struct rge_softc *sc)
1717 {
1718 	uint16_t val;
1719 	int i;
1720 
1721 	rge_phy_config_mcu(sc, RGE_MAC_CFG5_MCODE_VER);
1722 
1723 	RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
1724 	val = rge_read_phy_ocp(sc, 0xac46) & ~0x00f0;
1725 	rge_write_phy_ocp(sc, 0xac46, val | 0x0090);
1726 	val = rge_read_phy_ocp(sc, 0xad30) & ~0x0003;
1727 	rge_write_phy_ocp(sc, 0xad30, val | 0x0001);
1728 	rge_write_phy_ocp(sc, 0xb87c, 0x80f5);
1729 	rge_write_phy_ocp(sc, 0xb87e, 0x760e);
1730 	rge_write_phy_ocp(sc, 0xb87c, 0x8107);
1731 	rge_write_phy_ocp(sc, 0xb87e, 0x360e);
1732 	rge_write_phy_ocp(sc, 0xb87c, 0x8551);
1733 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
1734 	rge_write_phy_ocp(sc, 0xb87e, val | 0x0800);
1735 	val = rge_read_phy_ocp(sc, 0xbf00) & ~0xe000;
1736 	rge_write_phy_ocp(sc, 0xbf00, val | 0xa000);
1737 	val = rge_read_phy_ocp(sc, 0xbf46) & ~0x0f00;
1738 	rge_write_phy_ocp(sc, 0xbf46, val | 0x0300);
1739 	for (i = 0; i < 10; i++) {
1740 		rge_write_phy_ocp(sc, 0xa436, 0x8044 + i * 6);
1741 		rge_write_phy_ocp(sc, 0xa438, 0x2417);
1742 	}
1743 	RGE_PHY_SETBIT(sc, 0xa4ca, 0x0040);
1744 	val = rge_read_phy_ocp(sc, 0xbf84) & ~0xe000;
1745 	rge_write_phy_ocp(sc, 0xbf84, val | 0xa000);
1746 	rge_write_phy_ocp(sc, 0xa436, 0x8170);
1747 	val = rge_read_phy_ocp(sc, 0xa438) & ~0x2700;
1748 	rge_write_phy_ocp(sc, 0xa438, val | 0xd800);
1749 	RGE_PHY_SETBIT(sc, 0xa424, 0x0008);
1750 }
1751 
1752 void
1753 rge_phy_config_mcu(struct rge_softc *sc, uint16_t mcode_version)
1754 {
1755 	if (sc->rge_mcodever != mcode_version) {
1756 		int i;
1757 
1758 		rge_patch_phy_mcu(sc, 1);
1759 
1760 		if (sc->rge_type == MAC_CFG3) {
1761 			rge_write_phy_ocp(sc, 0xa436, 0x8024);
1762 			rge_write_phy_ocp(sc, 0xa438, 0x8601);
1763 			rge_write_phy_ocp(sc, 0xa436, 0xb82e);
1764 			rge_write_phy_ocp(sc, 0xa438, 0x0001);
1765 
1766 			RGE_PHY_SETBIT(sc, 0xb820, 0x0080);
1767 
1768 			for (i = 0; i < nitems(rtl8125_mac_cfg3_mcu); i++) {
1769 				rge_write_phy_ocp(sc,
1770 				    rtl8125_mac_cfg3_mcu[i].reg,
1771 				    rtl8125_mac_cfg3_mcu[i].val);
1772 			}
1773 
1774 			RGE_PHY_CLRBIT(sc, 0xb820, 0x0080);
1775 
1776 			rge_write_phy_ocp(sc, 0xa436, 0);
1777 			rge_write_phy_ocp(sc, 0xa438, 0);
1778 			RGE_PHY_CLRBIT(sc, 0xb82e, 0x0001);
1779 			rge_write_phy_ocp(sc, 0xa436, 0x8024);
1780 			rge_write_phy_ocp(sc, 0xa438, 0);
1781 		} else if (sc->rge_type == MAC_CFG5) {
1782 			for (i = 0; i < nitems(rtl8125_mac_cfg5_mcu); i++) {
1783 				rge_write_phy_ocp(sc,
1784 				    rtl8125_mac_cfg5_mcu[i].reg,
1785 				    rtl8125_mac_cfg5_mcu[i].val);
1786 			}
1787 		}
1788 
1789 		rge_patch_phy_mcu(sc, 0);
1790 
1791 		/* Write microcode version. */
1792 		rge_write_phy_ocp(sc, 0xa436, 0x801e);
1793 		rge_write_phy_ocp(sc, 0xa438, mcode_version);
1794 	}
1795 }
1796 
1797 void
1798 rge_set_macaddr(struct rge_softc *sc, const uint8_t *addr)
1799 {
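	/* Unlock the config registers around the MAC address update. */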
1800 	RGE_SETBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
1801 	RGE_WRITE_4(sc, RGE_MAC0,
1802 	    addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
1803 	RGE_WRITE_4(sc, RGE_MAC4,
1804 	    addr[5] <<  8 | addr[4]);
1805 	RGE_CLRBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
1806 }
1807 
1808 void
1809 rge_get_macaddr(struct rge_softc *sc, uint8_t *addr)
1810 {
1811 	int i;
1812 
1813 	for (i = 0; i < ETHER_ADDR_LEN; i++)
1814 		addr[i] = RGE_READ_1(sc, RGE_MAC0 + i);
1815 
1816 	*(uint32_t *)&addr[0] = RGE_READ_4(sc, RGE_ADDR0);
1817 	*(uint16_t *)&addr[4] = RGE_READ_2(sc, RGE_ADDR1);
1818 
1819 	rge_set_macaddr(sc, addr);
1820 }
1821 
1822 void
1823 rge_hw_init(struct rge_softc *sc)
1824 {
1825 	uint16_t reg;
1826 	int i, npages;
1827 
1828 	rge_disable_aspm_clkreq(sc);
1829 	RGE_CLRBIT_1(sc, 0xf1, 0x80);
1830 
1831 	/* Disable UPS. */
1832 	RGE_MAC_CLRBIT(sc, 0xd40a, 0x0010);
1833 
1834 	/* Disable MAC MCU. */
1835 	rge_disable_aspm_clkreq(sc);
1836 	rge_write_mac_ocp(sc, 0xfc48, 0);
1837 	for (reg = 0xfc28; reg < 0xfc48; reg += 2)
1838 		rge_write_mac_ocp(sc, reg, 0);
1839 	DELAY(3000);
1840 	rge_write_mac_ocp(sc, 0xfc26, 0);
1841 
1842 	if (sc->rge_type == MAC_CFG3) {
1843 		for (npages = 0; npages < 3; npages++) {
1844 			rge_switch_mcu_ram_page(sc, npages);
1845 			for (i = 0; i < nitems(rtl8125_mac_bps); i++) {
1846 				if (npages == 0)
1847 					rge_write_mac_ocp(sc,
1848 					    rtl8125_mac_bps[i].reg,
1849 					    rtl8125_mac_bps[i].val);
1850 				else if (npages == 1)
1851 					rge_write_mac_ocp(sc,
1852 					    rtl8125_mac_bps[i].reg, 0);
1853 				else {
1854 					if (rtl8125_mac_bps[i].reg < 0xf9f8)
1855 						rge_write_mac_ocp(sc,
1856 						    rtl8125_mac_bps[i].reg, 0);
1857 				}
1858 			}
1859 			if (npages == 2) {
1860 				rge_write_mac_ocp(sc, 0xf9f8, 0x6486);
1861 				rge_write_mac_ocp(sc, 0xf9fa, 0x0b15);
1862 				rge_write_mac_ocp(sc, 0xf9fc, 0x090e);
1863 				rge_write_mac_ocp(sc, 0xf9fe, 0x1139);
1864 			}
1865 		}
1866 		rge_write_mac_ocp(sc, 0xfc26, 0x8000);
1867 		rge_write_mac_ocp(sc, 0xfc2a, 0x0540);
1868 		rge_write_mac_ocp(sc, 0xfc2e, 0x0a06);
1869 		rge_write_mac_ocp(sc, 0xfc30, 0x0eb8);
1870 		rge_write_mac_ocp(sc, 0xfc32, 0x3a5c);
1871 		rge_write_mac_ocp(sc, 0xfc34, 0x10a8);
1872 		rge_write_mac_ocp(sc, 0xfc40, 0x0d54);
1873 		rge_write_mac_ocp(sc, 0xfc42, 0x0e24);
1874 		rge_write_mac_ocp(sc, 0xfc48, 0x307a);
1875 	} else if (sc->rge_type == MAC_CFG5) {
1876 		rge_switch_mcu_ram_page(sc, 0);
1877 		for (i = 0; i < nitems(rtl8125b_mac_bps); i++) {
1878 			rge_write_mac_ocp(sc, rtl8125b_mac_bps[i].reg,
1879 			    rtl8125b_mac_bps[i].val);
1880 		}
1881 	}
1882 
1883 	/* Disable PHY power saving. */
1884 	rge_disable_phy_ocp_pwrsave(sc);
1885 
1886 	/* Set PCIe uncorrectable error status. */
1887 	rge_write_csi(sc, 0x108,
1888 	    rge_read_csi(sc, 0x108) | 0x00100000);
1889 }
1890 
1891 void
1892 rge_hw_reset(struct rge_softc *sc)
1893 {
1894 	/* Disable interrupts */
1895 	RGE_WRITE_4(sc, RGE_IMR, 0);
1896 	RGE_WRITE_4(sc, RGE_ISR, RGE_READ_4(sc, RGE_ISR));
1897 
1898 	/* Clear timer interrupts. */
1899 	RGE_WRITE_4(sc, RGE_TIMERINT0, 0);
1900 	RGE_WRITE_4(sc, RGE_TIMERINT1, 0);
1901 	RGE_WRITE_4(sc, RGE_TIMERINT2, 0);
1902 	RGE_WRITE_4(sc, RGE_TIMERINT3, 0);
1903 
1904 	rge_reset(sc);
1905 }
1906 
1907 void
1908 rge_disable_phy_ocp_pwrsave(struct rge_softc *sc)
1909 {
1910 	if (rge_read_phy_ocp(sc, 0xc416) != 0x0500) {
1911 		rge_patch_phy_mcu(sc, 1);
1912 		rge_write_phy_ocp(sc, 0xc416, 0);
1913 		rge_write_phy_ocp(sc, 0xc416, 0x0500);
1914 		rge_patch_phy_mcu(sc, 0);
1915 	}
1916 }
1917 
1918 void
1919 rge_patch_phy_mcu(struct rge_softc *sc, int set)
1920 {
1921 	int i;
1922 
1923 	if (set)
1924 		RGE_PHY_SETBIT(sc, 0xb820, 0x0010);
1925 	else
1926 		RGE_PHY_CLRBIT(sc, 0xb820, 0x0010);
1927 
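	/* Poll until the PHY reports that the patch request took effect. */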
1928 	for (i = 0; i < 1000; i++) {
1929 		if (set) {
1930 			if ((rge_read_phy_ocp(sc, 0xb800) & 0x0040) != 0)
1931 				break;
1932 		} else {
1933 			if (!(rge_read_phy_ocp(sc, 0xb800) & 0x0040))
1934 				break;
1935 		}
1936 		DELAY(100);
1937 	}
1938 	if (i == 1000)
1939 		printf("%s: timeout waiting to patch phy mcu\n",
1940 		    sc->sc_dev.dv_xname);
1941 }
1942 
1943 void
1944 rge_add_media_types(struct rge_softc *sc)
1945 {
1946 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_10_T, 0, NULL);
1947 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_10_T | IFM_FDX, 0, NULL);
1948 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_100_TX, 0, NULL);
1949 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_100_TX | IFM_FDX, 0, NULL);
1950 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_1000_T, 0, NULL);
1951 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
1952 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_2500_T, 0, NULL);
1953 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_2500_T | IFM_FDX, 0, NULL);
1954 }
1955 
1956 void
1957 rge_config_imtype(struct rge_softc *sc, int imtype)
1958 {
1959 	switch (imtype) {
1960 	case RGE_IMTYPE_NONE:
1961 		sc->rge_intrs = RGE_INTRS;
1962 		break;
1963 	case RGE_IMTYPE_SIM:
1964 		sc->rge_intrs = RGE_INTRS_TIMER;
1965 		break;
1966 	default:
1967 		panic("%s: unknown imtype %d", sc->sc_dev.dv_xname, imtype);
1968 	}
1969 }
1970 
1971 void
1972 rge_disable_aspm_clkreq(struct rge_softc *sc)
1973 {
1974 	RGE_SETBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
1975 	RGE_CLRBIT_1(sc, RGE_CFG2, RGE_CFG2_CLKREQ_EN);
1976 	RGE_CLRBIT_1(sc, RGE_CFG5, RGE_CFG5_PME_STS);
1977 	RGE_CLRBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
1978 }
1979 
1980 void
1981 rge_disable_hw_im(struct rge_softc *sc)
1982 {
1983 	RGE_WRITE_2(sc, RGE_IM, 0);
1984 }
1985 
1986 void
1987 rge_disable_sim_im(struct rge_softc *sc)
1988 {
1989 	RGE_WRITE_4(sc, RGE_TIMERINT0, 0);
1990 	sc->rge_timerintr = 0;
1991 }
1992 
1993 void
1994 rge_setup_sim_im(struct rge_softc *sc)
1995 {
1996 	RGE_WRITE_4(sc, RGE_TIMERINT0, 0x2600);
1997 	RGE_WRITE_4(sc, RGE_TIMERCNT, 1);
1998 	sc->rge_timerintr = 1;
1999 }
2000 
2001 void
2002 rge_setup_intr(struct rge_softc *sc, int imtype)
2003 {
2004 	rge_config_imtype(sc, imtype);
2005 
2006 	/* Enable interrupts. */
2007 	RGE_WRITE_4(sc, RGE_IMR, sc->rge_intrs);
2008 
2009 	switch (imtype) {
2010 	case RGE_IMTYPE_NONE:
2011 		rge_disable_sim_im(sc);
2012 		rge_disable_hw_im(sc);
2013 		break;
2014 	case RGE_IMTYPE_SIM:
2015 		rge_disable_hw_im(sc);
2016 		rge_setup_sim_im(sc);
2017 		break;
2018 	default:
2019 		panic("%s: unknown imtype %d", sc->sc_dev.dv_xname, imtype);
2020 	}
2021 }
2022 
2023 void
2024 rge_switch_mcu_ram_page(struct rge_softc *sc, int page)
2025 {
2026 	uint16_t val;
2027 
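	/* The page index lives in the low two bits of MAC OCP reg 0xe446. */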
2028 	val = rge_read_mac_ocp(sc, 0xe446) & ~0x0003;
2029 	val |= page;
2030 	rge_write_mac_ocp(sc, 0xe446, val);
2031 }
2032 
2033 void
2034 rge_exit_oob(struct rge_softc *sc)
2035 {
2036 	int i;
2037 
2038 	RGE_CLRBIT_4(sc, RGE_RXCFG, RGE_RXCFG_ALLPHYS | RGE_RXCFG_INDIV |
2039 	    RGE_RXCFG_MULTI | RGE_RXCFG_BROAD | RGE_RXCFG_RUNT |
2040 	    RGE_RXCFG_ERRPKT);
2041 
2042 	/* Disable RealWoW. */
2043 	rge_write_mac_ocp(sc, 0xc0bc, 0x00ff);
2044 
2045 	rge_reset(sc);
2046 
2047 	/* Disable OOB. */
2048 	RGE_CLRBIT_1(sc, RGE_MCUCMD, RGE_MCUCMD_IS_OOB);
2049 
2050 	RGE_MAC_CLRBIT(sc, 0xe8de, 0x4000);
2051 
2052 	for (i = 0; i < 10; i++) {
2053 		DELAY(100);
2054 		if (RGE_READ_2(sc, RGE_TWICMD) & 0x0200)
2055 			break;
2056 	}
2057 
2058 	rge_write_mac_ocp(sc, 0xc0aa, 0x07d0);
2059 	rge_write_mac_ocp(sc, 0xc0a6, 0x01b5);
2060 	rge_write_mac_ocp(sc, 0xc01e, 0x5555);
2061 
2062 	for (i = 0; i < 10; i++) {
2063 		DELAY(100);
2064 		if (RGE_READ_2(sc, RGE_TWICMD) & 0x0200)
2065 			break;
2066 	}
2067 
2068 	if (rge_read_mac_ocp(sc, 0xd42c) & 0x0100) {
2069 		for (i = 0; i < RGE_TIMEOUT; i++) {
2070 			if ((rge_read_phy_ocp(sc, 0xa420) & 0x0007) == 2)
2071 				break;
2072 			DELAY(1000);
2073 		}
2074 		RGE_MAC_CLRBIT(sc, 0xd42c, 0x0100);
2075 		if (sc->rge_type != MAC_CFG3)
2076 			RGE_PHY_CLRBIT(sc, 0xa466, 0x0001);
2077 		RGE_PHY_CLRBIT(sc, 0xa468, 0x000a);
2078 	}
2079 }
2080 
2081 void
2082 rge_write_csi(struct rge_softc *sc, uint32_t reg, uint32_t val)
2083 {
2084 	int i;
2085 
2086 	RGE_WRITE_4(sc, RGE_CSIDR, val);
2087 	RGE_WRITE_4(sc, RGE_CSIAR, (reg & RGE_CSIAR_ADDR_MASK) |
2088 	    (RGE_CSIAR_BYTE_EN << RGE_CSIAR_BYTE_EN_SHIFT) | RGE_CSIAR_BUSY);
2089 
2090 	for (i = 0; i < 20000; i++) {
2091 		DELAY(1);
2092 		if (!(RGE_READ_4(sc, RGE_CSIAR) & RGE_CSIAR_BUSY))
2093 			break;
2094 	}
2095 
2096 	DELAY(20);
2097 }
2098 
2099 uint32_t
2100 rge_read_csi(struct rge_softc *sc, uint32_t reg)
2101 {
2102 	int i;
2103 
2104 	RGE_WRITE_4(sc, RGE_CSIAR, (reg & RGE_CSIAR_ADDR_MASK) |
2105 	    (RGE_CSIAR_BYTE_EN << RGE_CSIAR_BYTE_EN_SHIFT));
2106 
2107 	for (i = 0; i < 20000; i++) {
2108 		DELAY(1);
2109 		if (RGE_READ_4(sc, RGE_CSIAR) & RGE_CSIAR_BUSY)
2110 			break;
2111 	}
2112 
2113 	DELAY(20);
2114 
2115 	return (RGE_READ_4(sc, RGE_CSIDR));
2116 }
2117 
2118 void
2119 rge_write_mac_ocp(struct rge_softc *sc, uint16_t reg, uint16_t val)
2120 {
2121 	uint32_t tmp;
2122 
2123 	tmp = (reg >> 1) << RGE_MACOCP_ADDR_SHIFT;
2124 	tmp += val;
2125 	tmp |= RGE_MACOCP_BUSY;
2126 	RGE_WRITE_4(sc, RGE_MACOCP, tmp);
2127 }
2128 
2129 uint16_t
2130 rge_read_mac_ocp(struct rge_softc *sc, uint16_t reg)
2131 {
2132 	uint32_t val;
2133 
2134 	val = (reg >> 1) << RGE_MACOCP_ADDR_SHIFT;
2135 	RGE_WRITE_4(sc, RGE_MACOCP, val);
2136 
2137 	return (RGE_READ_4(sc, RGE_MACOCP) & RGE_MACOCP_DATA_MASK);
2138 }
2139 
2140 void
2141 rge_write_ephy(struct rge_softc *sc, uint16_t reg, uint16_t val)
2142 {
2143 	uint32_t tmp;
2144 	int i;
2145 
2146 	tmp = (reg & RGE_EPHYAR_ADDR_MASK) << RGE_EPHYAR_ADDR_SHIFT;
2147 	tmp |= RGE_EPHYAR_BUSY | (val & RGE_EPHYAR_DATA_MASK);
2148 	RGE_WRITE_4(sc, RGE_EPHYAR, tmp);
2149 
2150 	for (i = 0; i < 10; i++) {
2151 		DELAY(100);
2152 		if (!(RGE_READ_4(sc, RGE_EPHYAR) & RGE_EPHYAR_BUSY))
2153 			break;
2154 	}
2155 
2156 	DELAY(20);
2157 }
2158 
2159 uint16_t
2160 rge_read_ephy(struct rge_softc *sc, uint16_t reg)
2161 {
2162 	uint32_t val;
2163 	int i;
2164 
2165 	val = (reg & RGE_EPHYAR_ADDR_MASK) << RGE_EPHYAR_ADDR_SHIFT;
2166 	RGE_WRITE_4(sc, RGE_EPHYAR, val);
2167 
2168 	for (i = 0; i < 10; i++) {
2169 		DELAY(100);
2170 		val = RGE_READ_4(sc, RGE_EPHYAR);
2171 		if (val & RGE_EPHYAR_BUSY)
2172 			break;
2173 	}
2174 
2175 	DELAY(20);
2176 
2177 	return (val & RGE_EPHYAR_DATA_MASK);
2178 }
2179 
2180 void
2181 rge_write_phy(struct rge_softc *sc, uint16_t addr, uint16_t reg, uint16_t val)
2182 {
2183 	uint16_t off, phyaddr;
2184 
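	/* Translate the conventional MII register into a PHY OCP address. */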
2185 	phyaddr = addr ? addr : RGE_PHYBASE + (reg / 8);
2186 	phyaddr <<= 4;
2187 
2188 	off = addr ? reg : 0x10 + (reg % 8);
2189 
2190 	phyaddr += (off - 16) << 1;
2191 
2192 	rge_write_phy_ocp(sc, phyaddr, val);
2193 }
2194 
2195 uint16_t
2196 rge_read_phy(struct rge_softc *sc, uint16_t addr, uint16_t reg)
2197 {
2198 	uint16_t off, phyaddr;
2199 
2200 	phyaddr = addr ? addr : RGE_PHYBASE + (reg / 8);
2201 	phyaddr <<= 4;
2202 
2203 	off = addr ? reg : 0x10 + (reg % 8);
2204 
2205 	phyaddr += (off - 16) << 1;
2206 
2207 	return (rge_read_phy_ocp(sc, phyaddr));
2208 }
2209 
2210 void
2211 rge_write_phy_ocp(struct rge_softc *sc, uint16_t reg, uint16_t val)
2212 {
2213 	uint32_t tmp;
2214 	int i;
2215 
2216 	tmp = (reg >> 1) << RGE_PHYOCP_ADDR_SHIFT;
2217 	tmp |= RGE_PHYOCP_BUSY | val;
2218 	RGE_WRITE_4(sc, RGE_PHYOCP, tmp);
2219 
2220 	for (i = 0; i < RGE_TIMEOUT; i++) {
2221 		DELAY(1);
2222 		if (!(RGE_READ_4(sc, RGE_PHYOCP) & RGE_PHYOCP_BUSY))
2223 			break;
2224 	}
2225 }
2226 
2227 uint16_t
2228 rge_read_phy_ocp(struct rge_softc *sc, uint16_t reg)
2229 {
2230 	uint32_t val;
2231 	int i;
2232 
2233 	val = (reg >> 1) << RGE_PHYOCP_ADDR_SHIFT;
2234 	RGE_WRITE_4(sc, RGE_PHYOCP, val);
2235 
2236 	for (i = 0; i < RGE_TIMEOUT; i++) {
2237 		DELAY(1);
2238 		val = RGE_READ_4(sc, RGE_PHYOCP);
2239 		if (val & RGE_PHYOCP_BUSY)
2240 			break;
2241 	}
2242 
2243 	return (val & RGE_PHYOCP_DATA_MASK);
2244 }
2245 
2246 int
2247 rge_get_link_status(struct rge_softc *sc)
2248 {
2249 	return ((RGE_READ_2(sc, RGE_PHYSTAT) & RGE_PHYSTAT_LINK) ? 1 : 0);
2250 }
2251 
2252 void
2253 rge_txstart(void *arg)
2254 {
2255 	struct rge_softc *sc = arg;
2256 
2257 	RGE_WRITE_2(sc, RGE_TXSTART, RGE_TXSTART_START);
2258 }
2259 
2260 void
2261 rge_tick(void *arg)
2262 {
2263 	struct rge_softc *sc = arg;
2264 	int s;
2265 
2266 	s = splnet();
2267 	rge_link_state(sc);
2268 	splx(s);
2269 
2270 	timeout_add_sec(&sc->sc_timeout, 1);
2271 }
2272 
2273 void
2274 rge_link_state(struct rge_softc *sc)
2275 {
2276 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
2277 	int link = LINK_STATE_DOWN;
2278 
2279 	if (rge_get_link_status(sc))
2280 		link = LINK_STATE_UP;
2281 
2282 	if (ifp->if_link_state != link) {
2283 		ifp->if_link_state = link;
2284 		if_link_state_change(ifp);
2285 	}
2286 }
2287 
2288 #ifndef SMALL_KERNEL
2289 int
2290 rge_wol(struct ifnet *ifp, int enable)
2291 {
2292 	struct rge_softc *sc = ifp->if_softc;
2293 
2294 	if (enable) {
2295 		if (!(RGE_READ_1(sc, RGE_CFG1) & RGE_CFG1_PM_EN)) {
2296 			printf("%s: power management is disabled, "
2297 			    "cannot do WOL\n", sc->sc_dev.dv_xname);
2298 			return (ENOTSUP);
2299 		}
2300 
2301 	}
2302 
2303 	rge_iff(sc);
2304 
2305 	if (enable)
2306 		RGE_MAC_SETBIT(sc, 0xc0b6, 0x0001);
2307 	else
2308 		RGE_MAC_CLRBIT(sc, 0xc0b6, 0x0001);
2309 
2310 	RGE_SETBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
2311 	RGE_CLRBIT_1(sc, RGE_CFG5, RGE_CFG5_WOL_LANWAKE | RGE_CFG5_WOL_UCAST |
2312 	    RGE_CFG5_WOL_MCAST | RGE_CFG5_WOL_BCAST);
2313 	RGE_CLRBIT_1(sc, RGE_CFG3, RGE_CFG3_WOL_LINK | RGE_CFG3_WOL_MAGIC);
2314 	if (enable)
2315 		RGE_SETBIT_1(sc, RGE_CFG5, RGE_CFG5_WOL_LANWAKE);
2316 	RGE_CLRBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
2317 
2318 	return (0);
2319 }
2320 
2321 void
2322 rge_wol_power(struct rge_softc *sc)
2323 {
2324 	/* Disable RXDV gate. */
2325 	RGE_CLRBIT_1(sc, RGE_PPSW, 0x08);
2326 	DELAY(2000);
2327 
2328 	RGE_SETBIT_1(sc, RGE_CFG1, RGE_CFG1_PM_EN);
2329 	RGE_SETBIT_1(sc, RGE_CFG2, RGE_CFG2_PMSTS_EN);
2330 }
2331 #endif
2332 
2333 #if NKSTAT > 0
2334 
2335 #define RGE_DTCCR_CMD		(1U << 3)
2336 #define RGE_DTCCR_LO		0x10
2337 #define RGE_DTCCR_HI		0x14
2338 
2339 struct rge_kstats {
2340 	struct kstat_kv		tx_ok;
2341 	struct kstat_kv		rx_ok;
2342 	struct kstat_kv		tx_er;
2343 	struct kstat_kv		rx_er;
2344 	struct kstat_kv		miss_pkt;
2345 	struct kstat_kv		fae;
2346 	struct kstat_kv		tx_1col;
2347 	struct kstat_kv		tx_mcol;
2348 	struct kstat_kv		rx_ok_phy;
2349 	struct kstat_kv		rx_ok_brd;
2350 	struct kstat_kv		rx_ok_mul;
2351 	struct kstat_kv		tx_abt;
2352 	struct kstat_kv		tx_undrn;
2353 };
2354 
2355 static const struct rge_kstats rge_kstats_tpl = {
2356 	.tx_ok =	KSTAT_KV_UNIT_INITIALIZER("TxOk",
2357 			    KSTAT_KV_T_COUNTER64, KSTAT_KV_U_PACKETS),
2358 	.rx_ok =	KSTAT_KV_UNIT_INITIALIZER("RxOk",
2359 			    KSTAT_KV_T_COUNTER64, KSTAT_KV_U_PACKETS),
2360 	.tx_er =	KSTAT_KV_UNIT_INITIALIZER("TxEr",
2361 			    KSTAT_KV_T_COUNTER64, KSTAT_KV_U_PACKETS),
2362 	.rx_er =	KSTAT_KV_UNIT_INITIALIZER("RxEr",
2363 			    KSTAT_KV_T_COUNTER32, KSTAT_KV_U_PACKETS),
2364 	.miss_pkt =	KSTAT_KV_UNIT_INITIALIZER("MissPkt",
2365 			    KSTAT_KV_T_COUNTER16, KSTAT_KV_U_PACKETS),
2366 	.fae =		KSTAT_KV_UNIT_INITIALIZER("FAE",
2367 			    KSTAT_KV_T_COUNTER16, KSTAT_KV_U_PACKETS),
2368 	.tx_1col =	KSTAT_KV_UNIT_INITIALIZER("Tx1Col",
2369 			    KSTAT_KV_T_COUNTER32, KSTAT_KV_U_PACKETS),
2370 	.tx_mcol =	KSTAT_KV_UNIT_INITIALIZER("TxMCol",
2371 			    KSTAT_KV_T_COUNTER32, KSTAT_KV_U_PACKETS),
2372 	.rx_ok_phy =	KSTAT_KV_UNIT_INITIALIZER("RxOkPhy",
2373 			    KSTAT_KV_T_COUNTER64, KSTAT_KV_U_PACKETS),
2374 	.rx_ok_brd =	KSTAT_KV_UNIT_INITIALIZER("RxOkBrd",
2375 			    KSTAT_KV_T_COUNTER64, KSTAT_KV_U_PACKETS),
2376 	.rx_ok_mul =	KSTAT_KV_UNIT_INITIALIZER("RxOkMul",
2377 			    KSTAT_KV_T_COUNTER32, KSTAT_KV_U_PACKETS),
2378 	.tx_abt =	KSTAT_KV_UNIT_INITIALIZER("TxAbt",
2379 			    KSTAT_KV_T_COUNTER16, KSTAT_KV_U_PACKETS),
2380 	.tx_undrn =	KSTAT_KV_UNIT_INITIALIZER("TxUndrn",
2381 			    KSTAT_KV_T_COUNTER16, KSTAT_KV_U_PACKETS),
2382 };
2383 
2384 struct rge_kstat_softc {
2385 	struct rge_stats	*rge_ks_sc_stats;
2386 
2387 	bus_dmamap_t		 rge_ks_sc_map;
2388 	bus_dma_segment_t	 rge_ks_sc_seg;
2389 	int			 rge_ks_sc_nsegs;
2390 
2391 	struct rwlock		 rge_ks_sc_rwl;
2392 };
2393 
2394 static int
2395 rge_kstat_read(struct kstat *ks)
2396 {
2397 	struct rge_softc *sc = ks->ks_softc;
2398 	struct rge_kstat_softc *rge_ks_sc = ks->ks_ptr;
2399 	bus_dmamap_t map;
2400 	uint64_t cmd;
2401 	uint32_t reg;
2402 	uint8_t command;
2403 	int tmo;
2404 
2405 	command = RGE_READ_1(sc, RGE_CMD);
2406 	if (!ISSET(command, RGE_CMD_RXENB) || command == 0xff)
2407 		return (ENETDOWN);
2408 
2409 	map = rge_ks_sc->rge_ks_sc_map;
2410 	cmd = map->dm_segs[0].ds_addr | RGE_DTCCR_CMD;
2411 
2412 	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
2413 	    BUS_DMASYNC_PREREAD);
2414 
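	/* Ask the chip to dump its counters into the buffer, then poll. */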
2415 	RGE_WRITE_4(sc, RGE_DTCCR_HI, cmd >> 32);
2416 	bus_space_barrier(sc->rge_btag, sc->rge_bhandle, RGE_DTCCR_HI, 8,
2417 	    BUS_SPACE_BARRIER_WRITE);
2418 	RGE_WRITE_4(sc, RGE_DTCCR_LO, cmd);
2419 	bus_space_barrier(sc->rge_btag, sc->rge_bhandle, RGE_DTCCR_LO, 4,
2420 	    BUS_SPACE_BARRIER_READ|BUS_SPACE_BARRIER_WRITE);
2421 
2422 	tmo = 1000;
2423 	do {
2424 		reg = RGE_READ_4(sc, RGE_DTCCR_LO);
2425 		if (!ISSET(reg, RGE_DTCCR_CMD))
2426 			break;
2427 
2428 		delay(10);
2429 		bus_space_barrier(sc->rge_btag, sc->rge_bhandle,
2430 		    RGE_DTCCR_LO, 4, BUS_SPACE_BARRIER_READ);
2431 	} while (--tmo);
2432 
2433 	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
2434 	    BUS_DMASYNC_POSTREAD);
2435 
2436 	if (ISSET(reg, RGE_DTCCR_CMD))
2437 		return (EIO);
2438 
2439 	nanouptime(&ks->ks_updated);
2440 
2441 	return (0);
2442 }
2443 
2444 static int
2445 rge_kstat_copy(struct kstat *ks, void *dst)
2446 {
2447 	struct rge_kstat_softc *rge_ks_sc = ks->ks_ptr;
2448 	struct rge_stats *rs = rge_ks_sc->rge_ks_sc_stats;
2449 	struct rge_kstats *kvs = dst;
2450 
2451 	*kvs = rge_kstats_tpl;
2452 	kstat_kv_u64(&kvs->tx_ok) = lemtoh64(&rs->rge_tx_ok);
2453 	kstat_kv_u64(&kvs->rx_ok) = lemtoh64(&rs->rge_rx_ok);
2454 	kstat_kv_u64(&kvs->tx_er) = lemtoh64(&rs->rge_tx_er);
2455 	kstat_kv_u32(&kvs->rx_er) = lemtoh32(&rs->rge_rx_er);
2456 	kstat_kv_u16(&kvs->miss_pkt) = lemtoh16(&rs->rge_miss_pkt);
2457 	kstat_kv_u16(&kvs->fae) = lemtoh16(&rs->rge_fae);
2458 	kstat_kv_u32(&kvs->tx_1col) = lemtoh32(&rs->rge_tx_1col);
2459 	kstat_kv_u32(&kvs->tx_mcol) = lemtoh32(&rs->rge_tx_mcol);
2460 	kstat_kv_u64(&kvs->rx_ok_phy) = lemtoh64(&rs->rge_rx_ok_phy);
2461 	kstat_kv_u64(&kvs->rx_ok_brd) = lemtoh64(&rs->rge_rx_ok_brd);
2462 	kstat_kv_u32(&kvs->rx_ok_mul) = lemtoh32(&rs->rge_rx_ok_mul);
2463 	kstat_kv_u16(&kvs->tx_abt) = lemtoh16(&rs->rge_tx_abt);
2464 	kstat_kv_u16(&kvs->tx_undrn) = lemtoh16(&rs->rge_tx_undrn);
2465 
2466 	return (0);
2467 }
2468 
2469 void
2470 rge_kstat_attach(struct rge_softc *sc)
2471 {
2472 	struct rge_kstat_softc *rge_ks_sc;
2473 	struct kstat *ks;
2474 
2475 	rge_ks_sc = malloc(sizeof(*rge_ks_sc), M_DEVBUF, M_NOWAIT);
2476 	if (rge_ks_sc == NULL) {
2477 		printf("%s: cannot allocate kstat softc\n",
2478 		    sc->sc_dev.dv_xname);
2479 		return;
2480 	}
2481 
2482 	if (bus_dmamap_create(sc->sc_dmat,
2483 	    sizeof(struct rge_stats), 1, sizeof(struct rge_stats), 0,
2484 	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT,
2485 	    &rge_ks_sc->rge_ks_sc_map) != 0) {
2486 		printf("%s: cannot create counter dma memory map\n",
2487 		    sc->sc_dev.dv_xname);
2488 		goto free;
2489 	}
2490 
2491 	if (bus_dmamem_alloc(sc->sc_dmat,
2492 	    sizeof(struct rge_stats), RGE_STATS_ALIGNMENT, 0,
2493 	    &rge_ks_sc->rge_ks_sc_seg, 1, &rge_ks_sc->rge_ks_sc_nsegs,
2494 	    BUS_DMA_NOWAIT | BUS_DMA_ZERO) != 0) {
2495 		printf("%s: cannot allocate counter dma memory\n",
2496 		    sc->sc_dev.dv_xname);
2497 		goto destroy;
2498 	}
2499 
2500 	if (bus_dmamem_map(sc->sc_dmat,
2501 	    &rge_ks_sc->rge_ks_sc_seg, rge_ks_sc->rge_ks_sc_nsegs,
2502 	    sizeof(struct rge_stats), (caddr_t *)&rge_ks_sc->rge_ks_sc_stats,
2503 	    BUS_DMA_NOWAIT) != 0) {
2504 		printf("%s: cannot map counter dma memory\n",
2505 		    sc->sc_dev.dv_xname);
2506 		goto freedma;
2507 	}
2508 
2509 	if (bus_dmamap_load(sc->sc_dmat, rge_ks_sc->rge_ks_sc_map,
2510 	    (caddr_t)rge_ks_sc->rge_ks_sc_stats, sizeof(struct rge_stats),
2511 	    NULL, BUS_DMA_NOWAIT) != 0) {
2512 		printf("%s: cannot load counter dma memory\n",
2513 		    sc->sc_dev.dv_xname);
2514 		goto unmap;
2515 	}
2516 
2517 	ks = kstat_create(sc->sc_dev.dv_xname, 0, "re-stats", 0,
2518 	    KSTAT_T_KV, 0);
2519 	if (ks == NULL) {
2520 		printf("%s: cannot create re-stats kstat\n",
2521 		    sc->sc_dev.dv_xname);
2522 		goto unload;
2523 	}
2524 
2525 	ks->ks_datalen = sizeof(rge_kstats_tpl);
2526 
2527 	rw_init(&rge_ks_sc->rge_ks_sc_rwl, "rgestats");
2528 	kstat_set_wlock(ks, &rge_ks_sc->rge_ks_sc_rwl);
2529 	ks->ks_softc = sc;
2530 	ks->ks_ptr = rge_ks_sc;
2531 	ks->ks_read = rge_kstat_read;
2532 	ks->ks_copy = rge_kstat_copy;
2533 
2534 	kstat_install(ks);
2535 
2536 	sc->sc_kstat = ks;
2537 
2538 	return;
2539 
2540 unload:
2541 	bus_dmamap_unload(sc->sc_dmat, rge_ks_sc->rge_ks_sc_map);
2542 unmap:
2543 	bus_dmamem_unmap(sc->sc_dmat,
2544 	    (caddr_t)rge_ks_sc->rge_ks_sc_stats, sizeof(struct rge_stats));
2545 freedma:
2546 	bus_dmamem_free(sc->sc_dmat, &rge_ks_sc->rge_ks_sc_seg, 1);
2547 destroy:
2548 	bus_dmamap_destroy(sc->sc_dmat, rge_ks_sc->rge_ks_sc_map);
2549 free:
2550 	free(rge_ks_sc, M_DEVBUF, sizeof(*rge_ks_sc));
2551 }
2552 #endif /* NKSTAT > 0 */
2553