1 /*	$NetBSD: if_kse.c,v 1.59 2022/09/24 18:12:42 thorpej Exp $	*/
2 
3 /*-
4  * Copyright (c) 2006 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Tohru Nishimura.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 /*
33  * Micrel 8841/8842 10/100 PCI ethernet driver
34  */
35 
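/*
 * The KSZ8841 is a single-port MAC whose builtin PHY is handled through
 * mii(4); the KSZ8842 couples the same MAC to a builtin 3-port switch
 * and is presented here as a fixed 100FDX interface.
 */
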
36 #include <sys/cdefs.h>
37 __KERNEL_RCSID(0, "$NetBSD: if_kse.c,v 1.59 2022/09/24 18:12:42 thorpej Exp $");
38 
39 #include <sys/param.h>
40 #include <sys/bus.h>
41 #include <sys/intr.h>
42 #include <sys/device.h>
43 #include <sys/callout.h>
44 #include <sys/ioctl.h>
45 #include <sys/mbuf.h>
46 #include <sys/rndsource.h>
47 #include <sys/errno.h>
48 #include <sys/systm.h>
49 #include <sys/kernel.h>
50 
51 #include <net/if.h>
52 #include <net/if_media.h>
53 #include <net/if_dl.h>
54 #include <net/if_ether.h>
55 #include <dev/mii/mii.h>
56 #include <dev/mii/miivar.h>
57 #include <net/bpf.h>
58 
59 #include <dev/pci/pcivar.h>
60 #include <dev/pci/pcireg.h>
61 #include <dev/pci/pcidevs.h>
62 
63 #define KSE_LINKDEBUG 0
64 
65 #define CSR_READ_4(sc, off) \
66 	    bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (off))
67 #define CSR_WRITE_4(sc, off, val) \
68 	    bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (off), (val))
69 #define CSR_READ_2(sc, off) \
70 	    bus_space_read_2((sc)->sc_st, (sc)->sc_sh, (off))
71 #define CSR_WRITE_2(sc, off, val) \
72 	    bus_space_write_2((sc)->sc_st, (sc)->sc_sh, (off), (val))
73 
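/*
 * CSR register offsets.  The register window is mapped through PCI BAR
 * 0x10 in kse_attach() below; 16-bit registers are accessed with
 * CSR_READ_2/CSR_WRITE_2 and 32-bit ones with CSR_READ_4/CSR_WRITE_4.
 */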
74 #define MDTXC		0x000		/* DMA transmit control */
75 #define MDRXC		0x004		/* DMA receive control */
76 #define MDTSC		0x008		/* trigger DMA transmit (SC) */
77 #define MDRSC		0x00c		/* trigger DMA receive (SC) */
78 #define TDLB		0x010		/* transmit descriptor list base */
79 #define RDLB		0x014		/* receive descriptor list base */
80 #define MTR0		0x020		/* multicast table 31:0 */
81 #define MTR1		0x024		/* multicast table 63:32 */
82 #define INTEN		0x028		/* interrupt enable */
83 #define INTST		0x02c		/* interrupt status */
84 #define MAAL0		0x080		/* additional MAC address 0 low */
85 #define MAAH0		0x084		/* additional MAC address 0 high */
86 #define MARL		0x200		/* MAC address low */
87 #define MARM		0x202		/* MAC address middle */
88 #define MARH		0x204		/* MAC address high */
89 #define GRR		0x216		/* global reset */
90 #define SIDER		0x400		/* switch ID and function enable */
91 #define SGCR3		0x406		/* switch function control 3 */
92 #define  CR3_USEHDX	(1U<<6)		/* use half-duplex 8842 host port */
93 #define  CR3_USEFC	(1U<<5) 	/* use flowcontrol 8842 host port */
94 #define IACR		0x4a0		/* indirect access control */
95 #define IADR1		0x4a2		/* indirect access data 66:63 */
96 #define IADR2		0x4a4		/* indirect access data 47:32 */
97 #define IADR3		0x4a6		/* indirect access data 63:48 */
98 #define IADR4		0x4a8		/* indirect access data 15:0 */
99 #define IADR5		0x4aa		/* indirect access data 31:16 */
100 #define  IADR_LATCH	(1U<<30)	/* latch completed indication */
101 #define  IADR_OVF	(1U<<31)	/* overflow detected */
102 #define P1CR4		0x512		/* port 1 control 4 */
103 #define P1SR		0x514		/* port 1 status */
104 #define P2CR4		0x532		/* port 2 control 4 */
105 #define P2SR		0x534		/* port 2 status */
106 #define  PxCR_STARTNEG	(1U<<9)		/* restart auto negotiation */
107 #define  PxCR_AUTOEN	(1U<<7)		/* auto negotiation enable */
108 #define  PxCR_SPD100	(1U<<6)		/* force speed 100 */
109 #define  PxCR_USEFDX	(1U<<5)		/* force full duplex */
110 #define  PxCR_USEFC	(1U<<4)		/* advertise pause flow control */
111 #define  PxSR_ACOMP	(1U<<6)		/* auto negotiation completed */
112 #define  PxSR_SPD100	(1U<<10)	/* speed is 100Mbps */
113 #define  PxSR_FDX	(1U<<9)		/* full duplex */
114 #define  PxSR_LINKUP	(1U<<5)		/* link is good */
115 #define  PxSR_RXFLOW	(1U<<12)	/* receive flow control active */
116 #define  PxSR_TXFLOW	(1U<<11)	/* transmit flow control active */
117 #define P1VIDCR		0x504		/* port 1 vtag */
118 #define P2VIDCR		0x524		/* port 2 vtag */
119 #define P3VIDCR		0x544		/* 8842 host vtag */
120 #define EVCNTBR		0x1c00		/* 3 sets of 34 event counters */
121 
122 #define TXC_BS_MSK	0x3f000000	/* burst size */
123 #define TXC_BS_SFT	(24)		/* 1,2,4,8,16,32 or 0 for unlimited */
124 #define TXC_UCG		(1U<<18)	/* generate UDP checksum */
125 #define TXC_TCG		(1U<<17)	/* generate TCP checksum */
126 #define TXC_ICG		(1U<<16)	/* generate IP checksum */
127 #define TXC_FCE		(1U<<9)		/* generate PAUSE to moderate Rx lvl */
128 #define TXC_EP		(1U<<2)		/* enable automatic padding */
129 #define TXC_AC		(1U<<1)		/* add CRC to frame */
130 #define TXC_TEN		(1)		/* enable DMA to run */
131 
132 #define RXC_BS_MSK	0x3f000000	/* burst size */
133 #define RXC_BS_SFT	(24)		/* 1,2,4,8,16,32 or 0 for unlimited */
134 #define RXC_IHAE	(1U<<19)	/* IP header alignment enable */
135 #define RXC_UCC		(1U<<18)	/* run UDP checksum */
136 #define RXC_TCC		(1U<<17)	/* run TCP checksum */
137 #define RXC_ICC		(1U<<16)	/* run IP checksum */
138 #define RXC_FCE		(1U<<9)		/* accept PAUSE to throttle Tx */
139 #define RXC_RB		(1U<<6)		/* receive broadcast frame */
140 #define RXC_RM		(1U<<5)		/* receive all multicast (inc. RB) */
141 #define RXC_RU		(1U<<4)		/* receive 16 additional unicasts */
142 #define RXC_RE		(1U<<3)		/* accept error frame */
143 #define RXC_RA		(1U<<2)		/* receive all frame */
144 #define RXC_MHTE	(1U<<1)		/* use multicast hash table */
145 #define RXC_REN		(1)		/* enable DMA to run */
146 
147 #define INT_DMLCS	(1U<<31)	/* link status change */
148 #define INT_DMTS	(1U<<30)	/* sending desc. has posted Tx done */
149 #define INT_DMRS	(1U<<29)	/* frame was received */
150 #define INT_DMRBUS	(1U<<27)	/* Rx descriptor pool is full */
151 #define INT_DMxPSS	(3U<<25)	/* 26:25 DMA Tx/Rx have stopped */
152 
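/*
 * Tx and Rx descriptors are four 32-bit words each.  As used below,
 * word 0 carries the OWN bit and status, word 1 carries control bits
 * and the segment size, word 2 carries the buffer bus address, and
 * word 3 carries the bus address of the next descriptor in the ring.
 */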
153 struct tdes {
154 	uint32_t t0, t1, t2, t3;
155 };
156 
157 struct rdes {
158 	uint32_t r0, r1, r2, r3;
159 };
160 
161 #define T0_OWN		(1U<<31)	/* desc is ready to Tx */
162 
163 #define R0_OWN		(1U<<31)	/* desc is empty */
164 #define R0_FS		(1U<<30)	/* first segment of frame */
165 #define R0_LS		(1U<<29)	/* last segment of frame */
166 #define R0_IPE		(1U<<28)	/* IP checksum error */
167 #define R0_TCPE		(1U<<27)	/* TCP checksum error */
168 #define R0_UDPE		(1U<<26)	/* UDP checksum error */
169 #define R0_ES		(1U<<25)	/* error summary */
170 #define R0_MF		(1U<<24)	/* multicast frame */
171 #define R0_SPN		0x00300000	/* 21:20 switch port 1/2 */
172 #define R0_ALIGN	0x00300000	/* 21:20 (KSZ8692P) Rx align amount */
173 #define R0_RE		(1U<<19)	/* MII reported error */
174 #define R0_TL		(1U<<18)	/* frame too long, beyond 1518 */
175 #define R0_RF		(1U<<17)	/* damaged runt frame */
176 #define R0_CE		(1U<<16)	/* CRC error */
177 #define R0_FT		(1U<<15)	/* frame type */
178 #define R0_FL_MASK	0x7ff		/* frame length 10:0 */
179 
180 #define T1_IC		(1U<<31)	/* post interrupt on complete */
181 #define T1_FS		(1U<<30)	/* first segment of frame */
182 #define T1_LS		(1U<<29)	/* last segment of frame */
183 #define T1_IPCKG	(1U<<28)	/* generate IP checksum */
184 #define T1_TCPCKG	(1U<<27)	/* generate TCP checksum */
185 #define T1_UDPCKG	(1U<<26)	/* generate UDP checksum */
186 #define T1_TER		(1U<<25)	/* end of ring */
187 #define T1_SPN		0x00300000	/* 21:20 switch port 1/2 */
188 #define T1_TBS_MASK	0x7ff		/* segment size 10:0 */
189 
190 #define R1_RER		(1U<<25)	/* end of ring */
191 #define R1_RBS_MASK	0x7fc		/* segment size 10:0 */
192 
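/*
 * Software Tx bookkeeping: up to KSE_TXQUEUELEN outstanding jobs (mbuf
 * chains), each using at most KSE_NTXSEGS of the KSE_NTXDESC hardware
 * descriptors.  Rx uses one mbuf cluster per descriptor.
 */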
193 #define KSE_NTXSEGS		16
194 #define KSE_TXQUEUELEN		64
195 #define KSE_TXQUEUELEN_MASK	(KSE_TXQUEUELEN - 1)
196 #define KSE_TXQUEUE_GC		(KSE_TXQUEUELEN / 4)
197 #define KSE_NTXDESC		256
198 #define KSE_NTXDESC_MASK	(KSE_NTXDESC - 1)
199 #define KSE_NEXTTX(x)		(((x) + 1) & KSE_NTXDESC_MASK)
200 #define KSE_NEXTTXS(x)		(((x) + 1) & KSE_TXQUEUELEN_MASK)
201 
202 #define KSE_NRXDESC		64
203 #define KSE_NRXDESC_MASK	(KSE_NRXDESC - 1)
204 #define KSE_NEXTRX(x)		(((x) + 1) & KSE_NRXDESC_MASK)
205 
206 struct kse_control_data {
207 	struct tdes kcd_txdescs[KSE_NTXDESC];
208 	struct rdes kcd_rxdescs[KSE_NRXDESC];
209 };
210 #define KSE_CDOFF(x)		offsetof(struct kse_control_data, x)
211 #define KSE_CDTXOFF(x)		KSE_CDOFF(kcd_txdescs[(x)])
212 #define KSE_CDRXOFF(x)		KSE_CDOFF(kcd_rxdescs[(x)])
213 
214 struct kse_txsoft {
215 	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
216 	bus_dmamap_t txs_dmamap;	/* our DMA map */
217 	int txs_firstdesc;		/* first descriptor in packet */
218 	int txs_lastdesc;		/* last descriptor in packet */
219 	int txs_ndesc;			/* # of descriptors used */
220 };
221 
222 struct kse_rxsoft {
223 	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
224 	bus_dmamap_t rxs_dmamap;	/* our DMA map */
225 };
226 
227 struct kse_softc {
228 	device_t sc_dev;		/* generic device information */
229 	bus_space_tag_t sc_st;		/* bus space tag */
230 	bus_space_handle_t sc_sh;	/* bus space handle */
231 	bus_size_t sc_memsize;		/* csr map size */
232 	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
233 	pci_chipset_tag_t sc_pc;	/* PCI chipset tag */
234 	struct ethercom sc_ethercom;	/* Ethernet common data */
235 	void *sc_ih;			/* interrupt cookie */
236 
237 	struct mii_data sc_mii;		/* mii 8841 */
238 	struct ifmedia sc_media;	/* ifmedia 8842 */
239 	int sc_flowflags;		/* 802.3x PAUSE flow control */
240 
241 	callout_t  sc_tick_ch;		/* MII tick callout */
242 
243 	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
244 #define sc_cddma	sc_cddmamap->dm_segs[0].ds_addr
245 
246 	struct kse_control_data *sc_control_data;
247 #define sc_txdescs	sc_control_data->kcd_txdescs
248 #define sc_rxdescs	sc_control_data->kcd_rxdescs
249 
250 	struct kse_txsoft sc_txsoft[KSE_TXQUEUELEN];
251 	struct kse_rxsoft sc_rxsoft[KSE_NRXDESC];
252 	int sc_txfree;			/* number of free Tx descriptors */
253 	int sc_txnext;			/* next ready Tx descriptor */
254 	int sc_txsfree;			/* number of free Tx jobs */
255 	int sc_txsnext;			/* next ready Tx job */
256 	int sc_txsdirty;		/* dirty Tx jobs */
257 	int sc_rxptr;			/* next ready Rx descriptor/descsoft */
258 
259 	uint32_t sc_txc, sc_rxc;
260 	uint32_t sc_t1csum;
261 	int sc_mcsum;
262 	uint32_t sc_inten;
263 	uint32_t sc_chip;
264 
265 	krndsource_t rnd_source;	/* random source */
266 
267 #ifdef KSE_EVENT_COUNTERS
268 	struct ksext {
269 		char evcntname[3][8];
270 		struct evcnt pev[3][34];
271 	} sc_ext;			/* switch statistics */
272 #endif
273 };
274 
275 #define KSE_CDTXADDR(sc, x)	((sc)->sc_cddma + KSE_CDTXOFF((x)))
276 #define KSE_CDRXADDR(sc, x)	((sc)->sc_cddma + KSE_CDRXOFF((x)))
277 
278 #define KSE_CDTXSYNC(sc, x, n, ops)					\
279 do {									\
280 	int __x, __n;							\
281 									\
282 	__x = (x);							\
283 	__n = (n);							\
284 									\
285 	/* If it will wrap around, sync to the end of the ring. */	\
286 	if ((__x + __n) > KSE_NTXDESC) {				\
287 		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
288 		    KSE_CDTXOFF(__x), sizeof(struct tdes) *		\
289 		    (KSE_NTXDESC - __x), (ops));			\
290 		__n -= (KSE_NTXDESC - __x);				\
291 		__x = 0;						\
292 	}								\
293 									\
294 	/* Now sync whatever is left. */				\
295 	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
296 	    KSE_CDTXOFF(__x), sizeof(struct tdes) * __n, (ops));	\
297 } while (/*CONSTCOND*/0)
298 
299 #define KSE_CDRXSYNC(sc, x, ops)					\
300 do {									\
301 	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
302 	    KSE_CDRXOFF((x)), sizeof(struct rdes), (ops));		\
303 } while (/*CONSTCOND*/0)
304 
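/*
 * KSE_INIT_RXDESC() rearms an Rx descriptor to cover the full mbuf
 * cluster and returns ownership to the chip by setting R0_OWN.
 */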
305 #define KSE_INIT_RXDESC(sc, x)						\
306 do {									\
307 	struct kse_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
308 	struct rdes *__rxd = &(sc)->sc_rxdescs[(x)];			\
309 	struct mbuf *__m = __rxs->rxs_mbuf;				\
310 									\
311 	__m->m_data = __m->m_ext.ext_buf;				\
312 	__rxd->r2 = __rxs->rxs_dmamap->dm_segs[0].ds_addr;		\
313 	__rxd->r1 = R1_RBS_MASK /* __m->m_ext.ext_size */;		\
314 	__rxd->r0 = R0_OWN;						\
315 	KSE_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); \
316 } while (/*CONSTCOND*/0)
317 
318 u_int kse_burstsize = 8;	/* DMA burst length tuning knob */
319 
320 #ifdef KSEDIAGNOSTIC
321 u_int kse_monitor_rxintr;	/* fragmented UDP csum HW bug hook */
322 #endif
323 
324 static int kse_match(device_t, cfdata_t, void *);
325 static void kse_attach(device_t, device_t, void *);
326 
327 CFATTACH_DECL_NEW(kse, sizeof(struct kse_softc),
328     kse_match, kse_attach, NULL, NULL);
329 
330 static int kse_ioctl(struct ifnet *, u_long, void *);
331 static void kse_start(struct ifnet *);
332 static void kse_watchdog(struct ifnet *);
333 static int kse_init(struct ifnet *);
334 static void kse_stop(struct ifnet *, int);
335 static void kse_reset(struct kse_softc *);
336 static void kse_set_rcvfilt(struct kse_softc *);
337 static int add_rxbuf(struct kse_softc *, int);
338 static void rxdrain(struct kse_softc *);
339 static int kse_intr(void *);
340 static void rxintr(struct kse_softc *);
341 static void txreap(struct kse_softc *);
342 static void lnkchg(struct kse_softc *);
343 static int kse_ifmedia_upd(struct ifnet *);
344 static void kse_ifmedia_sts(struct ifnet *, struct ifmediareq *);
345 static void nopifmedia_sts(struct ifnet *, struct ifmediareq *);
346 static void phy_tick(void *);
347 int kse_mii_readreg(device_t, int, int, uint16_t *);
348 int kse_mii_writereg(device_t, int, int, uint16_t);
349 void kse_mii_statchg(struct ifnet *);
350 #ifdef KSE_EVENT_COUNTERS
351 static void stat_tick(void *);
352 static void zerostats(struct kse_softc *);
353 #endif
354 
355 static const struct device_compatible_entry compat_data[] = {
356 	{ .id = PCI_ID_CODE(PCI_VENDOR_MICREL,
357 		PCI_PRODUCT_MICREL_KSZ8842) },
358 	{ .id = PCI_ID_CODE(PCI_VENDOR_MICREL,
359 		PCI_PRODUCT_MICREL_KSZ8841) },
360 
361 	PCI_COMPAT_EOL
362 };
363 
364 static int
365 kse_match(device_t parent, cfdata_t match, void *aux)
366 {
367 	struct pci_attach_args *pa = (struct pci_attach_args *)aux;
368 
369 	return PCI_CLASS(pa->pa_class) == PCI_CLASS_NETWORK &&
370 	       pci_compatible_match(pa, compat_data);
371 }
372 
373 static void
374 kse_attach(device_t parent, device_t self, void *aux)
375 {
376 	struct kse_softc *sc = device_private(self);
377 	struct pci_attach_args *pa = aux;
378 	pci_chipset_tag_t pc = pa->pa_pc;
379 	pci_intr_handle_t ih;
380 	const char *intrstr;
381 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
382 	struct mii_data * const mii = &sc->sc_mii;
383 	struct ifmedia *ifm;
384 	uint8_t enaddr[ETHER_ADDR_LEN];
385 	bus_dma_segment_t seg;
386 	int i, error, nseg;
387 	char intrbuf[PCI_INTRSTR_LEN];
388 
389 	aprint_normal(": Micrel KSZ%04x Ethernet (rev. 0x%02x)\n",
390 	    PCI_PRODUCT(pa->pa_id), PCI_REVISION(pa->pa_class));
391 
392 	if (pci_mapreg_map(pa, 0x10,
393 	    PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT,
394 	    0, &sc->sc_st, &sc->sc_sh, NULL, &sc->sc_memsize) != 0) {
395 		aprint_error_dev(self, "unable to map device registers\n");
396 		return;
397 	}
398 
399 	/* Make sure bus mastering is enabled. */
400 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
401 	    pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG) |
402 	    PCI_COMMAND_MASTER_ENABLE);
403 
404 	/* Power up chip if necessary. */
405 	if ((error = pci_activate(pc, pa->pa_tag, self, NULL))
406 	    && error != EOPNOTSUPP) {
407 		aprint_error_dev(self, "cannot activate %d\n", error);
408 		return;
409 	}
410 
411 	/* Map and establish our interrupt. */
412 	if (pci_intr_map(pa, &ih)) {
413 		aprint_error_dev(self, "unable to map interrupt\n");
414 		goto fail;
415 	}
416 	intrstr = pci_intr_string(pc, ih, intrbuf, sizeof(intrbuf));
417 	sc->sc_ih = pci_intr_establish_xname(pc, ih, IPL_NET, kse_intr, sc,
418 	    device_xname(self));
419 	if (sc->sc_ih == NULL) {
420 		aprint_error_dev(self, "unable to establish interrupt");
421 		if (intrstr != NULL)
422 			aprint_error(" at %s", intrstr);
423 		aprint_error("\n");
424 		goto fail;
425 	}
426 	aprint_normal_dev(self, "interrupting at %s\n", intrstr);
427 
428 	sc->sc_dev = self;
429 	sc->sc_dmat = pa->pa_dmat;
430 	sc->sc_pc = pa->pa_pc;
431 	sc->sc_chip = PCI_PRODUCT(pa->pa_id);
432 
433 	/*
434 	 * Read the Ethernet address from the EEPROM.
435 	 */
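	/* MARH's high byte holds the first address octet, hence the reversed fill. */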
436 	i = CSR_READ_2(sc, MARL);
437 	enaddr[5] = i;
438 	enaddr[4] = i >> 8;
439 	i = CSR_READ_2(sc, MARM);
440 	enaddr[3] = i;
441 	enaddr[2] = i >> 8;
442 	i = CSR_READ_2(sc, MARH);
443 	enaddr[1] = i;
444 	enaddr[0] = i >> 8;
445 	aprint_normal_dev(self,
446 	    "Ethernet address %s\n", ether_sprintf(enaddr));
447 
448 	/*
449 	 * Enable chip function.
450 	 */
451 	CSR_WRITE_2(sc, SIDER, 1);
452 
453 	/*
454 	 * Allocate the control data structures, and create and load the
455 	 * DMA map for it.
456 	 */
457 	error = bus_dmamem_alloc(sc->sc_dmat,
458 	    sizeof(struct kse_control_data), PAGE_SIZE, 0, &seg, 1, &nseg, 0);
459 	if (error != 0) {
460 		aprint_error_dev(self,
461 		    "unable to allocate control data, error = %d\n", error);
462 		goto fail_0;
463 	}
464 	error = bus_dmamem_map(sc->sc_dmat, &seg, nseg,
465 	    sizeof(struct kse_control_data), (void **)&sc->sc_control_data,
466 	    BUS_DMA_COHERENT);
467 	if (error != 0) {
468 		aprint_error_dev(self,
469 		    "unable to map control data, error = %d\n", error);
470 		goto fail_1;
471 	}
472 	error = bus_dmamap_create(sc->sc_dmat,
473 	    sizeof(struct kse_control_data), 1,
474 	    sizeof(struct kse_control_data), 0, 0, &sc->sc_cddmamap);
475 	if (error != 0) {
476 		aprint_error_dev(self,
477 		    "unable to create control data DMA map, "
478 		    "error = %d\n", error);
479 		goto fail_2;
480 	}
481 	error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
482 	    sc->sc_control_data, sizeof(struct kse_control_data), NULL, 0);
483 	if (error != 0) {
484 		aprint_error_dev(self,
485 		    "unable to load control data DMA map, error = %d\n",
486 		    error);
487 		goto fail_3;
488 	}
489 	for (i = 0; i < KSE_TXQUEUELEN; i++) {
490 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
491 		    KSE_NTXSEGS, MCLBYTES, 0, 0,
492 		    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
493 			aprint_error_dev(self,
494 			    "unable to create tx DMA map %d, error = %d\n",
495 			    i, error);
496 			goto fail_4;
497 		}
498 	}
499 	for (i = 0; i < KSE_NRXDESC; i++) {
500 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
501 		    1, MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
502 			aprint_error_dev(self,
503 			    "unable to create rx DMA map %d, error = %d\n",
504 			    i, error);
505 			goto fail_5;
506 		}
507 		sc->sc_rxsoft[i].rxs_mbuf = NULL;
508 	}
509 
510 	mii->mii_ifp = ifp;
511 	mii->mii_readreg = kse_mii_readreg;
512 	mii->mii_writereg = kse_mii_writereg;
513 	mii->mii_statchg = kse_mii_statchg;
514 
515 	/* Initialize ifmedia structures. */
516 	if (sc->sc_chip == 0x8841) {
517 		/* use port 1 builtin PHY as index 1 device */
518 		sc->sc_ethercom.ec_mii = mii;
519 		ifm = &mii->mii_media;
520 		ifmedia_init(ifm, 0, kse_ifmedia_upd, kse_ifmedia_sts);
521 		mii_attach(sc->sc_dev, mii, 0xffffffff, 1 /* PHY1 */,
522 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
523 		if (LIST_FIRST(&mii->mii_phys) == NULL) {
524 			ifmedia_add(ifm, IFM_ETHER | IFM_NONE, 0, NULL);
525 			ifmedia_set(ifm, IFM_ETHER | IFM_NONE);
526 		} else
527 			ifmedia_set(ifm, IFM_ETHER | IFM_AUTO);
528 	} else {
529 		/*
530 		 * Pretend 100FDX with no alternative media selection.
531 		 * The 8842 MAC is tied to a builtin 3-port switch.  It can do
532 		 * 4-level prioritised rate control over either Tx or Rx for
533 		 * any of the ports, though this driver leaves the rate
534 		 * unlimited, intending a 100Mbps maximum.  The 2 external
535 		 * ports run in autonegotiation mode and this driver provides
536 		 * no means to manipulate or inspect their operational details.
537 		 */
538 		sc->sc_ethercom.ec_ifmedia = ifm = &sc->sc_media;
539 		ifmedia_init(ifm, 0, NULL, nopifmedia_sts);
540 		ifmedia_add(ifm, IFM_ETHER | IFM_100_TX | IFM_FDX, 0, NULL);
541 		ifmedia_set(ifm, IFM_ETHER | IFM_100_TX | IFM_FDX);
542 
543 		aprint_normal_dev(self,
544 		    "10baseT, 10baseT-FDX, 100baseTX, 100baseTX-FDX, auto\n");
545 	}
546 	ifm->ifm_media = ifm->ifm_cur->ifm_media; /* as if the user had requested it */
547 
548 	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
549 	ifp->if_softc = sc;
550 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
551 	ifp->if_ioctl = kse_ioctl;
552 	ifp->if_start = kse_start;
553 	ifp->if_watchdog = kse_watchdog;
554 	ifp->if_init = kse_init;
555 	ifp->if_stop = kse_stop;
556 	IFQ_SET_READY(&ifp->if_snd);
557 
558 	/*
559 	 * capable of 802.1Q VLAN-sized frames and hw assisted tagging.
560 	 * can do IPv4, TCPv4, and UDPv4 checksums in hardware.
561 	 */
562 	sc->sc_ethercom.ec_capabilities = ETHERCAP_VLAN_MTU;
563 	ifp->if_capabilities =
564 	    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
565 	    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
566 	    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx;
567 
568 	sc->sc_flowflags = 0;
569 
570 	if_attach(ifp);
571 	if_deferred_start_init(ifp, NULL);
572 	ether_ifattach(ifp, enaddr);
573 
574 	callout_init(&sc->sc_tick_ch, 0);
575 	callout_setfunc(&sc->sc_tick_ch, phy_tick, sc);
576 
577 	rnd_attach_source(&sc->rnd_source, device_xname(self),
578 	    RND_TYPE_NET, RND_FLAG_DEFAULT);
579 
580 #ifdef KSE_EVENT_COUNTERS
581 	const char *events[34] = {
582 		"RxLoPriotyByte",
583 		"RxHiPriotyByte",
584 		"RxUndersizePkt",
585 		"RxFragments",
586 		"RxOversize",
587 		"RxJabbers",
588 		"RxSymbolError",
589 		"RxCRCError",
590 		"RxAlignmentError",
591 		"RxControl8808Pkts",
592 		"RxPausePkts",
593 		"RxBroadcast",
594 		"RxMulticast",
595 		"RxUnicast",
596 		"Rx64Octets",
597 		"Rx65To127Octets",
598 		"Rx128To255Octets",
599 		"Rx255To511Octets",
600 		"Rx512To1023Octets",
601 		"Rx1024To1522Octets",
602 		"TxLoPriotyByte",
603 		"TxHiPriotyByte",
604 		"TxLateCollision",
605 		"TxPausePkts",
606 		"TxBroadcastPkts",
607 		"TxMulticastPkts",
608 		"TxUnicastPkts",
609 		"TxDeferred",
610 		"TxTotalCollision",
611 		"TxExcessiveCollision",
612 		"TxSingleCollision",
613 		"TxMultipleCollision",
614 		"TxDropPkts",
615 		"RxDropPkts",
616 	};
617 	struct ksext *ee = &sc->sc_ext;
618 	int p = (sc->sc_chip == 0x8842) ? 3 : 1;
619 	for (i = 0; i < p; i++) {
620 		snprintf(ee->evcntname[i], sizeof(ee->evcntname[i]),
621 		    "%s.%d", device_xname(sc->sc_dev), i+1);
622 		for (int ev = 0; ev < 34; ev++) {
623 			evcnt_attach_dynamic(&ee->pev[i][ev], EVCNT_TYPE_MISC,
624 			    NULL, ee->evcntname[i], events[ev]);
625 		}
626 	}
627 #endif
628 	return;
629 
630  fail_5:
631 	for (i = 0; i < KSE_NRXDESC; i++) {
632 		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
633 			bus_dmamap_destroy(sc->sc_dmat,
634 			    sc->sc_rxsoft[i].rxs_dmamap);
635 	}
636  fail_4:
637 	for (i = 0; i < KSE_TXQUEUELEN; i++) {
638 		if (sc->sc_txsoft[i].txs_dmamap != NULL)
639 			bus_dmamap_destroy(sc->sc_dmat,
640 			    sc->sc_txsoft[i].txs_dmamap);
641 	}
642 	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
643  fail_3:
644 	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
645  fail_2:
646 	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
647 	    sizeof(struct kse_control_data));
648  fail_1:
649 	bus_dmamem_free(sc->sc_dmat, &seg, nseg);
650  fail_0:
651 	pci_intr_disestablish(pc, sc->sc_ih);
652  fail:
653 	bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_memsize);
654 	return;
655 }
656 
657 static int
658 kse_ioctl(struct ifnet *ifp, u_long cmd, void *data)
659 {
660 	struct kse_softc *sc = ifp->if_softc;
661 	struct ifreq *ifr = (struct ifreq *)data;
662 	struct ifmedia *ifm;
663 	int s, error;
664 
665 	s = splnet();
666 
667 	switch (cmd) {
668 	case SIOCSIFMEDIA:
669 		/* Flow control requires full-duplex mode. */
670 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
671 		    (ifr->ifr_media & IFM_FDX) == 0)
672 			ifr->ifr_media &= ~IFM_ETH_FMASK;
673 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
674 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
675 				/* We can do both TXPAUSE and RXPAUSE. */
676 				ifr->ifr_media |=
677 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
678 			}
679 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
680 		}
681 		ifm = (sc->sc_chip == 0x8841)
682 		    ? &sc->sc_mii.mii_media : &sc->sc_media;
683 		error = ifmedia_ioctl(ifp, ifr, ifm, cmd);
684 		break;
685 	default:
686 		error = ether_ioctl(ifp, cmd, data);
687 		if (error != ENETRESET)
688 			break;
689 		error = 0;
690 		if (cmd == SIOCSIFCAP)
691 			error = if_init(ifp);
692 		if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
693 			;
694 		else if (ifp->if_flags & IFF_RUNNING) {
695 			/*
696 			 * Multicast list has changed; set the hardware filter
697 			 * accordingly.
698 			 */
699 			kse_set_rcvfilt(sc);
700 		}
701 		break;
702 	}
703 
704 	splx(s);
705 
706 	return error;
707 }
708 
709 static int
710 kse_init(struct ifnet *ifp)
711 {
712 	struct kse_softc *sc = ifp->if_softc;
713 	uint32_t paddr;
714 	int i, error = 0;
715 
716 	/* cancel pending I/O */
717 	kse_stop(ifp, 0);
718 
719 	/* reset all registers but PCI configuration */
720 	kse_reset(sc);
721 
722 	/* craft Tx descriptor ring */
723 	memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
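	/* chain the descriptors through t3; the last entry wraps back to the first */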
724 	for (i = 0, paddr = KSE_CDTXADDR(sc, 1); i < KSE_NTXDESC - 1; i++) {
725 		sc->sc_txdescs[i].t3 = paddr;
726 		paddr += sizeof(struct tdes);
727 	}
728 	sc->sc_txdescs[KSE_NTXDESC - 1].t3 = KSE_CDTXADDR(sc, 0);
729 	KSE_CDTXSYNC(sc, 0, KSE_NTXDESC,
730 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
731 	sc->sc_txfree = KSE_NTXDESC;
732 	sc->sc_txnext = 0;
733 
734 	for (i = 0; i < KSE_TXQUEUELEN; i++)
735 		sc->sc_txsoft[i].txs_mbuf = NULL;
736 	sc->sc_txsfree = KSE_TXQUEUELEN;
737 	sc->sc_txsnext = 0;
738 	sc->sc_txsdirty = 0;
739 
740 	/* craft Rx descriptor ring */
741 	memset(sc->sc_rxdescs, 0, sizeof(sc->sc_rxdescs));
742 	for (i = 0, paddr = KSE_CDRXADDR(sc, 1); i < KSE_NRXDESC - 1; i++) {
743 		sc->sc_rxdescs[i].r3 = paddr;
744 		paddr += sizeof(struct rdes);
745 	}
746 	sc->sc_rxdescs[KSE_NRXDESC - 1].r3 = KSE_CDRXADDR(sc, 0);
747 	for (i = 0; i < KSE_NRXDESC; i++) {
748 		if (sc->sc_rxsoft[i].rxs_mbuf == NULL) {
749 			if ((error = add_rxbuf(sc, i)) != 0) {
750 				aprint_error_dev(sc->sc_dev,
751 				    "unable to allocate or map rx "
752 				    "buffer %d, error = %d\n",
753 				    i, error);
754 				rxdrain(sc);
755 				goto out;
756 			}
757 		}
758 		else
759 			KSE_INIT_RXDESC(sc, i);
760 	}
761 	sc->sc_rxptr = 0;
762 
763 	/* hand Tx/Rx rings to HW */
764 	CSR_WRITE_4(sc, TDLB, KSE_CDTXADDR(sc, 0));
765 	CSR_WRITE_4(sc, RDLB, KSE_CDRXADDR(sc, 0));
766 
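	/*
	 * Translate the enabled IFCAP_CSUM_* capabilities into DMA control
	 * bits, Tx descriptor checksum-generate flags (sc_t1csum), and the
	 * mbuf csum flags to mark on received packets (sc_mcsum).
	 */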
767 	sc->sc_txc = TXC_TEN | TXC_EP | TXC_AC;
768 	sc->sc_rxc = RXC_REN | RXC_RU | RXC_RB;
769 	sc->sc_t1csum = sc->sc_mcsum = 0;
770 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx) {
771 		sc->sc_rxc |= RXC_ICC;
772 		sc->sc_mcsum |= M_CSUM_IPv4;
773 	}
774 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Tx) {
775 		sc->sc_txc |= TXC_ICG;
776 		sc->sc_t1csum |= T1_IPCKG;
777 	}
778 	if (ifp->if_capenable & IFCAP_CSUM_TCPv4_Rx) {
779 		sc->sc_rxc |= RXC_TCC;
780 		sc->sc_mcsum |= M_CSUM_TCPv4;
781 	}
782 	if (ifp->if_capenable & IFCAP_CSUM_TCPv4_Tx) {
783 		sc->sc_txc |= TXC_TCG;
784 		sc->sc_t1csum |= T1_TCPCKG;
785 	}
786 	if (ifp->if_capenable & IFCAP_CSUM_UDPv4_Rx) {
787 		sc->sc_rxc |= RXC_UCC;
788 		sc->sc_mcsum |= M_CSUM_UDPv4;
789 	}
790 	if (ifp->if_capenable & IFCAP_CSUM_UDPv4_Tx) {
791 		sc->sc_txc |= TXC_UCG;
792 		sc->sc_t1csum |= T1_UDPCKG;
793 	}
794 	sc->sc_txc |= (kse_burstsize << TXC_BS_SFT);
795 	sc->sc_rxc |= (kse_burstsize << RXC_BS_SFT);
796 
797 	if (sc->sc_chip == 0x8842) {
798 		/* enable PAUSE flow control */
799 		sc->sc_txc |= TXC_FCE;
800 		sc->sc_rxc |= RXC_FCE;
801 		i = CSR_READ_2(sc, SGCR3);
802 		CSR_WRITE_2(sc, SGCR3, i | CR3_USEFC);
803 	}
804 
805 	/* accept multicast frame or run promisc mode */
806 	kse_set_rcvfilt(sc);
807 
808 	/* set current media */
809 	if (sc->sc_chip == 0x8841)
810 		(void)kse_ifmedia_upd(ifp);
811 
812 	/* enable transmitter and receiver */
813 	CSR_WRITE_4(sc, MDTXC, sc->sc_txc);
814 	CSR_WRITE_4(sc, MDRXC, sc->sc_rxc);
815 	CSR_WRITE_4(sc, MDRSC, 1);
816 
817 	/* enable interrupts */
818 	sc->sc_inten = INT_DMTS | INT_DMRS | INT_DMRBUS;
819 	if (sc->sc_chip == 0x8841)
820 		sc->sc_inten |= INT_DMLCS;
821 	CSR_WRITE_4(sc, INTST, ~0);
822 	CSR_WRITE_4(sc, INTEN, sc->sc_inten);
823 
824 	ifp->if_flags |= IFF_RUNNING;
825 	ifp->if_flags &= ~IFF_OACTIVE;
826 
827 	/* start one second timer */
828 	callout_schedule(&sc->sc_tick_ch, hz);
829 
830 #ifdef KSE_EVENT_COUNTERS
831 	zerostats(sc);
832 #endif
833 
834  out:
835 	if (error) {
836 		ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
837 		ifp->if_timer = 0;
838 		aprint_error_dev(sc->sc_dev, "interface not running\n");
839 	}
840 	return error;
841 }
842 
843 static void
844 kse_stop(struct ifnet *ifp, int disable)
845 {
846 	struct kse_softc *sc = ifp->if_softc;
847 	struct kse_txsoft *txs;
848 	int i;
849 
850 	callout_stop(&sc->sc_tick_ch);
851 
852 	sc->sc_txc &= ~TXC_TEN;
853 	sc->sc_rxc &= ~RXC_REN;
854 	CSR_WRITE_4(sc, MDTXC, sc->sc_txc);
855 	CSR_WRITE_4(sc, MDRXC, sc->sc_rxc);
856 
857 	for (i = 0; i < KSE_TXQUEUELEN; i++) {
858 		txs = &sc->sc_txsoft[i];
859 		if (txs->txs_mbuf != NULL) {
860 			bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
861 			m_freem(txs->txs_mbuf);
862 			txs->txs_mbuf = NULL;
863 		}
864 	}
865 
866 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
867 	ifp->if_timer = 0;
868 
869 	if (disable)
870 		rxdrain(sc);
871 }
872 
873 static void
874 kse_reset(struct kse_softc *sc)
875 {
876 
877 	/* software reset */
878 	CSR_WRITE_2(sc, GRR, 1);
879 	delay(1000); /* the datasheet does not specify the required delay */
880 	CSR_WRITE_2(sc, GRR, 0);
881 
882 	/* enable switch function */
883 	CSR_WRITE_2(sc, SIDER, 1);
884 }
885 
886 static void
887 kse_watchdog(struct ifnet *ifp)
888 {
889 	struct kse_softc *sc = ifp->if_softc;
890 
891 	/*
892 	 * Since we're not interrupting every packet, sweep
893 	 * up before we report an error.
894 	 */
895 	txreap(sc);
896 
897 	if (sc->sc_txfree != KSE_NTXDESC) {
898 		aprint_error_dev(sc->sc_dev,
899 		    "device timeout (txfree %d txsfree %d txnext %d)\n",
900 		    sc->sc_txfree, sc->sc_txsfree, sc->sc_txnext);
901 		if_statinc(ifp, if_oerrors);
902 
903 		/* Reset the interface. */
904 		kse_init(ifp);
905 	}
906 	else if (ifp->if_flags & IFF_DEBUG)
907 		aprint_error_dev(sc->sc_dev, "recovered from device timeout\n");
908 
909 	/* Try to get more packets going. */
910 	kse_start(ifp);
911 }
912 
913 static void
914 kse_start(struct ifnet *ifp)
915 {
916 	struct kse_softc *sc = ifp->if_softc;
917 	struct mbuf *m0, *m;
918 	struct kse_txsoft *txs;
919 	bus_dmamap_t dmamap;
920 	int error, nexttx, lasttx, ofree, seg;
921 	uint32_t tdes0;
922 
923 	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
924 		return;
925 
926 	/* Remember the previous number of free descriptors. */
927 	ofree = sc->sc_txfree;
928 
929 	/*
930 	 * Loop through the send queue, setting up transmit descriptors
931 	 * until we drain the queue, or use up all available transmit
932 	 * descriptors.
933 	 */
934 	for (;;) {
935 		IFQ_POLL(&ifp->if_snd, m0);
936 		if (m0 == NULL)
937 			break;
938 
939 		if (sc->sc_txsfree < KSE_TXQUEUE_GC) {
940 			txreap(sc);
941 			if (sc->sc_txsfree == 0)
942 				break;
943 		}
944 		txs = &sc->sc_txsoft[sc->sc_txsnext];
945 		dmamap = txs->txs_dmamap;
946 
947 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
948 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
949 		if (error) {
950 			if (error == EFBIG) {
951 				aprint_error_dev(sc->sc_dev,
952 				    "Tx packet consumes too many "
953 				    "DMA segments, dropping...\n");
954 				IFQ_DEQUEUE(&ifp->if_snd, m0);
955 				m_freem(m0);
956 				continue;
957 			}
958 			/* Short on resources, just stop for now. */
959 			break;
960 		}
961 
962 		if (dmamap->dm_nsegs > sc->sc_txfree) {
963 			/*
964 			 * Not enough free descriptors to transmit this
965 			 * packet.  We haven't committed anything yet,
966 			 * so just unload the DMA map, put the packet
967 			 * back on the queue, and punt.	 Notify the upper
968 			 * layer that there are not more slots left.
969 			 */
970 			ifp->if_flags |= IFF_OACTIVE;
971 			bus_dmamap_unload(sc->sc_dmat, dmamap);
972 			break;
973 		}
974 
975 		IFQ_DEQUEUE(&ifp->if_snd, m0);
976 
977 		/*
978 		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
979 		 */
980 
981 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
982 		    BUS_DMASYNC_PREWRITE);
983 
984 		tdes0 = 0; /* to postpone 1st segment T0_OWN write */
985 		lasttx = -1;
986 		for (nexttx = sc->sc_txnext, seg = 0;
987 		     seg < dmamap->dm_nsegs;
988 		     seg++, nexttx = KSE_NEXTTX(nexttx)) {
989 			struct tdes *tdes = &sc->sc_txdescs[nexttx];
990 			/*
991 			 * If this is the first descriptor we're
992 			 * enqueueing, don't set the OWN bit just
993 			 * yet.	 That could cause a race condition.
994 			 * We'll do it below.
995 			 */
996 			tdes->t2 = dmamap->dm_segs[seg].ds_addr;
997 			tdes->t1 = sc->sc_t1csum
998 			     | (dmamap->dm_segs[seg].ds_len & T1_TBS_MASK);
999 			tdes->t0 = tdes0;
1000 			tdes0 = T0_OWN; /* 2nd and other segments */
1001 			lasttx = nexttx;
1002 		}
1003 		/*
1004 		 * An outgoing NFS mbuf must be unloaded and freed as soon as
1005 		 * its Tx has completed.  Without T1_IC the mbuf is left
1006 		 * unack'ed for an excessive time and NFS stops making progress
1007 		 * until kse_watchdog() calls txreap() to reclaim it.  It's
1008 		 * painful to traverse every mbuf chain to determine whether
1009 		 * someone is waiting for Tx completion.
1010 		 */
1011 		m = m0;
1012 		do {
1013 			if ((m->m_flags & M_EXT) && m->m_ext.ext_free) {
1014 				sc->sc_txdescs[lasttx].t1 |= T1_IC;
1015 				break;
1016 			}
1017 		} while ((m = m->m_next) != NULL);
1018 
1019 		/* Write deferred 1st segment T0_OWN at the final stage */
1020 		sc->sc_txdescs[lasttx].t1 |= T1_LS;
1021 		sc->sc_txdescs[sc->sc_txnext].t1 |= T1_FS;
1022 		sc->sc_txdescs[sc->sc_txnext].t0 = T0_OWN;
1023 		KSE_CDTXSYNC(sc, sc->sc_txnext, dmamap->dm_nsegs,
1024 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1025 
1026 		/* Tell DMA start transmit */
1027 		CSR_WRITE_4(sc, MDTSC, 1);
1028 
1029 		txs->txs_mbuf = m0;
1030 		txs->txs_firstdesc = sc->sc_txnext;
1031 		txs->txs_lastdesc = lasttx;
1032 		txs->txs_ndesc = dmamap->dm_nsegs;
1033 
1034 		sc->sc_txfree -= txs->txs_ndesc;
1035 		sc->sc_txnext = nexttx;
1036 		sc->sc_txsfree--;
1037 		sc->sc_txsnext = KSE_NEXTTXS(sc->sc_txsnext);
1038 		/*
1039 		 * Pass the packet to any BPF listeners.
1040 		 */
1041 		bpf_mtap(ifp, m0, BPF_D_OUT);
1042 	}
1043 
1044 	if (sc->sc_txsfree == 0 || sc->sc_txfree == 0) {
1045 		/* No more slots left; notify upper layer. */
1046 		ifp->if_flags |= IFF_OACTIVE;
1047 	}
1048 	if (sc->sc_txfree != ofree) {
1049 		/* Set a watchdog timer in case the chip flakes out. */
1050 		ifp->if_timer = 5;
1051 	}
1052 }
1053 
1054 static void
1055 kse_set_rcvfilt(struct kse_softc *sc)
1056 {
1057 	struct ether_multistep step;
1058 	struct ether_multi *enm;
1059 	struct ethercom *ec = &sc->sc_ethercom;
1060 	struct ifnet *ifp = &ec->ec_if;
1061 	uint32_t crc, mchash[2];
1062 	int i;
1063 
1064 	sc->sc_rxc &= ~(RXC_MHTE | RXC_RM | RXC_RA);
1065 
1066 	/* clear perfect match filter and prepare mcast hash table */
1067 	for (i = 0; i < 16; i++)
1068 		 CSR_WRITE_4(sc, MAAH0 + i*8, 0);
1069 	crc = mchash[0] = mchash[1] = 0;
1070 
1071 	ETHER_LOCK(ec);
1072 	if (ifp->if_flags & IFF_PROMISC) {
1073 		ec->ec_flags |= ETHER_F_ALLMULTI;
1074 		ETHER_UNLOCK(ec);
1075 		/* run promisc. mode */
1076 		sc->sc_rxc |= RXC_RA;
1077 		goto update;
1078 	}
1079 	ec->ec_flags &= ~ETHER_F_ALLMULTI;
1080 	ETHER_FIRST_MULTI(step, ec, enm);
1081 	i = 0;
1082 	while (enm != NULL) {
1083 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
1084 			/*
1085 			 * We must listen to a range of multicast addresses.
1086 			 * For now, just accept all multicasts, rather than
1087 			 * trying to set only those filter bits needed to match
1088 			 * the range.  (At this time, the only use of address
1089 			 * ranges is for IP multicast routing, for which the
1090 			 * range is big enough to require all bits set.)
1091 			 */
1092 			ec->ec_flags |= ETHER_F_ALLMULTI;
1093 			ETHER_UNLOCK(ec);
1094 			/* accept all multicast */
1095 			sc->sc_rxc |= RXC_RM;
1096 			goto update;
1097 		}
1098 #if KSE_MCASTDEBUG == 1
1099 		printf("[%d] %s\n", i, ether_sprintf(enm->enm_addrlo));
1100 #endif
1101 		if (i < 16) {
1102 			/* use 16 additional MAC addr to accept mcast */
1103 			uint32_t addr;
1104 			uint8_t *ep = enm->enm_addrlo;
1105 			addr = (ep[3] << 24) | (ep[2] << 16)
1106 			     | (ep[1] << 8)  |  ep[0];
1107 			CSR_WRITE_4(sc, MAAL0 + i*8, addr);
1108 			addr = (ep[5] << 8) | ep[4];
1109 			CSR_WRITE_4(sc, MAAH0 + i*8, addr | (1U << 31));
1110 		} else {
1111 			/* use hash table when too many */
1112 			crc = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN);
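			/* bit 31 of the CRC picks MTR0/MTR1; bits 30:26 pick the bit within it */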
1113 			mchash[crc >> 31] |= 1 << ((crc >> 26) & 0x1f);
1114 		}
1115 		ETHER_NEXT_MULTI(step, enm);
1116 		i++;
1117 	}
1118 	ETHER_UNLOCK(ec);
1119 
1120 	if (crc)
1121 		sc->sc_rxc |= RXC_MHTE;
1122 	CSR_WRITE_4(sc, MTR0, mchash[0]);
1123 	CSR_WRITE_4(sc, MTR1, mchash[1]);
1124  update:
1125 	/* With RA or RM, MHTE/MTR0/MTR1 are never consulted. */
1126 	return;
1127 }
1128 
1129 static int
1130 add_rxbuf(struct kse_softc *sc, int idx)
1131 {
1132 	struct kse_rxsoft *rxs = &sc->sc_rxsoft[idx];
1133 	struct mbuf *m;
1134 	int error;
1135 
1136 	MGETHDR(m, M_DONTWAIT, MT_DATA);
1137 	if (m == NULL)
1138 		return ENOBUFS;
1139 
1140 	MCLGET(m, M_DONTWAIT);
1141 	if ((m->m_flags & M_EXT) == 0) {
1142 		m_freem(m);
1143 		return ENOBUFS;
1144 	}
1145 
1146 	if (rxs->rxs_mbuf != NULL)
1147 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
1148 
1149 	rxs->rxs_mbuf = m;
1150 
1151 	error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap,
1152 	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL, BUS_DMA_NOWAIT);
1153 	if (error) {
1154 		aprint_error_dev(sc->sc_dev,
1155 		    "can't load rx DMA map %d, error = %d\n", idx, error);
1156 		panic("kse_add_rxbuf");
1157 	}
1158 
1159 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
1160 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
1161 
1162 	KSE_INIT_RXDESC(sc, idx);
1163 
1164 	return 0;
1165 }
1166 
1167 static void
1168 rxdrain(struct kse_softc *sc)
1169 {
1170 	struct kse_rxsoft *rxs;
1171 	int i;
1172 
1173 	for (i = 0; i < KSE_NRXDESC; i++) {
1174 		rxs = &sc->sc_rxsoft[i];
1175 		if (rxs->rxs_mbuf != NULL) {
1176 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
1177 			m_freem(rxs->rxs_mbuf);
1178 			rxs->rxs_mbuf = NULL;
1179 		}
1180 	}
1181 }
1182 
1183 static int
1184 kse_intr(void *arg)
1185 {
1186 	struct kse_softc *sc = arg;
1187 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1188 	uint32_t isr;
1189 
1190 	if ((isr = CSR_READ_4(sc, INTST)) == 0)
1191 		return 0;
1192 
1193 	if (isr & INT_DMRS)
1194 		rxintr(sc);
1195 	if (isr & INT_DMTS)
1196 		txreap(sc);
1197 	if (isr & INT_DMLCS)
1198 		lnkchg(sc);
1199 	if (isr & INT_DMRBUS)
1200 		aprint_error_dev(sc->sc_dev, "Rx descriptor full\n");
1201 
1202 	CSR_WRITE_4(sc, INTST, isr);
1203 
1204 	if (ifp->if_flags & IFF_RUNNING)
1205 		if_schedule_deferred_start(ifp);
1206 
1207 	return 1;
1208 }
1209 
1210 static void
1211 rxintr(struct kse_softc *sc)
1212 {
1213 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1214 	struct kse_rxsoft *rxs;
1215 	struct mbuf *m;
1216 	uint32_t rxstat;
1217 	int i, len;
1218 
1219 	for (i = sc->sc_rxptr; /*CONSTCOND*/ 1; i = KSE_NEXTRX(i)) {
1220 		rxs = &sc->sc_rxsoft[i];
1221 
1222 		KSE_CDRXSYNC(sc, i,
1223 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1224 
1225 		rxstat = sc->sc_rxdescs[i].r0;
1226 
1227 		if (rxstat & R0_OWN) /* desc is left empty */
1228 			break;
1229 
1230 		/* R0_FS | R0_LS must have been marked for this desc */
1231 
1232 		if (rxstat & R0_ES) {
1233 			if_statinc(ifp, if_ierrors);
1234 #define PRINTERR(bit, str)						\
1235 			if (rxstat & (bit))				\
1236 				aprint_error_dev(sc->sc_dev,		\
1237 				    "%s\n", str)
1238 			PRINTERR(R0_TL, "frame too long");
1239 			PRINTERR(R0_RF, "runt frame");
1240 			PRINTERR(R0_CE, "bad FCS");
1241 #undef PRINTERR
1242 			KSE_INIT_RXDESC(sc, i);
1243 			continue;
1244 		}
1245 
1246 		/* HW errata; frame might be too small or too large */
1247 
1248 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
1249 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
1250 
1251 		len = rxstat & R0_FL_MASK;
1252 		len -= ETHER_CRC_LEN;	/* Trim CRC off */
1253 		m = rxs->rxs_mbuf;
1254 
1255 		if (add_rxbuf(sc, i) != 0) {
1256 			if_statinc(ifp, if_ierrors);
1257 			KSE_INIT_RXDESC(sc, i);
1258 			bus_dmamap_sync(sc->sc_dmat,
1259 			    rxs->rxs_dmamap, 0,
1260 			    rxs->rxs_dmamap->dm_mapsize,
1261 			    BUS_DMASYNC_PREREAD);
1262 			continue;
1263 		}
1264 
1265 		m_set_rcvif(m, ifp);
1266 		m->m_pkthdr.len = m->m_len = len;
1267 
1268 		if (sc->sc_mcsum) {
1269 			m->m_pkthdr.csum_flags |= sc->sc_mcsum;
1270 			if (rxstat & R0_IPE)
1271 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
1272 			if (rxstat & (R0_TCPE | R0_UDPE))
1273 				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
1274 		}
1275 		if_percpuq_enqueue(ifp->if_percpuq, m);
1276 #ifdef KSEDIAGNOSTIC
1277 		if (kse_monitor_rxintr > 0) {
1278 			aprint_error_dev(sc->sc_dev,
1279 			    "m stat %x data %p len %d\n",
1280 			    rxstat, m->m_data, m->m_len);
1281 		}
1282 #endif
1283 	}
1284 	sc->sc_rxptr = i;
1285 }
1286 
1287 static void
1288 txreap(struct kse_softc *sc)
1289 {
1290 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1291 	struct kse_txsoft *txs;
1292 	uint32_t txstat;
1293 	int i;
1294 
1295 	ifp->if_flags &= ~IFF_OACTIVE;
1296 
1297 	for (i = sc->sc_txsdirty; sc->sc_txsfree != KSE_TXQUEUELEN;
1298 	     i = KSE_NEXTTXS(i), sc->sc_txsfree++) {
1299 		txs = &sc->sc_txsoft[i];
1300 
1301 		KSE_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndesc,
1302 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1303 
1304 		txstat = sc->sc_txdescs[txs->txs_lastdesc].t0;
1305 
1306 		if (txstat & T0_OWN) /* desc is still in use */
1307 			break;
1308 
1309 		/* There is no way to tell transmission status per frame */
1310 
1311 		if_statinc(ifp, if_opackets);
1312 
1313 		sc->sc_txfree += txs->txs_ndesc;
1314 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
1315 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1316 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
1317 		m_freem(txs->txs_mbuf);
1318 		txs->txs_mbuf = NULL;
1319 	}
1320 	sc->sc_txsdirty = i;
1321 	if (sc->sc_txsfree == KSE_TXQUEUELEN)
1322 		ifp->if_timer = 0;
1323 }
1324 
1325 static void
1326 lnkchg(struct kse_softc *sc)
1327 {
1328 	struct ifmediareq ifmr;
1329 
1330 #if KSE_LINKDEBUG == 1
1331 	uint16_t p1sr = CSR_READ_2(sc, P1SR);
1332 printf("link %s detected\n", (p1sr & PxSR_LINKUP) ? "up" : "down");
1333 #endif
1334 	kse_ifmedia_sts(&sc->sc_ethercom.ec_if, &ifmr);
1335 }
1336 
1337 static int
1338 kse_ifmedia_upd(struct ifnet *ifp)
1339 {
1340 	struct kse_softc *sc = ifp->if_softc;
1341 	struct ifmedia *ifm = &sc->sc_mii.mii_media;
1342 	uint16_t p1cr4;
1343 
1344 	p1cr4 = 0;
1345 	if (IFM_SUBTYPE(ifm->ifm_cur->ifm_media) == IFM_AUTO) {
1346 		p1cr4 |= PxCR_STARTNEG;	/* restart AN */
1347 		p1cr4 |= PxCR_AUTOEN;	/* enable AN */
1348 		p1cr4 |= PxCR_USEFC;	/* advertise flow control pause */
1349 		p1cr4 |= 0xf;		/* adv. 100FDX,100HDX,10FDX,10HDX */
1350 	} else {
1351 		if (IFM_SUBTYPE(ifm->ifm_cur->ifm_media) == IFM_100_TX)
1352 			p1cr4 |= PxCR_SPD100;
1353 		if (ifm->ifm_media & IFM_FDX)
1354 			p1cr4 |= PxCR_USEFDX;
1355 	}
1356 	CSR_WRITE_2(sc, P1CR4, p1cr4);
1357 #if KSE_LINKDEBUG == 1
1358 printf("P1CR4: %04x\n", p1cr4);
1359 #endif
1360 	return 0;
1361 }
1362 
1363 static void
1364 kse_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
1365 {
1366 	struct kse_softc *sc = ifp->if_softc;
1367 	struct mii_data *mii = &sc->sc_mii;
1368 
1369 	mii_pollstat(mii);
1370 	ifmr->ifm_status = mii->mii_media_status;
1371 	ifmr->ifm_active = sc->sc_flowflags |
1372 	    (mii->mii_media_active & ~IFM_ETH_FMASK);
1373 }
1374 
1375 static void
1376 nopifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
1377 {
1378 	struct kse_softc *sc = ifp->if_softc;
1379 	struct ifmedia *ifm = &sc->sc_media;
1380 
1381 #if KSE_LINKDEBUG == 2
1382 printf("p1sr: %04x, p2sr: %04x\n", CSR_READ_2(sc, P1SR), CSR_READ_2(sc, P2SR));
1383 #endif
1384 
1385 	/* 8842 MAC pretends 100FDX all the time */
1386 	ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE;
1387 	ifmr->ifm_active = ifm->ifm_cur->ifm_media |
1388 	    IFM_FLOW | IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE;
1389 }
1390 
1391 static void
1392 phy_tick(void *arg)
1393 {
1394 	struct kse_softc *sc = arg;
1395 	struct mii_data *mii = &sc->sc_mii;
1396 	int s;
1397 
1398 	if (sc->sc_chip == 0x8841) {
1399 		s = splnet();
1400 		mii_tick(mii);
1401 		splx(s);
1402 	}
1403 #ifdef KSE_EVENT_COUNTERS
1404 	stat_tick(arg);
1405 #endif
1406 	callout_schedule(&sc->sc_tick_ch, hz);
1407 }
1408 
1409 static const uint16_t phy1csr[] = {
1410 	/* 0 BMCR */	0x4d0,
1411 	/* 1 BMSR */	0x4d2,
1412 	/* 2 PHYID1 */	0x4d6,	/* 0x0022 - PHY1HR */
1413 	/* 3 PHYID2 */	0x4d4,	/* 0x1430 - PHY1LR */
1414 	/* 4 ANAR */	0x4d8,
1415 	/* 5 ANLPAR */	0x4da,
1416 };
1417 
1418 int
1419 kse_mii_readreg(device_t self, int phy, int reg, uint16_t *val)
1420 {
1421 	struct kse_softc *sc = device_private(self);
1422 
1423 	if (phy != 1 || reg >= __arraycount(phy1csr) || reg < 0)
1424 		return EINVAL;
1425 	*val = CSR_READ_2(sc, phy1csr[reg]);
1426 	return 0;
1427 }
1428 
1429 int
1430 kse_mii_writereg(device_t self, int phy, int reg, uint16_t val)
1431 {
1432 	struct kse_softc *sc = device_private(self);
1433 
1434 	if (phy != 1 || reg >= __arraycount(phy1csr) || reg < 0)
1435 		return EINVAL;
1436 	CSR_WRITE_2(sc, phy1csr[reg], val);
1437 	return 0;
1438 }
1439 
1440 void
1441 kse_mii_statchg(struct ifnet *ifp)
1442 {
1443 	struct kse_softc *sc = ifp->if_softc;
1444 	struct mii_data *mii = &sc->sc_mii;
1445 
1446 #if KSE_LINKDEBUG == 1
1447 	/* decode P1SR register value */
1448 	uint16_t p1sr = CSR_READ_2(sc, P1SR);
1449 	printf("P1SR %04x, spd%d", p1sr, (p1sr & PxSR_SPD100) ? 100 : 10);
1450 	if (p1sr & PxSR_FDX)
1451 		printf(",full-duplex");
1452 	if (p1sr & PxSR_RXFLOW)
1453 		printf(",rxpause");
1454 	if (p1sr & PxSR_TXFLOW)
1455 		printf(",txpause");
1456 	printf("\n");
1457 	/* show resolved mii(4) parameters to compare against above */
1458 	printf("MII spd%d",
1459 	    (int)(sc->sc_ethercom.ec_if.if_baudrate / IF_Mbps(1)));
1460 	if (mii->mii_media_active & IFM_FDX)
1461 		printf(",full-duplex");
1462 	if (mii->mii_media_active & IFM_FLOW) {
1463 		printf(",flowcontrol");
1464 		if (mii->mii_media_active & IFM_ETH_RXPAUSE)
1465 			printf(",rxpause");
1466 		if (mii->mii_media_active & IFM_ETH_TXPAUSE)
1467 			printf(",txpause");
1468 	}
1469 	printf("\n");
1470 #endif
1471 	/* Get flow control negotiation result. */
1472 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
1473 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags)
1474 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
1475 
1476 	/* Adjust MAC PAUSE flow control. */
1477 	if ((mii->mii_media_active & IFM_FDX)
1478 	    && (sc->sc_flowflags & IFM_ETH_TXPAUSE))
1479 		sc->sc_txc |= TXC_FCE;
1480 	else
1481 		sc->sc_txc &= ~TXC_FCE;
1482 	if ((mii->mii_media_active & IFM_FDX)
1483 	    && (sc->sc_flowflags & IFM_ETH_RXPAUSE))
1484 		sc->sc_rxc |= RXC_FCE;
1485 	else
1486 		sc->sc_rxc &= ~RXC_FCE;
1487 	CSR_WRITE_4(sc, MDTXC, sc->sc_txc);
1488 	CSR_WRITE_4(sc, MDRXC, sc->sc_rxc);
1489 #if KSE_LINKDEBUG == 1
1490 	printf("%ctxfce, %crxfce\n",
1491 	    (sc->sc_txc & TXC_FCE) ? '+' : '-',
1492 	    (sc->sc_rxc & RXC_FCE) ? '+' : '-');
1493 #endif
1494 }
1495 
1496 #ifdef KSE_EVENT_COUNTERS
1497 static void
1498 stat_tick(void *arg)
1499 {
1500 	struct kse_softc *sc = arg;
1501 	struct ksext *ee = &sc->sc_ext;
1502 	int nport, p, i, reg, val;
1503 
1504 	nport = (sc->sc_chip == 0x8842) ? 3 : 1;
1505 	for (p = 0; p < nport; p++) {
1506 		/* read 34 ev counters by indirect read via IACR */
1507 		for (i = 0; i < 32; i++) {
1508 			reg = EVCNTBR + p * 0x20 + i;
1509 			CSR_WRITE_2(sc, IACR, reg);
1510 			/* the 30-bit counter value is split across IADR5 & IADR4 */
1511 			do {
1512 				val = CSR_READ_2(sc, IADR5) << 16;
1513 			} while ((val & IADR_LATCH) == 0);
1514 			if (val & IADR_OVF) {
1515 				(void)CSR_READ_2(sc, IADR4);
1516 				val = 0x3fffffff; /* counter has overflowed */
1517 			}
1518 			else {
1519 				val &= 0x3fff0000;		/* 29:16 */
1520 				val |= CSR_READ_2(sc, IADR4);	/* 15:0 */
1521 			}
1522 			ee->pev[p][i].ev_count += val; /* ev0 thru 31 */
1523 		}
1524 		/* ev32 and ev33 are 16-bit counters */
1525 		CSR_WRITE_2(sc, IACR, EVCNTBR + 0x100 + p);
1526 		ee->pev[p][32].ev_count += CSR_READ_2(sc, IADR4); /* ev32 */
1527 		CSR_WRITE_2(sc, IACR, EVCNTBR + 0x100 + p * 3 + 1);
1528 		ee->pev[p][33].ev_count += CSR_READ_2(sc, IADR4); /* ev33 */
1529 	}
1530 }
1531 
1532 static void
1533 zerostats(struct kse_softc *sc)
1534 {
1535 	struct ksext *ee = &sc->sc_ext;
1536 	int nport, p, i, reg, val;
1537 
1538 	/* Make sure all the HW counters are zeroed */
1539 	nport = (sc->sc_chip == 0x8842) ? 3 : 1;
1540 	for (p = 0; p < nport; p++) {
1541 		for (i = 0; i < 32; i++) {
1542 			reg = EVCNTBR + p * 0x20 + i;
1543 			CSR_WRITE_2(sc, IACR, reg);
1544 			do {
1545 				val = CSR_READ_2(sc, IADR5) << 16;
1546 			} while ((val & IADR_LATCH) == 0);
1547 			(void)CSR_READ_2(sc, IADR4);
1548 			ee->pev[p][i].ev_count = 0;
1549 		}
1550 		CSR_WRITE_2(sc, IACR, EVCNTBR + 0x100 + p);
1551 		(void)CSR_READ_2(sc, IADR4);
1552 		CSR_WRITE_2(sc, IACR, EVCNTBR + 0x100 + p * 3 + 1);
1553 		(void)CSR_READ_2(sc, IADR4);
1554 		ee->pev[p][32].ev_count = 0;
1555 		ee->pev[p][33].ev_count = 0;
1556 	}
1557 }
1558 #endif
1559