1 /*	$NetBSD: if_kse.c,v 1.22 2010/04/05 07:20:26 joerg Exp $	*/
2 
3 /*-
4  * Copyright (c) 2006 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Tohru Nishimura.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 #include <sys/cdefs.h>
33 __KERNEL_RCSID(0, "$NetBSD: if_kse.c,v 1.22 2010/04/05 07:20:26 joerg Exp $");
34 
35 
36 #include <sys/param.h>
37 #include <sys/systm.h>
38 #include <sys/callout.h>
39 #include <sys/mbuf.h>
40 #include <sys/malloc.h>
41 #include <sys/kernel.h>
42 #include <sys/ioctl.h>
43 #include <sys/errno.h>
44 #include <sys/device.h>
45 #include <sys/queue.h>
46 
47 #include <machine/endian.h>
48 #include <sys/bus.h>
49 #include <sys/intr.h>
50 
51 #include <net/if.h>
52 #include <net/if_media.h>
53 #include <net/if_dl.h>
54 #include <net/if_ether.h>
55 
56 #include <net/bpf.h>
57 
58 #include <dev/pci/pcivar.h>
59 #include <dev/pci/pcireg.h>
60 #include <dev/pci/pcidevs.h>
61 
62 #define CSR_READ_4(sc, off) \
63 	    bus_space_read_4(sc->sc_st, sc->sc_sh, off)
64 #define CSR_WRITE_4(sc, off, val) \
65 	    bus_space_write_4(sc->sc_st, sc->sc_sh, off, val)
66 #define CSR_READ_2(sc, off) \
67 	    bus_space_read_2(sc->sc_st, sc->sc_sh, off)
68 #define CSR_WRITE_2(sc, off, val) \
69 	    bus_space_write_2(sc->sc_st, sc->sc_sh, off, val)
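/*
 * The CSR_* macros wrap bus_space accesses to the register window that
 * kse_attach() maps from PCI BAR 0x10: 32-bit accessors for the DMA
 * engine registers below, 16-bit ones for the switch/MAC registers.
 * A link check, for example, roughly looks like
 *
 *	if (CSR_READ_2(sc, P1SR) & (1U << 5))
 *		... link is up ...
 *
 * which mirrors the test used in lnkchg() and ifmedia_sts() below.
 */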
70 
71 #define MDTXC	0x000	/* DMA transmit control */
72 #define MDRXC	0x004	/* DMA receive control */
73 #define MDTSC	0x008	/* DMA transmit start */
74 #define MDRSC	0x00c	/* DMA receive start */
75 #define TDLB	0x010	/* transmit descriptor list base */
76 #define RDLB	0x014	/* receive descriptor list base */
77 #define MTR0	0x020	/* multicast table 31:0 */
78 #define MTR1	0x024	/* multicast table 63:32 */
79 #define INTEN	0x028	/* interrupt enable */
80 #define INTST	0x02c	/* interrupt status */
81 #define MARL	0x200	/* MAC address low */
82 #define MARM	0x202	/* MAC address middle */
83 #define MARH	0x204	/* MAC address high */
84 #define GRR	0x216	/* global reset */
85 #define CIDR	0x400	/* chip ID and enable */
86 #define CGCR	0x40a	/* chip global control */
87 #define IACR	0x4a0	/* indirect access control */
88 #define IADR1	0x4a2	/* indirect access data 66:63 */
89 #define IADR2	0x4a4	/* indirect access data 47:32 */
90 #define IADR3	0x4a6	/* indirect access data 63:48 */
91 #define IADR4	0x4a8	/* indirect access data 15:0 */
92 #define IADR5	0x4aa	/* indirect access data 31:16 */
93 #define P1CR4	0x512	/* port 1 control 4 */
94 #define P1SR	0x514	/* port 1 status */
95 #define P2CR4	0x532	/* port 2 control 4 */
96 #define P2SR	0x534	/* port 2 status */
97 
98 #define TXC_BS_MSK	0x3f000000	/* burst size */
99 #define TXC_BS_SFT	(24)		/* 1,2,4,8,16,32 or 0 for unlimited */
100 #define TXC_UCG		(1U<<18)	/* generate UDP checksum */
101 #define TXC_TCG		(1U<<17)	/* generate TCP checksum */
102 #define TXC_ICG		(1U<<16)	/* generate IP checksum */
103 #define TXC_FCE		(1U<<9)		/* enable flowcontrol */
104 #define TXC_EP		(1U<<2)		/* enable automatic padding */
105 #define TXC_AC		(1U<<1)		/* add CRC to frame */
106 #define TXC_TEN		(1)		/* enable DMA to run */
107 
108 #define RXC_BS_MSK	0x3f000000	/* burst size */
109 #define RXC_BS_SFT	(24)		/* 1,2,4,8,16,32 or 0 for unlimited */
110 #define RXC_IHAE	(1U<<19)	/* IP header alignment enable */
111 #define RXC_UCC		(1U<<18)	/* run UDP checksum */
112 #define RXC_TCC		(1U<<17)	/* run TCP checksum */
113 #define RXC_ICC		(1U<<16)	/* run IP checksum */
114 #define RXC_FCE		(1U<<9)		/* enable flowcontrol */
115 #define RXC_RB		(1U<<6)		/* receive broadcast frame */
116 #define RXC_RM		(1U<<5)		/* receive multicast frame */
117 #define RXC_RU		(1U<<4)		/* receive unicast frame */
118 #define RXC_RE		(1U<<3)		/* accept error frame */
119 #define RXC_RA		(1U<<2)		/* receive all frame */
120 #define RXC_MHTE	(1U<<1)		/* use multicast hash table */
121 #define RXC_REN		(1)		/* enable DMA to run */
122 
123 #define INT_DMLCS	(1U<<31)	/* link status change */
124 #define INT_DMTS	(1U<<30)	/* sending desc. has posted Tx done */
125 #define INT_DMRS	(1U<<29)	/* frame was received */
126 #define INT_DMRBUS	(1U<<27)	/* Rx descriptor pool is full */
127 
128 #define T0_OWN		(1U<<31)	/* desc is ready to Tx */
129 
130 #define R0_OWN		(1U<<31)	/* desc is empty */
131 #define R0_FS		(1U<<30)	/* first segment of frame */
132 #define R0_LS		(1U<<29)	/* last segment of frame */
133 #define R0_IPE		(1U<<28)	/* IP checksum error */
134 #define R0_TCPE		(1U<<27)	/* TCP checksum error */
135 #define R0_UDPE		(1U<<26)	/* UDP checksum error */
136 #define R0_ES		(1U<<25)	/* error summary */
137 #define R0_MF		(1U<<24)	/* multicast frame */
138 #define R0_SPN		0x00300000	/* 21:20 switch port 1/2 */
139 #define R0_ALIGN	0x00300000	/* 21:20 (KSZ8692P) Rx align amount */
140 #define R0_RE		(1U<<19)	/* MII reported error */
141 #define R0_TL		(1U<<18)	/* frame too long, beyond 1518 */
142 #define R0_RF		(1U<<17)	/* damaged runt frame */
143 #define R0_CE		(1U<<16)	/* CRC error */
144 #define R0_FT		(1U<<15)	/* frame type */
145 #define R0_FL_MASK	0x7ff		/* frame length 10:0 */
146 
147 #define T1_IC		(1U<<31)	/* post interrupt on complete */
148 #define T1_FS		(1U<<30)	/* first segment of frame */
149 #define T1_LS		(1U<<29)	/* last segment of frame */
150 #define T1_IPCKG	(1U<<28)	/* generate IP checksum */
151 #define T1_TCPCKG	(1U<<27)	/* generate TCP checksum */
152 #define T1_UDPCKG	(1U<<26)	/* generate UDP checksum */
153 #define T1_TER		(1U<<25)	/* end of ring */
154 #define T1_SPN		0x00300000	/* 21:20 switch port 1/2 */
155 #define T1_TBS_MASK	0x7ff		/* segment size 10:0 */
156 
157 #define R1_RER		(1U<<25)	/* end of ring */
158 #define R1_RBS_MASK	0x7fc		/* segment size 10:0 */
159 
160 #define KSE_NTXSEGS		16
161 #define KSE_TXQUEUELEN		64
162 #define KSE_TXQUEUELEN_MASK	(KSE_TXQUEUELEN - 1)
163 #define KSE_TXQUEUE_GC		(KSE_TXQUEUELEN / 4)
164 #define KSE_NTXDESC		256
165 #define KSE_NTXDESC_MASK	(KSE_NTXDESC - 1)
166 #define KSE_NEXTTX(x)		(((x) + 1) & KSE_NTXDESC_MASK)
167 #define KSE_NEXTTXS(x)		(((x) + 1) & KSE_TXQUEUELEN_MASK)
168 
169 #define KSE_NRXDESC		64
170 #define KSE_NRXDESC_MASK	(KSE_NRXDESC - 1)
171 #define KSE_NEXTRX(x)		(((x) + 1) & KSE_NRXDESC_MASK)
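/*
 * Ring indices wrap with a simple mask, which only works because
 * KSE_NTXDESC, KSE_TXQUEUELEN and KSE_NRXDESC are all powers of two.
 */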
172 
173 struct tdes {
174 	uint32_t t0, t1, t2, t3;
175 };
176 
177 struct rdes {
178 	uint32_t r0, r1, r2, r3;
179 };
180 
181 struct kse_control_data {
182 	struct tdes kcd_txdescs[KSE_NTXDESC];
183 	struct rdes kcd_rxdescs[KSE_NRXDESC];
184 };
185 #define KSE_CDOFF(x)		offsetof(struct kse_control_data, x)
186 #define KSE_CDTXOFF(x)		KSE_CDOFF(kcd_txdescs[(x)])
187 #define KSE_CDRXOFF(x)		KSE_CDOFF(kcd_rxdescs[(x)])
188 
189 struct kse_txsoft {
190 	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
191 	bus_dmamap_t txs_dmamap;	/* our DMA map */
192 	int txs_firstdesc;		/* first descriptor in packet */
193 	int txs_lastdesc;		/* last descriptor in packet */
194 	int txs_ndesc;			/* # of descriptors used */
195 };
196 
197 struct kse_rxsoft {
198 	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
199 	bus_dmamap_t rxs_dmamap;	/* our DMA map */
200 };
201 
202 struct kse_softc {
203 	struct device sc_dev;		/* generic device information */
204 	bus_space_tag_t sc_st;		/* bus space tag */
205 	bus_space_handle_t sc_sh;	/* bus space handle */
206 	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
207 	struct ethercom sc_ethercom;	/* Ethernet common data */
208 	void *sc_ih;			/* interrupt cookie */
209 
210 	struct ifmedia sc_media;	/* ifmedia information */
211 	int sc_media_status;		/* PHY */
212 	int sc_media_active;		/* PHY */
213 	callout_t  sc_callout;		/* MII tick callout */
214 	callout_t  sc_stat_ch;		/* statistics counter callout */
215 
216 	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
217 #define sc_cddma	sc_cddmamap->dm_segs[0].ds_addr
218 
219 	struct kse_control_data *sc_control_data;
220 #define sc_txdescs	sc_control_data->kcd_txdescs
221 #define sc_rxdescs	sc_control_data->kcd_rxdescs
222 
223 	struct kse_txsoft sc_txsoft[KSE_TXQUEUELEN];
224 	struct kse_rxsoft sc_rxsoft[KSE_NRXDESC];
225 	int sc_txfree;			/* number of free Tx descriptors */
226 	int sc_txnext;			/* next ready Tx descriptor */
227 	int sc_txsfree;			/* number of free Tx jobs */
228 	int sc_txsnext;			/* next ready Tx job */
229 	int sc_txsdirty;		/* dirty Tx jobs */
230 	int sc_rxptr;			/* next ready Rx descriptor/descsoft */
231 
232 	uint32_t sc_txc, sc_rxc;
233 	uint32_t sc_t1csum;
234 	int sc_mcsum;
235 	uint32_t sc_inten;
236 
237 	uint32_t sc_chip;
238 	uint8_t sc_altmac[16][ETHER_ADDR_LEN];
239 	uint16_t sc_vlan[16];
240 
241 #ifdef KSE_EVENT_COUNTERS
242 	struct ksext {
243 		char evcntname[3][8];
244 		struct evcnt pev[3][34];
245 	} sc_ext;			/* switch statistics */
246 #endif
247 };
248 
249 #define KSE_CDTXADDR(sc, x)	((sc)->sc_cddma + KSE_CDTXOFF((x)))
250 #define KSE_CDRXADDR(sc, x)	((sc)->sc_cddma + KSE_CDRXOFF((x)))
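/*
 * KSE_CDTXADDR()/KSE_CDRXADDR() turn a descriptor index into the bus
 * address the chip should be given, by adding the descriptor's offset
 * within kse_control_data to the base address of the loaded control
 * data DMA map (sc_cddma).
 */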
251 
252 #define KSE_CDTXSYNC(sc, x, n, ops)					\
253 do {									\
254 	int __x, __n;							\
255 									\
256 	__x = (x);							\
257 	__n = (n);							\
258 									\
259 	/* If it will wrap around, sync to the end of the ring. */	\
260 	if ((__x + __n) > KSE_NTXDESC) {				\
261 		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
262 		    KSE_CDTXOFF(__x), sizeof(struct tdes) *		\
263 		    (KSE_NTXDESC - __x), (ops));			\
264 		__n -= (KSE_NTXDESC - __x);				\
265 		__x = 0;						\
266 	}								\
267 									\
268 	/* Now sync whatever is left. */				\
269 	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
270 	    KSE_CDTXOFF(__x), sizeof(struct tdes) * __n, (ops));	\
271 } while (/*CONSTCOND*/0)
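/*
 * KSE_CDTXSYNC() syncs `n' Tx descriptors starting at index `x',
 * splitting the bus_dmamap_sync() call in two when the range wraps past
 * the end of the ring.  A typical use, as in kse_start(), is roughly
 *
 *	KSE_CDTXSYNC(sc, sc->sc_txnext, dmamap->dm_nsegs,
 *	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
 */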
272 
273 #define KSE_CDRXSYNC(sc, x, ops)					\
274 do {									\
275 	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
276 	    KSE_CDRXOFF((x)), sizeof(struct rdes), (ops));		\
277 } while (/*CONSTCOND*/0)
278 
279 #define KSE_INIT_RXDESC(sc, x)						\
280 do {									\
281 	struct kse_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
282 	struct rdes *__rxd = &(sc)->sc_rxdescs[(x)];			\
283 	struct mbuf *__m = __rxs->rxs_mbuf;				\
284 									\
285 	__m->m_data = __m->m_ext.ext_buf;				\
286 	__rxd->r2 = __rxs->rxs_dmamap->dm_segs[0].ds_addr;		\
287 	__rxd->r1 = R1_RBS_MASK /* __m->m_ext.ext_size */;		\
288 	__rxd->r0 = R0_OWN;						\
289 	KSE_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
290 } while (/*CONSTCOND*/0)
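/*
 * KSE_INIT_RXDESC() rewinds the mbuf's data pointer to the start of its
 * cluster, reloads the descriptor with the buffer's bus address and
 * size, and hands the descriptor back to the chip by setting R0_OWN.
 */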
291 
292 u_int kse_burstsize = 8;	/* DMA burst length tuning knob */
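/*
 * kse_burstsize is shifted into the TXC_BS/RXC_BS fields by kse_init();
 * per the register comments above, the valid values are 1, 2, 4, 8, 16
 * or 32, with 0 meaning an unlimited burst.
 */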
293 
294 #ifdef KSEDIAGNOSTIC
295 u_int kse_monitor_rxintr;	/* fragmented UDP csum HW bug hook */
296 #endif
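/*
 * With KSEDIAGNOSTIC defined, setting kse_monitor_rxintr to a non-zero
 * value (e.g. from a debugger) makes rxintr() log the status word, data
 * pointer and length of every received frame, which is how the
 * fragmented-UDP checksum hardware bug noted above can be chased.
 */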
297 
298 static int kse_match(device_t, cfdata_t, void *);
299 static void kse_attach(device_t, device_t, void *);
300 
301 CFATTACH_DECL(kse, sizeof(struct kse_softc),
302     kse_match, kse_attach, NULL, NULL);
303 
304 static int kse_ioctl(struct ifnet *, u_long, void *);
305 static void kse_start(struct ifnet *);
306 static void kse_watchdog(struct ifnet *);
307 static int kse_init(struct ifnet *);
308 static void kse_stop(struct ifnet *, int);
309 static void kse_reset(struct kse_softc *);
310 static void kse_set_filter(struct kse_softc *);
311 static int add_rxbuf(struct kse_softc *, int);
312 static void rxdrain(struct kse_softc *);
313 static int kse_intr(void *);
314 static void rxintr(struct kse_softc *);
315 static void txreap(struct kse_softc *);
316 static void lnkchg(struct kse_softc *);
317 static int ifmedia_upd(struct ifnet *);
318 static void ifmedia_sts(struct ifnet *, struct ifmediareq *);
319 static void phy_tick(void *);
320 static int ifmedia2_upd(struct ifnet *);
321 static void ifmedia2_sts(struct ifnet *, struct ifmediareq *);
322 #ifdef KSE_EVENT_COUNTERS
323 static void stat_tick(void *);
324 static void zerostats(struct kse_softc *);
325 #endif
326 
327 static int
328 kse_match(device_t parent, cfdata_t match, void *aux)
329 {
330 	struct pci_attach_args *pa = (struct pci_attach_args *)aux;
331 
332 	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_MICREL &&
333 	     (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_MICREL_KSZ8842 ||
334 	      PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_MICREL_KSZ8841) &&
335 	    PCI_CLASS(pa->pa_class) == PCI_CLASS_NETWORK)
336 		return 1;
337 
338 	return 0;
339 }
340 
341 static void
342 kse_attach(device_t parent, device_t self, void *aux)
343 {
344 	struct kse_softc *sc = device_private(self);
345 	struct pci_attach_args *pa = aux;
346 	pci_chipset_tag_t pc = pa->pa_pc;
347 	pci_intr_handle_t ih;
348 	const char *intrstr;
349 	struct ifnet *ifp;
350 	struct ifmedia *ifm;
351 	uint8_t enaddr[ETHER_ADDR_LEN];
352 	bus_dma_segment_t seg;
353 	int i, p, error, nseg;
354 	pcireg_t pmode;
355 	int pmreg;
356 
357 	if (pci_mapreg_map(pa, 0x10,
358 	    PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT,
359 	    0, &sc->sc_st, &sc->sc_sh, NULL, NULL) != 0) {
360 		printf(": unable to map device registers\n");
361 		return;
362 	}
363 
364 	sc->sc_dmat = pa->pa_dmat;
365 
366 	/* Make sure bus mastering is enabled. */
367 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
368 	    pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG) |
369 	    PCI_COMMAND_MASTER_ENABLE);
370 
371 	/* Get it out of power save mode, if needed. */
372 	if (pci_get_capability(pc, pa->pa_tag, PCI_CAP_PWRMGMT, &pmreg, 0)) {
373 		pmode = pci_conf_read(pc, pa->pa_tag, pmreg + PCI_PMCSR) &
374 		    PCI_PMCSR_STATE_MASK;
375 		if (pmode == PCI_PMCSR_STATE_D3) {
376 			/*
377 			 * The card has lost all configuration data in
378 			 * this state, so punt.
379 			 */
380 			printf("%s: unable to wake from power state D3\n",
381 			    device_xname(&sc->sc_dev));
382 			return;
383 		}
384 		if (pmode != PCI_PMCSR_STATE_D0) {
385 			printf("%s: waking up from power state D%d\n",
386 			    device_xname(&sc->sc_dev), pmode);
387 			pci_conf_write(pc, pa->pa_tag, pmreg + PCI_PMCSR,
388 			    PCI_PMCSR_STATE_D0);
389 		}
390 	}
391 
392 	sc->sc_chip = PCI_PRODUCT(pa->pa_id);
393 	printf(": Micrel KSZ%04x Ethernet (rev. 0x%02x)\n",
394 	    sc->sc_chip, PCI_REVISION(pa->pa_class));
395 
396 	/*
397 	 * Read the Ethernet address from the EEPROM.
398 	 */
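	/*
	 * MARL holds the two low octets of the station address, MARM the
	 * middle pair and MARH the high pair; each 16-bit word is split
	 * high byte first so enaddr[0] ends up holding the most
	 * significant octet.
	 */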
399 	i = CSR_READ_2(sc, MARL);
400 	enaddr[5] = i; enaddr[4] = i >> 8;
401 	i = CSR_READ_2(sc, MARM);
402 	enaddr[3] = i; enaddr[2] = i >> 8;
403 	i = CSR_READ_2(sc, MARH);
404 	enaddr[1] = i; enaddr[0] = i >> 8;
405 	printf("%s: Ethernet address: %s\n",
406 		device_xname(&sc->sc_dev), ether_sprintf(enaddr));
407 
408 	/*
409 	 * Enable chip function.
410 	 */
411 	CSR_WRITE_2(sc, CIDR, 1);
412 
413 	/*
414 	 * Map and establish our interrupt.
415 	 */
416 	if (pci_intr_map(pa, &ih)) {
417 		aprint_error_dev(&sc->sc_dev, "unable to map interrupt\n");
418 		return;
419 	}
420 	intrstr = pci_intr_string(pc, ih);
421 	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, kse_intr, sc);
422 	if (sc->sc_ih == NULL) {
423 		aprint_error_dev(&sc->sc_dev, "unable to establish interrupt");
424 		if (intrstr != NULL)
425 			aprint_error(" at %s", intrstr);
426 		aprint_error("\n");
427 		return;
428 	}
429 	aprint_normal_dev(&sc->sc_dev, "interrupting at %s\n", intrstr);
430 
431 	/*
432 	 * Allocate the control data structures, and create and load the
433 	 * DMA map for it.
434 	 */
435 	error = bus_dmamem_alloc(sc->sc_dmat,
436 	    sizeof(struct kse_control_data), PAGE_SIZE, 0, &seg, 1, &nseg, 0);
437 	if (error != 0) {
438 		aprint_error_dev(&sc->sc_dev, "unable to allocate control data, error = %d\n", error);
439 		goto fail_0;
440 	}
441 	error = bus_dmamem_map(sc->sc_dmat, &seg, nseg,
442 	    sizeof(struct kse_control_data), (void **)&sc->sc_control_data,
443 	    BUS_DMA_COHERENT);
444 	if (error != 0) {
445 		aprint_error_dev(&sc->sc_dev, "unable to map control data, error = %d\n", error);
446 		goto fail_1;
447 	}
448 	error = bus_dmamap_create(sc->sc_dmat,
449 	    sizeof(struct kse_control_data), 1,
450 	    sizeof(struct kse_control_data), 0, 0, &sc->sc_cddmamap);
451 	if (error != 0) {
452 		aprint_error_dev(&sc->sc_dev, "unable to create control data DMA map, "
453 		    "error = %d\n", error);
454 		goto fail_2;
455 	}
456 	error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
457 	    sc->sc_control_data, sizeof(struct kse_control_data), NULL, 0);
458 	if (error != 0) {
459 		aprint_error_dev(&sc->sc_dev, "unable to load control data DMA map, error = %d\n",
460 		    error);
461 		goto fail_3;
462 	}
463 	for (i = 0; i < KSE_TXQUEUELEN; i++) {
464 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
465 		    KSE_NTXSEGS, MCLBYTES, 0, 0,
466 		    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
467 			aprint_error_dev(&sc->sc_dev, "unable to create tx DMA map %d, "
468 			    "error = %d\n", i, error);
469 			goto fail_4;
470 		}
471 	}
472 	for (i = 0; i < KSE_NRXDESC; i++) {
473 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
474 		    1, MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
475 			aprint_error_dev(&sc->sc_dev, "unable to create rx DMA map %d, "
476 			    "error = %d\n", i, error);
477 			goto fail_5;
478 		}
479 		sc->sc_rxsoft[i].rxs_mbuf = NULL;
480 	}
481 
482 	callout_init(&sc->sc_callout, 0);
483 	callout_init(&sc->sc_stat_ch, 0);
484 
485 	ifm = &sc->sc_media;
486 	if (sc->sc_chip == 0x8841) {
487 		ifmedia_init(ifm, 0, ifmedia_upd, ifmedia_sts);
488 		ifmedia_add(ifm, IFM_ETHER|IFM_10_T, 0, NULL);
489 		ifmedia_add(ifm, IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL);
490 		ifmedia_add(ifm, IFM_ETHER|IFM_100_TX, 0, NULL);
491 		ifmedia_add(ifm, IFM_ETHER|IFM_100_TX|IFM_FDX, 0, NULL);
492 		ifmedia_add(ifm, IFM_ETHER|IFM_AUTO, 0, NULL);
493 		ifmedia_set(ifm, IFM_ETHER|IFM_AUTO);
494 	}
495 	else {
496 		ifmedia_init(ifm, 0, ifmedia2_upd, ifmedia2_sts);
497 		ifmedia_add(ifm, IFM_ETHER|IFM_AUTO, 0, NULL);
498 		ifmedia_set(ifm, IFM_ETHER|IFM_AUTO);
499 	}
500 
501 	printf("%s: 10baseT, 10baseT-FDX, 100baseTX, 100baseTX-FDX, auto\n",
502 	    device_xname(&sc->sc_dev));
503 
504 	ifp = &sc->sc_ethercom.ec_if;
505 	strlcpy(ifp->if_xname, device_xname(&sc->sc_dev), IFNAMSIZ);
506 	ifp->if_softc = sc;
507 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
508 	ifp->if_ioctl = kse_ioctl;
509 	ifp->if_start = kse_start;
510 	ifp->if_watchdog = kse_watchdog;
511 	ifp->if_init = kse_init;
512 	ifp->if_stop = kse_stop;
513 	IFQ_SET_READY(&ifp->if_snd);
514 
515 	/*
516 	 * KSZ8842 can handle 802.1Q VLAN-sized frames,
517 	 * can do IPv4, TCPv4, and UDPv4 checksums in hardware.
518 	 */
519 	sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;
520 	ifp->if_capabilities |=
521 	    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
522 	    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
523 	    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx;
524 
525 	if_attach(ifp);
526 	ether_ifattach(ifp, enaddr);
527 
528 	p = (sc->sc_chip == 0x8842) ? 3 : 1;
529 #ifdef KSE_EVENT_COUNTERS
530 	for (i = 0; i < p; i++) {
531 		struct ksext *ee = &sc->sc_ext;
532 		sprintf(ee->evcntname[i], "%s.%d", device_xname(&sc->sc_dev), i+1);
533 		evcnt_attach_dynamic(&ee->pev[i][0], EVCNT_TYPE_MISC,
534 		    NULL, ee->evcntname[i], "RxLoPriotyByte");
535 		evcnt_attach_dynamic(&ee->pev[i][1], EVCNT_TYPE_MISC,
536 		    NULL, ee->evcntname[i], "RxHiPriotyByte");
537 		evcnt_attach_dynamic(&ee->pev[i][2], EVCNT_TYPE_MISC,
538 		    NULL, ee->evcntname[i], "RxUndersizePkt");
539 		evcnt_attach_dynamic(&ee->pev[i][3], EVCNT_TYPE_MISC,
540 		    NULL, ee->evcntname[i], "RxFragments");
541 		evcnt_attach_dynamic(&ee->pev[i][4], EVCNT_TYPE_MISC,
542 		    NULL, ee->evcntname[i], "RxOversize");
543 		evcnt_attach_dynamic(&ee->pev[i][5], EVCNT_TYPE_MISC,
544 		    NULL, ee->evcntname[i], "RxJabbers");
545 		evcnt_attach_dynamic(&ee->pev[i][6], EVCNT_TYPE_MISC,
546 		    NULL, ee->evcntname[i], "RxSymbolError");
547 		evcnt_attach_dynamic(&ee->pev[i][7], EVCNT_TYPE_MISC,
548 		    NULL, ee->evcntname[i], "RxCRCError");
549 		evcnt_attach_dynamic(&ee->pev[i][8], EVCNT_TYPE_MISC,
550 		    NULL, ee->evcntname[i], "RxAlignmentError");
551 		evcnt_attach_dynamic(&ee->pev[i][9], EVCNT_TYPE_MISC,
552 		    NULL, ee->evcntname[i], "RxControl8808Pkts");
553 		evcnt_attach_dynamic(&ee->pev[i][10], EVCNT_TYPE_MISC,
554 		    NULL, ee->evcntname[i], "RxPausePkts");
555 		evcnt_attach_dynamic(&ee->pev[i][11], EVCNT_TYPE_MISC,
556 		    NULL, ee->evcntname[i], "RxBroadcast");
557 		evcnt_attach_dynamic(&ee->pev[i][12], EVCNT_TYPE_MISC,
558 		    NULL, ee->evcntname[i], "RxMulticast");
559 		evcnt_attach_dynamic(&ee->pev[i][13], EVCNT_TYPE_MISC,
560 		    NULL, ee->evcntname[i], "RxUnicast");
561 		evcnt_attach_dynamic(&ee->pev[i][14], EVCNT_TYPE_MISC,
562 		    NULL, ee->evcntname[i], "Rx64Octets");
563 		evcnt_attach_dynamic(&ee->pev[i][15], EVCNT_TYPE_MISC,
564 		    NULL, ee->evcntname[i], "Rx65To127Octets");
565 		evcnt_attach_dynamic(&ee->pev[i][16], EVCNT_TYPE_MISC,
566 		    NULL, ee->evcntname[i], "Rx128To255Octets");
567 		evcnt_attach_dynamic(&ee->pev[i][17], EVCNT_TYPE_MISC,
568 		    NULL, ee->evcntname[i], "Rx255To511Octets");
569 		evcnt_attach_dynamic(&ee->pev[i][18], EVCNT_TYPE_MISC,
570 		    NULL, ee->evcntname[i], "Rx512To1023Octets");
571 		evcnt_attach_dynamic(&ee->pev[i][19], EVCNT_TYPE_MISC,
572 		    NULL, ee->evcntname[i], "Rx1024To1522Octets");
573 		evcnt_attach_dynamic(&ee->pev[i][20], EVCNT_TYPE_MISC,
574 		    NULL, ee->evcntname[i], "TxLoPriotyByte");
575 		evcnt_attach_dynamic(&ee->pev[i][21], EVCNT_TYPE_MISC,
576 		    NULL, ee->evcntname[i], "TxHiPriotyByte");
577 		evcnt_attach_dynamic(&ee->pev[i][22], EVCNT_TYPE_MISC,
578 		    NULL, ee->evcntname[i], "TxLateCollision");
579 		evcnt_attach_dynamic(&ee->pev[i][23], EVCNT_TYPE_MISC,
580 		    NULL, ee->evcntname[i], "TxPausePkts");
581 		evcnt_attach_dynamic(&ee->pev[i][24], EVCNT_TYPE_MISC,
582 		    NULL, ee->evcntname[i], "TxBroadcastPkts");
583 		evcnt_attach_dynamic(&ee->pev[i][25], EVCNT_TYPE_MISC,
584 		    NULL, ee->evcntname[i], "TxMulticastPkts");
585 		evcnt_attach_dynamic(&ee->pev[i][26], EVCNT_TYPE_MISC,
586 		    NULL, ee->evcntname[i], "TxUnicastPkts");
587 		evcnt_attach_dynamic(&ee->pev[i][27], EVCNT_TYPE_MISC,
588 		    NULL, ee->evcntname[i], "TxDeferred");
589 		evcnt_attach_dynamic(&ee->pev[i][28], EVCNT_TYPE_MISC,
590 		    NULL, ee->evcntname[i], "TxTotalCollision");
591 		evcnt_attach_dynamic(&ee->pev[i][29], EVCNT_TYPE_MISC,
592 		    NULL, ee->evcntname[i], "TxExcessiveCollision");
593 		evcnt_attach_dynamic(&ee->pev[i][30], EVCNT_TYPE_MISC,
594 		    NULL, ee->evcntname[i], "TxSingleCollision");
595 		evcnt_attach_dynamic(&ee->pev[i][31], EVCNT_TYPE_MISC,
596 		    NULL, ee->evcntname[i], "TxMultipleCollision");
597 		evcnt_attach_dynamic(&ee->pev[i][32], EVCNT_TYPE_MISC,
598 		    NULL, ee->evcntname[i], "TxDropPkts");
599 		evcnt_attach_dynamic(&ee->pev[i][33], EVCNT_TYPE_MISC,
600 		    NULL, ee->evcntname[i], "RxDropPkts");
601 	}
602 #endif
603 	return;
604 
605  fail_5:
606 	for (i = 0; i < KSE_NRXDESC; i++) {
607 		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
608 			bus_dmamap_destroy(sc->sc_dmat,
609 			    sc->sc_rxsoft[i].rxs_dmamap);
610 	}
611  fail_4:
612 	for (i = 0; i < KSE_TXQUEUELEN; i++) {
613 		if (sc->sc_txsoft[i].txs_dmamap != NULL)
614 			bus_dmamap_destroy(sc->sc_dmat,
615 			    sc->sc_txsoft[i].txs_dmamap);
616 	}
617 	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
618  fail_3:
619 	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
620  fail_2:
621 	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
622 	    sizeof(struct kse_control_data));
623  fail_1:
624 	bus_dmamem_free(sc->sc_dmat, &seg, nseg);
625  fail_0:
626 	return;
627 }
628 
629 static int
630 kse_ioctl(struct ifnet *ifp, u_long cmd, void *data)
631 {
632 	struct kse_softc *sc = ifp->if_softc;
633 	struct ifreq *ifr = (struct ifreq *)data;
634 	int s, error;
635 
636 	s = splnet();
637 
638 	switch (cmd) {
639 	case SIOCSIFMEDIA:
640 	case SIOCGIFMEDIA:
641 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
642 		break;
643 
644 	default:
645 		if ((error = ether_ioctl(ifp, cmd, data)) != ENETRESET)
646 			break;
647 
648 		error = 0;
649 
650 		if (cmd == SIOCSIFCAP)
651 			error = (*ifp->if_init)(ifp);
652 		if (cmd == SIOCADDMULTI || cmd == SIOCDELMULTI) {
653 			if (ifp->if_flags & IFF_RUNNING) {
654 				/*
655 				 * Multicast list has changed; set the
656 				 * hardware filter accordingly.
657 				 */
658 				kse_set_filter(sc);
659 			}
660 		}
661 		break;
662 	}
663 
664 	kse_start(ifp);
665 
666 	splx(s);
667 	return error;
668 }
669 
670 static int
671 kse_init(struct ifnet *ifp)
672 {
673 	struct kse_softc *sc = ifp->if_softc;
674 	uint32_t paddr;
675 	int i, error = 0;
676 
677 	/* cancel pending I/O */
678 	kse_stop(ifp, 0);
679 
680 	/* reset all registers but PCI configuration */
681 	kse_reset(sc);
682 
683 	/* craft Tx descriptor ring */
684 	memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
685 	for (i = 0, paddr = KSE_CDTXADDR(sc, 1); i < KSE_NTXDESC - 1; i++) {
686 		sc->sc_txdescs[i].t3 = paddr;
687 		paddr += sizeof(struct tdes);
688 	}
689 	sc->sc_txdescs[KSE_NTXDESC - 1].t3 = KSE_CDTXADDR(sc, 0);
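	/*
	 * Each t3 field holds the bus address of the next descriptor and
	 * the last entry points back to descriptor 0, so the chip walks
	 * the ring as a circular chain; the Rx ring below is linked the
	 * same way through r3.
	 */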
690 	KSE_CDTXSYNC(sc, 0, KSE_NTXDESC,
691 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
692 	sc->sc_txfree = KSE_NTXDESC;
693 	sc->sc_txnext = 0;
694 
695 	for (i = 0; i < KSE_TXQUEUELEN; i++)
696 		sc->sc_txsoft[i].txs_mbuf = NULL;
697 	sc->sc_txsfree = KSE_TXQUEUELEN;
698 	sc->sc_txsnext = 0;
699 	sc->sc_txsdirty = 0;
700 
701 	/* craft Rx descriptor ring */
702 	memset(sc->sc_rxdescs, 0, sizeof(sc->sc_rxdescs));
703 	for (i = 0, paddr = KSE_CDRXADDR(sc, 1); i < KSE_NRXDESC - 1; i++) {
704 		sc->sc_rxdescs[i].r3 = paddr;
705 		paddr += sizeof(struct rdes);
706 	}
707 	sc->sc_rxdescs[KSE_NRXDESC - 1].r3 = KSE_CDRXADDR(sc, 0);
708 	for (i = 0; i < KSE_NRXDESC; i++) {
709 		if (sc->sc_rxsoft[i].rxs_mbuf == NULL) {
710 			if ((error = add_rxbuf(sc, i)) != 0) {
711 				printf("%s: unable to allocate or map rx "
712 				    "buffer %d, error = %d\n",
713 				     device_xname(&sc->sc_dev), i, error);
714 				rxdrain(sc);
715 				goto out;
716 			}
717 		}
718 		else
719 			KSE_INIT_RXDESC(sc, i);
720 	}
721 	sc->sc_rxptr = 0;
722 
723 	/* hand Tx/Rx rings to HW */
724 	CSR_WRITE_4(sc, TDLB, KSE_CDTXADDR(sc, 0));
725 	CSR_WRITE_4(sc, RDLB, KSE_CDRXADDR(sc, 0));
726 
727 	sc->sc_txc = TXC_TEN | TXC_EP | TXC_AC | TXC_FCE;
728 	sc->sc_rxc = RXC_REN | RXC_RU | RXC_FCE;
729 	if (ifp->if_flags & IFF_PROMISC)
730 		sc->sc_rxc |= RXC_RA;
731 	if (ifp->if_flags & IFF_BROADCAST)
732 		sc->sc_rxc |= RXC_RB;
733 	sc->sc_t1csum = sc->sc_mcsum = 0;
734 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx) {
735 		sc->sc_rxc |= RXC_ICC;
736 		sc->sc_mcsum |= M_CSUM_IPv4;
737 	}
738 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Tx) {
739 		sc->sc_txc |= TXC_ICG;
740 		sc->sc_t1csum |= T1_IPCKG;
741 	}
742 	if (ifp->if_capenable & IFCAP_CSUM_TCPv4_Rx) {
743 		sc->sc_rxc |= RXC_TCC;
744 		sc->sc_mcsum |= M_CSUM_TCPv4;
745 	}
746 	if (ifp->if_capenable & IFCAP_CSUM_TCPv4_Tx) {
747 		sc->sc_txc |= TXC_TCG;
748 		sc->sc_t1csum |= T1_TCPCKG;
749 	}
750 	if (ifp->if_capenable & IFCAP_CSUM_UDPv4_Rx) {
751 		sc->sc_rxc |= RXC_UCC;
752 		sc->sc_mcsum |= M_CSUM_UDPv4;
753 	}
754 	if (ifp->if_capenable & IFCAP_CSUM_UDPv4_Tx) {
755 		sc->sc_txc |= TXC_UCG;
756 		sc->sc_t1csum |= T1_UDPCKG;
757 	}
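	/*
	 * sc_t1csum is OR'ed into each Tx descriptor's t1 word by
	 * kse_start(), while sc_mcsum is copied into the csum_flags of
	 * inbound mbufs by rxintr() so the stack knows which checksums
	 * the chip has already verified.
	 */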
758 	sc->sc_txc |= (kse_burstsize << TXC_BS_SFT);
759 	sc->sc_rxc |= (kse_burstsize << RXC_BS_SFT);
760 
761 	/* build multicast hash filter if necessary */
762 	kse_set_filter(sc);
763 
764 	/* set current media */
765 	(void)ifmedia_upd(ifp);
766 
767 	/* enable transmitter and receiver */
768 	CSR_WRITE_4(sc, MDTXC, sc->sc_txc);
769 	CSR_WRITE_4(sc, MDRXC, sc->sc_rxc);
770 	CSR_WRITE_4(sc, MDRSC, 1);
771 
772 	/* enable interrupts */
773 	sc->sc_inten = INT_DMTS|INT_DMRS|INT_DMRBUS;
774 	if (sc->sc_chip == 0x8841)
775 		sc->sc_inten |= INT_DMLCS;
776 	CSR_WRITE_4(sc, INTST, ~0);
777 	CSR_WRITE_4(sc, INTEN, sc->sc_inten);
778 
779 	ifp->if_flags |= IFF_RUNNING;
780 	ifp->if_flags &= ~IFF_OACTIVE;
781 
782 	if (sc->sc_chip == 0x8841) {
783 		/* start one second timer */
784 		callout_reset(&sc->sc_callout, hz, phy_tick, sc);
785 	}
786 #ifdef KSE_EVENT_COUNTERS
787 	/* start statistics gather 1 minute timer */
788 	zerostats(sc);
789 	callout_reset(&sc->sc_stat_ch, hz * 60, stat_tick, sc);
790 #endif
791 
792  out:
793 	if (error) {
794 		ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
795 		ifp->if_timer = 0;
796 		printf("%s: interface not running\n", device_xname(&sc->sc_dev));
797 	}
798 	return error;
799 }
800 
801 static void
802 kse_stop(struct ifnet *ifp, int disable)
803 {
804 	struct kse_softc *sc = ifp->if_softc;
805 	struct kse_txsoft *txs;
806 	int i;
807 
808 	if (sc->sc_chip == 0x8841)
809 		callout_stop(&sc->sc_callout);
810 	callout_stop(&sc->sc_stat_ch);
811 
812 	sc->sc_txc &= ~TXC_TEN;
813 	sc->sc_rxc &= ~RXC_REN;
814 	CSR_WRITE_4(sc, MDTXC, sc->sc_txc);
815 	CSR_WRITE_4(sc, MDRXC, sc->sc_rxc);
816 
817 	for (i = 0; i < KSE_TXQUEUELEN; i++) {
818 		txs = &sc->sc_txsoft[i];
819 		if (txs->txs_mbuf != NULL) {
820 			bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
821 			m_freem(txs->txs_mbuf);
822 			txs->txs_mbuf = NULL;
823 		}
824 	}
825 
826 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
827 	ifp->if_timer = 0;
828 
829 	if (disable)
830 		rxdrain(sc);
831 }
832 
833 static void
834 kse_reset(struct kse_softc *sc)
835 {
836 
837 	CSR_WRITE_2(sc, GRR, 1);
838 	delay(1000); /* the datasheet does not specify the required delay */
839 	CSR_WRITE_2(sc, GRR, 0);
840 
841 	CSR_WRITE_2(sc, CIDR, 1);
842 }
843 
844 static void
845 kse_watchdog(struct ifnet *ifp)
846 {
847 	struct kse_softc *sc = ifp->if_softc;
848 
849 	/*
850 	 * Since we're not interrupting every packet, sweep
851 	 * up before we report an error.
852 	 */
853 	txreap(sc);
854 
855 	if (sc->sc_txfree != KSE_NTXDESC) {
856 		printf("%s: device timeout (txfree %d txsfree %d txnext %d)\n",
857 		    device_xname(&sc->sc_dev), sc->sc_txfree, sc->sc_txsfree,
858 		    sc->sc_txnext);
859 		ifp->if_oerrors++;
860 
861 		/* Reset the interface. */
862 		kse_init(ifp);
863 	}
864 	else if (ifp->if_flags & IFF_DEBUG)
865 		printf("%s: recovered from device timeout\n",
866 		    device_xname(&sc->sc_dev));
867 
868 	/* Try to get more packets going. */
869 	kse_start(ifp);
870 }
871 
872 static void
873 kse_start(struct ifnet *ifp)
874 {
875 	struct kse_softc *sc = ifp->if_softc;
876 	struct mbuf *m0, *m;
877 	struct kse_txsoft *txs;
878 	bus_dmamap_t dmamap;
879 	int error, nexttx, lasttx, ofree, seg;
880 	uint32_t tdes0;
881 
882 	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
883 		return;
884 
885 	/*
886 	 * Remember the previous number of free descriptors.
887 	 */
888 	ofree = sc->sc_txfree;
889 
890 	/*
891 	 * Loop through the send queue, setting up transmit descriptors
892 	 * until we drain the queue, or use up all available transmit
893 	 * descriptors.
894 	 */
895 	for (;;) {
896 		IFQ_POLL(&ifp->if_snd, m0);
897 		if (m0 == NULL)
898 			break;
899 
900 		if (sc->sc_txsfree < KSE_TXQUEUE_GC) {
901 			txreap(sc);
902 			if (sc->sc_txsfree == 0)
903 				break;
904 		}
905 		txs = &sc->sc_txsoft[sc->sc_txsnext];
906 		dmamap = txs->txs_dmamap;
907 
908 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
909 		    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
910 		if (error) {
911 			if (error == EFBIG) {
912 				printf("%s: Tx packet consumes too many "
913 				    "DMA segments, dropping...\n",
914 				    device_xname(&sc->sc_dev));
915 				IFQ_DEQUEUE(&ifp->if_snd, m0);
916 				m_freem(m0);
917 				continue;
918 			}
919 			/* Short on resources, just stop for now. */
920 			break;
921 		}
922 
923 		if (dmamap->dm_nsegs > sc->sc_txfree) {
924 			/*
925 			 * Not enough free descriptors to transmit this
926 			 * packet.  We haven't committed anything yet,
927 			 * so just unload the DMA map, put the packet
928 			 * back on the queue, and punt.	 Notify the upper
929 			 * layer that there are no more slots left.
930 			 */
931 			ifp->if_flags |= IFF_OACTIVE;
932 			bus_dmamap_unload(sc->sc_dmat, dmamap);
933 			break;
934 		}
935 
936 		IFQ_DEQUEUE(&ifp->if_snd, m0);
937 
938 		/*
939 		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
940 		 */
941 
942 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
943 		    BUS_DMASYNC_PREWRITE);
944 
945 		lasttx = -1; tdes0 = 0;
946 		for (nexttx = sc->sc_txnext, seg = 0;
947 		     seg < dmamap->dm_nsegs;
948 		     seg++, nexttx = KSE_NEXTTX(nexttx)) {
949 			struct tdes *tdes = &sc->sc_txdescs[nexttx];
950 			/*
951 			 * If this is the first descriptor we're
952 			 * enqueueing, don't set the OWN bit just
953 			 * yet.	 That could cause a race condition.
954 			 * We'll do it below.
955 			 */
956 			tdes->t2 = dmamap->dm_segs[seg].ds_addr;
957 			tdes->t1 = sc->sc_t1csum
958 			     | (dmamap->dm_segs[seg].ds_len & T1_TBS_MASK);
959 			tdes->t0 = tdes0;
960 			tdes0 |= T0_OWN;
961 			lasttx = nexttx;
962 		}
963 
964 		/*
965 		 * Outgoing NFS mbufs must be unloaded when Tx completes.
966 		 * Without T1_IC an NFS mbuf is left unacknowledged for an
967 		 * excessive time and NFS stops making progress until
968 		 * kse_watchdog() calls txreap() to reclaim the unack'ed mbuf.
969 		 * It's painful to traverse every mbuf chain to determine
970 		 * whether someone is waiting for Tx completion.
971 		 */
972 		m = m0;
973 		do {
974 			if ((m->m_flags & M_EXT) && m->m_ext.ext_free) {
975 				sc->sc_txdescs[lasttx].t1 |= T1_IC;
976 				break;
977 			}
978 		} while ((m = m->m_next) != NULL);
979 
980 		/* mark first/last segments and set the deferred T0_OWN of the 1st */
981 		sc->sc_txdescs[lasttx].t1 |= T1_LS;
982 		sc->sc_txdescs[sc->sc_txnext].t1 |= T1_FS;
983 		sc->sc_txdescs[sc->sc_txnext].t0 = T0_OWN;
984 		KSE_CDTXSYNC(sc, sc->sc_txnext, dmamap->dm_nsegs,
985 		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
986 
987 		/* tell DMA start transmit */
988 		CSR_WRITE_4(sc, MDTSC, 1);
989 
990 		txs->txs_mbuf = m0;
991 		txs->txs_firstdesc = sc->sc_txnext;
992 		txs->txs_lastdesc = lasttx;
993 		txs->txs_ndesc = dmamap->dm_nsegs;
994 
995 		sc->sc_txfree -= txs->txs_ndesc;
996 		sc->sc_txnext = nexttx;
997 		sc->sc_txsfree--;
998 		sc->sc_txsnext = KSE_NEXTTXS(sc->sc_txsnext);
999 		/*
1000 		 * Pass the packet to any BPF listeners.
1001 		 */
1002 		bpf_mtap(ifp, m0);
1003 	}
1004 
1005 	if (sc->sc_txsfree == 0 || sc->sc_txfree == 0) {
1006 		/* No more slots left; notify upper layer. */
1007 		ifp->if_flags |= IFF_OACTIVE;
1008 	}
1009 	if (sc->sc_txfree != ofree) {
1010 		/* Set a watchdog timer in case the chip flakes out. */
1011 		ifp->if_timer = 5;
1012 	}
1013 }
1014 
1015 static void
1016 kse_set_filter(struct kse_softc *sc)
1017 {
1018 	struct ether_multistep step;
1019 	struct ether_multi *enm;
1020 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1021 	uint32_t h, hashes[2];
1022 
1023 	sc->sc_rxc &= ~(RXC_MHTE | RXC_RM);
1024 	ifp->if_flags &= ~IFF_ALLMULTI;
1025 	if (ifp->if_flags & IFF_PROMISC)
1026 		return;
1027 
1028 	ETHER_FIRST_MULTI(step, &sc->sc_ethercom, enm);
1029 	if (enm == NULL)
1030 		return;
1031 	hashes[0] = hashes[1] = 0;
1032 	do {
1033 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
1034 			/*
1035 			 * We must listen to a range of multicast addresses.
1036 			 * For now, just accept all multicasts, rather than
1037 			 * trying to set only those filter bits needed to match
1038 			 * the range.  (At this time, the only use of address
1039 			 * ranges is for IP multicast routing, for which the
1040 			 * range is big enough to require all bits set.)
1041 			 */
1042 			goto allmulti;
1043 		}
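		/*
		 * Hash filtering uses the top 6 bits of the little-endian
		 * CRC32 of the address: bit 5 selects MTR0 or MTR1 and
		 * the low 5 bits select the bit within that register.
		 */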
1044 		h = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN) >> 26;
1045 		hashes[h >> 5] |= 1 << (h & 0x1f);
1046 		ETHER_NEXT_MULTI(step, enm);
1047 	} while (enm != NULL);
1048 	sc->sc_rxc |= RXC_MHTE;
1049 	CSR_WRITE_4(sc, MTR0, hashes[0]);
1050 	CSR_WRITE_4(sc, MTR1, hashes[1]);
1051 	return;
1052  allmulti:
1053 	sc->sc_rxc |= RXC_RM;
1054 	ifp->if_flags |= IFF_ALLMULTI;
1055 }
1056 
1057 static int
1058 add_rxbuf(struct kse_softc *sc, int idx)
1059 {
1060 	struct kse_rxsoft *rxs = &sc->sc_rxsoft[idx];
1061 	struct mbuf *m;
1062 	int error;
1063 
1064 	MGETHDR(m, M_DONTWAIT, MT_DATA);
1065 	if (m == NULL)
1066 		return ENOBUFS;
1067 
1068 	MCLGET(m, M_DONTWAIT);
1069 	if ((m->m_flags & M_EXT) == 0) {
1070 		m_freem(m);
1071 		return ENOBUFS;
1072 	}
1073 
1074 	if (rxs->rxs_mbuf != NULL)
1075 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
1076 
1077 	rxs->rxs_mbuf = m;
1078 
1079 	error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap,
1080 	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL, BUS_DMA_NOWAIT);
1081 	if (error) {
1082 		printf("%s: can't load rx DMA map %d, error = %d\n",
1083 		    device_xname(&sc->sc_dev), idx, error);
1084 		panic("kse_add_rxbuf");
1085 	}
1086 
1087 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
1088 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
1089 
1090 	KSE_INIT_RXDESC(sc, idx);
1091 
1092 	return 0;
1093 }
1094 
1095 static void
1096 rxdrain(struct kse_softc *sc)
1097 {
1098 	struct kse_rxsoft *rxs;
1099 	int i;
1100 
1101 	for (i = 0; i < KSE_NRXDESC; i++) {
1102 		rxs = &sc->sc_rxsoft[i];
1103 		if (rxs->rxs_mbuf != NULL) {
1104 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
1105 			m_freem(rxs->rxs_mbuf);
1106 			rxs->rxs_mbuf = NULL;
1107 		}
1108 	}
1109 }
1110 
1111 static int
1112 kse_intr(void *arg)
1113 {
1114 	struct kse_softc *sc = arg;
1115 	uint32_t isr;
1116 
1117 	if ((isr = CSR_READ_4(sc, INTST)) == 0)
1118 		return 0;
1119 
1120 	if (isr & INT_DMRS)
1121 		rxintr(sc);
1122 	if (isr & INT_DMTS)
1123 		txreap(sc);
1124 	if (isr & INT_DMLCS)
1125 		lnkchg(sc);
1126 	if (isr & INT_DMRBUS)
1127 		printf("%s: Rx descriptor full\n", device_xname(&sc->sc_dev));
1128 
1129 	CSR_WRITE_4(sc, INTST, isr);
1130 	return 1;
1131 }
1132 
1133 static void
1134 rxintr(struct kse_softc *sc)
1135 {
1136 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1137 	struct kse_rxsoft *rxs;
1138 	struct mbuf *m;
1139 	uint32_t rxstat;
1140 	int i, len;
1141 
1142 	for (i = sc->sc_rxptr; /*CONSTCOND*/ 1; i = KSE_NEXTRX(i)) {
1143 		rxs = &sc->sc_rxsoft[i];
1144 
1145 		KSE_CDRXSYNC(sc, i,
1146 		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1147 
1148 		rxstat = sc->sc_rxdescs[i].r0;
1149 
1150 		if (rxstat & R0_OWN) /* desc is left empty */
1151 			break;
1152 
1153 		/* R0_FS|R0_LS must have been marked for this desc */
1154 
1155 		if (rxstat & R0_ES) {
1156 			ifp->if_ierrors++;
1157 #define PRINTERR(bit, str)						\
1158 			if (rxstat & (bit))				\
1159 				printf("%s: receive error: %s\n",	\
1160 				    device_xname(&sc->sc_dev), str)
1161 			PRINTERR(R0_TL, "frame too long");
1162 			PRINTERR(R0_RF, "runt frame");
1163 			PRINTERR(R0_CE, "bad FCS");
1164 #undef PRINTERR
1165 			KSE_INIT_RXDESC(sc, i);
1166 			continue;
1167 		}
1168 
1169 		/* HW errata; frame might be too small or too large */
1170 
1171 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
1172 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
1173 
1174 		len = rxstat & R0_FL_MASK;
1175 		len -= ETHER_CRC_LEN;	/* trim CRC off */
1176 		m = rxs->rxs_mbuf;
1177 
1178 		if (add_rxbuf(sc, i) != 0) {
1179 			ifp->if_ierrors++;
1180 			KSE_INIT_RXDESC(sc, i);
1181 			bus_dmamap_sync(sc->sc_dmat,
1182 			    rxs->rxs_dmamap, 0,
1183 			    rxs->rxs_dmamap->dm_mapsize,
1184 			    BUS_DMASYNC_PREREAD);
1185 			continue;
1186 		}
1187 
1188 		ifp->if_ipackets++;
1189 		m->m_pkthdr.rcvif = ifp;
1190 		m->m_pkthdr.len = m->m_len = len;
1191 
1192 		if (sc->sc_mcsum) {
1193 			m->m_pkthdr.csum_flags |= sc->sc_mcsum;
1194 			if (rxstat & R0_IPE)
1195 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
1196 			if (rxstat & (R0_TCPE | R0_UDPE))
1197 				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
1198 		}
1199 		bpf_mtap(ifp, m);
1200 		(*ifp->if_input)(ifp, m);
1201 #ifdef KSEDIAGNOSTIC
1202 		if (kse_monitor_rxintr > 0) {
1203 			printf("m stat %x data %p len %d\n",
1204 			    rxstat, m->m_data, m->m_len);
1205 		}
1206 #endif
1207 	}
1208 	sc->sc_rxptr = i;
1209 }
1210 
1211 static void
1212 txreap(struct kse_softc *sc)
1213 {
1214 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1215 	struct kse_txsoft *txs;
1216 	uint32_t txstat;
1217 	int i;
1218 
1219 	ifp->if_flags &= ~IFF_OACTIVE;
1220 
1221 	for (i = sc->sc_txsdirty; sc->sc_txsfree != KSE_TXQUEUELEN;
1222 	     i = KSE_NEXTTXS(i), sc->sc_txsfree++) {
1223 		txs = &sc->sc_txsoft[i];
1224 
1225 		KSE_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndesc,
1226 		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1227 
1228 		txstat = sc->sc_txdescs[txs->txs_lastdesc].t0;
1229 
1230 		if (txstat & T0_OWN) /* desc is still in use */
1231 			break;
1232 
1233 		/* there is no way to tell transmission status per frame */
1234 
1235 		ifp->if_opackets++;
1236 
1237 		sc->sc_txfree += txs->txs_ndesc;
1238 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
1239 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1240 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
1241 		m_freem(txs->txs_mbuf);
1242 		txs->txs_mbuf = NULL;
1243 	}
1244 	sc->sc_txsdirty = i;
1245 	if (sc->sc_txsfree == KSE_TXQUEUELEN)
1246 		ifp->if_timer = 0;
1247 }
1248 
1249 static void
1250 lnkchg(struct kse_softc *sc)
1251 {
1252 	struct ifmediareq ifmr;
1253 
1254 #if 0 /* rambling link status */
1255 	printf("%s: link %s\n", device_xname(&sc->sc_dev),
1256 	    (CSR_READ_2(sc, P1SR) & (1U << 5)) ? "up" : "down");
1257 #endif
1258 	ifmedia_sts(&sc->sc_ethercom.ec_if, &ifmr);
1259 }
1260 
1261 static int
1262 ifmedia_upd(struct ifnet *ifp)
1263 {
1264 	struct kse_softc *sc = ifp->if_softc;
1265 	struct ifmedia *ifm = &sc->sc_media;
1266 	uint16_t ctl;
1267 
1268 	ctl = 0;
1269 	if (IFM_SUBTYPE(ifm->ifm_media) == IFM_AUTO) {
1270 		ctl |= (1U << 13); /* restart AN */
1271 		ctl |= (1U << 7);  /* enable AN */
1272 		ctl |= (1U << 4);  /* advertise flow control pause */
1273 		ctl |= (1U << 3) | (1U << 2) | (1U << 1) | (1U << 0);
1274 	}
1275 	else {
1276 		if (IFM_SUBTYPE(ifm->ifm_media) == IFM_100_TX)
1277 			ctl |= (1U << 6);
1278 		if (ifm->ifm_media & IFM_FDX)
1279 			ctl |= (1U << 5);
1280 	}
1281 	CSR_WRITE_2(sc, P1CR4, ctl);
1282 
1283 	sc->sc_media_active = IFM_NONE;
1284 	sc->sc_media_status = IFM_AVALID;
1285 
1286 	return 0;
1287 }
1288 
1289 static void
1290 ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
1291 {
1292 	struct kse_softc *sc = ifp->if_softc;
1293 	struct ifmedia *ifm = &sc->sc_media;
1294 	uint16_t ctl, sts, result;
1295 
1296 	ifmr->ifm_status = IFM_AVALID;
1297 	ifmr->ifm_active = IFM_ETHER;
1298 
1299 	ctl = CSR_READ_2(sc, P1CR4);
1300 	sts = CSR_READ_2(sc, P1SR);
1301 	if ((sts & (1U << 5)) == 0) {
1302 		ifmr->ifm_active |= IFM_NONE;
1303 		goto out; /* link is down */
1304 	}
1305 	ifmr->ifm_status |= IFM_ACTIVE;
1306 	if (IFM_SUBTYPE(ifm->ifm_media) == IFM_AUTO) {
1307 		if ((sts & (1U << 6)) == 0) {
1308 			ifmr->ifm_active |= IFM_NONE;
1309 			goto out; /* negotiation in progress */
1310 		}
1311 		result = ctl & sts & 017;
1312 		if (result & (1U << 3))
1313 			ifmr->ifm_active |= IFM_100_TX|IFM_FDX;
1314 		else if (result & (1U << 2))
1315 			ifmr->ifm_active |= IFM_100_TX;
1316 		else if (result & (1U << 1))
1317 			ifmr->ifm_active |= IFM_10_T|IFM_FDX;
1318 		else if (result & (1U << 0))
1319 			ifmr->ifm_active |= IFM_10_T;
1320 		else
1321 			ifmr->ifm_active |= IFM_NONE;
1322 		if (ctl & (1U << 4))
1323 			ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
1324 		if (sts & (1U << 4))
1325 			ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
1326 	}
1327 	else {
1328 		ifmr->ifm_active |= (sts & (1U << 10)) ? IFM_100_TX : IFM_10_T;
1329 		if (sts & (1U << 9))
1330 			ifmr->ifm_active |= IFM_FDX;
1331 		if (sts & (1U << 12))
1332 			ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
1333 		if (sts & (1U << 11))
1334 			ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
1335 	}
1336 
1337   out:
1338 	sc->sc_media_status = ifmr->ifm_status;
1339 	sc->sc_media_active = ifmr->ifm_active;
1340 }
1341 
1342 static void
1343 phy_tick(void *arg)
1344 {
1345 	struct kse_softc *sc = arg;
1346 	struct ifmediareq ifmr;
1347 	int s;
1348 
1349 	s = splnet();
1350 	ifmedia_sts(&sc->sc_ethercom.ec_if, &ifmr);
1351 	splx(s);
1352 
1353 	callout_reset(&sc->sc_callout, hz, phy_tick, sc);
1354 }
1355 
1356 static int
1357 ifmedia2_upd(struct ifnet *ifp)
1358 {
1359 	struct kse_softc *sc = ifp->if_softc;
1360 
1361 	sc->sc_media_status = IFM_AVALID;
1362 	sc->sc_media_active = IFM_NONE;
1363 	return 0;
1364 }
1365 
1366 static void
1367 ifmedia2_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
1368 {
1369 	struct kse_softc *sc = ifp->if_softc;
1370 	int p1sts, p2sts;
1371 
1372 	ifmr->ifm_status = IFM_AVALID;
1373 	ifmr->ifm_active = IFM_ETHER;
1374 	p1sts = CSR_READ_2(sc, P1SR);
1375 	p2sts = CSR_READ_2(sc, P2SR);
1376 	if (((p1sts | p2sts) & (1U << 5)) == 0)
1377 		ifmr->ifm_active |= IFM_NONE;
1378 	else {
1379 		ifmr->ifm_status |= IFM_ACTIVE;
1380 		ifmr->ifm_active |= IFM_100_TX|IFM_FDX;
1381 		ifmr->ifm_active |= IFM_FLOW|IFM_ETH_RXPAUSE|IFM_ETH_TXPAUSE;
1382 	}
1383 	sc->sc_media_status = ifmr->ifm_status;
1384 	sc->sc_media_active = ifmr->ifm_active;
1385 }
1386 
1387 #ifdef KSE_EVENT_COUNTERS
1388 static void
1389 stat_tick(void *arg)
1390 {
1391 	struct kse_softc *sc = arg;
1392 	struct ksext *ee = &sc->sc_ext;
1393 	int nport, p, i, val;
1394 
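	/*
	 * Per-port MIB counters are fetched through the indirect access
	 * registers: write a selector to IACR, poll until IADR5 reports
	 * the data valid (bit 30 of the assembled word), then take bits
	 * 29:16 from IADR5 and bits 15:0 from IADR4.  Bit 31 flags a
	 * counter overflow, in which case the value is clamped below.
	 */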
1395 	nport = (sc->sc_chip == 0x8842) ? 3 : 1;
1396 	for (p = 0; p < nport; p++) {
1397 		for (i = 0; i < 32; i++) {
1398 			val = 0x1c00 | (p * 0x20 + i);
1399 			CSR_WRITE_2(sc, IACR, val);
1400 			do {
1401 				val = CSR_READ_2(sc, IADR5) << 16;
1402 			} while ((val & (1U << 30)) == 0);
1403 			if (val & (1U << 31)) {
1404 				(void)CSR_READ_2(sc, IADR4);
1405 				val = 0x3fffffff; /* counter has overflowed */
1406 			}
1407 			else {
1408 				val &= 0x3fff0000;		/* 29:16 */
1409 				val |= CSR_READ_2(sc, IADR4);	/* 15:0 */
1410 			}
1411 			ee->pev[p][i].ev_count += val; /* i (0-31) */
1412 		}
1413 		CSR_WRITE_2(sc, IACR, 0x1c00 + 0x100 + p);
1414 		ee->pev[p][32].ev_count = CSR_READ_2(sc, IADR4); /* 32 */
1415 		CSR_WRITE_2(sc, IACR, 0x1c00 + 0x100 + p * 3 + 1);
1416 		ee->pev[p][33].ev_count = CSR_READ_2(sc, IADR4); /* 33 */
1417 	}
1418 	callout_reset(&sc->sc_stat_ch, hz * 60, stat_tick, arg);
1419 }
1420 
1421 static void
1422 zerostats(struct kse_softc *sc)
1423 {
1424 	struct ksext *ee = &sc->sc_ext;
1425 	int nport, p, i, val;
1426 
1427 	/* make sure all the HW counters start from zero */
1428 	nport = (sc->sc_chip == 0x8842) ? 3 : 1;
1429 	for (p = 0; p < nport; p++) {
1430 		for (i = 0; i < 31; i++) {
1431 			val = 0x1c00 | (p * 0x20 + i);
1432 			CSR_WRITE_2(sc, IACR, val);
1433 			do {
1434 				val = CSR_READ_2(sc, IADR5) << 16;
1435 			} while ((val & (1U << 30)) == 0);
1436 			(void)CSR_READ_2(sc, IADR4);
1437 			ee->pev[p][i].ev_count = 0;
1438 		}
1439 	}
1440 }
1441 #endif
1442