1 /* $NetBSD: if_kse.c,v 1.30 2016/06/10 13:27:14 ozaki-r Exp $ */
2
3 /*-
4 * Copyright (c) 2006 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Tohru Nishimura.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 */
31
32 #include <sys/cdefs.h>
33 __KERNEL_RCSID(0, "$NetBSD: if_kse.c,v 1.30 2016/06/10 13:27:14 ozaki-r Exp $");
34
35
36 #include <sys/param.h>
37 #include <sys/systm.h>
38 #include <sys/callout.h>
39 #include <sys/mbuf.h>
40 #include <sys/malloc.h>
41 #include <sys/kernel.h>
42 #include <sys/ioctl.h>
43 #include <sys/errno.h>
44 #include <sys/device.h>
45 #include <sys/queue.h>
46
47 #include <machine/endian.h>
48 #include <sys/bus.h>
49 #include <sys/intr.h>
50
51 #include <net/if.h>
52 #include <net/if_media.h>
53 #include <net/if_dl.h>
54 #include <net/if_ether.h>
55
56 #include <net/bpf.h>
57
58 #include <dev/pci/pcivar.h>
59 #include <dev/pci/pcireg.h>
60 #include <dev/pci/pcidevs.h>
61
62 #define CSR_READ_4(sc, off) \
63 bus_space_read_4(sc->sc_st, sc->sc_sh, off)
64 #define CSR_WRITE_4(sc, off, val) \
65 bus_space_write_4(sc->sc_st, sc->sc_sh, off, val)
66 #define CSR_READ_2(sc, off) \
67 bus_space_read_2(sc->sc_st, sc->sc_sh, off)
68 #define CSR_WRITE_2(sc, off, val) \
69 bus_space_write_2(sc->sc_st, sc->sc_sh, off, val)
70
71 #define MDTXC 0x000 /* DMA transmit control */
72 #define MDRXC 0x004 /* DMA receive control */
73 #define MDTSC 0x008 /* DMA transmit start */
74 #define MDRSC 0x00c /* DMA receive start */
75 #define TDLB 0x010 /* transmit descriptor list base */
76 #define RDLB 0x014 /* receive descriptor list base */
77 #define MTR0 0x020 /* multicast table 31:0 */
78 #define MTR1 0x024 /* multicast table 63:32 */
79 #define INTEN 0x028 /* interrupt enable */
80 #define INTST 0x02c /* interrupt status */
81 #define MARL 0x200 /* MAC address low */
82 #define MARM 0x202 /* MAC address middle */
83 #define MARH 0x204 /* MAC address high */
84 #define GRR 0x216 /* global reset */
85 #define CIDR 0x400 /* chip ID and enable */
86 #define CGCR 0x40a /* chip global control */
87 #define IACR 0x4a0 /* indirect access control */
88 #define IADR1 0x4a2 /* indirect access data 66:63 */
89 #define IADR2 0x4a4 /* indirect access data 47:32 */
90 #define IADR3 0x4a6 /* indirect access data 63:48 */
91 #define IADR4 0x4a8 /* indirect access data 15:0 */
92 #define IADR5 0x4aa /* indirect access data 31:16 */
93 #define P1CR4 0x512 /* port 1 control 4 */
94 #define P1SR 0x514 /* port 1 status */
95 #define P2CR4 0x532 /* port 2 control 4 */
96 #define P2SR 0x534 /* port 2 status */
97
98 #define TXC_BS_MSK 0x3f000000 /* burst size */
99 #define TXC_BS_SFT (24) /* 1,2,4,8,16,32 or 0 for unlimited */
100 #define TXC_UCG (1U<<18) /* generate UDP checksum */
101 #define TXC_TCG (1U<<17) /* generate TCP checksum */
102 #define TXC_ICG (1U<<16) /* generate IP checksum */
103 #define TXC_FCE (1U<<9) /* enable flowcontrol */
104 #define TXC_EP (1U<<2) /* enable automatic padding */
105 #define TXC_AC (1U<<1) /* add CRC to frame */
106 #define TXC_TEN (1) /* enable DMA to run */
107
108 #define RXC_BS_MSK 0x3f000000 /* burst size */
109 #define RXC_BS_SFT (24) /* 1,2,4,8,16,32 or 0 for unlimited */
110 #define RXC_IHAE (1U<<19) /* IP header alignment enable */
111 #define RXC_UCC (1U<<18) /* run UDP checksum */
112 #define RXC_TCC (1U<<17) /* run TCP checksum */
113 #define RXC_ICC (1U<<16) /* run IP checksum */
114 #define RXC_FCE (1U<<9) /* enable flowcontrol */
115 #define RXC_RB (1U<<6) /* receive broadcast frame */
116 #define RXC_RM (1U<<5) /* receive multicast frame */
117 #define RXC_RU (1U<<4) /* receive unicast frame */
118 #define RXC_RE (1U<<3) /* accept error frame */
119 #define RXC_RA (1U<<2) /* receive all frame */
120 #define RXC_MHTE (1U<<1) /* use multicast hash table */
121 #define RXC_REN (1) /* enable DMA to run */
122
123 #define INT_DMLCS (1U<<31) /* link status change */
124 #define INT_DMTS (1U<<30) /* sending desc. has posted Tx done */
125 #define INT_DMRS (1U<<29) /* frame was received */
126 #define INT_DMRBUS (1U<<27) /* Rx descriptor pool is full */
127
128 #define T0_OWN (1U<<31) /* desc is ready to Tx */
129
130 #define R0_OWN (1U<<31) /* desc is empty */
131 #define R0_FS (1U<<30) /* first segment of frame */
132 #define R0_LS (1U<<29) /* last segment of frame */
133 #define R0_IPE (1U<<28) /* IP checksum error */
134 #define R0_TCPE (1U<<27) /* TCP checksum error */
135 #define R0_UDPE (1U<<26) /* UDP checksum error */
136 #define R0_ES (1U<<25) /* error summary */
137 #define R0_MF (1U<<24) /* multicast frame */
138 #define R0_SPN 0x00300000 /* 21:20 switch port 1/2 */
139 #define R0_ALIGN 0x00300000 /* 21:20 (KSZ8692P) Rx align amount */
140 #define R0_RE (1U<<19) /* MII reported error */
141 #define R0_TL (1U<<18) /* frame too long, beyond 1518 */
142 #define R0_RF (1U<<17) /* damaged runt frame */
143 #define R0_CE (1U<<16) /* CRC error */
144 #define R0_FT (1U<<15) /* frame type */
145 #define R0_FL_MASK 0x7ff /* frame length 10:0 */
146
147 #define T1_IC (1U<<31) /* post interrupt on complete */
148 #define T1_FS (1U<<30) /* first segment of frame */
149 #define T1_LS (1U<<29) /* last segment of frame */
150 #define T1_IPCKG (1U<<28) /* generate IP checksum */
151 #define T1_TCPCKG (1U<<27) /* generate TCP checksum */
152 #define T1_UDPCKG (1U<<26) /* generate UDP checksum */
153 #define T1_TER (1U<<25) /* end of ring */
154 #define T1_SPN 0x00300000 /* 21:20 switch port 1/2 */
155 #define T1_TBS_MASK 0x7ff /* segment size 10:0 */
156
157 #define R1_RER (1U<<25) /* end of ring */
158 #define R1_RBS_MASK 0x7fc /* segment size 10:0 */
159
160 #define KSE_NTXSEGS 16
161 #define KSE_TXQUEUELEN 64
162 #define KSE_TXQUEUELEN_MASK (KSE_TXQUEUELEN - 1)
163 #define KSE_TXQUEUE_GC (KSE_TXQUEUELEN / 4)
164 #define KSE_NTXDESC 256
165 #define KSE_NTXDESC_MASK (KSE_NTXDESC - 1)
166 #define KSE_NEXTTX(x) (((x) + 1) & KSE_NTXDESC_MASK)
167 #define KSE_NEXTTXS(x) (((x) + 1) & KSE_TXQUEUELEN_MASK)
168
169 #define KSE_NRXDESC 64
170 #define KSE_NRXDESC_MASK (KSE_NRXDESC - 1)
171 #define KSE_NEXTRX(x) (((x) + 1) & KSE_NRXDESC_MASK)
172
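/*
 * Tx/Rx descriptors are four 32-bit words each.  As used by this
 * driver: t0/r0 carry the OWN bit and status, t1/r1 carry control
 * flags and the buffer byte count, t2/r2 hold the buffer bus address,
 * and t3/r3 link to the next descriptor to form a ring.
 */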
173 struct tdes {
174 uint32_t t0, t1, t2, t3;
175 };
176
177 struct rdes {
178 uint32_t r0, r1, r2, r3;
179 };
180
181 struct kse_control_data {
182 struct tdes kcd_txdescs[KSE_NTXDESC];
183 struct rdes kcd_rxdescs[KSE_NRXDESC];
184 };
185 #define KSE_CDOFF(x) offsetof(struct kse_control_data, x)
186 #define KSE_CDTXOFF(x) KSE_CDOFF(kcd_txdescs[(x)])
187 #define KSE_CDRXOFF(x) KSE_CDOFF(kcd_rxdescs[(x)])
188
189 struct kse_txsoft {
190 struct mbuf *txs_mbuf; /* head of our mbuf chain */
191 bus_dmamap_t txs_dmamap; /* our DMA map */
192 int txs_firstdesc; /* first descriptor in packet */
193 int txs_lastdesc; /* last descriptor in packet */
194 int txs_ndesc; /* # of descriptors used */
195 };
196
197 struct kse_rxsoft {
198 struct mbuf *rxs_mbuf; /* head of our mbuf chain */
199 bus_dmamap_t rxs_dmamap; /* our DMA map */
200 };
201
202 struct kse_softc {
203 device_t sc_dev; /* generic device information */
204 bus_space_tag_t sc_st; /* bus space tag */
205 bus_space_handle_t sc_sh; /* bus space handle */
206 bus_dma_tag_t sc_dmat; /* bus DMA tag */
207 struct ethercom sc_ethercom; /* Ethernet common data */
208 void *sc_ih; /* interrupt cookie */
209
210 struct ifmedia sc_media; /* ifmedia information */
211 int sc_media_status; /* PHY */
212 int sc_media_active; /* PHY */
213 callout_t sc_callout; /* MII tick callout */
214 callout_t sc_stat_ch; /* statistics counter callout */
215
216 bus_dmamap_t sc_cddmamap; /* control data DMA map */
217 #define sc_cddma sc_cddmamap->dm_segs[0].ds_addr
218
219 struct kse_control_data *sc_control_data;
220 #define sc_txdescs sc_control_data->kcd_txdescs
221 #define sc_rxdescs sc_control_data->kcd_rxdescs
222
223 struct kse_txsoft sc_txsoft[KSE_TXQUEUELEN];
224 struct kse_rxsoft sc_rxsoft[KSE_NRXDESC];
225 int sc_txfree; /* number of free Tx descriptors */
226 int sc_txnext; /* next ready Tx descriptor */
227 int sc_txsfree; /* number of free Tx jobs */
228 int sc_txsnext; /* next ready Tx job */
229 int sc_txsdirty; /* dirty Tx jobs */
230 int sc_rxptr; /* next ready Rx descriptor/descsoft */
231
232 uint32_t sc_txc, sc_rxc;
233 uint32_t sc_t1csum;
234 int sc_mcsum;
235 uint32_t sc_inten;
236
237 uint32_t sc_chip;
238 uint8_t sc_altmac[16][ETHER_ADDR_LEN];
239 uint16_t sc_vlan[16];
240
241 #ifdef KSE_EVENT_COUNTERS
242 struct ksext {
243 char evcntname[3][8];
244 struct evcnt pev[3][34];
245 } sc_ext; /* switch statistics */
246 #endif
247 };
248
249 #define KSE_CDTXADDR(sc, x) ((sc)->sc_cddma + KSE_CDTXOFF((x)))
250 #define KSE_CDRXADDR(sc, x) ((sc)->sc_cddma + KSE_CDRXOFF((x)))
251
252 #define KSE_CDTXSYNC(sc, x, n, ops) \
253 do { \
254 int __x, __n; \
255 \
256 __x = (x); \
257 __n = (n); \
258 \
259 /* If it will wrap around, sync to the end of the ring. */ \
260 if ((__x + __n) > KSE_NTXDESC) { \
261 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
262 KSE_CDTXOFF(__x), sizeof(struct tdes) * \
263 (KSE_NTXDESC - __x), (ops)); \
264 __n -= (KSE_NTXDESC - __x); \
265 __x = 0; \
266 } \
267 \
268 /* Now sync whatever is left. */ \
269 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
270 KSE_CDTXOFF(__x), sizeof(struct tdes) * __n, (ops)); \
271 } while (/*CONSTCOND*/0)
272
273 #define KSE_CDRXSYNC(sc, x, ops) \
274 do { \
275 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
276 KSE_CDRXOFF((x)), sizeof(struct rdes), (ops)); \
277 } while (/*CONSTCOND*/0)
278
279 #define KSE_INIT_RXDESC(sc, x) \
280 do { \
281 struct kse_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)]; \
282 struct rdes *__rxd = &(sc)->sc_rxdescs[(x)]; \
283 struct mbuf *__m = __rxs->rxs_mbuf; \
284 \
285 __m->m_data = __m->m_ext.ext_buf; \
286 __rxd->r2 = __rxs->rxs_dmamap->dm_segs[0].ds_addr; \
287 __rxd->r1 = R1_RBS_MASK /* __m->m_ext.ext_size */; \
288 __rxd->r0 = R0_OWN; \
289 KSE_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
290 } while (/*CONSTCOND*/0)
291
292 u_int kse_burstsize = 8; /* DMA burst length tuning knob */
293
294 #ifdef KSEDIAGNOSTIC
295 u_int kse_monitor_rxintr; /* fragmented UDP csum HW bug hook */
296 #endif
297
298 static int kse_match(device_t, cfdata_t, void *);
299 static void kse_attach(device_t, device_t, void *);
300
301 CFATTACH_DECL_NEW(kse, sizeof(struct kse_softc),
302 kse_match, kse_attach, NULL, NULL);
303
304 static int kse_ioctl(struct ifnet *, u_long, void *);
305 static void kse_start(struct ifnet *);
306 static void kse_watchdog(struct ifnet *);
307 static int kse_init(struct ifnet *);
308 static void kse_stop(struct ifnet *, int);
309 static void kse_reset(struct kse_softc *);
310 static void kse_set_filter(struct kse_softc *);
311 static int add_rxbuf(struct kse_softc *, int);
312 static void rxdrain(struct kse_softc *);
313 static int kse_intr(void *);
314 static void rxintr(struct kse_softc *);
315 static void txreap(struct kse_softc *);
316 static void lnkchg(struct kse_softc *);
317 static int ifmedia_upd(struct ifnet *);
318 static void ifmedia_sts(struct ifnet *, struct ifmediareq *);
319 static void phy_tick(void *);
320 static int ifmedia2_upd(struct ifnet *);
321 static void ifmedia2_sts(struct ifnet *, struct ifmediareq *);
322 #ifdef KSE_EVENT_COUNTERS
323 static void stat_tick(void *);
324 static void zerostats(struct kse_softc *);
325 #endif
326
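/*
 * Match the Micrel KSZ8841 (single port) and KSZ8842 (two-port switch)
 * PCI Ethernet controllers by vendor/product ID and network class code.
 */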
327 static int
328 kse_match(device_t parent, cfdata_t match, void *aux)
329 {
330 struct pci_attach_args *pa = (struct pci_attach_args *)aux;
331
332 if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_MICREL &&
333 (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_MICREL_KSZ8842 ||
334 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_MICREL_KSZ8841) &&
335 PCI_CLASS(pa->pa_class) == PCI_CLASS_NETWORK)
336 return 1;
337
338 return 0;
339 }
340
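/*
 * Attach: map the memory BAR, enable bus mastering, get the chip out
 * of power-save state (D3 is fatal since configuration is lost), read
 * the station address from MARL/MARM/MARH, establish the interrupt,
 * allocate and load the descriptor control block, create per-packet
 * Tx/Rx DMA maps, and attach ifmedia and the network interface.
 */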
341 static void
342 kse_attach(device_t parent, device_t self, void *aux)
343 {
344 struct kse_softc *sc = device_private(self);
345 struct pci_attach_args *pa = aux;
346 pci_chipset_tag_t pc = pa->pa_pc;
347 pci_intr_handle_t ih;
348 const char *intrstr;
349 struct ifnet *ifp;
350 struct ifmedia *ifm;
351 uint8_t enaddr[ETHER_ADDR_LEN];
352 bus_dma_segment_t seg;
353 int i, error, nseg;
354 pcireg_t pmode;
355 int pmreg;
356 char intrbuf[PCI_INTRSTR_LEN];
357
358 if (pci_mapreg_map(pa, 0x10,
359 PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT,
360 0, &sc->sc_st, &sc->sc_sh, NULL, NULL) != 0) {
361 printf(": unable to map device registers\n");
362 return;
363 }
364
365 sc->sc_dev = self;
366 sc->sc_dmat = pa->pa_dmat;
367
368 /* Make sure bus mastering is enabled. */
369 pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
370 pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG) |
371 PCI_COMMAND_MASTER_ENABLE);
372
373 /* Get it out of power save mode, if needed. */
374 if (pci_get_capability(pc, pa->pa_tag, PCI_CAP_PWRMGMT, &pmreg, 0)) {
375 pmode = pci_conf_read(pc, pa->pa_tag, pmreg + PCI_PMCSR) &
376 PCI_PMCSR_STATE_MASK;
377 if (pmode == PCI_PMCSR_STATE_D3) {
378 /*
379 * The card has lost all configuration data in
380 * this state, so punt.
381 */
382 printf("%s: unable to wake from power state D3\n",
383 device_xname(sc->sc_dev));
384 return;
385 }
386 if (pmode != PCI_PMCSR_STATE_D0) {
387 printf("%s: waking up from power date D%d\n",
388 device_xname(sc->sc_dev), pmode);
389 pci_conf_write(pc, pa->pa_tag, pmreg + PCI_PMCSR,
390 PCI_PMCSR_STATE_D0);
391 }
392 }
393
394 sc->sc_chip = PCI_PRODUCT(pa->pa_id);
395 printf(": Micrel KSZ%04x Ethernet (rev. 0x%02x)\n",
396 sc->sc_chip, PCI_REVISION(pa->pa_class));
397
398 /*
399 * Read the Ethernet address from the EEPROM.
400 */
401 i = CSR_READ_2(sc, MARL);
402 enaddr[5] = i; enaddr[4] = i >> 8;
403 i = CSR_READ_2(sc, MARM);
404 enaddr[3] = i; enaddr[2] = i >> 8;
405 i = CSR_READ_2(sc, MARH);
406 enaddr[1] = i; enaddr[0] = i >> 8;
407 printf("%s: Ethernet address: %s\n",
408 device_xname(sc->sc_dev), ether_sprintf(enaddr));
409
410 /*
411 * Enable chip function.
412 */
413 CSR_WRITE_2(sc, CIDR, 1);
414
415 /*
416 * Map and establish our interrupt.
417 */
418 if (pci_intr_map(pa, &ih)) {
419 aprint_error_dev(sc->sc_dev, "unable to map interrupt\n");
420 return;
421 }
422 intrstr = pci_intr_string(pc, ih, intrbuf, sizeof(intrbuf));
423 sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, kse_intr, sc);
424 if (sc->sc_ih == NULL) {
425 aprint_error_dev(sc->sc_dev, "unable to establish interrupt");
426 if (intrstr != NULL)
427 aprint_error(" at %s", intrstr);
428 aprint_error("\n");
429 return;
430 }
431 aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
432
433 /*
434 * Allocate the control data structures, and create and load the
435 * DMA map for it.
436 */
437 error = bus_dmamem_alloc(sc->sc_dmat,
438 sizeof(struct kse_control_data), PAGE_SIZE, 0, &seg, 1, &nseg, 0);
439 if (error != 0) {
440 aprint_error_dev(sc->sc_dev, "unable to allocate control data, error = %d\n", error);
441 goto fail_0;
442 }
443 error = bus_dmamem_map(sc->sc_dmat, &seg, nseg,
444 sizeof(struct kse_control_data), (void **)&sc->sc_control_data,
445 BUS_DMA_COHERENT);
446 if (error != 0) {
447 aprint_error_dev(sc->sc_dev, "unable to map control data, error = %d\n", error);
448 goto fail_1;
449 }
450 error = bus_dmamap_create(sc->sc_dmat,
451 sizeof(struct kse_control_data), 1,
452 sizeof(struct kse_control_data), 0, 0, &sc->sc_cddmamap);
453 if (error != 0) {
454 aprint_error_dev(sc->sc_dev, "unable to create control data DMA map, "
455 "error = %d\n", error);
456 goto fail_2;
457 }
458 error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
459 sc->sc_control_data, sizeof(struct kse_control_data), NULL, 0);
460 if (error != 0) {
461 aprint_error_dev(sc->sc_dev, "unable to load control data DMA map, error = %d\n",
462 error);
463 goto fail_3;
464 }
465 for (i = 0; i < KSE_TXQUEUELEN; i++) {
466 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
467 KSE_NTXSEGS, MCLBYTES, 0, 0,
468 &sc->sc_txsoft[i].txs_dmamap)) != 0) {
469 aprint_error_dev(sc->sc_dev, "unable to create tx DMA map %d, "
470 "error = %d\n", i, error);
471 goto fail_4;
472 }
473 }
474 for (i = 0; i < KSE_NRXDESC; i++) {
475 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
476 1, MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
477 aprint_error_dev(sc->sc_dev, "unable to create rx DMA map %d, "
478 "error = %d\n", i, error);
479 goto fail_5;
480 }
481 sc->sc_rxsoft[i].rxs_mbuf = NULL;
482 }
483
484 callout_init(&sc->sc_callout, 0);
485 callout_init(&sc->sc_stat_ch, 0);
486
487 ifm = &sc->sc_media;
488 if (sc->sc_chip == 0x8841) {
489 ifmedia_init(ifm, 0, ifmedia_upd, ifmedia_sts);
490 ifmedia_add(ifm, IFM_ETHER|IFM_10_T, 0, NULL);
491 ifmedia_add(ifm, IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL);
492 ifmedia_add(ifm, IFM_ETHER|IFM_100_TX, 0, NULL);
493 ifmedia_add(ifm, IFM_ETHER|IFM_100_TX|IFM_FDX, 0, NULL);
494 ifmedia_add(ifm, IFM_ETHER|IFM_AUTO, 0, NULL);
495 ifmedia_set(ifm, IFM_ETHER|IFM_AUTO);
496 }
497 else {
498 ifmedia_init(ifm, 0, ifmedia2_upd, ifmedia2_sts);
499 ifmedia_add(ifm, IFM_ETHER|IFM_AUTO, 0, NULL);
500 ifmedia_set(ifm, IFM_ETHER|IFM_AUTO);
501 }
502
503 printf("%s: 10baseT, 10baseT-FDX, 100baseTX, 100baseTX-FDX, auto\n",
504 device_xname(sc->sc_dev));
505
506 ifp = &sc->sc_ethercom.ec_if;
507 strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
508 ifp->if_softc = sc;
509 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
510 ifp->if_ioctl = kse_ioctl;
511 ifp->if_start = kse_start;
512 ifp->if_watchdog = kse_watchdog;
513 ifp->if_init = kse_init;
514 ifp->if_stop = kse_stop;
515 IFQ_SET_READY(&ifp->if_snd);
516
517 /*
518 * KSZ8842 can handle 802.1Q VLAN-sized frames,
519 * can do IPv4, TCPv4, and UDPv4 checksums in hardware.
520 */
521 sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;
522 ifp->if_capabilities |=
523 IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
524 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
525 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx;
526
527 if_attach(ifp);
528 ether_ifattach(ifp, enaddr);
529
530 #ifdef KSE_EVENT_COUNTERS
531 int p = (sc->sc_chip == 0x8842) ? 3 : 1;
532 for (i = 0; i < p; i++) {
533 struct ksext *ee = &sc->sc_ext;
534 snprintf(ee->evcntname[i], sizeof(ee->evcntname[i]),
535 "%s.%d", device_xname(sc->sc_dev), i+1);
536 evcnt_attach_dynamic(&ee->pev[i][0], EVCNT_TYPE_MISC,
537 NULL, ee->evcntname[i], "RxLoPriotyByte");
538 evcnt_attach_dynamic(&ee->pev[i][1], EVCNT_TYPE_MISC,
539 NULL, ee->evcntname[i], "RxHiPriotyByte");
540 evcnt_attach_dynamic(&ee->pev[i][2], EVCNT_TYPE_MISC,
541 NULL, ee->evcntname[i], "RxUndersizePkt");
542 evcnt_attach_dynamic(&ee->pev[i][3], EVCNT_TYPE_MISC,
543 NULL, ee->evcntname[i], "RxFragments");
544 evcnt_attach_dynamic(&ee->pev[i][4], EVCNT_TYPE_MISC,
545 NULL, ee->evcntname[i], "RxOversize");
546 evcnt_attach_dynamic(&ee->pev[i][5], EVCNT_TYPE_MISC,
547 NULL, ee->evcntname[i], "RxJabbers");
548 evcnt_attach_dynamic(&ee->pev[i][6], EVCNT_TYPE_MISC,
549 NULL, ee->evcntname[i], "RxSymbolError");
550 evcnt_attach_dynamic(&ee->pev[i][7], EVCNT_TYPE_MISC,
551 NULL, ee->evcntname[i], "RxCRCError");
552 evcnt_attach_dynamic(&ee->pev[i][8], EVCNT_TYPE_MISC,
553 NULL, ee->evcntname[i], "RxAlignmentError");
554 evcnt_attach_dynamic(&ee->pev[i][9], EVCNT_TYPE_MISC,
555 NULL, ee->evcntname[i], "RxControl8808Pkts");
556 evcnt_attach_dynamic(&ee->pev[i][10], EVCNT_TYPE_MISC,
557 NULL, ee->evcntname[i], "RxPausePkts");
558 evcnt_attach_dynamic(&ee->pev[i][11], EVCNT_TYPE_MISC,
559 NULL, ee->evcntname[i], "RxBroadcast");
560 evcnt_attach_dynamic(&ee->pev[i][12], EVCNT_TYPE_MISC,
561 NULL, ee->evcntname[i], "RxMulticast");
562 evcnt_attach_dynamic(&ee->pev[i][13], EVCNT_TYPE_MISC,
563 NULL, ee->evcntname[i], "RxUnicast");
564 evcnt_attach_dynamic(&ee->pev[i][14], EVCNT_TYPE_MISC,
565 NULL, ee->evcntname[i], "Rx64Octets");
566 evcnt_attach_dynamic(&ee->pev[i][15], EVCNT_TYPE_MISC,
567 NULL, ee->evcntname[i], "Rx65To127Octets");
568 evcnt_attach_dynamic(&ee->pev[i][16], EVCNT_TYPE_MISC,
569 NULL, ee->evcntname[i], "Rx128To255Octets");
570 evcnt_attach_dynamic(&ee->pev[i][17], EVCNT_TYPE_MISC,
571 NULL, ee->evcntname[i], "Rx255To511Octets");
572 evcnt_attach_dynamic(&ee->pev[i][18], EVCNT_TYPE_MISC,
573 NULL, ee->evcntname[i], "Rx512To1023Octets");
574 evcnt_attach_dynamic(&ee->pev[i][19], EVCNT_TYPE_MISC,
575 NULL, ee->evcntname[i], "Rx1024To1522Octets");
576 evcnt_attach_dynamic(&ee->pev[i][20], EVCNT_TYPE_MISC,
577 NULL, ee->evcntname[i], "TxLoPriotyByte");
578 evcnt_attach_dynamic(&ee->pev[i][21], EVCNT_TYPE_MISC,
579 NULL, ee->evcntname[i], "TxHiPriotyByte");
580 evcnt_attach_dynamic(&ee->pev[i][22], EVCNT_TYPE_MISC,
581 NULL, ee->evcntname[i], "TxLateCollision");
582 evcnt_attach_dynamic(&ee->pev[i][23], EVCNT_TYPE_MISC,
583 NULL, ee->evcntname[i], "TxPausePkts");
584 evcnt_attach_dynamic(&ee->pev[i][24], EVCNT_TYPE_MISC,
585 NULL, ee->evcntname[i], "TxBroadcastPkts");
586 evcnt_attach_dynamic(&ee->pev[i][25], EVCNT_TYPE_MISC,
587 NULL, ee->evcntname[i], "TxMulticastPkts");
588 evcnt_attach_dynamic(&ee->pev[i][26], EVCNT_TYPE_MISC,
589 NULL, ee->evcntname[i], "TxUnicastPkts");
590 evcnt_attach_dynamic(&ee->pev[i][27], EVCNT_TYPE_MISC,
591 NULL, ee->evcntname[i], "TxDeferred");
592 evcnt_attach_dynamic(&ee->pev[i][28], EVCNT_TYPE_MISC,
593 NULL, ee->evcntname[i], "TxTotalCollision");
594 evcnt_attach_dynamic(&ee->pev[i][29], EVCNT_TYPE_MISC,
595 NULL, ee->evcntname[i], "TxExcessiveCollision");
596 evcnt_attach_dynamic(&ee->pev[i][30], EVCNT_TYPE_MISC,
597 NULL, ee->evcntname[i], "TxSingleCollision");
598 evcnt_attach_dynamic(&ee->pev[i][31], EVCNT_TYPE_MISC,
599 NULL, ee->evcntname[i], "TxMultipleCollision");
600 evcnt_attach_dynamic(&ee->pev[i][32], EVCNT_TYPE_MISC,
601 NULL, ee->evcntname[i], "TxDropPkts");
602 evcnt_attach_dynamic(&ee->pev[i][33], EVCNT_TYPE_MISC,
603 NULL, ee->evcntname[i], "RxDropPkts");
604 }
605 #endif
606 return;
607
608 fail_5:
609 for (i = 0; i < KSE_NRXDESC; i++) {
610 if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
611 bus_dmamap_destroy(sc->sc_dmat,
612 sc->sc_rxsoft[i].rxs_dmamap);
613 }
614 fail_4:
615 for (i = 0; i < KSE_TXQUEUELEN; i++) {
616 if (sc->sc_txsoft[i].txs_dmamap != NULL)
617 bus_dmamap_destroy(sc->sc_dmat,
618 sc->sc_txsoft[i].txs_dmamap);
619 }
620 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
621 fail_3:
622 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
623 fail_2:
624 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
625 sizeof(struct kse_control_data));
626 fail_1:
627 bus_dmamem_free(sc->sc_dmat, &seg, nseg);
628 fail_0:
629 return;
630 }
631
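/*
 * Ioctl handler.  Media requests go to ifmedia; everything else is
 * handled by ether_ioctl().  On ENETRESET, SIOCSIFCAP reinitializes
 * the interface and multicast changes reprogram the hash filter while
 * the interface keeps running; kse_start() is kicked before returning.
 */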
632 static int
633 kse_ioctl(struct ifnet *ifp, u_long cmd, void *data)
634 {
635 struct kse_softc *sc = ifp->if_softc;
636 struct ifreq *ifr = (struct ifreq *)data;
637 int s, error;
638
639 s = splnet();
640
641 switch (cmd) {
642 case SIOCSIFMEDIA:
643 case SIOCGIFMEDIA:
644 error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
645 break;
646
647 default:
648 if ((error = ether_ioctl(ifp, cmd, data)) != ENETRESET)
649 break;
650
651 error = 0;
652
653 if (cmd == SIOCSIFCAP)
654 error = (*ifp->if_init)(ifp);
655 if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
656 ;
657 else if (ifp->if_flags & IFF_RUNNING) {
658 /*
659 * Multicast list has changed; set the hardware filter
660 * accordingly.
661 */
662 kse_set_filter(sc);
663 }
664 break;
665 }
666
667 kse_start(ifp);
668
669 splx(s);
670 return error;
671 }
672
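/*
 * Init: stop pending I/O and reset the chip, chain the Tx/Rx
 * descriptors into circular lists through their link words, hand the
 * list base addresses to the DMA engine (TDLB/RDLB), program checksum
 * offload, burst size and the Rx filter, select the media, then enable
 * the DMA engines and interrupts.
 */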
673 static int
674 kse_init(struct ifnet *ifp)
675 {
676 struct kse_softc *sc = ifp->if_softc;
677 uint32_t paddr;
678 int i, error = 0;
679
680 /* cancel pending I/O */
681 kse_stop(ifp, 0);
682
683 /* reset all registers but PCI configuration */
684 kse_reset(sc);
685
686 /* craft Tx descriptor ring */
687 memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
688 for (i = 0, paddr = KSE_CDTXADDR(sc, 1); i < KSE_NTXDESC - 1; i++) {
689 sc->sc_txdescs[i].t3 = paddr;
690 paddr += sizeof(struct tdes);
691 }
692 sc->sc_txdescs[KSE_NTXDESC - 1].t3 = KSE_CDTXADDR(sc, 0);
693 KSE_CDTXSYNC(sc, 0, KSE_NTXDESC,
694 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
695 sc->sc_txfree = KSE_NTXDESC;
696 sc->sc_txnext = 0;
697
698 for (i = 0; i < KSE_TXQUEUELEN; i++)
699 sc->sc_txsoft[i].txs_mbuf = NULL;
700 sc->sc_txsfree = KSE_TXQUEUELEN;
701 sc->sc_txsnext = 0;
702 sc->sc_txsdirty = 0;
703
704 /* craft Rx descriptor ring */
705 memset(sc->sc_rxdescs, 0, sizeof(sc->sc_rxdescs));
706 for (i = 0, paddr = KSE_CDRXADDR(sc, 1); i < KSE_NRXDESC - 1; i++) {
707 sc->sc_rxdescs[i].r3 = paddr;
708 paddr += sizeof(struct rdes);
709 }
710 sc->sc_rxdescs[KSE_NRXDESC - 1].r3 = KSE_CDRXADDR(sc, 0);
711 for (i = 0; i < KSE_NRXDESC; i++) {
712 if (sc->sc_rxsoft[i].rxs_mbuf == NULL) {
713 if ((error = add_rxbuf(sc, i)) != 0) {
714 printf("%s: unable to allocate or map rx "
715 "buffer %d, error = %d\n",
716 device_xname(sc->sc_dev), i, error);
717 rxdrain(sc);
718 goto out;
719 }
720 }
721 else
722 KSE_INIT_RXDESC(sc, i);
723 }
724 sc->sc_rxptr = 0;
725
726 /* hand Tx/Rx rings to HW */
727 CSR_WRITE_4(sc, TDLB, KSE_CDTXADDR(sc, 0));
728 CSR_WRITE_4(sc, RDLB, KSE_CDRXADDR(sc, 0));
729
730 sc->sc_txc = TXC_TEN | TXC_EP | TXC_AC | TXC_FCE;
731 sc->sc_rxc = RXC_REN | RXC_RU | RXC_FCE;
732 if (ifp->if_flags & IFF_PROMISC)
733 sc->sc_rxc |= RXC_RA;
734 if (ifp->if_flags & IFF_BROADCAST)
735 sc->sc_rxc |= RXC_RB;
736 sc->sc_t1csum = sc->sc_mcsum = 0;
737 if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx) {
738 sc->sc_rxc |= RXC_ICC;
739 sc->sc_mcsum |= M_CSUM_IPv4;
740 }
741 if (ifp->if_capenable & IFCAP_CSUM_IPv4_Tx) {
742 sc->sc_txc |= TXC_ICG;
743 sc->sc_t1csum |= T1_IPCKG;
744 }
745 if (ifp->if_capenable & IFCAP_CSUM_TCPv4_Rx) {
746 sc->sc_rxc |= RXC_TCC;
747 sc->sc_mcsum |= M_CSUM_TCPv4;
748 }
749 if (ifp->if_capenable & IFCAP_CSUM_TCPv4_Tx) {
750 sc->sc_txc |= TXC_TCG;
751 sc->sc_t1csum |= T1_TCPCKG;
752 }
753 if (ifp->if_capenable & IFCAP_CSUM_UDPv4_Rx) {
754 sc->sc_rxc |= RXC_UCC;
755 sc->sc_mcsum |= M_CSUM_UDPv4;
756 }
757 if (ifp->if_capenable & IFCAP_CSUM_UDPv4_Tx) {
758 sc->sc_txc |= TXC_UCG;
759 sc->sc_t1csum |= T1_UDPCKG;
760 }
761 sc->sc_txc |= (kse_burstsize << TXC_BS_SFT);
762 sc->sc_rxc |= (kse_burstsize << RXC_BS_SFT);
763
764 /* build multicast hash filter if necessary */
765 kse_set_filter(sc);
766
767 /* set current media */
768 (void)ifmedia_upd(ifp);
769
770 /* enable transmitter and receiver */
771 CSR_WRITE_4(sc, MDTXC, sc->sc_txc);
772 CSR_WRITE_4(sc, MDRXC, sc->sc_rxc);
773 CSR_WRITE_4(sc, MDRSC, 1);
774
775 /* enable interrupts */
776 sc->sc_inten = INT_DMTS|INT_DMRS|INT_DMRBUS;
777 if (sc->sc_chip == 0x8841)
778 sc->sc_inten |= INT_DMLCS;
779 CSR_WRITE_4(sc, INTST, ~0);
780 CSR_WRITE_4(sc, INTEN, sc->sc_inten);
781
782 ifp->if_flags |= IFF_RUNNING;
783 ifp->if_flags &= ~IFF_OACTIVE;
784
785 if (sc->sc_chip == 0x8841) {
786 /* start one second timer */
787 callout_reset(&sc->sc_callout, hz, phy_tick, sc);
788 }
789 #ifdef KSE_EVENT_COUNTERS
790 /* start statistics gather 1 minute timer */
791 zerostats(sc);
792 callout_reset(&sc->sc_stat_ch, hz * 60, stat_tick, sc);
793 #endif
794
795 out:
796 if (error) {
797 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
798 ifp->if_timer = 0;
799 printf("%s: interface not running\n", device_xname(sc->sc_dev));
800 }
801 return error;
802 }
803
804 static void
805 kse_stop(struct ifnet *ifp, int disable)
806 {
807 struct kse_softc *sc = ifp->if_softc;
808 struct kse_txsoft *txs;
809 int i;
810
811 if (sc->sc_chip == 0x8841)
812 callout_stop(&sc->sc_callout);
813 callout_stop(&sc->sc_stat_ch);
814
815 sc->sc_txc &= ~TXC_TEN;
816 sc->sc_rxc &= ~RXC_REN;
817 CSR_WRITE_4(sc, MDTXC, sc->sc_txc);
818 CSR_WRITE_4(sc, MDRXC, sc->sc_rxc);
819
820 for (i = 0; i < KSE_TXQUEUELEN; i++) {
821 txs = &sc->sc_txsoft[i];
822 if (txs->txs_mbuf != NULL) {
823 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
824 m_freem(txs->txs_mbuf);
825 txs->txs_mbuf = NULL;
826 }
827 }
828
829 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
830 ifp->if_timer = 0;
831
832 if (disable)
833 rxdrain(sc);
834 }
835
836 static void
837 kse_reset(struct kse_softc *sc)
838 {
839
840 CSR_WRITE_2(sc, GRR, 1);
841 delay(1000); /* PDF does not mention the delay amount */
842 CSR_WRITE_2(sc, GRR, 0);
843
844 CSR_WRITE_2(sc, CIDR, 1);
845 }
846
847 static void
848 kse_watchdog(struct ifnet *ifp)
849 {
850 struct kse_softc *sc = ifp->if_softc;
851
852 /*
853 * Since we're not interrupting every packet, sweep
854 * up before we report an error.
855 */
856 txreap(sc);
857
858 if (sc->sc_txfree != KSE_NTXDESC) {
859 printf("%s: device timeout (txfree %d txsfree %d txnext %d)\n",
860 device_xname(sc->sc_dev), sc->sc_txfree, sc->sc_txsfree,
861 sc->sc_txnext);
862 ifp->if_oerrors++;
863
864 /* Reset the interface. */
865 kse_init(ifp);
866 }
867 else if (ifp->if_flags & IFF_DEBUG)
868 printf("%s: recovered from device timeout\n",
869 device_xname(sc->sc_dev));
870
871 /* Try to get more packets going. */
872 kse_start(ifp);
873 }
874
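/*
 * Start transmission.  Each packet is DMA-mapped and spread over one
 * descriptor per segment; T1_FS/T1_LS mark the boundaries and the
 * T0_OWN bit of the first descriptor is written last so the chip
 * never sees a half-built chain.  MDTSC is poked to restart Tx DMA.
 */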
875 static void
876 kse_start(struct ifnet *ifp)
877 {
878 struct kse_softc *sc = ifp->if_softc;
879 struct mbuf *m0, *m;
880 struct kse_txsoft *txs;
881 bus_dmamap_t dmamap;
882 int error, nexttx, lasttx, ofree, seg;
883 uint32_t tdes0;
884
885 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
886 return;
887
888 /*
889 * Remember the previous number of free descriptors.
890 */
891 ofree = sc->sc_txfree;
892
893 /*
894 * Loop through the send queue, setting up transmit descriptors
895 * until we drain the queue, or use up all available transmit
896 * descriptors.
897 */
898 for (;;) {
899 IFQ_POLL(&ifp->if_snd, m0);
900 if (m0 == NULL)
901 break;
902
903 if (sc->sc_txsfree < KSE_TXQUEUE_GC) {
904 txreap(sc);
905 if (sc->sc_txsfree == 0)
906 break;
907 }
908 txs = &sc->sc_txsoft[sc->sc_txsnext];
909 dmamap = txs->txs_dmamap;
910
911 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
912 BUS_DMA_WRITE|BUS_DMA_NOWAIT);
913 if (error) {
914 if (error == EFBIG) {
915 printf("%s: Tx packet consumes too many "
916 "DMA segments, dropping...\n",
917 device_xname(sc->sc_dev));
918 IFQ_DEQUEUE(&ifp->if_snd, m0);
919 m_freem(m0);
920 continue;
921 }
922 /* Short on resources, just stop for now. */
923 break;
924 }
925
926 if (dmamap->dm_nsegs > sc->sc_txfree) {
927 /*
928 * Not enough free descriptors to transmit this
929 * packet. We haven't committed anything yet,
930 * so just unload the DMA map, put the packet
931 * back on the queue, and punt. Notify the upper
932 * layer that there are no more slots left.
933 */
934 ifp->if_flags |= IFF_OACTIVE;
935 bus_dmamap_unload(sc->sc_dmat, dmamap);
936 break;
937 }
938
939 IFQ_DEQUEUE(&ifp->if_snd, m0);
940
941 /*
942 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
943 */
944
945 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
946 BUS_DMASYNC_PREWRITE);
947
948 lasttx = -1; tdes0 = 0;
949 for (nexttx = sc->sc_txnext, seg = 0;
950 seg < dmamap->dm_nsegs;
951 seg++, nexttx = KSE_NEXTTX(nexttx)) {
952 struct tdes *tdes = &sc->sc_txdescs[nexttx];
953 /*
954 * If this is the first descriptor we're
955 * enqueueing, don't set the OWN bit just
956 * yet. That could cause a race condition.
957 * We'll do it below.
958 */
959 tdes->t2 = dmamap->dm_segs[seg].ds_addr;
960 tdes->t1 = sc->sc_t1csum
961 | (dmamap->dm_segs[seg].ds_len & T1_TBS_MASK);
962 tdes->t0 = tdes0;
963 tdes0 |= T0_OWN;
964 lasttx = nexttx;
965 }
966
967 /*
968 * An outgoing NFS mbuf must be unloaded as soon as Tx completes.
969 * Without T1_IC the NFS mbuf is left unack'ed for an excessive
970 * time and NFS makes no progress until kse_watchdog()
971 * calls txreap() to reclaim the unack'ed mbuf.
972 * It's painful to traverse every mbuf chain to determine
973 * whether someone is waiting for Tx completion.
974 */
975 m = m0;
976 do {
977 if ((m->m_flags & M_EXT) && m->m_ext.ext_free) {
978 sc->sc_txdescs[lasttx].t1 |= T1_IC;
979 break;
980 }
981 } while ((m = m->m_next) != NULL);
982
983 /* set LS on the last segment, FS on the first, and write T0_OWN of the 1st segment last */
984 sc->sc_txdescs[lasttx].t1 |= T1_LS;
985 sc->sc_txdescs[sc->sc_txnext].t1 |= T1_FS;
986 sc->sc_txdescs[sc->sc_txnext].t0 = T0_OWN;
987 KSE_CDTXSYNC(sc, sc->sc_txnext, dmamap->dm_nsegs,
988 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
989
990 /* tell DMA start transmit */
991 CSR_WRITE_4(sc, MDTSC, 1);
992
993 txs->txs_mbuf = m0;
994 txs->txs_firstdesc = sc->sc_txnext;
995 txs->txs_lastdesc = lasttx;
996 txs->txs_ndesc = dmamap->dm_nsegs;
997
998 sc->sc_txfree -= txs->txs_ndesc;
999 sc->sc_txnext = nexttx;
1000 sc->sc_txsfree--;
1001 sc->sc_txsnext = KSE_NEXTTXS(sc->sc_txsnext);
1002 /*
1003 * Pass the packet to any BPF listeners.
1004 */
1005 bpf_mtap(ifp, m0);
1006 }
1007
1008 if (sc->sc_txsfree == 0 || sc->sc_txfree == 0) {
1009 /* No more slots left; notify upper layer. */
1010 ifp->if_flags |= IFF_OACTIVE;
1011 }
1012 if (sc->sc_txfree != ofree) {
1013 /* Set a watchdog timer in case the chip flakes out. */
1014 ifp->if_timer = 5;
1015 }
1016 }
1017
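/*
 * Program the Rx filter.  Unless the interface is promiscuous, each
 * multicast address is hashed with ether_crc32_le() and the top six
 * bits of the CRC select one of the 64 bits in the MTR0/MTR1 hash
 * table; address ranges fall back to accepting all multicast (RXC_RM).
 */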
1018 static void
1019 kse_set_filter(struct kse_softc *sc)
1020 {
1021 struct ether_multistep step;
1022 struct ether_multi *enm;
1023 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1024 uint32_t h, hashes[2];
1025
1026 sc->sc_rxc &= ~(RXC_MHTE | RXC_RM);
1027 ifp->if_flags &= ~IFF_ALLMULTI;
1028 if (ifp->if_flags & IFF_PROMISC)
1029 return;
1030
1031 ETHER_FIRST_MULTI(step, &sc->sc_ethercom, enm);
1032 if (enm == NULL)
1033 return;
1034 hashes[0] = hashes[1] = 0;
1035 do {
1036 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
1037 /*
1038 * We must listen to a range of multicast addresses.
1039 * For now, just accept all multicasts, rather than
1040 * trying to set only those filter bits needed to match
1041 * the range. (At this time, the only use of address
1042 * ranges is for IP multicast routing, for which the
1043 * range is big enough to require all bits set.)
1044 */
1045 goto allmulti;
1046 }
1047 h = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN) >> 26;
1048 hashes[h >> 5] |= 1 << (h & 0x1f);
1049 ETHER_NEXT_MULTI(step, enm);
1050 } while (enm != NULL);
1051 sc->sc_rxc |= RXC_MHTE;
1052 CSR_WRITE_4(sc, MTR0, hashes[0]);
1053 CSR_WRITE_4(sc, MTR1, hashes[1]);
1054 return;
1055 allmulti:
1056 sc->sc_rxc |= RXC_RM;
1057 ifp->if_flags |= IFF_ALLMULTI;
1058 }
1059
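/*
 * Attach a fresh mbuf cluster to Rx slot 'idx', load it into the
 * slot's DMA map and rewrite the descriptor so the chip owns it again.
 */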
1060 static int
1061 add_rxbuf(struct kse_softc *sc, int idx)
1062 {
1063 struct kse_rxsoft *rxs = &sc->sc_rxsoft[idx];
1064 struct mbuf *m;
1065 int error;
1066
1067 MGETHDR(m, M_DONTWAIT, MT_DATA);
1068 if (m == NULL)
1069 return ENOBUFS;
1070
1071 MCLGET(m, M_DONTWAIT);
1072 if ((m->m_flags & M_EXT) == 0) {
1073 m_freem(m);
1074 return ENOBUFS;
1075 }
1076
1077 if (rxs->rxs_mbuf != NULL)
1078 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
1079
1080 rxs->rxs_mbuf = m;
1081
1082 error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap,
1083 m->m_ext.ext_buf, m->m_ext.ext_size, NULL, BUS_DMA_NOWAIT);
1084 if (error) {
1085 printf("%s: can't load rx DMA map %d, error = %d\n",
1086 device_xname(sc->sc_dev), idx, error);
1087 panic("kse_add_rxbuf");
1088 }
1089
1090 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
1091 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
1092
1093 KSE_INIT_RXDESC(sc, idx);
1094
1095 return 0;
1096 }
1097
1098 static void
1099 rxdrain(struct kse_softc *sc)
1100 {
1101 struct kse_rxsoft *rxs;
1102 int i;
1103
1104 for (i = 0; i < KSE_NRXDESC; i++) {
1105 rxs = &sc->sc_rxsoft[i];
1106 if (rxs->rxs_mbuf != NULL) {
1107 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
1108 m_freem(rxs->rxs_mbuf);
1109 rxs->rxs_mbuf = NULL;
1110 }
1111 }
1112 }
1113
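/*
 * Interrupt handler: read INTST, dispatch Rx, Tx-completion and link
 * change work, then acknowledge by writing the status bits back.
 */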
1114 static int
1115 kse_intr(void *arg)
1116 {
1117 struct kse_softc *sc = arg;
1118 uint32_t isr;
1119
1120 if ((isr = CSR_READ_4(sc, INTST)) == 0)
1121 return 0;
1122
1123 if (isr & INT_DMRS)
1124 rxintr(sc);
1125 if (isr & INT_DMTS)
1126 txreap(sc);
1127 if (isr & INT_DMLCS)
1128 lnkchg(sc);
1129 if (isr & INT_DMRBUS)
1130 printf("%s: Rx descriptor full\n", device_xname(sc->sc_dev));
1131
1132 CSR_WRITE_4(sc, INTST, isr);
1133 return 1;
1134 }
1135
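/*
 * Rx interrupt: walk the ring from sc_rxptr until a descriptor still
 * owned by the chip is found, handing good frames to the stack and
 * recycling the descriptors of errored ones.
 */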
1136 static void
1137 rxintr(struct kse_softc *sc)
1138 {
1139 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1140 struct kse_rxsoft *rxs;
1141 struct mbuf *m;
1142 uint32_t rxstat;
1143 int i, len;
1144
1145 for (i = sc->sc_rxptr; /*CONSTCOND*/ 1; i = KSE_NEXTRX(i)) {
1146 rxs = &sc->sc_rxsoft[i];
1147
1148 KSE_CDRXSYNC(sc, i,
1149 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1150
1151 rxstat = sc->sc_rxdescs[i].r0;
1152
1153 if (rxstat & R0_OWN) /* desc is left empty */
1154 break;
1155
1156 /* R0_FS|R0_LS must have been marked for this desc */
1157
1158 if (rxstat & R0_ES) {
1159 ifp->if_ierrors++;
1160 #define PRINTERR(bit, str) \
1161 if (rxstat & (bit)) \
1162 printf("%s: receive error: %s\n", \
1163 device_xname(sc->sc_dev), str)
1164 PRINTERR(R0_TL, "frame too long");
1165 PRINTERR(R0_RF, "runt frame");
1166 PRINTERR(R0_CE, "bad FCS");
1167 #undef PRINTERR
1168 KSE_INIT_RXDESC(sc, i);
1169 continue;
1170 }
1171
1172 /* HW errata; frame might be too small or too large */
1173
1174 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
1175 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
1176
1177 len = rxstat & R0_FL_MASK;
1178 len -= ETHER_CRC_LEN; /* trim CRC off */
1179 m = rxs->rxs_mbuf;
1180
1181 if (add_rxbuf(sc, i) != 0) {
1182 ifp->if_ierrors++;
1183 KSE_INIT_RXDESC(sc, i);
1184 bus_dmamap_sync(sc->sc_dmat,
1185 rxs->rxs_dmamap, 0,
1186 rxs->rxs_dmamap->dm_mapsize,
1187 BUS_DMASYNC_PREREAD);
1188 continue;
1189 }
1190
1191 ifp->if_ipackets++;
1192 m_set_rcvif(m, ifp);
1193 m->m_pkthdr.len = m->m_len = len;
1194
1195 if (sc->sc_mcsum) {
1196 m->m_pkthdr.csum_flags |= sc->sc_mcsum;
1197 if (rxstat & R0_IPE)
1198 m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
1199 if (rxstat & (R0_TCPE | R0_UDPE))
1200 m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
1201 }
1202 bpf_mtap(ifp, m);
1203 if_percpuq_enqueue(ifp->if_percpuq, m);
1204 #ifdef KSEDIAGNOSTIC
1205 if (kse_monitor_rxintr > 0) {
1206 printf("m stat %x data %p len %d\n",
1207 rxstat, m->m_data, m->m_len);
1208 }
1209 #endif
1210 }
1211 sc->sc_rxptr = i;
1212 }
1213
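/*
 * Reclaim completed Tx jobs.  A job is done once the chip has cleared
 * T0_OWN in its last descriptor; its descriptors are returned to the
 * free pool, the DMA map is unloaded and the mbuf chain is freed.
 */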
1214 static void
1215 txreap(struct kse_softc *sc)
1216 {
1217 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1218 struct kse_txsoft *txs;
1219 uint32_t txstat;
1220 int i;
1221
1222 ifp->if_flags &= ~IFF_OACTIVE;
1223
1224 for (i = sc->sc_txsdirty; sc->sc_txsfree != KSE_TXQUEUELEN;
1225 i = KSE_NEXTTXS(i), sc->sc_txsfree++) {
1226 txs = &sc->sc_txsoft[i];
1227
1228 KSE_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndesc,
1229 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1230
1231 txstat = sc->sc_txdescs[txs->txs_lastdesc].t0;
1232
1233 if (txstat & T0_OWN) /* desc is still in use */
1234 break;
1235
1236 /* there is no way to tell transmission status per frame */
1237
1238 ifp->if_opackets++;
1239
1240 sc->sc_txfree += txs->txs_ndesc;
1241 bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
1242 0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1243 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
1244 m_freem(txs->txs_mbuf);
1245 txs->txs_mbuf = NULL;
1246 }
1247 sc->sc_txsdirty = i;
1248 if (sc->sc_txsfree == KSE_TXQUEUELEN)
1249 ifp->if_timer = 0;
1250 }
1251
1252 static void
1253 lnkchg(struct kse_softc *sc)
1254 {
1255 struct ifmediareq ifmr;
1256
1257 #if 0 /* rambling link status */
1258 printf("%s: link %s\n", device_xname(sc->sc_dev),
1259 (CSR_READ_2(sc, P1SR) & (1U << 5)) ? "up" : "down");
1260 #endif
1261 ifmedia_sts(&sc->sc_ethercom.ec_if, &ifmr);
1262 }
1263
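/*
 * KSZ8841 media control uses the built-in port 1 PHY registers
 * (P1CR4/P1SR) rather than an MII bus; the bit assignments relied on
 * below are noted next to each access.
 */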
1264 static int
1265 ifmedia_upd(struct ifnet *ifp)
1266 {
1267 struct kse_softc *sc = ifp->if_softc;
1268 struct ifmedia *ifm = &sc->sc_media;
1269 uint16_t ctl;
1270
1271 ctl = 0;
1272 if (IFM_SUBTYPE(ifm->ifm_media) == IFM_AUTO) {
1273 ctl |= (1U << 13); /* restart AN */
1274 ctl |= (1U << 7); /* enable AN */
1275 ctl |= (1U << 4); /* advertise flow control pause */
1276 ctl |= (1U << 3) | (1U << 2) | (1U << 1) | (1U << 0);
1277 }
1278 else {
1279 if (IFM_SUBTYPE(ifm->ifm_media) == IFM_100_TX)
1280 ctl |= (1U << 6);
1281 if (ifm->ifm_media & IFM_FDX)
1282 ctl |= (1U << 5);
1283 }
1284 CSR_WRITE_2(sc, P1CR4, ctl);
1285
1286 sc->sc_media_active = IFM_NONE;
1287 sc->sc_media_status = IFM_AVALID;
1288
1289 return 0;
1290 }
1291
1292 static void
1293 ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
1294 {
1295 struct kse_softc *sc = ifp->if_softc;
1296 struct ifmedia *ifm = &sc->sc_media;
1297 uint16_t ctl, sts, result;
1298
1299 ifmr->ifm_status = IFM_AVALID;
1300 ifmr->ifm_active = IFM_ETHER;
1301
1302 ctl = CSR_READ_2(sc, P1CR4);
1303 sts = CSR_READ_2(sc, P1SR);
1304 if ((sts & (1U << 5)) == 0) {
1305 ifmr->ifm_active |= IFM_NONE;
1306 goto out; /* link is down */
1307 }
1308 ifmr->ifm_status |= IFM_ACTIVE;
1309 if (IFM_SUBTYPE(ifm->ifm_media) == IFM_AUTO) {
1310 if ((sts & (1U << 6)) == 0) {
1311 ifmr->ifm_active |= IFM_NONE;
1312 goto out; /* negotiation in progress */
1313 }
1314 result = ctl & sts & 017;
1315 if (result & (1U << 3))
1316 ifmr->ifm_active |= IFM_100_TX|IFM_FDX;
1317 else if (result & (1U << 2))
1318 ifmr->ifm_active |= IFM_100_TX|IFM_HDX;
1319 else if (result & (1U << 1))
1320 ifmr->ifm_active |= IFM_10_T|IFM_FDX;
1321 else if (result & (1U << 0))
1322 ifmr->ifm_active |= IFM_10_T|IFM_HDX;
1323 else
1324 ifmr->ifm_active |= IFM_NONE;
1325 if (ctl & (1U << 4))
1326 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
1327 if (sts & (1U << 4))
1328 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
1329 }
1330 else {
1331 ifmr->ifm_active |= (sts & (1U << 10)) ? IFM_100_TX : IFM_10_T;
1332 if (sts & (1U << 9))
1333 ifmr->ifm_active |= IFM_FDX;
1334 if (sts & (1U << 12))
1335 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
1336 if (sts & (1U << 11))
1337 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
1338 }
1339
1340 out:
1341 sc->sc_media_status = ifmr->ifm_status;
1342 sc->sc_media_active = ifmr->ifm_active;
1343 }
1344
1345 static void
1346 phy_tick(void *arg)
1347 {
1348 struct kse_softc *sc = arg;
1349 struct ifmediareq ifmr;
1350 int s;
1351
1352 s = splnet();
1353 ifmedia_sts(&sc->sc_ethercom.ec_if, &ifmr);
1354 splx(s);
1355
1356 callout_reset(&sc->sc_callout, hz, phy_tick, sc);
1357 }
1358
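/*
 * Media handling for the KSZ8842 switch: the host is reported as
 * 100baseTX full-duplex with flow control whenever either external
 * port (P1SR/P2SR) shows link, and as IFM_NONE otherwise.
 */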
1359 static int
1360 ifmedia2_upd(struct ifnet *ifp)
1361 {
1362 struct kse_softc *sc = ifp->if_softc;
1363
1364 sc->sc_media_status = IFM_AVALID;
1365 sc->sc_media_active = IFM_NONE;
1366 return 0;
1367 }
1368
1369 static void
1370 ifmedia2_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
1371 {
1372 struct kse_softc *sc = ifp->if_softc;
1373 int p1sts, p2sts;
1374
1375 ifmr->ifm_status = IFM_AVALID;
1376 ifmr->ifm_active = IFM_ETHER;
1377 p1sts = CSR_READ_2(sc, P1SR);
1378 p2sts = CSR_READ_2(sc, P2SR);
1379 if (((p1sts | p2sts) & (1U << 5)) == 0)
1380 ifmr->ifm_active |= IFM_NONE;
1381 else {
1382 ifmr->ifm_status |= IFM_ACTIVE;
1383 ifmr->ifm_active |= IFM_100_TX|IFM_FDX;
1384 ifmr->ifm_active |= IFM_FLOW|IFM_ETH_RXPAUSE|IFM_ETH_TXPAUSE;
1385 }
1386 sc->sc_media_status = ifmr->ifm_status;
1387 sc->sc_media_active = ifmr->ifm_active;
1388 }
1389
1390 #ifdef KSE_EVENT_COUNTERS
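/*
 * Once-a-minute statistics callout.  Each per-port MIB counter is read
 * through the indirect access registers: a request is written to IACR,
 * IADR5 is polled until bit 30 comes up, then the 30-bit count (or a
 * saturated value when bit 31 flags an overflow) is assembled from
 * IADR5 and IADR4.
 */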
1391 static void
1392 stat_tick(void *arg)
1393 {
1394 struct kse_softc *sc = arg;
1395 struct ksext *ee = &sc->sc_ext;
1396 int nport, p, i, val;
1397
1398 nport = (sc->sc_chip == 0x8842) ? 3 : 1;
1399 for (p = 0; p < nport; p++) {
1400 for (i = 0; i < 32; i++) {
1401 val = 0x1c00 | (p * 0x20 + i);
1402 CSR_WRITE_2(sc, IACR, val);
1403 do {
1404 val = CSR_READ_2(sc, IADR5) << 16;
1405 } while ((val & (1U << 30)) == 0);
1406 if (val & (1U << 31)) {
1407 (void)CSR_READ_2(sc, IADR4);
1408 val = 0x3fffffff; /* counter has overflowed */
1409 }
1410 else {
1411 val &= 0x3fff0000; /* 29:16 */
1412 val |= CSR_READ_2(sc, IADR4); /* 15:0 */
1413 }
1414 ee->pev[p][i].ev_count += val; /* i (0-31) */
1415 }
1416 CSR_WRITE_2(sc, IACR, 0x1c00 + 0x100 + p);
1417 ee->pev[p][32].ev_count = CSR_READ_2(sc, IADR4); /* 32 */
1418 CSR_WRITE_2(sc, IACR, 0x1c00 + 0x100 + p * 3 + 1);
1419 ee->pev[p][33].ev_count = CSR_READ_2(sc, IADR4); /* 33 */
1420 }
1421 callout_reset(&sc->sc_stat_ch, hz * 60, stat_tick, arg);
1422 }
1423
1424 static void
1425 zerostats(struct kse_softc *sc)
1426 {
1427 struct ksext *ee = &sc->sc_ext;
1428 int nport, p, i, val;
1429
1430 /* make sure all the HW counters get zero */
1431 nport = (sc->sc_chip == 0x8842) ? 3 : 1;
1432 for (p = 0; p < nport; p++) {
1433 for (i = 0; i < 31; i++) {
1434 val = 0x1c00 | (p * 0x20 + i);
1435 CSR_WRITE_2(sc, IACR, val);
1436 do {
1437 val = CSR_READ_2(sc, IADR5) << 16;
1438 } while ((val & (1U << 30)) == 0);
1439 (void)CSR_READ_2(sc, IADR4);
1440 ee->pev[p][i].ev_count = 0;
1441 }
1442 }
1443 }
1444 #endif
1445