1 /* $NetBSD: ixp425_if_npe.c,v 1.53 2022/09/27 06:13:42 skrll Exp $ */
2
3 /*-
4 * Copyright (c) 2006 Sam Leffler. All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 */
26
27 #include <sys/cdefs.h>
28 #if 0
29 __FBSDID("$FreeBSD: src/sys/arm/xscale/ixp425/if_npe.c,v 1.1 2006/11/19 23:55:23 sam Exp $");
30 #endif
31 __KERNEL_RCSID(0, "$NetBSD: ixp425_if_npe.c,v 1.53 2022/09/27 06:13:42 skrll Exp $");
32
33 /*
34 * Intel XScale NPE Ethernet driver.
35 *
36 * This driver handles the two ports present on the IXP425.
37 * Packet processing is done by the Network Processing Engines
38 * (NPE's) that work together with a MAC and PHY. The MAC
39 * is also mapped to the XScale cpu; the PHY is accessed via
40 * the MAC. NPE-XScale communication happens through h/w
41 * queues managed by the Q Manager block.
42 *
43 * The code here replaces the ethAcc, ethMii, and ethDB classes
44 * in the Intel Access Library (IAL) and the OS-specific driver.
45 *
46 * XXX add vlan support
47 * XXX NPE-C port doesn't work yet
48 */
49
50 #include <sys/param.h>
51 #include <sys/systm.h>
52 #include <sys/kernel.h>
53 #include <sys/device.h>
54 #include <sys/callout.h>
55 #include <sys/kmem.h>
56 #include <sys/mbuf.h>
57 #include <sys/socket.h>
58 #include <sys/endian.h>
59 #include <sys/ioctl.h>
60 #include <sys/syslog.h>
61 #include <sys/bus.h>
62 #include <sys/rndsource.h>
63
64 #include <net/if.h>
65 #include <net/if_dl.h>
66 #include <net/if_media.h>
67 #include <net/if_ether.h>
68 #include <net/bpf.h>
69
70 #include <arm/xscale/ixp425reg.h>
71 #include <arm/xscale/ixp425var.h>
72 #include <arm/xscale/ixp425_qmgr.h>
73 #include <arm/xscale/ixp425_npevar.h>
74 #include <arm/xscale/ixp425_if_npereg.h>
75
76 #include <dev/mii/miivar.h>
77
78 #include "locators.h"
79
80 struct npebuf {
81 struct npebuf *ix_next; /* chain to next buffer */
82 void *ix_m; /* backpointer to mbuf */
83 bus_dmamap_t ix_map; /* bus dma map for associated data */
84 struct npehwbuf *ix_hw; /* associated h/w block */
85 uint32_t ix_neaddr; /* phys address of ix_hw */
86 };
87
88 struct npedma {
89 	const char *name;
90 int nbuf; /* # npebuf's allocated */
91 bus_dmamap_t m_map;
92 struct npehwbuf *hwbuf; /* NPE h/w buffers */
93 bus_dmamap_t buf_map;
94 bus_addr_t buf_phys; /* phys addr of buffers */
95 struct npebuf *buf; /* s/w buffers (1-1 w/ h/w) */
96 };
97
98 struct npe_softc {
99 device_t sc_dev;
100 struct ethercom sc_ethercom;
101 uint8_t sc_enaddr[ETHER_ADDR_LEN];
102 struct mii_data sc_mii;
103 bus_space_tag_t sc_iot;
104 bus_dma_tag_t sc_dt;
105 bus_space_handle_t sc_ioh; /* MAC register window */
106 bus_space_handle_t sc_miih; /* MII register window */
107 struct ixpnpe_softc *sc_npe; /* NPE support */
108 int sc_unit;
109 int sc_phy;
110 struct callout sc_tick_ch; /* Tick callout */
111 struct npedma txdma;
112 struct npebuf *tx_free; /* list of free tx buffers */
113 struct npedma rxdma;
114 int rx_qid; /* rx qid */
115 int rx_freeqid; /* rx free buffers qid */
116 int tx_qid; /* tx qid */
117 int tx_doneqid; /* tx completed qid */
118 struct npestats *sc_stats;
119 bus_dmamap_t sc_stats_map;
120 bus_addr_t sc_stats_phys; /* phys addr of sc_stats */
121 u_short sc_if_flags; /* keep last if_flags */
122 krndsource_t rnd_source; /* random source */
123 };
124
125 /*
126 * Per-unit static configuration for IXP425. The tx and
127 * rx free Q id's are fixed by the NPE microcode. The
128 * rx Q id's are programmed to be separate to simplify
129 * multi-port processing. It may be better to handle
130 * all traffic through one Q (as done by the Intel drivers).
131 *
132 * Note that the PHY's are accessible only from MAC A
133 * on the IXP425. This and other platform-specific
134 * assumptions probably need to be handled through hints.
135 */
136 static const struct {
137 const char *desc; /* device description */
138 int npeid; /* NPE assignment */
139 int macport; /* Port number of the MAC */
140 uint32_t imageid; /* NPE firmware image id */
141 uint32_t regbase;
142 int regsize;
143 uint32_t miibase;
144 int miisize;
145 uint8_t rx_qid;
146 uint8_t rx_freeqid;
147 uint8_t tx_qid;
148 uint8_t tx_doneqid;
149 } npeconfig[NPE_PORTS_MAX] = {
150 { .desc = "IXP NPE-B",
151 .npeid = NPE_B,
152 .macport = 0x10,
153 .imageid = IXP425_NPE_B_IMAGEID,
154 .regbase = IXP425_MAC_A_HWBASE,
155 .regsize = IXP425_MAC_A_SIZE,
156 .miibase = IXP425_MAC_A_HWBASE,
157 .miisize = IXP425_MAC_A_SIZE,
158 .rx_qid = 4,
159 .rx_freeqid = 27,
160 .tx_qid = 24,
161 .tx_doneqid = 31
162 },
163 { .desc = "IXP NPE-C",
164 .npeid = NPE_C,
165 .macport = 0x20,
166 .imageid = IXP425_NPE_C_IMAGEID,
167 .regbase = IXP425_MAC_B_HWBASE,
168 .regsize = IXP425_MAC_B_SIZE,
169 .miibase = IXP425_MAC_A_HWBASE,
170 .miisize = IXP425_MAC_A_SIZE,
171 .rx_qid = 12,
172 .rx_freeqid = 28,
173 .tx_qid = 25,
174 .tx_doneqid = 31
175 },
176 };
177 static struct npe_softc *npes[NPE_MAX]; /* NB: indexed by npeid */
178
179 static __inline uint32_t
180 RD4(struct npe_softc *sc, bus_size_t off)
181 {
182 return bus_space_read_4(sc->sc_iot, sc->sc_ioh, off);
183 }
184
185 static __inline void
186 WR4(struct npe_softc *sc, bus_size_t off, uint32_t val)
187 {
188 bus_space_write_4(sc->sc_iot, sc->sc_ioh, off, val);
189 }
190
191 static int npe_activate(struct npe_softc *);
192 #if 0
193 static void npe_deactivate(struct npe_softc *);
194 #endif
195 static void npe_setmac(struct npe_softc *, const u_char *);
196 static void npe_getmac(struct npe_softc *);
197 static void npe_txdone(int, void *);
198 static int npe_rxbuf_init(struct npe_softc *, struct npebuf *,
199 struct mbuf *);
200 static void npe_rxdone(int, void *);
201 static void npeinit_macreg(struct npe_softc *);
202 static int npeinit(struct ifnet *);
203 static void npeinit_resetcb(void *);
204 static void npeinit_locked(void *);
205 static void npestart(struct ifnet *);
206 static void npestop(struct ifnet *, int);
207 static void npewatchdog(struct ifnet *);
208 static int npeioctl(struct ifnet *, u_long, void *);
209
210 static int npe_setrxqosentry(struct npe_softc *, int, int, int);
211 static int npe_updatestats(struct npe_softc *);
212 #if 0
213 static int npe_getstats(struct npe_softc *);
214 static uint32_t npe_getimageid(struct npe_softc *);
215 static int npe_setloopback(struct npe_softc *, int);
216 #endif
217
218 static int npe_miibus_readreg(device_t, int, int, uint16_t *);
219 static int npe_miibus_writereg(device_t, int, int, uint16_t);
220 static void npe_miibus_statchg(struct ifnet *);
221
222 static int npe_debug;
223 #define DPRINTF(sc, fmt, ...) do { \
224 if (npe_debug) printf(fmt, __VA_ARGS__); \
225 } while (0)
226 #define DPRINTFn(n, sc, fmt, ...) do { \
227 if (npe_debug >= n) printf(fmt, __VA_ARGS__); \
228 } while (0)
229
230 #define NPE_TXBUF 128
231 #define NPE_RXBUF 64
232
233 #define MAC2UINT64(addr)	(((uint64_t)(addr)[0] << 40) \
234 	    + ((uint64_t)(addr)[1] << 32) \
235 	    + ((uint64_t)(addr)[2] << 24) \
236 	    + ((uint64_t)(addr)[3] << 16) \
237 	    + ((uint64_t)(addr)[4] << 8) \
238 	    + (uint64_t)(addr)[5])
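
/*
 * MAC2UINT64 packs a 48-bit Ethernet address into the low 48 bits of
 * a uint64_t so npe_rxdone() can range-compare a destination against
 * a multicast range (enm_addrlo .. enm_addrhi).  For example,
 * 01:00:5e:00:00:01 packs to 0x01005e000001.
 */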
239
240 /* NB: all tx done processing goes through one queue */
241 static int tx_doneqid = -1;
242
243 void (*npe_getmac_md)(int, uint8_t *);
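
/*
 * Board-specific code may point npe_getmac_md at a routine that knows
 * where the board keeps its MAC addresses (e.g. an EEPROM); otherwise
 * npe_getmac() falls back to reading the MAC's unicast address
 * registers.  A minimal sketch of such a hook -- myboard_getmac and
 * myboard_macs are hypothetical names, not part of this driver:
 */
#if 0
static void
myboard_getmac(int unit, uint8_t *eaddr)
{

	/* Copy the address board setup code recorded for this unit */
	memcpy(eaddr, myboard_macs[unit], ETHER_ADDR_LEN);
}

	/* ... and in board initialization, before npe attaches: */
	npe_getmac_md = myboard_getmac;
#endif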
244
245 static int npe_match(device_t, cfdata_t, void *);
246 static void npe_attach(device_t, device_t, void *);
247
248 CFATTACH_DECL_NEW(npe, sizeof(struct npe_softc),
249 npe_match, npe_attach, NULL, NULL);
250
251 static int
252 npe_match(device_t parent, cfdata_t cf, void *arg)
253 {
254 struct ixpnpe_attach_args *na = arg;
255
256 return (na->na_unit == NPE_B || na->na_unit == NPE_C);
257 }
258
259 static void
260 npe_attach(device_t parent, device_t self, void *arg)
261 {
262 struct npe_softc *sc = device_private(self);
263 struct ixpnpe_softc *isc = device_private(parent);
264 struct ixpnpe_attach_args *na = arg;
265 struct ifnet *ifp;
266 struct mii_data * const mii = &sc->sc_mii;
267
268 aprint_naive("\n");
269 aprint_normal(": Ethernet co-processor\n");
270
271 sc->sc_dev = self;
272 sc->sc_iot = na->na_iot;
273 sc->sc_dt = na->na_dt;
274 sc->sc_npe = na->na_npe;
275 sc->sc_unit = (na->na_unit == NPE_B) ? 0 : 1;
276 sc->sc_phy = na->na_phy;
277
278 memset(&sc->sc_ethercom, 0, sizeof(sc->sc_ethercom));
279 memset(mii, 0, sizeof(*mii));
280
281 callout_init(&sc->sc_tick_ch, 0);
282
283 if (npe_activate(sc)) {
284 aprint_error_dev(sc->sc_dev,
285 "Failed to activate NPE (missing microcode?)\n");
286 return;
287 }
288
289 npe_getmac(sc);
290 npeinit_macreg(sc);
291
292 aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
293 ether_sprintf(sc->sc_enaddr));
294
295 ifp = &sc->sc_ethercom.ec_if;
296 mii->mii_ifp = ifp;
297 mii->mii_readreg = npe_miibus_readreg;
298 mii->mii_writereg = npe_miibus_writereg;
299 mii->mii_statchg = npe_miibus_statchg;
300 sc->sc_ethercom.ec_mii = mii;
301
302 ifmedia_init(&mii->mii_media, IFM_IMASK, ether_mediachange,
303 ether_mediastatus);
304
305 mii_attach(sc->sc_dev, mii, 0xffffffff, MII_PHY_ANY,
306 MII_OFFSET_ANY, MIIF_DOPAUSE);
307 if (LIST_FIRST(&mii->mii_phys) == NULL) {
308 ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
309 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
310 } else
311 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
312
313 ifp->if_softc = sc;
314 strcpy(ifp->if_xname, device_xname(sc->sc_dev));
315 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
316 ifp->if_start = npestart;
317 ifp->if_ioctl = npeioctl;
318 ifp->if_watchdog = npewatchdog;
319 ifp->if_init = npeinit;
320 ifp->if_stop = npestop;
321 IFQ_SET_READY(&ifp->if_snd);
322
323 /* VLAN capable */
324 sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;
325
326 if_attach(ifp);
327 if_deferred_start_init(ifp, NULL);
328 ether_ifattach(ifp, sc->sc_enaddr);
329 rnd_attach_source(&sc->rnd_source, device_xname(sc->sc_dev),
330 RND_TYPE_NET, RND_FLAG_DEFAULT);
331
332 /* callback function to reset MAC */
333 isc->macresetcbfunc = npeinit_resetcb;
334 isc->macresetcbarg = sc;
335 }
336
337 /*
338 * Compute and install the multicast filter.
339 */
340 static void
341 npe_setmcast(struct npe_softc *sc)
342 {
343 struct ethercom *ec = &sc->sc_ethercom;
344 struct ifnet *ifp = &ec->ec_if;
345 uint8_t mask[ETHER_ADDR_LEN], addr[ETHER_ADDR_LEN];
346 uint32_t reg;
347 uint32_t msg[2];
348 int i;
349
350 	/* Always use the address filter.  XXX is this the right place? */
351 reg = RD4(sc, NPE_MAC_RX_CNTRL1);
352 WR4(sc, NPE_MAC_RX_CNTRL1, reg | NPE_RX_CNTRL1_ADDR_FLTR_EN);
353
354 if (ifp->if_flags & IFF_PROMISC) {
355 memset(mask, 0, ETHER_ADDR_LEN);
356 memset(addr, 0, ETHER_ADDR_LEN);
357 } else if (ifp->if_flags & IFF_ALLMULTI) {
358 static const uint8_t allmulti[ETHER_ADDR_LEN] =
359 { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 };
360 all_multi:
361 memcpy(mask, allmulti, ETHER_ADDR_LEN);
362 memcpy(addr, allmulti, ETHER_ADDR_LEN);
363 } else {
364 uint8_t clr[ETHER_ADDR_LEN], set[ETHER_ADDR_LEN];
365 struct ether_multistep step;
366 struct ether_multi *enm;
367
368 memset(clr, 0, ETHER_ADDR_LEN);
369 memset(set, 0xff, ETHER_ADDR_LEN);
370
371 ETHER_LOCK(ec);
372 ETHER_FIRST_MULTI(step, ec, enm);
373 while (enm != NULL) {
374 if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
375 ETHER_ADDR_LEN)) {
376 ifp->if_flags |= IFF_ALLMULTI;
377 ETHER_UNLOCK(ec);
378 goto all_multi;
379 }
380
381 for (i = 0; i < ETHER_ADDR_LEN; i++) {
382 clr[i] |= enm->enm_addrlo[i];
383 set[i] &= enm->enm_addrlo[i];
384 }
385
386 ETHER_NEXT_MULTI(step, enm);
387 }
388 ETHER_UNLOCK(ec);
389
390 for (i = 0; i < ETHER_ADDR_LEN; i++) {
391 mask[i] = set[i] | ~clr[i];
392 addr[i] = set[i];
393 }
394 }
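
	/*
	 * Worked example: with two multicast entries whose last bytes
	 * are 0x01 and 0x05, clr = 0x01|0x05 = 0x05 and
	 * set = 0x01&0x05 = 0x01, so mask = set|~clr = 0xfb and
	 * addr = 0x01.  The filter accepts addresses that agree with
	 * addr wherever all entries agree: 0x01 and 0x05 pass,
	 * 0x03 does not.
	 */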
395
396 /*
397 * Write the mask and address registers.
398 */
399 for (i = 0; i < ETHER_ADDR_LEN; i++) {
400 WR4(sc, NPE_MAC_ADDR_MASK(i), mask[i]);
401 WR4(sc, NPE_MAC_ADDR(i), addr[i]);
402 }
403
404 msg[0] = NPE_ADDRESSFILTERCONFIG << NPE_MAC_MSGID_SHL
405 | (npeconfig[sc->sc_unit].macport << NPE_MAC_PORTID_SHL);
406 msg[1] = ((ifp->if_flags & IFF_PROMISC) ? 1 : 0) << 24
407 | ((RD4(sc, NPE_MAC_UNI_ADDR_6) & 0xff) << 16)
408 | (addr[5] << 8) | mask[5];
409 ixpnpe_sendandrecvmsg(sc->sc_npe, msg, msg);
410 }
411
412 static int
413 npe_dma_setup(struct npe_softc *sc, struct npedma *dma,
414 const char *name, int nbuf, int maxseg)
415 {
416 bus_dma_segment_t seg;
417 int rseg, error, i;
418 void *hwbuf;
419 size_t size;
420
421 memset(dma, 0, sizeof(*dma));
422
423 dma->name = name;
424 dma->nbuf = nbuf;
425
426 size = nbuf * sizeof(struct npehwbuf);
427
428 /* XXX COHERENT for now */
429 error = bus_dmamem_alloc(sc->sc_dt, size, sizeof(uint32_t), 0, &seg,
430 1, &rseg, BUS_DMA_NOWAIT);
431 	if (error) {
432 		aprint_error_dev(sc->sc_dev,
433 		    "unable to %s for %s %s buffers, error %u\n",
434 		    "allocate memory", dma->name, "h/w", error);
435 		return error;
	}
436
437 error = bus_dmamem_map(sc->sc_dt, &seg, 1, size, &hwbuf,
438 BUS_DMA_NOWAIT | BUS_DMA_COHERENT | BUS_DMA_NOCACHE);
439 if (error) {
440 aprint_error_dev(sc->sc_dev,
441 "unable to %s for %s %s buffers, error %u\n",
442 "map memory", dma->name, "h/w", error);
443 free_dmamem:
444 bus_dmamem_free(sc->sc_dt, &seg, rseg);
445 return error;
446 }
447 dma->hwbuf = (void *)hwbuf;
448
449 error = bus_dmamap_create(sc->sc_dt, size, 1, size, 0,
450 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &dma->buf_map);
451 if (error) {
452 aprint_error_dev(sc->sc_dev,
453 "unable to %s for %s %s buffers, error %u\n",
454 "create map", dma->name, "h/w", error);
455 unmap_dmamem:
456 dma->hwbuf = NULL;
457 bus_dmamem_unmap(sc->sc_dt, hwbuf, size);
458 goto free_dmamem;
459 }
460
461 error = bus_dmamap_load(sc->sc_dt, dma->buf_map, hwbuf, size, NULL,
462 BUS_DMA_NOWAIT);
463 if (error) {
464 aprint_error_dev(sc->sc_dev,
465 "unable to %s for %s %s buffers, error %u\n",
466 "load map", dma->name, "h/w", error);
467 bus_dmamap_destroy(sc->sc_dt, dma->buf_map);
468 goto unmap_dmamem;
469 }
470
471 dma->buf = kmem_zalloc(nbuf * sizeof(struct npebuf), KM_SLEEP);
472 dma->buf_phys = dma->buf_map->dm_segs[0].ds_addr;
473 for (i = 0; i < dma->nbuf; i++) {
474 struct npebuf *npe = &dma->buf[i];
475 struct npehwbuf *hw = &dma->hwbuf[i];
476
477 /* Calculate offset to shared area */
478 npe->ix_neaddr = dma->buf_phys +
479 ((uintptr_t)hw - (uintptr_t)dma->hwbuf);
480 KASSERT((npe->ix_neaddr & 0x1f) == 0);
481 error = bus_dmamap_create(sc->sc_dt, MCLBYTES, maxseg,
482 MCLBYTES, 0, 0, &npe->ix_map);
483 if (error != 0) {
484 aprint_error_dev(sc->sc_dev,
485 "unable to %s for %s buffer %u, error %u\n",
486 "create dmamap", dma->name, i, error);
487 /* XXXSCW: Free up maps... */
488 return error;
489 }
490 npe->ix_hw = hw;
491 }
492 bus_dmamap_sync(sc->sc_dt, dma->buf_map, 0, dma->buf_map->dm_mapsize,
493 BUS_DMASYNC_PREWRITE);
494 return 0;
495 }
496
497 #if 0
498 static void
499 npe_dma_destroy(struct npe_softc *sc, struct npedma *dma)
500 {
501 int i;
502
503 /* XXXSCW: Clean this up */
504
505 if (dma->hwbuf != NULL) {
506 for (i = 0; i < dma->nbuf; i++) {
507 struct npebuf *npe = &dma->buf[i];
508 bus_dmamap_destroy(sc->sc_dt, npe->ix_map);
509 }
510 bus_dmamap_unload(sc->sc_dt, dma->buf_map);
511 bus_dmamem_free(sc->sc_dt, (void *)dma->hwbuf, dma->buf_map);
512 bus_dmamap_destroy(sc->sc_dt, dma->buf_map);
513 }
514 if (dma->buf != NULL)
515 kmem_free(dma->buf, dma->nbuf * sizeof(struct npebuf));
516 memset(dma, 0, sizeof(*dma));
517 }
518 #endif
519
520 static int
521 npe_activate(struct npe_softc *sc)
522 {
523 bus_dma_segment_t seg;
524 int unit = sc->sc_unit;
525 int error, i, rseg;
526 void *statbuf;
527
528 /* load NPE firmware and start it running */
529 error = ixpnpe_init(sc->sc_npe, "npe_fw", npeconfig[unit].imageid);
530 if (error != 0)
531 return error;
532
533 if (bus_space_map(sc->sc_iot, npeconfig[unit].regbase,
534 npeconfig[unit].regsize, 0, &sc->sc_ioh)) {
535 aprint_error_dev(sc->sc_dev, "Cannot map registers 0x%x:0x%x\n",
536 npeconfig[unit].regbase, npeconfig[unit].regsize);
537 return ENOMEM;
538 }
539
540 if (npeconfig[unit].miibase != npeconfig[unit].regbase) {
541 /*
542 		 * The PHY's are only accessible from one MAC (it appears),
543 		 * so for other MAC's set up an additional mapping for
544 		 * frobbing the PHY registers.
545 */
546 if (bus_space_map(sc->sc_iot, npeconfig[unit].miibase,
547 npeconfig[unit].miisize, 0, &sc->sc_miih)) {
548 aprint_error_dev(sc->sc_dev,
549 "Cannot map MII registers 0x%x:0x%x\n",
550 npeconfig[unit].miibase, npeconfig[unit].miisize);
551 return ENOMEM;
552 }
553 } else
554 sc->sc_miih = sc->sc_ioh;
555 error = npe_dma_setup(sc, &sc->txdma, "tx", NPE_TXBUF, NPE_MAXSEG);
556 if (error != 0)
557 return error;
558 error = npe_dma_setup(sc, &sc->rxdma, "rx", NPE_RXBUF, 1);
559 if (error != 0)
560 return error;
561
562 /* setup statistics block */
563 error = bus_dmamem_alloc(sc->sc_dt, sizeof(struct npestats),
564 sizeof(uint32_t), 0, &seg, 1, &rseg, BUS_DMA_NOWAIT);
565 if (error) {
566 aprint_error_dev(sc->sc_dev,
567 "unable to %s for %s, error %u\n",
568 "allocate memory", "stats block", error);
569 return error;
570 }
571
572 error = bus_dmamem_map(sc->sc_dt, &seg, 1, sizeof(struct npestats),
573 &statbuf, BUS_DMA_NOWAIT);
574 if (error) {
575 aprint_error_dev(sc->sc_dev,
576 "unable to %s for %s, error %u\n",
577 "map memory", "stats block", error);
578 return error;
579 }
580 sc->sc_stats = (void *)statbuf;
581
582 error = bus_dmamap_create(sc->sc_dt, sizeof(struct npestats), 1,
583 sizeof(struct npestats), 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
584 &sc->sc_stats_map);
585 if (error) {
586 aprint_error_dev(sc->sc_dev,
587 "unable to %s for %s, error %u\n",
588 "create map", "stats block", error);
589 return error;
590 }
591
592 error = bus_dmamap_load(sc->sc_dt, sc->sc_stats_map, sc->sc_stats,
593 sizeof(struct npestats), NULL, BUS_DMA_NOWAIT);
594 if (error) {
595 aprint_error_dev(sc->sc_dev,
596 "unable to %s for %s, error %u\n",
597 "load map", "stats block", error);
598 return error;
599 }
600 sc->sc_stats_phys = sc->sc_stats_map->dm_segs[0].ds_addr;
601
602 /* XXX disable half-bridge LEARNING+FILTERING feature */
603
604 /*
605 * Setup h/w rx/tx queues. There are four q's:
606 * rx inbound q of rx'd frames
607 * rx_free pool of ixpbuf's for receiving frames
608 * tx outbound q of frames to send
609 * tx_done q of tx frames that have been processed
610 *
611 * The NPE handles the actual tx/rx process and the q manager
612 * handles the queues. The driver just writes entries to the
613 	 * q manager mailboxes and gets callbacks when there are rx'd
614 	 * frames to process or tx'd frames to reap.  These callbacks
615 	 * are controlled by the q configurations; e.g. we get a
616 	 * callback when tx_done has 2 or more frames to process and
617 	 * when the rx q has at least one frame.  These settings can
618 	 * be changed at the time the q is configured.
619 */
620 sc->rx_qid = npeconfig[unit].rx_qid;
621 ixpqmgr_qconfig(sc->rx_qid, NPE_RXBUF, 0, 1,
622 IX_QMGR_Q_SOURCE_ID_NOT_E, npe_rxdone, sc);
623 sc->rx_freeqid = npeconfig[unit].rx_freeqid;
624 ixpqmgr_qconfig(sc->rx_freeqid, NPE_RXBUF, 0, NPE_RXBUF/2, 0, NULL, sc);
625 /* tell the NPE to direct all traffic to rx_qid */
626 #if 0
627 for (i = 0; i < 8; i++)
628 #else
629 printf("%s: remember to fix rx q setup\n", device_xname(sc->sc_dev));
630 for (i = 0; i < 4; i++)
631 #endif
632 npe_setrxqosentry(sc, i, 0, sc->rx_qid);
633
634 sc->tx_qid = npeconfig[unit].tx_qid;
635 sc->tx_doneqid = npeconfig[unit].tx_doneqid;
636 ixpqmgr_qconfig(sc->tx_qid, NPE_TXBUF, 0, NPE_TXBUF, 0, NULL, sc);
637 if (tx_doneqid == -1) {
638 ixpqmgr_qconfig(sc->tx_doneqid, NPE_TXBUF, 0, 2,
639 IX_QMGR_Q_SOURCE_ID_NOT_E, npe_txdone, sc);
640 tx_doneqid = sc->tx_doneqid;
641 }
642
643 KASSERT(npes[npeconfig[unit].npeid] == NULL);
644 npes[npeconfig[unit].npeid] = sc;
645
646 return 0;
647 }
648
649 #if 0
650 static void
651 npe_deactivate(struct npe_softc *sc)
652 {
653 int unit = sc->sc_unit;
654
655 npes[npeconfig[unit].npeid] = NULL;
656
657 /* XXX disable q's */
658 if (sc->sc_npe != NULL)
659 ixpnpe_stop(sc->sc_npe);
660 if (sc->sc_stats != NULL) {
661 bus_dmamap_unload(sc->sc_stats_tag, sc->sc_stats_map);
662 bus_dmamem_free(sc->sc_stats_tag, sc->sc_stats,
663 sc->sc_stats_map);
664 bus_dmamap_destroy(sc->sc_stats_tag, sc->sc_stats_map);
665 }
666 if (sc->sc_stats_tag != NULL)
667 bus_dma_tag_destroy(sc->sc_stats_tag);
668 npe_dma_destroy(sc, &sc->txdma);
669 npe_dma_destroy(sc, &sc->rxdma);
670 bus_generic_detach(sc->sc_dev);
671 XXX ifmedia_fini somewhere
672 if (sc->sc_mii)
673 device_delete_child(sc->sc_dev, sc->sc_mii);
674 #if 0
675 /* XXX sc_ioh and sc_miih */
676 if (sc->mem_res)
677 bus_release_resource(dev, SYS_RES_IOPORT,
678 rman_get_rid(sc->mem_res), sc->mem_res);
679 sc->mem_res = 0;
680 #endif
681 }
682 #endif
683
684 static void
685 npe_addstats(struct npe_softc *sc)
686 {
687 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
688 struct npestats *ns = sc->sc_stats;
689
690 net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
691 if_statadd_ref(nsr, if_oerrors,
692 be32toh(ns->dot3StatsInternalMacTransmitErrors)
693 + be32toh(ns->dot3StatsCarrierSenseErrors)
694 + be32toh(ns->TxVLANIdFilterDiscards)
695 );
696 if_statadd_ref(nsr, if_ierrors,
697 be32toh(ns->dot3StatsFCSErrors)
698 + be32toh(ns->dot3StatsInternalMacReceiveErrors)
699 + be32toh(ns->RxOverrunDiscards)
700 + be32toh(ns->RxUnderflowEntryDiscards)
701 );
702 if_statadd_ref(nsr, if_collisions,
703 be32toh(ns->dot3StatsSingleCollisionFrames)
704 + be32toh(ns->dot3StatsMultipleCollisionFrames)
705 );
706 IF_STAT_PUTREF(ifp);
707 }
708
709 static void
710 npe_tick(void *xsc)
711 {
712 #define ACK (NPE_RESETSTATS << NPE_MAC_MSGID_SHL)
713 struct npe_softc *sc = xsc;
714 uint32_t msg[2];
715
716 /*
717 * NB: to avoid sleeping with the softc lock held we
718 * split the NPE msg processing into two parts. The
719 * request for statistics is sent w/o waiting for a
720 * reply and then on the next tick we retrieve the
721 * results. This works because npe_tick is the only
722 	 * code that talks via the mailboxes (except at setup).
723 * This likely can be handled better.
724 */
725 if (ixpnpe_recvmsg(sc->sc_npe, msg) == 0 && msg[0] == ACK) {
726 bus_dmamap_sync(sc->sc_dt, sc->sc_stats_map, 0,
727 sizeof(struct npestats), BUS_DMASYNC_POSTREAD);
728 npe_addstats(sc);
729 }
730 npe_updatestats(sc);
731 mii_tick(&sc->sc_mii);
732
733 /* Schedule next poll */
734 callout_reset(&sc->sc_tick_ch, hz, npe_tick, sc);
735 #undef ACK
736 }
737
738 static void
739 npe_setmac(struct npe_softc *sc, const u_char *eaddr)
740 {
741
742 WR4(sc, NPE_MAC_UNI_ADDR_1, eaddr[0]);
743 WR4(sc, NPE_MAC_UNI_ADDR_2, eaddr[1]);
744 WR4(sc, NPE_MAC_UNI_ADDR_3, eaddr[2]);
745 WR4(sc, NPE_MAC_UNI_ADDR_4, eaddr[3]);
746 WR4(sc, NPE_MAC_UNI_ADDR_5, eaddr[4]);
747 WR4(sc, NPE_MAC_UNI_ADDR_6, eaddr[5]);
748 }
749
750 static void
751 npe_getmac(struct npe_softc *sc)
752 {
753 uint8_t *eaddr = sc->sc_enaddr;
754
755 if (npe_getmac_md != NULL) {
756 (*npe_getmac_md)(device_unit(sc->sc_dev), eaddr);
757 } else {
758 /*
759 		 * On some systems the unicast address appears to be
760 		 * loaded from EEPROM on reset.
761 */
762 eaddr[0] = RD4(sc, NPE_MAC_UNI_ADDR_1) & 0xff;
763 eaddr[1] = RD4(sc, NPE_MAC_UNI_ADDR_2) & 0xff;
764 eaddr[2] = RD4(sc, NPE_MAC_UNI_ADDR_3) & 0xff;
765 eaddr[3] = RD4(sc, NPE_MAC_UNI_ADDR_4) & 0xff;
766 eaddr[4] = RD4(sc, NPE_MAC_UNI_ADDR_5) & 0xff;
767 eaddr[5] = RD4(sc, NPE_MAC_UNI_ADDR_6) & 0xff;
768 }
769 }
770
771 struct txdone {
772 struct npebuf *head;
773 struct npebuf **tail;
774 int count;
775 };
776
777 static __inline void
778 npe_txdone_finish(struct npe_softc *sc, const struct txdone *td)
779 {
780 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
781
782 *td->tail = sc->tx_free;
783 sc->tx_free = td->head;
784 	/*
785 	 * We're no longer busy, so reset the watchdog timer and kick
786 	 * the start routine to xmit more packets.
787 	 */
788 if_statadd(ifp, if_opackets, td->count);
789 ifp->if_timer = 0;
790 if_schedule_deferred_start(ifp);
791 }
792
793 /*
794 * Q manager callback on tx done queue. Reap mbufs
795 * and return tx buffers to the free list. Finally
796 * restart output. Note the microcode has only one
797 * txdone q wired into it so we must use the NPE ID
798 * returned with each npehwbuf to decide where to
799 * send buffers.
800 */
801 static void
802 npe_txdone(int qid, void *arg)
803 {
804 #define P2V(a, dma) \
805 &(dma)->buf[((a) - (dma)->buf_phys) / sizeof(struct npehwbuf)]
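/*
 * P2V maps the NPE (physical) buffer address from a queue entry back
 * to its software npebuf: the npehwbuf array is physically
 * contiguous, so the offset from buf_phys divided by
 * sizeof(struct npehwbuf) is the array index.
 */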
806 struct npe_softc *sc;
807 struct npebuf *npe;
808 struct txdone *td, q[NPE_MAX];
809 uint32_t entry;
810
811 /* XXX no NPE-A support */
812 q[NPE_B].tail = &q[NPE_B].head; q[NPE_B].count = 0;
813 q[NPE_C].tail = &q[NPE_C].head; q[NPE_C].count = 0;
814 /* XXX max # at a time? */
815 while (ixpqmgr_qread(qid, &entry) == 0) {
816 sc = npes[NPE_QM_Q_NPE(entry)];
817 DPRINTF(sc, "%s: entry 0x%x NPE %u port %u\n",
818 __func__, entry, NPE_QM_Q_NPE(entry), NPE_QM_Q_PORT(entry));
819 rnd_add_uint32(&sc->rnd_source, entry);
820
821 npe = P2V(NPE_QM_Q_ADDR(entry), &sc->txdma);
822 m_freem(npe->ix_m);
823 npe->ix_m = NULL;
824
825 td = &q[NPE_QM_Q_NPE(entry)];
826 *td->tail = npe;
827 td->tail = &npe->ix_next;
828 td->count++;
829 }
830
831 if (q[NPE_B].count)
832 npe_txdone_finish(npes[NPE_B], &q[NPE_B]);
833 if (q[NPE_C].count)
834 npe_txdone_finish(npes[NPE_C], &q[NPE_C]);
835 #undef P2V
836 }
837
838 static __inline struct mbuf *
839 npe_getcl(void)
840 {
841 struct mbuf *m;
842
843 MGETHDR(m, M_DONTWAIT, MT_DATA);
844 if (m != NULL) {
845 MCLGET(m, M_DONTWAIT);
846 if ((m->m_flags & M_EXT) == 0) {
847 m_freem(m);
848 m = NULL;
849 }
850 }
851 return m;
852 }
853
854 static int
855 npe_rxbuf_init(struct npe_softc *sc, struct npebuf *npe, struct mbuf *m)
856 {
857 struct npehwbuf *hw;
858 int error;
859
860 if (m == NULL) {
861 m = npe_getcl();
862 if (m == NULL)
863 return ENOBUFS;
864 }
865 KASSERT(m->m_ext.ext_size >= (NPE_FRAME_SIZE_DEFAULT + ETHER_ALIGN));
866 m->m_pkthdr.len = m->m_len = NPE_FRAME_SIZE_DEFAULT;
867 /* backload payload and align ip hdr */
868 m->m_data = m->m_ext.ext_buf + (m->m_ext.ext_size
869 - (NPE_FRAME_SIZE_DEFAULT + ETHER_ALIGN));
870 error = bus_dmamap_load_mbuf(sc->sc_dt, npe->ix_map, m,
871 BUS_DMA_READ | BUS_DMA_NOWAIT);
872 if (error != 0) {
873 m_freem(m);
874 return error;
875 }
876 hw = npe->ix_hw;
877 hw->ix_ne[0].data = htobe32(npe->ix_map->dm_segs[0].ds_addr);
878 /* NB: NPE requires length be a multiple of 64 */
879 	/* NB: the buffer length lives in the upper 16 bits of the word */
880 hw->ix_ne[0].len = htobe32(npe->ix_map->dm_segs[0].ds_len << 16);
881 hw->ix_ne[0].next = 0;
882 npe->ix_m = m;
883 /* Flush the memory in the mbuf */
884 bus_dmamap_sync(sc->sc_dt, npe->ix_map, 0, npe->ix_map->dm_mapsize,
885 BUS_DMASYNC_PREREAD);
886 return 0;
887 }
888
889 /*
890 * RX q processing for a specific NPE. Claim entries
891 * from the hardware queue and pass the frames up the
892 * stack. Pass the rx buffers to the free list.
893 */
894 static void
895 npe_rxdone(int qid, void *arg)
896 {
897 #define P2V(a, dma) \
898 &(dma)->buf[((a) - (dma)->buf_phys) / sizeof(struct npehwbuf)]
899 struct npe_softc *sc = arg;
900 struct npedma *dma = &sc->rxdma;
901 uint32_t entry;
902
903 while (ixpqmgr_qread(qid, &entry) == 0) {
904 struct npebuf *npe = P2V(NPE_QM_Q_ADDR(entry), dma);
905 struct mbuf *m;
906
907 DPRINTF(sc, "%s: entry 0x%x neaddr 0x%x ne_len 0x%x\n",
908 __func__, entry, npe->ix_neaddr, npe->ix_hw->ix_ne[0].len);
909 rnd_add_uint32(&sc->rnd_source, entry);
910 /*
911 * Allocate a new mbuf to replenish the rx buffer.
912 * If doing so fails we drop the rx'd frame so we
913 * can reuse the previous mbuf. When we're able to
914 * allocate a new mbuf dispatch the mbuf w/ rx'd
915 * data up the stack and replace it with the newly
916 * allocated one.
917 */
918 m = npe_getcl();
919 if (m != NULL) {
920 struct mbuf *mrx = npe->ix_m;
921 struct npehwbuf *hw = npe->ix_hw;
922 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
923
924 /* Flush mbuf memory for rx'd data */
925 bus_dmamap_sync(sc->sc_dt, npe->ix_map, 0,
926 npe->ix_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
927
928 /* XXX flush hw buffer; works now 'cuz coherent */
929 /* set m_len etc. per rx frame size */
930 mrx->m_len = be32toh(hw->ix_ne[0].len) & 0xffff;
931 mrx->m_pkthdr.len = mrx->m_len;
932 m_set_rcvif(mrx, ifp);
933 /* Don't add M_HASFCS. See below */
934
935 #if 1
936 if (mrx->m_pkthdr.len < sizeof(struct ether_header)) {
937 log(LOG_INFO, "%s: too short frame (len=%d)\n",
938 device_xname(sc->sc_dev),
939 mrx->m_pkthdr.len);
940 /* Back out "newly allocated" mbuf. */
941 m_freem(m);
942 if_statinc(ifp, if_ierrors);
943 goto fail;
944 }
945 if ((ifp->if_flags & IFF_PROMISC) == 0) {
946 struct ether_header *eh;
947
948 /*
949 * Workaround for "Non-Intel XScale Technology
950 			 * Errata" No. 29: a packet addressed to
951 			 * AA:BB:CC:DD:EE:xF matches the filter (both
952 			 * unicast and multicast).
953 */
954 eh = mtod(mrx, struct ether_header *);
955 if (ETHER_IS_MULTICAST(eh->ether_dhost) == 0) {
956 /* Unicast */
957
958 if (sc->sc_enaddr[5] != eh->ether_dhost[5]) {
959 /* Discard it */
960 #if 0
961 printf("discard it\n");
962 #endif
963 /*
964 * Back out "newly allocated"
965 * mbuf.
966 */
967 m_freem(m);
968 goto fail;
969 }
970 } else if (memcmp(eh->ether_dhost,
971 			    etherbroadcastaddr, ETHER_ADDR_LEN) == 0) {
972 				/* Always accept broadcast packets */
973 } else {
974 struct ethercom *ec = &sc->sc_ethercom;
975 struct ether_multi *enm;
976 struct ether_multistep step;
977 int match = 0;
978
979 /* Multicast */
980
981 ETHER_LOCK(ec);
982 ETHER_FIRST_MULTI(step, ec, enm);
983 while (enm != NULL) {
984 uint64_t lowint, highint, dest;
985
986 lowint = MAC2UINT64(enm->enm_addrlo);
987 highint = MAC2UINT64(enm->enm_addrhi);
988 dest = MAC2UINT64(eh->ether_dhost);
989 #if 0
990 printf("%llx\n", lowint);
991 printf("%llx\n", dest);
992 printf("%llx\n", highint);
993 #endif
994 if ((lowint <= dest) && (dest <= highint)) {
995 match = 1;
996 break;
997 }
998 ETHER_NEXT_MULTI(step, enm);
999 }
1000 ETHER_UNLOCK(ec);
1001
1002 if (match == 0) {
1003 /* Discard it */
1004 #if 0
1005 printf("discard it(M)\n");
1006 #endif
1007 /*
1008 * Back out "newly allocated"
1009 * mbuf.
1010 */
1011 m_freem(m);
1012 goto fail;
1013 }
1014 }
1015 }
1016 if (mrx->m_pkthdr.len > NPE_FRAME_SIZE_DEFAULT) {
1017 log(LOG_INFO, "%s: oversized frame (len=%d)\n",
1018 device_xname(sc->sc_dev), mrx->m_pkthdr.len);
1019 /* Back out "newly allocated" mbuf. */
1020 m_freem(m);
1021 if_statinc(ifp, if_ierrors);
1022 goto fail;
1023 }
1024 #endif
1025
1026 /*
1027 * Trim FCS!
1028 			 * This driver sets NPE_RX_CNTRL1_CRC_EN, so rx'd
1029 			 * frames always include the FCS; trim it here
			 * rather than setting M_HASFCS.
1030 */
1031 m_adj(mrx, -ETHER_CRC_LEN);
1032
1033 			/*
1034 			 * NB: no bpf tap needed here; if_percpuq_enqueue()
1035 			 * hands the frame to if_input(), which taps any
			 * bpf listeners.
			 */
1036
1037 if_percpuq_enqueue(ifp->if_percpuq, mrx);
1038 } else {
1039 fail:
1040 /* discard frame and re-use mbuf */
1041 m = npe->ix_m;
1042 }
1043 if (npe_rxbuf_init(sc, npe, m) == 0) {
1044 /* return npe buf to rx free list */
1045 ixpqmgr_qwrite(sc->rx_freeqid, npe->ix_neaddr);
1046 } else {
1047 /* XXX should not happen */
1048 }
1049 }
1050 #undef P2V
1051 }
1052
1053 static void
1054 npe_startxmit(struct npe_softc *sc)
1055 {
1056 struct npedma *dma = &sc->txdma;
1057 int i;
1058
1059 sc->tx_free = NULL;
1060 for (i = 0; i < dma->nbuf; i++) {
1061 struct npebuf *npe = &dma->buf[i];
1062 if (npe->ix_m != NULL) {
1063 /* NB: should not happen */
1064 printf("%s: %s: free mbuf at entry %u\n",
1065 device_xname(sc->sc_dev), __func__, i);
1066 m_freem(npe->ix_m);
1067 }
1068 npe->ix_m = NULL;
1069 npe->ix_next = sc->tx_free;
1070 sc->tx_free = npe;
1071 }
1072 }
1073
1074 static void
1075 npe_startrecv(struct npe_softc *sc)
1076 {
1077 struct npedma *dma = &sc->rxdma;
1078 struct npebuf *npe;
1079 int i;
1080
1081 for (i = 0; i < dma->nbuf; i++) {
1082 npe = &dma->buf[i];
1083 npe_rxbuf_init(sc, npe, npe->ix_m);
1084 /* Set npe buf on rx free list */
1085 ixpqmgr_qwrite(sc->rx_freeqid, npe->ix_neaddr);
1086 }
1087 }
1088
1089 static void
1090 npeinit_macreg(struct npe_softc *sc)
1091 {
1092
1093 /*
1094 * Reset MAC core.
1095 */
1096 WR4(sc, NPE_MAC_CORE_CNTRL, NPE_CORE_RESET);
1097 DELAY(NPE_MAC_RESET_DELAY);
1098 /* Configure MAC to generate MDC clock */
1099 WR4(sc, NPE_MAC_CORE_CNTRL, NPE_CORE_MDC_EN);
1100
1101 /* Disable transmitter and receiver in the MAC */
1102 WR4(sc, NPE_MAC_RX_CNTRL1,
1103 RD4(sc, NPE_MAC_RX_CNTRL1) &~ NPE_RX_CNTRL1_RX_EN);
1104 WR4(sc, NPE_MAC_TX_CNTRL1,
1105 RD4(sc, NPE_MAC_TX_CNTRL1) &~ NPE_TX_CNTRL1_TX_EN);
1106
1107 /*
1108 * Set the MAC core registers.
1109 */
1110 	WR4(sc, NPE_MAC_INT_CLK_THRESH, 0x1);	/* clock ratio: for ixp4xx */
1111 WR4(sc, NPE_MAC_TX_CNTRL2, 0xf); /* max retries */
1112 WR4(sc, NPE_MAC_RANDOM_SEED, 0x8); /* LFSR back-off seed */
1113 /* Thresholds determined by NPE firmware FS */
1114 WR4(sc, NPE_MAC_THRESH_P_EMPTY, 0x12);
1115 WR4(sc, NPE_MAC_THRESH_P_FULL, 0x30);
1116 WR4(sc, NPE_MAC_BUF_SIZE_TX, NPE_MAC_BUF_SIZE_TX_DEFAULT);
1117 /* tx fifo threshold (bytes) */
1118 WR4(sc, NPE_MAC_TX_DEFER, 0x15); /* for single deferral */
1119 	WR4(sc, NPE_MAC_RX_DEFER, 0x16);	/* deferral on inter-frame gap */
1120 WR4(sc, NPE_MAC_TX_TWO_DEFER_1, 0x8); /* for 2-part deferral */
1121 WR4(sc, NPE_MAC_TX_TWO_DEFER_2, 0x7); /* for 2-part deferral */
1122 WR4(sc, NPE_MAC_SLOT_TIME, NPE_MAC_SLOT_TIME_MII_DEFAULT);
1123 /* assumes MII mode */
1124 WR4(sc, NPE_MAC_TX_CNTRL1,
1125 NPE_TX_CNTRL1_RETRY /* retry failed xmits */
1126 | NPE_TX_CNTRL1_FCS_EN /* append FCS */
1127 	    | NPE_TX_CNTRL1_2DEFER		/* 2-part deferral */
1128 | NPE_TX_CNTRL1_PAD_EN); /* pad runt frames */
1129 /* XXX pad strip? */
1130 WR4(sc, NPE_MAC_RX_CNTRL1,
1131 NPE_RX_CNTRL1_CRC_EN /* include CRC/FCS */
1132 | NPE_RX_CNTRL1_PAUSE_EN); /* ena pause frame handling */
1133 WR4(sc, NPE_MAC_RX_CNTRL2, 0);
1134 }
1135
1136 static void
1137 npeinit_resetcb(void *xsc)
1138 {
1139 struct npe_softc *sc = xsc;
1140 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1141 uint32_t msg[2];
1142
1143 if_statinc(ifp, if_oerrors);
1144 npeinit_locked(sc);
1145
1146 msg[0] = NPE_NOTIFYMACRECOVERYDONE << NPE_MAC_MSGID_SHL
1147 | (npeconfig[sc->sc_unit].macport << NPE_MAC_PORTID_SHL);
1148 msg[1] = 0;
1149 ixpnpe_sendandrecvmsg(sc->sc_npe, msg, msg);
1150 }
1151
1152 /*
1153 * Reset and initialize the chip
1154 */
1155 static void
1156 npeinit_locked(void *xsc)
1157 {
1158 struct npe_softc *sc = xsc;
1159 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1160
1161 /* Cancel any pending I/O. */
1162 npestop(ifp, 0);
1163
1164 /* Reset the chip to a known state. */
1165 npeinit_macreg(sc);
1166 npe_setmac(sc, CLLADDR(ifp->if_sadl));
1167 ether_mediachange(ifp);
1168 npe_setmcast(sc);
1169
1170 npe_startxmit(sc);
1171 npe_startrecv(sc);
1172
1173 ifp->if_flags |= IFF_RUNNING;
1174 ifp->if_timer = 0; /* just in case */
1175
1176 /* Enable transmitter and receiver in the MAC */
1177 WR4(sc, NPE_MAC_RX_CNTRL1,
1178 RD4(sc, NPE_MAC_RX_CNTRL1) | NPE_RX_CNTRL1_RX_EN);
1179 WR4(sc, NPE_MAC_TX_CNTRL1,
1180 RD4(sc, NPE_MAC_TX_CNTRL1) | NPE_TX_CNTRL1_TX_EN);
1181
1182 callout_reset(&sc->sc_tick_ch, hz, npe_tick, sc);
1183 }
1184
1185 static int
1186 npeinit(struct ifnet *ifp)
1187 {
1188 struct npe_softc *sc = ifp->if_softc;
1189 int s;
1190
1191 s = splnet();
1192 npeinit_locked(sc);
1193 splx(s);
1194
1195 return 0;
1196 }
1197
1198 /*
1199  * Defragment an mbuf chain by copying it into a single mbuf,
1200  * backed by a cluster when the packet doesn't fit in MHLEN.
1201  * If this is not possible NULL is returned and the original
1202  * mbuf chain is left in its present (potentially modified)
1203  * state.
1204  */
1205 static __inline struct mbuf *
1206 npe_defrag(struct mbuf *m0)
1207 {
1208 struct mbuf *m;
1209
1210 MGETHDR(m, M_DONTWAIT, MT_DATA);
1211 if (m == NULL)
1212 return NULL;
1213 m_copy_pkthdr(m, m0);
1214
1215 if ((m->m_len = m0->m_pkthdr.len) > MHLEN) {
1216 MCLGET(m, M_DONTWAIT);
1217 if ((m->m_flags & M_EXT) == 0) {
1218 m_freem(m);
1219 return NULL;
1220 }
1221 }
1222
1223 m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, void *));
1224 m_freem(m0);
1225
1226 return m;
1227 }
1228
1229 /*
1230 * Dequeue packets and place on the h/w transmit queue.
1231 */
1232 static void
1233 npestart(struct ifnet *ifp)
1234 {
1235 struct npe_softc *sc = ifp->if_softc;
1236 struct npebuf *npe;
1237 struct npehwbuf *hw;
1238 struct mbuf *m, *n;
1239 bus_dma_segment_t *segs;
1240 int nseg, len, error, i;
1241 uint32_t next;
1242
1243 if ((ifp->if_flags & IFF_RUNNING) == 0)
1244 return;
1245
1246 while (sc->tx_free != NULL) {
1247 IFQ_DEQUEUE(&ifp->if_snd, m);
1248 if (m == NULL)
1249 break;
1250 npe = sc->tx_free;
1251 error = bus_dmamap_load_mbuf(sc->sc_dt, npe->ix_map, m,
1252 BUS_DMA_WRITE | BUS_DMA_NOWAIT);
1253 if (error == EFBIG) {
1254 n = npe_defrag(m);
1255 if (n == NULL) {
1256 printf("%s: %s: too many fragments\n",
1257 device_xname(sc->sc_dev), __func__);
1258 m_freem(m);
1259 return; /* XXX? */
1260 }
1261 m = n;
1262 error = bus_dmamap_load_mbuf(sc->sc_dt, npe->ix_map,
1263 m, BUS_DMA_WRITE | BUS_DMA_NOWAIT);
1264 }
1265 if (error != 0) {
1266 printf("%s: %s: error %u\n",
1267 device_xname(sc->sc_dev), __func__, error);
1268 m_freem(m);
1269 return; /* XXX? */
1270 }
1271 sc->tx_free = npe->ix_next;
1272
1273 /*
1274 * Tap off here if there is a bpf listener.
1275 */
1276 bpf_mtap(ifp, m, BPF_D_OUT);
1277
1278 bus_dmamap_sync(sc->sc_dt, npe->ix_map, 0,
1279 npe->ix_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
1280
1281 npe->ix_m = m;
1282 hw = npe->ix_hw;
1283 len = m->m_pkthdr.len;
1284 nseg = npe->ix_map->dm_nsegs;
1285 segs = npe->ix_map->dm_segs;
1286 next = npe->ix_neaddr + sizeof(hw->ix_ne[0]);
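		/*
		 * Build the NPE descriptor chain: each entry carries a
		 * segment's physical address, the segment length in the
		 * upper 16 bits of ix_ne[].len (the total frame length
		 * rides in the lower 16 bits of the first entry only),
		 * and a link to the next entry.
		 */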
1287 for (i = 0; i < nseg; i++) {
1288 hw->ix_ne[i].data = htobe32(segs[i].ds_addr);
1289 hw->ix_ne[i].len = htobe32((segs[i].ds_len<<16) | len);
1290 hw->ix_ne[i].next = htobe32(next);
1291
1292 len = 0; /* zero for segments > 1 */
1293 next += sizeof(hw->ix_ne[0]);
1294 }
1295 hw->ix_ne[i-1].next = 0; /* zero last in chain */
1296 /* XXX flush descriptor instead of using uncached memory */
1297
1298 DPRINTF(sc, "%s: qwrite(%u, 0x%x) ne_data %x ne_len 0x%x\n",
1299 __func__, sc->tx_qid, npe->ix_neaddr,
1300 hw->ix_ne[0].data, hw->ix_ne[0].len);
1301 /* stick it on the tx q */
1302 /* XXX add vlan priority */
1303 ixpqmgr_qwrite(sc->tx_qid, npe->ix_neaddr);
1304
1305 ifp->if_timer = 5;
1306 }
1307 }
1308
1309 static void
1310 npe_stopxmit(struct npe_softc *sc)
1311 {
1312 struct npedma *dma = &sc->txdma;
1313 int i;
1314
1315 /* XXX qmgr */
1316 for (i = 0; i < dma->nbuf; i++) {
1317 struct npebuf *npe = &dma->buf[i];
1318
1319 if (npe->ix_m != NULL) {
1320 bus_dmamap_unload(sc->sc_dt, npe->ix_map);
1321 m_freem(npe->ix_m);
1322 npe->ix_m = NULL;
1323 }
1324 }
1325 }
1326
1327 static void
1328 npe_stoprecv(struct npe_softc *sc)
1329 {
1330 struct npedma *dma = &sc->rxdma;
1331 int i;
1332
1333 /* XXX qmgr */
1334 for (i = 0; i < dma->nbuf; i++) {
1335 struct npebuf *npe = &dma->buf[i];
1336
1337 if (npe->ix_m != NULL) {
1338 bus_dmamap_unload(sc->sc_dt, npe->ix_map);
1339 m_freem(npe->ix_m);
1340 npe->ix_m = NULL;
1341 }
1342 }
1343 }
1344
1345 /*
1346 * Turn off interrupts, and stop the nic.
1347 */
1348 void
1349 npestop(struct ifnet *ifp, int disable)
1350 {
1351 struct npe_softc *sc = ifp->if_softc;
1352
1353 /* Disable transmitter and receiver in the MAC */
1354 WR4(sc, NPE_MAC_RX_CNTRL1,
1355 RD4(sc, NPE_MAC_RX_CNTRL1) &~ NPE_RX_CNTRL1_RX_EN);
1356 WR4(sc, NPE_MAC_TX_CNTRL1,
1357 RD4(sc, NPE_MAC_TX_CNTRL1) &~ NPE_TX_CNTRL1_TX_EN);
1358
1359 callout_stop(&sc->sc_tick_ch);
1360
1361 npe_stopxmit(sc);
1362 npe_stoprecv(sc);
1363 /* XXX go into loopback & drain q's? */
1364 /* XXX but beware of disabling tx above */
1365
1366 /*
1367 * The MAC core rx/tx disable may leave the MAC hardware in an
1368 * unpredictable state. A hw reset is executed before resetting
1369 * all the MAC parameters to a known value.
1370 */
1371 WR4(sc, NPE_MAC_CORE_CNTRL, NPE_CORE_RESET);
1372 DELAY(NPE_MAC_RESET_DELAY);
1373 WR4(sc, NPE_MAC_INT_CLK_THRESH, NPE_MAC_INT_CLK_THRESH_DEFAULT);
1374 WR4(sc, NPE_MAC_CORE_CNTRL, NPE_CORE_MDC_EN);
1375
1376 ifp->if_timer = 0;
1377 ifp->if_flags &= ~IFF_RUNNING;
1378 }
1379
1380 void
1381 npewatchdog(struct ifnet *ifp)
1382 {
1383 struct npe_softc *sc = ifp->if_softc;
1384 int s;
1385
1386 aprint_error_dev(sc->sc_dev, "device timeout\n");
1387 s = splnet();
1388 if_statinc(ifp, if_oerrors);
1389 npeinit_locked(sc);
1390 splx(s);
1391 }
1392
1393 static int
1394 npeioctl(struct ifnet *ifp, u_long cmd, void *data)
1395 {
1396 struct npe_softc *sc = ifp->if_softc;
1397 struct ifreq *ifr = (struct ifreq *) data;
1398 int s, error = 0;
1399
1400 s = splnet();
1401
1402 switch (cmd) {
1403 case SIOCSIFMEDIA:
1404 #if 0 /* not yet */
1405 /* Flow control requires full-duplex mode. */
1406 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
1407 (ifr->ifr_media & IFM_FDX) == 0)
1408 ifr->ifr_media &= ~IFM_ETH_FMASK;
1409 if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
1410 if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
1411 /* We can do both TXPAUSE and RXPAUSE. */
1412 ifr->ifr_media |=
1413 IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
1414 }
1415 sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
1416 }
1417 #endif
1418 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
1419 break;
1420 case SIOCSIFFLAGS:
1421 if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) == IFF_RUNNING) {
1422 /*
1423 * If interface is marked down and it is running,
1424 * then stop and disable it.
1425 */
1426 if_stop(ifp, 1);
1427 		} else if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) == IFF_UP) {
1428 /*
1429 * If interface is marked up and it is stopped, then
1430 * start it.
1431 */
1432 error = if_init(ifp);
1433 } else if ((ifp->if_flags & IFF_UP) != 0) {
1434 u_short diff;
1435
1436 /* Up (AND RUNNING). */
1437
1438 diff = (ifp->if_flags ^ sc->sc_if_flags)
1439 & (IFF_PROMISC | IFF_ALLMULTI);
1440 if ((diff & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
1441 /*
1442 				 * If the only change from the previous
1443 				 * flags is IFF_PROMISC or IFF_ALLMULTI,
1444 				 * just reprogram the multicast filter
1445 				 * (a full reset would needlessly drop the link).
1446 */
1447 npe_setmcast(sc);
1448 } else {
1449 /*
1450 * Reset the interface to pick up changes in
1451 * any other flags that affect the hardware
1452 * state.
1453 */
1454 error = if_init(ifp);
1455 }
1456 }
1457 sc->sc_if_flags = ifp->if_flags;
1458 break;
1459 default:
1460 error = ether_ioctl(ifp, cmd, data);
1461 if (error == ENETRESET) {
1462 /*
1463 * Multicast list has changed; set the hardware filter
1464 * accordingly.
1465 */
1466 npe_setmcast(sc);
1467 error = 0;
1468 }
1469 }
1470
1471 npestart(ifp);
1472
1473 splx(s);
1474 return error;
1475 }
1476
1477 /*
1478 * Setup a traffic class -> rx queue mapping.
1479 */
1480 static int
1481 npe_setrxqosentry(struct npe_softc *sc, int classix, int trafclass, int qid)
1482 {
1483 int npeid = npeconfig[sc->sc_unit].npeid;
1484 uint32_t msg[2];
1485
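	/*
	 * Pack the request the way the microcode expects it: msg[0]
	 * carries the message id, NPE id and class index; msg[1] packs
	 * the traffic class, a constant bit (1 << 23, presumably an
	 * enable flag) and the queue id in two bit fields.
	 */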
1486 msg[0] = (NPE_SETRXQOSENTRY << NPE_MAC_MSGID_SHL) | (npeid << 20)
1487 | classix;
1488 msg[1] = (trafclass << 24) | (1 << 23) | (qid << 16) | (qid << 4);
1489 return ixpnpe_sendandrecvmsg(sc->sc_npe, msg, msg);
1490 }
1491
1492 /*
1493 * Update and reset the statistics in the NPE.
1494 */
1495 static int
1496 npe_updatestats(struct npe_softc *sc)
1497 {
1498 uint32_t msg[2];
1499
1500 msg[0] = NPE_RESETSTATS << NPE_MAC_MSGID_SHL;
1501 msg[1] = sc->sc_stats_phys; /* physical address of stat block */
1502 return ixpnpe_sendmsg(sc->sc_npe, msg); /* NB: no recv */
1503 }
1504
1505 #if 0
1506 /*
1507 * Get the current statistics block.
1508 */
1509 static int
1510 npe_getstats(struct npe_softc *sc)
1511 {
1512 uint32_t msg[2];
1513
1514 msg[0] = NPE_GETSTATS << NPE_MAC_MSGID_SHL;
1515 msg[1] = sc->sc_stats_phys; /* physical address of stat block */
1516 return ixpnpe_sendandrecvmsg(sc->sc_npe, msg, msg);
1517 }
1518
1519 /*
1520 * Query the image id of the loaded firmware.
1521 */
1522 static uint32_t
1523 npe_getimageid(struct npe_softc *sc)
1524 {
1525 uint32_t msg[2];
1526
1527 msg[0] = NPE_GETSTATUS << NPE_MAC_MSGID_SHL;
1528 msg[1] = 0;
1529 return ixpnpe_sendandrecvmsg(sc->sc_npe, msg, msg) == 0 ? msg[1] : 0;
1530 }
1531
1532 /*
1533 * Enable/disable loopback.
1534 */
1535 static int
1536 npe_setloopback(struct npe_softc *sc, int ena)
1537 {
1538 uint32_t msg[2];
1539
1540 msg[0] = (NPE_SETLOOPBACK << NPE_MAC_MSGID_SHL) | (ena != 0);
1541 msg[1] = 0;
1542 return ixpnpe_sendandrecvmsg(sc->sc_npe, msg, msg);
1543 }
1544 #endif
1545
1546 /*
1547 * MII bus support routines.
1548 *
1549 * NB: ixp425 has one PHY per NPE
1550 */
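
/*
 * The MAC exposes each 32-bit MDIO command/status word as four
 * byte-wide registers, one byte per 32-bit register slot; hence
 * npe_mii_mdio_read() and npe_mii_mdio_write() below move the value
 * 8 bits at a time through reg+0, reg+4, reg+8 and reg+12.
 */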
1551 static uint32_t
1552 npe_mii_mdio_read(struct npe_softc *sc, int reg)
1553 {
1554 #define MII_RD4(sc, reg) bus_space_read_4(sc->sc_iot, sc->sc_miih, reg)
1555 uint32_t v;
1556
1557 /* NB: registers are known to be sequential */
1558 v = (MII_RD4(sc, reg+0) & 0xff) << 0;
1559 v |= (MII_RD4(sc, reg+4) & 0xff) << 8;
1560 v |= (MII_RD4(sc, reg+8) & 0xff) << 16;
1561 v |= (MII_RD4(sc, reg+12) & 0xff) << 24;
1562 return v;
1563 #undef MII_RD4
1564 }
1565
1566 static void
1567 npe_mii_mdio_write(struct npe_softc *sc, int reg, uint32_t cmd)
1568 {
1569 #define MII_WR4(sc, reg, v) \
1570 bus_space_write_4(sc->sc_iot, sc->sc_miih, reg, v)
1571
1572 /* NB: registers are known to be sequential */
1573 MII_WR4(sc, reg+0, cmd & 0xff);
1574 MII_WR4(sc, reg+4, (cmd >> 8) & 0xff);
1575 MII_WR4(sc, reg+8, (cmd >> 16) & 0xff);
1576 MII_WR4(sc, reg+12, (cmd >> 24) & 0xff);
1577 #undef MII_WR4
1578 }
1579
1580 static int
1581 npe_mii_mdio_wait(struct npe_softc *sc)
1582 {
1583 #define MAXTRIES 100 /* XXX */
1584 uint32_t v;
1585 int i;
1586
1587 for (i = 0; i < MAXTRIES; i++) {
1588 v = npe_mii_mdio_read(sc, NPE_MAC_MDIO_CMD);
1589 if ((v & NPE_MII_GO) == 0)
1590 return 0;
1591 }
1592 return ETIMEDOUT;
1593 #undef MAXTRIES
1594 }
1595
1596 static int
1597 npe_miibus_readreg(device_t self, int phy, int reg, uint16_t *val)
1598 {
1599 struct npe_softc *sc = device_private(self);
1600 uint32_t v;
1601
1602 if (sc->sc_phy > IXPNPECF_PHY_DEFAULT && phy != sc->sc_phy)
1603 return -1;
1604 v = (phy << NPE_MII_ADDR_SHL) | (reg << NPE_MII_REG_SHL)
1605 | NPE_MII_GO;
1606 npe_mii_mdio_write(sc, NPE_MAC_MDIO_CMD, v);
1607 if (npe_mii_mdio_wait(sc) == 0)
1608 v = npe_mii_mdio_read(sc, NPE_MAC_MDIO_STS);
1609 else
1610 v = 0xffff | NPE_MII_READ_FAIL;
1611
1612 if ((v & NPE_MII_READ_FAIL) != 0)
1613 return -1;
1614
1615 *val = v & 0xffff;
1616 return 0;
1618 }
1619
1620 static int
1621 npe_miibus_writereg(device_t self, int phy, int reg, uint16_t val)
1622 {
1623 struct npe_softc *sc = device_private(self);
1624 uint32_t v;
1625
1626 if (sc->sc_phy > IXPNPECF_PHY_DEFAULT && phy != sc->sc_phy)
1627 return -1;
1628 v = (phy << NPE_MII_ADDR_SHL) | (reg << NPE_MII_REG_SHL)
1629 | val | NPE_MII_WRITE
1630 | NPE_MII_GO;
1631 npe_mii_mdio_write(sc, NPE_MAC_MDIO_CMD, v);
1632
1633 return npe_mii_mdio_wait(sc);
1634 }
1635
1636 static void
1637 npe_miibus_statchg(struct ifnet *ifp)
1638 {
1639 struct npe_softc *sc = ifp->if_softc;
1640 uint32_t tx1, rx1;
1641 uint32_t randoff;
1642
1643 /* Sync MAC duplex state */
1644 tx1 = RD4(sc, NPE_MAC_TX_CNTRL1);
1645 rx1 = RD4(sc, NPE_MAC_RX_CNTRL1);
1646 if (sc->sc_mii.mii_media_active & IFM_FDX) {
1647 WR4(sc, NPE_MAC_SLOT_TIME, NPE_MAC_SLOT_TIME_MII_DEFAULT);
1648 tx1 &= ~NPE_TX_CNTRL1_DUPLEX;
1649 rx1 |= NPE_RX_CNTRL1_PAUSE_EN;
1650 } else {
1651 struct timeval now;
1652 getmicrotime(&now);
1653 randoff = (RD4(sc, NPE_MAC_UNI_ADDR_6) ^ now.tv_usec)
1654 & 0x7f;
1655 WR4(sc, NPE_MAC_SLOT_TIME, NPE_MAC_SLOT_TIME_MII_DEFAULT
1656 + randoff);
1657 tx1 |= NPE_TX_CNTRL1_DUPLEX;
1658 rx1 &= ~NPE_RX_CNTRL1_PAUSE_EN;
1659 }
1660 WR4(sc, NPE_MAC_RX_CNTRL1, rx1);
1661 WR4(sc, NPE_MAC_TX_CNTRL1, tx1);
1662 }
1663