1 /* $NetBSD: sgec.c,v 1.53 2020/03/15 22:19:00 thorpej Exp $ */
2 /*
3 * Copyright (c) 1999 Ludd, University of Lule}, Sweden. All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
15 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
16 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
17 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
18 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
19 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
20 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
21 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
23 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24 */
25
26 /*
27 * Driver for the SGEC (Second Generation Ethernet Controller), sitting
28 * on for example the VAX 4000/300 (KA670).
29 *
30 * The SGEC looks like a mixture of the DEQNA and the TULIP. Fun toy.
31 *
32 * Even though the chip is capable to use virtual addresses (read the
33 * System Page Table directly) this driver doesn't do so, and there
34 * is no benefit in doing it either in NetBSD of today.
35 *
 * Things still left to do:
37 * Collect statistics.
38 * Use imperfect filtering when many multicast addresses.
39 */
40
41 #include <sys/cdefs.h>
42 __KERNEL_RCSID(0, "$NetBSD: sgec.c,v 1.53 2020/03/15 22:19:00 thorpej Exp $");
43
44 #include "opt_inet.h"
45
46 #include <sys/param.h>
47 #include <sys/mbuf.h>
48 #include <sys/socket.h>
49 #include <sys/device.h>
50 #include <sys/systm.h>
51 #include <sys/sockio.h>
52
53 #include <net/if.h>
54 #include <net/if_ether.h>
55 #include <net/if_dl.h>
56 #include <net/bpf.h>
57
58 #include <netinet/in.h>
59 #include <netinet/if_inarp.h>
60
61 #include <sys/bus.h>
62
63 #include <dev/ic/sgecreg.h>
64 #include <dev/ic/sgecvar.h>
65
66 static void zeinit(struct ze_softc *);
67 static void zestart(struct ifnet *);
68 static int zeioctl(struct ifnet *, u_long, void *);
69 static int ze_add_rxbuf(struct ze_softc *, int);
70 static void ze_setup(struct ze_softc *);
71 static void zetimeout(struct ifnet *);
72 static bool zereset(struct ze_softc *);
73
74 #define ZE_WCSR(csr, val) \
75 bus_space_write_4(sc->sc_iot, sc->sc_ioh, csr, val)
76 #define ZE_RCSR(csr) \
77 bus_space_read_4(sc->sc_iot, sc->sc_ioh, csr)
78
79 /*
80 * Interface exists: make available by filling in network interface
81 * record. System will initialize the interface when it is ready
82 * to accept packets.
83 */
void
sgec_attach(struct ze_softc *sc)
{
	struct ifnet *ifp = &sc->sc_if;
	struct ze_tdes *tp;
	struct ze_rdes *rp;
	bus_dma_segment_t seg;
	int i, rseg, error;

	/*
	 * Allocate DMA safe memory for descriptors and setup memory.
	 * One page-aligned ze_cdata holds both rings plus the setup frame.
	 */
	error = bus_dmamem_alloc(sc->sc_dmat, sizeof(struct ze_cdata),
	    PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT);
	if (error) {
		aprint_error(": unable to allocate control data, error = %d\n",
		    error);
		goto fail_0;
	}

	error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, sizeof(struct ze_cdata),
	    (void **)&sc->sc_zedata, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
	if (error) {
		aprint_error(
		    ": unable to map control data, error = %d\n", error);
		goto fail_1;
	}

	error = bus_dmamap_create(sc->sc_dmat, sizeof(struct ze_cdata), 1,
	    sizeof(struct ze_cdata), 0, BUS_DMA_NOWAIT, &sc->sc_cmap);
	if (error) {
		aprint_error(
		    ": unable to create control data DMA map, error = %d\n",
		    error);
		goto fail_2;
	}

	error = bus_dmamap_load(sc->sc_dmat, sc->sc_cmap, sc->sc_zedata,
	    sizeof(struct ze_cdata), NULL, BUS_DMA_NOWAIT);
	if (error) {
		aprint_error(
		    ": unable to load control data DMA map, error = %d\n",
		    error);
		goto fail_3;
	}

	/*
	 * Zero the newly allocated memory.
	 */
	memset(sc->sc_zedata, 0, sizeof(struct ze_cdata));

	/*
	 * Create the transmit descriptor DMA maps.
	 * Packets are DMAd straight out of their mbuf chains (no copy),
	 * so each map may need up to TXDESCS - 1 segments.
	 */
	for (i = 0; error == 0 && i < TXDESCS; i++) {
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, TXDESCS - 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &sc->sc_xmtmap[i]);
	}
	if (error) {
		aprint_error(": unable to create tx DMA map %d, error = %d\n",
		    i, error);
		goto fail_4;
	}

	/*
	 * Create receive buffer DMA maps (one cluster per descriptor).
	 */
	for (i = 0; error == 0 && i < RXDESCS; i++) {
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT, &sc->sc_rcvmap[i]);
	}
	if (error) {
		aprint_error(": unable to create rx DMA map %d, error = %d\n",
		    i, error);
		goto fail_5;
	}

	/*
	 * Pre-allocate the receive buffers.
	 */
	for (i = 0; error == 0 && i < RXDESCS; i++) {
		error = ze_add_rxbuf(sc, i);
	}

	if (error) {
		aprint_error(
		    ": unable to allocate or map rx buffer %d, error = %d\n",
		    i, error);
		goto fail_6;
	}

	/* For vmstat -i
	 * The rx/tx/nobuf/... counters are attached as children of
	 * sc_intrcnt so they group under the main interrupt counter.
	 */
	evcnt_attach_dynamic(&sc->sc_intrcnt, EVCNT_TYPE_INTR, NULL,
	    device_xname(sc->sc_dev), "intr");
	evcnt_attach_dynamic(&sc->sc_rxintrcnt, EVCNT_TYPE_INTR,
	    &sc->sc_intrcnt, device_xname(sc->sc_dev), "rx intr");
	evcnt_attach_dynamic(&sc->sc_txintrcnt, EVCNT_TYPE_INTR,
	    &sc->sc_intrcnt, device_xname(sc->sc_dev), "tx intr");
	evcnt_attach_dynamic(&sc->sc_txdraincnt, EVCNT_TYPE_INTR,
	    &sc->sc_intrcnt, device_xname(sc->sc_dev), "tx drain");
	evcnt_attach_dynamic(&sc->sc_nobufintrcnt, EVCNT_TYPE_INTR,
	    &sc->sc_intrcnt, device_xname(sc->sc_dev), "nobuf intr");
	evcnt_attach_dynamic(&sc->sc_nointrcnt, EVCNT_TYPE_INTR,
	    &sc->sc_intrcnt, device_xname(sc->sc_dev), "no intr");

	/*
	 * Create ring loops of the buffer chains.
	 * This is only done once.  The extra descriptor at index
	 * TXDESCS/RXDESCS is a chain-address (CA) descriptor pointing
	 * back at the start of its ring, so the hardware wraps around
	 * by itself.
	 */
	sc->sc_pzedata = (struct ze_cdata *)sc->sc_cmap->dm_segs[0].ds_addr;

	rp = sc->sc_zedata->zc_recv;
	rp[RXDESCS].ze_framelen = ZE_FRAMELEN_OW;
	rp[RXDESCS].ze_rdes1 = ZE_RDES1_CA;
	rp[RXDESCS].ze_bufaddr = (char *)sc->sc_pzedata->zc_recv;

	tp = sc->sc_zedata->zc_xmit;
	tp[TXDESCS].ze_tdr = ZE_TDR_OW;
	tp[TXDESCS].ze_tdes1 = ZE_TDES1_CA;
	tp[TXDESCS].ze_bufaddr = (char *)sc->sc_pzedata->zc_xmit;

	/* zereset() returns true on failure; attach stops here then. */
	if (zereset(sc))
		return;

	strcpy(ifp->if_xname, device_xname(sc->sc_dev));
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_start = zestart;
	ifp->if_ioctl = zeioctl;
	ifp->if_watchdog = zetimeout;
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, sc->sc_enaddr);

	aprint_normal("\n");
	aprint_normal_dev(sc->sc_dev, "hardware address %s\n",
	    ether_sprintf(sc->sc_enaddr));
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt. Do this in reverse order and fall through.
	 * NOTE(review): the NULL checks below rely on the softc being
	 * zero-initialized at allocation time -- TODO confirm.
	 */
 fail_6:
	for (i = 0; i < RXDESCS; i++) {
		if (sc->sc_rxmbuf[i] != NULL) {
			bus_dmamap_unload(sc->sc_dmat, sc->sc_rcvmap[i]);
			m_freem(sc->sc_rxmbuf[i]);
		}
	}
 fail_5:
	for (i = 0; i < TXDESCS; i++) {
		if (sc->sc_xmtmap[i] != NULL)
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_xmtmap[i]);
	}
 fail_4:
	for (i = 0; i < RXDESCS; i++) {
		if (sc->sc_rcvmap[i] != NULL)
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_rcvmap[i]);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cmap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cmap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_zedata,
	    sizeof(struct ze_cdata));
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 fail_0:
	return;
}
261
262 /*
263 * Initialization of interface.
264 */
265 void
zeinit(struct ze_softc * sc)266 zeinit(struct ze_softc *sc)
267 {
268 struct ifnet *ifp = &sc->sc_if;
269 struct ze_cdata *zc = sc->sc_zedata;
270 int i;
271
272 /*
273 * Reset the interface.
274 */
275 if (zereset(sc))
276 return;
277
278 sc->sc_nexttx = sc->sc_inq = sc->sc_lastack = sc->sc_txcnt = 0;
279 /*
280 * Release and init transmit descriptors.
281 */
282 for (i = 0; i < TXDESCS; i++) {
283 if (sc->sc_xmtmap[i]->dm_nsegs > 0)
284 bus_dmamap_unload(sc->sc_dmat, sc->sc_xmtmap[i]);
285 if (sc->sc_txmbuf[i]) {
286 m_freem(sc->sc_txmbuf[i]);
287 sc->sc_txmbuf[i] = 0;
288 }
289 zc->zc_xmit[i].ze_tdr = 0; /* Clear valid bit */
290 }
291
292
293 /*
294 * Init receive descriptors.
295 */
296 for (i = 0; i < RXDESCS; i++)
297 zc->zc_recv[i].ze_framelen = ZE_FRAMELEN_OW;
298 sc->sc_nextrx = 0;
299
300 ZE_WCSR(ZE_CSR6, ZE_NICSR6_IE | ZE_NICSR6_BL_8 | ZE_NICSR6_ST |
301 ZE_NICSR6_SR | ZE_NICSR6_DC);
302
303 ifp->if_flags |= IFF_RUNNING;
304
305 /*
306 * Send a setup frame.
307 * This will start the transmit machinery as well.
308 */
309 ze_setup(sc);
310
311 }
312
313 /*
314 * Start output on interface.
315 */
/*
 * Start output on interface.
 *
 * Packets are DMAd directly from their mbuf chains onto the transmit
 * descriptor ring; no intermediate copy is made.
 */
void
zestart(struct ifnet *ifp)
{
	struct ze_softc *sc = ifp->if_softc;
	struct ze_cdata *zc = sc->sc_zedata;
	paddr_t buffer;
	struct mbuf *m;
	int nexttx, starttx;
	int len, i, totlen, error;
	int old_inq = sc->sc_inq;
	uint16_t orword, tdr = 0;
	bus_dmamap_t map;

	while (sc->sc_inq < (TXDESCS - 1)) {

		/* A deferred setup frame takes priority over data. */
		if (sc->sc_setup) {
			ze_setup(sc);
			continue;
		}
		nexttx = sc->sc_nexttx;
		/* Peek only; dequeue happens once we know the frame fits. */
		IFQ_POLL(&sc->sc_if.if_snd, m);
		if (m == 0)
			goto out;
		/*
		 * Count number of mbufs in chain.
		 * Always do DMA directly from mbufs, therefore the transmit
		 * ring is really big.
		 */
		map = sc->sc_xmtmap[nexttx];
		error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
		    BUS_DMA_WRITE);
		if (error) {
			aprint_error_dev(sc->sc_dev,
			    "zestart: load_mbuf failed: %d", error);
			goto out;
		}

		if (map->dm_nsegs >= TXDESCS)
			panic("zestart"); /* XXX */

		/* Not enough free descriptors right now; try again later. */
		if ((map->dm_nsegs + sc->sc_inq) >= (TXDESCS - 1)) {
			bus_dmamap_unload(sc->sc_dmat, map);
			goto out;
		}

		/*
		 * m now points to a mbuf chain that can be loaded.
		 * Loop around and set it.
		 */
		totlen = 0;
		orword = ZE_TDES1_FS;	/* first descriptor of the frame */
		starttx = nexttx;
		for (i = 0; i < map->dm_nsegs; i++) {
			buffer = map->dm_segs[i].ds_addr;
			len = map->dm_segs[i].ds_len;

			KASSERT(len > 0);

			totlen += len;
			/* Word alignment calc */
			if (totlen == m->m_pkthdr.len) {
				/*
				 * Last segment.  Ask for a tx-complete
				 * interrupt only about every 3/4 ring's
				 * worth of segments to limit interrupt
				 * load.
				 */
				sc->sc_txcnt += map->dm_nsegs;
				if (sc->sc_txcnt >= TXDESCS * 3 / 4) {
					orword |= ZE_TDES1_IC;
					sc->sc_txcnt = 0;
				}
				orword |= ZE_TDES1_LS;
				sc->sc_txmbuf[nexttx] = m;
			}
			zc->zc_xmit[nexttx].ze_bufsize = len;
			zc->zc_xmit[nexttx].ze_bufaddr = (char *)buffer;
			zc->zc_xmit[nexttx].ze_tdes1 = orword;
			/*
			 * tdr is 0 for the first descriptor and ZE_TDR_OW
			 * for the following ones: ownership of the first
			 * descriptor is withheld until the whole frame is
			 * built (see below).
			 */
			zc->zc_xmit[nexttx].ze_tdr = tdr;

			if (++nexttx == TXDESCS)
				nexttx = 0;
			orword = 0;
			tdr = ZE_TDR_OW;
		}

		sc->sc_inq += map->dm_nsegs;

		/* The frame fits; now actually consume it from the queue. */
		IFQ_DEQUEUE(&ifp->if_snd, m);
#ifdef DIAGNOSTIC
		if (totlen != m->m_pkthdr.len)
			panic("zestart: len fault");
#endif
		/*
		 * Turn ownership of the packet over to the device.
		 */
		zc->zc_xmit[starttx].ze_tdr = ZE_TDR_OW;

		/*
		 * Kick off the transmit logic, if it is stopped.
		 * (The CSR1 write presumably acts as a transmit poll
		 * demand, TULIP-style -- see chip documentation.)
		 */
		if ((ZE_RCSR(ZE_CSR5) & ZE_NICSR5_TS) != ZE_NICSR5_TS_RUN)
			ZE_WCSR(ZE_CSR1, -1);
		sc->sc_nexttx = nexttx;

		/* Hand a copy of the outgoing packet to BPF listeners. */
		bpf_mtap(ifp, m, BPF_D_OUT);
	}

	/* Arm the watchdog only if we actually queued something. */
 out:	if (old_inq < sc->sc_inq)
		ifp->if_timer = 5; /* If transmit logic dies */
}
421
/*
 * Interrupt service routine.
 *
 * Acknowledges the chip, drains completed receive descriptors into the
 * network stack, reclaims finished transmit descriptors and restarts
 * output.  Returns 0 if the interrupt was not ours, 1 otherwise.
 */
int
sgec_intr(struct ze_softc *sc)
{
	struct ze_cdata *zc = sc->sc_zedata;
	struct ifnet *ifp = &sc->sc_if;
	struct mbuf *m;
	int csr, len;

	csr = ZE_RCSR(ZE_CSR5);
	if ((csr & ZE_NICSR5_IS) == 0) {	/* Wasn't we */
		sc->sc_nointrcnt.ev_count++;
		return 0;
	}
	/* Write the status bits back -- presumably write-one-to-clear. */
	ZE_WCSR(ZE_CSR5, csr);

	/* Receiver ran out of buffers; count it (ring refill is below). */
	if (csr & ZE_NICSR5_RU)
		sc->sc_nobufintrcnt.ev_count++;

	if (csr & ZE_NICSR5_RI) {
		sc->sc_rxintrcnt.ev_count++;
		/* Process every descriptor the chip has handed back. */
		while ((zc->zc_recv[sc->sc_nextrx].ze_framelen &
		    ZE_FRAMELEN_OW) == 0) {

			m = sc->sc_rxmbuf[sc->sc_nextrx];
			len = zc->zc_recv[sc->sc_nextrx].ze_framelen;
			/*
			 * NOTE(review): the return value is ignored; if
			 * ze_add_rxbuf() fails with ENOBUFS the descriptor
			 * still references the mbuf we pass upstream below.
			 * Recycling the old mbuf on failure would be safer.
			 */
			ze_add_rxbuf(sc, sc->sc_nextrx);
			if (++sc->sc_nextrx == RXDESCS)
				sc->sc_nextrx = 0;
			if (len < ETHER_MIN_LEN) {
				/* Runt frame: count and drop. */
				if_statinc(ifp, if_ierrors);
				m_freem(m);
			} else {
				m_set_rcvif(m, ifp);
				/* Strip the trailing CRC from the frame. */
				m->m_pkthdr.len = m->m_len =
				    len - ETHER_CRC_LEN;
				if_percpuq_enqueue(ifp->if_percpuq, m);
			}
		}
	}

	if (csr & ZE_NICSR5_TI)
		sc->sc_txintrcnt.ev_count++;
	if (sc->sc_lastack != sc->sc_nexttx) {
		int lastack;
		/* Walk from the oldest unacked descriptor toward nexttx. */
		for (lastack = sc->sc_lastack; lastack != sc->sc_nexttx; ) {
			bus_dmamap_t map;
			int nlastack;

			/* Still owned by the chip: stop reclaiming here. */
			if ((zc->zc_xmit[lastack].ze_tdr & ZE_TDR_OW) != 0)
				break;

			/* Setup frames occupy one descriptor, no mbuf. */
			if ((zc->zc_xmit[lastack].ze_tdes1 & ZE_TDES1_DT) ==
			    ZE_TDES1_DT_SETUP) {
				if (++lastack == TXDESCS)
					lastack = 0;
				sc->sc_inq--;
				continue;
			}

			KASSERT(zc->zc_xmit[lastack].ze_tdes1 & ZE_TDES1_FS);
			map = sc->sc_xmtmap[lastack];
			KASSERT(map->dm_nsegs > 0);
			/*
			 * A data frame spans dm_nsegs consecutive
			 * descriptors; only reclaim it once its last
			 * descriptor has been given back as well.
			 */
			nlastack = (lastack + map->dm_nsegs - 1) % TXDESCS;
			if (zc->zc_xmit[nlastack].ze_tdr & ZE_TDR_OW)
				break;
			lastack = nlastack;
			if (sc->sc_txcnt > map->dm_nsegs)
				sc->sc_txcnt -= map->dm_nsegs;
			else
				sc->sc_txcnt = 0;
			sc->sc_inq -= map->dm_nsegs;
			KASSERT(zc->zc_xmit[lastack].ze_tdes1 & ZE_TDES1_LS);
			if_statinc(ifp, if_opackets);
			bus_dmamap_unload(sc->sc_dmat, map);
			KASSERT(sc->sc_txmbuf[lastack]);
			m_freem(sc->sc_txmbuf[lastack]);
			sc->sc_txmbuf[lastack] = 0;
			if (++lastack == TXDESCS)
				lastack = 0;
		}
		if (lastack != sc->sc_lastack) {
			sc->sc_txdraincnt.ev_count++;
			sc->sc_lastack = lastack;
			if (sc->sc_inq == 0)
				ifp->if_timer = 0;	/* cancel watchdog */
			zestart(ifp); /* Put in more in queue */
		}
	}
	return 1;
}
512
513 /*
514 * Process an ioctl request.
515 */
516 int
zeioctl(struct ifnet * ifp,u_long cmd,void * data)517 zeioctl(struct ifnet *ifp, u_long cmd, void *data)
518 {
519 struct ze_softc *sc = ifp->if_softc;
520 struct ifaddr *ifa = data;
521 int s = splnet(), error = 0;
522
523 switch (cmd) {
524
525 case SIOCINITIFADDR:
526 ifp->if_flags |= IFF_UP;
527 switch (ifa->ifa_addr->sa_family) {
528 #ifdef INET
529 case AF_INET:
530 zeinit(sc);
531 arp_ifinit(ifp, ifa);
532 break;
533 #endif
534 }
535 break;
536
537 case SIOCSIFFLAGS:
538 if ((error = ifioctl_common(ifp, cmd, data)) != 0)
539 break;
540 /* XXX re-use ether_ioctl() */
541 switch (ifp->if_flags & (IFF_UP | IFF_RUNNING)) {
542 case IFF_RUNNING:
543 /*
544 * If interface is marked down and it is running,
545 * stop it. (by disabling receive mechanism).
546 */
547 ZE_WCSR(ZE_CSR6, ZE_RCSR(ZE_CSR6) &
548 ~(ZE_NICSR6_ST | ZE_NICSR6_SR));
549 ifp->if_flags &= ~IFF_RUNNING;
550 break;
551 case IFF_UP:
552 /*
553 * If interface it marked up and it is stopped, then
554 * start it.
555 */
556 zeinit(sc);
557 break;
558 case IFF_UP | IFF_RUNNING:
559 /*
560 * Send a new setup packet to match any new changes.
561 * (Like IFF_PROMISC etc)
562 */
563 ze_setup(sc);
564 break;
565 case 0:
566 break;
567 }
568 break;
569
570 case SIOCADDMULTI:
571 case SIOCDELMULTI:
572 /*
573 * Update our multicast list.
574 */
575 if ((error = ether_ioctl(ifp, cmd, data)) == ENETRESET) {
576 /*
577 * Multicast list has changed; set the hardware filter
578 * accordingly.
579 */
580 if (ifp->if_flags & IFF_RUNNING)
581 ze_setup(sc);
582 error = 0;
583 }
584 break;
585
586 default:
587 error = ether_ioctl(ifp, cmd, data);
588
589 }
590 splx(s);
591 return error;
592 }
593
594 /*
595 * Add a receive buffer to the indicated descriptor.
596 */
/*
 * Add a receive buffer to the indicated descriptor.
 *
 * Allocates a fresh mbuf cluster, maps it for DMA and hands the
 * descriptor back to the chip.  Returns 0 on success or ENOBUFS if no
 * mbuf/cluster could be allocated (the descriptor is left untouched in
 * that case).
 */
int
ze_add_rxbuf(struct ze_softc *sc, int i)
{
	struct mbuf *m;
	struct ze_rdes *rp;
	int error;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return ENOBUFS;

	MCLAIM(m, &sc->sc_ec.ec_rx_mowner);
	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return ENOBUFS;
	}

	/* Drop any previous mapping before reloading the map. */
	if (sc->sc_rxmbuf[i] != NULL)
		bus_dmamap_unload(sc->sc_dmat, sc->sc_rcvmap[i]);

	error = bus_dmamap_load(sc->sc_dmat, sc->sc_rcvmap[i],
	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL,
	    BUS_DMA_READ | BUS_DMA_NOWAIT);
	if (error)
		panic("%s: can't load rx DMA map %d, error = %d",
		    device_xname(sc->sc_dev), i, error);
	sc->sc_rxmbuf[i] = m;

	/* Make the buffer visible to the device before it DMAs into it. */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_rcvmap[i], 0,
	    sc->sc_rcvmap[i]->dm_mapsize, BUS_DMASYNC_PREREAD);

	/*
	 * We know that the mbuf cluster is page aligned. Also, be sure
	 * that the IP header will be longword aligned.
	 */
	m->m_data += 2;
	rp = &sc->sc_zedata->zc_recv[i];
	rp->ze_bufsize = (m->m_ext.ext_size - 2);
	rp->ze_bufaddr = (char *)sc->sc_rcvmap[i]->dm_segs[0].ds_addr + 2;
	/* Set the ownership bit last: this hands it to the chip. */
	rp->ze_framelen = ZE_FRAMELEN_OW;

	return 0;
}
641
642 /*
643 * Create a setup packet and put in queue for sending.
644 */
/*
 * Create a setup packet and put in queue for sending.
 *
 * Programs the address filter: own address plus broadcast (via the
 * all-ones fill) plus the multicast list when perfect filtering is
 * possible; otherwise falls back to ALLMULTI/PROMISC.  If the transmit
 * ring is full the setup frame is deferred via sc_setup and retried
 * from zestart().
 */
void
ze_setup(struct ze_softc *sc)
{
	struct ethercom *ec = &sc->sc_ec;
	struct ether_multi *enm;
	struct ether_multistep step;
	struct ze_cdata *zc = sc->sc_zedata;
	struct ifnet *ifp = &sc->sc_if;
	const uint8_t *enaddr = CLLADDR(ifp->if_sadl);
	int j, idx, reg;

	if (sc->sc_inq == (TXDESCS - 1)) {
		/* Ring full; remember to send the setup frame later. */
		sc->sc_setup = 1;
		return;
	}
	sc->sc_setup = 0;
	/*
	 * Init the setup packet with valid info.
	 */
	memset(zc->zc_setup, 0xff, sizeof(zc->zc_setup));	/* Broadcast */
	memcpy(zc->zc_setup, enaddr, ETHER_ADDR_LEN);

	/*
	 * Multicast handling. The SGEC can handle up to 16 direct
	 * ethernet addresses.
	 * Multicast entries appear to occupy 8-byte slots starting at
	 * offset 16 of the 128-byte setup frame -- TODO confirm against
	 * the SGEC setup-frame format.  Address ranges and a full frame
	 * force the ALLMULTI fallback.
	 */
	j = 16;
	ifp->if_flags &= ~IFF_ALLMULTI;
	ETHER_LOCK(ec);
	ETHER_FIRST_MULTI(step, ec, enm);
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, 6)) {
			ifp->if_flags |= IFF_ALLMULTI;
			break;
		}
		memcpy(&zc->zc_setup[j], enm->enm_addrlo, ETHER_ADDR_LEN);
		j += 8;
		ETHER_NEXT_MULTI(step, enm);
		if ((enm != NULL)&& (j == 128)) {
			ifp->if_flags |= IFF_ALLMULTI;
			break;
		}
	}
	ETHER_UNLOCK(ec);

	/*
	 * ALLMULTI implies PROMISC in this driver.
	 */
	if (ifp->if_flags & IFF_ALLMULTI)
		ifp->if_flags |= IFF_PROMISC;
	else if (ifp->if_pcount == 0)
		ifp->if_flags &= ~IFF_PROMISC;

	/*
	 * Fiddle with the receive logic: stop rx, select the address
	 * filtering mode, then restart rx.
	 */
	reg = ZE_RCSR(ZE_CSR6);
	DELAY(10);
	ZE_WCSR(ZE_CSR6, reg & ~ZE_NICSR6_SR);	/* Stop rx */
	reg &= ~ZE_NICSR6_AF;			/* clear filter-mode field */
	if (ifp->if_flags & IFF_PROMISC)
		reg |= ZE_NICSR6_AF_PROM;
	else if (ifp->if_flags & IFF_ALLMULTI)
		reg |= ZE_NICSR6_AF_ALLM;
	DELAY(10);
	ZE_WCSR(ZE_CSR6, reg);			/* restart rx with new mode */
	/*
	 * Only send a setup packet if needed.
	 */
	if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) == 0) {
		idx = sc->sc_nexttx;
		zc->zc_xmit[idx].ze_tdes1 = ZE_TDES1_DT_SETUP;
		zc->zc_xmit[idx].ze_bufsize = 128;
		zc->zc_xmit[idx].ze_bufaddr = sc->sc_pzedata->zc_setup;
		zc->zc_xmit[idx].ze_tdr = ZE_TDR_OW;	/* chip owns it now */

		/* Kick the transmitter if it is idle. */
		if ((ZE_RCSR(ZE_CSR5) & ZE_NICSR5_TS) != ZE_NICSR5_TS_RUN)
			ZE_WCSR(ZE_CSR1, -1);

		sc->sc_inq++;
		if (++sc->sc_nexttx == TXDESCS)
			sc->sc_nexttx = 0;
	}
}
729
730 /*
731 * Check for dead transmit logic.
732 */
733 void
zetimeout(struct ifnet * ifp)734 zetimeout(struct ifnet *ifp)
735 {
736 struct ze_softc *sc = ifp->if_softc;
737
738 if (sc->sc_inq == 0)
739 return;
740
741 aprint_error_dev(sc->sc_dev, "xmit logic died, resetting...\n");
742 /*
743 * Do a reset of interface, to get it going again.
744 * Will it work by just restart the transmit logic?
745 */
746 zeinit(sc);
747 }
748
749 /*
750 * Reset chip:
751 * Set/reset the reset flag.
752 * Write interrupt vector.
753 * Write ring buffer addresses.
754 * Write SBR.
755 */
bool
zereset(struct ze_softc *sc)
{
	int reg, i;

	/* Assert reset; the on-chip selftest needs some time to finish. */
	ZE_WCSR(ZE_CSR6, ZE_NICSR6_RE);
	DELAY(50000);
	/*
	 * NOTE(review): CSR6 is read here but tested with a CSR5 bit
	 * mask (ZE_NICSR5_SF, selftest-failed) -- confirm against the
	 * SGEC register specification that this is intentional.
	 */
	if (ZE_RCSR(ZE_CSR6) & ZE_NICSR5_SF) {
		aprint_error_dev(sc->sc_dev, "selftest failed\n");
		return true;	/* true == failure */
	}

	/*
	 * Get the vector that were set at match time, and remember it.
	 * WHICH VECTOR TO USE? Take one unused. XXX
	 * Funny way to set vector described in the programmers manual.
	 */
	reg = ZE_NICSR0_IPL14 | sc->sc_intvec | 0x1fff0003; /* SYNC/ASYNC??? */
	i = 10;
	do {
		/* Give up after 10 attempts to make the value stick. */
		if (i-- == 0) {
			aprint_error_dev(sc->sc_dev,
			    "failing SGEC CSR0 init\n");
			return true;
		}
		ZE_WCSR(ZE_CSR0, reg);
	} while (ZE_RCSR(ZE_CSR0) != reg);

	/* Tell the chip where the rx and tx descriptor rings live. */
	ZE_WCSR(ZE_CSR3, (vaddr_t)sc->sc_pzedata->zc_recv);
	ZE_WCSR(ZE_CSR4, (vaddr_t)sc->sc_pzedata->zc_xmit);
	return false;	/* success */
}
788