1 /* $NetBSD: if_et.c,v 1.12 2016/06/10 13:27:14 ozaki-r Exp $ */
2 /* $OpenBSD: if_et.c,v 1.11 2008/06/08 06:18:07 jsg Exp $ */
3 /*
4 * Copyright (c) 2007 The DragonFly Project. All rights reserved.
5 *
6 * This code is derived from software contributed to The DragonFly Project
7 * by Sepherosa Ziehau <sepherosa@gmail.com>
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 *
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in
17 * the documentation and/or other materials provided with the
18 * distribution.
19 * 3. Neither the name of The DragonFly Project nor the names of its
20 * contributors may be used to endorse or promote products derived
21 * from this software without specific, prior written permission.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
24 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
25 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
26 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
27 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
28 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
29 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
30 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
31 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
32 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
33 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * SUCH DAMAGE.
35 *
36 * $DragonFly: src/sys/dev/netif/et/if_et.c,v 1.1 2007/10/12 14:12:42 sephe Exp $
37 */
38
39 #include <sys/cdefs.h>
40 __KERNEL_RCSID(0, "$NetBSD: if_et.c,v 1.12 2016/06/10 13:27:14 ozaki-r Exp $");
41
42 #include "opt_inet.h"
43 #include "vlan.h"
44
45 #include <sys/param.h>
46 #include <sys/endian.h>
47 #include <sys/systm.h>
48 #include <sys/types.h>
49 #include <sys/sockio.h>
50 #include <sys/mbuf.h>
51 #include <sys/queue.h>
52 #include <sys/kernel.h>
53 #include <sys/device.h>
54 #include <sys/callout.h>
55 #include <sys/socket.h>
56
57 #include <sys/bus.h>
58
59 #include <net/if.h>
60 #include <net/if_dl.h>
61 #include <net/if_media.h>
62 #include <net/if_ether.h>
63 #include <net/if_arp.h>
64
65 #ifdef INET
66 #include <netinet/in.h>
67 #include <netinet/in_systm.h>
68 #include <netinet/in_var.h>
69 #include <netinet/ip.h>
70 #include <netinet/if_inarp.h>
71 #endif
72
73 #include <net/bpf.h>
74
75 #include <dev/mii/mii.h>
76 #include <dev/mii/miivar.h>
77
78 #include <dev/pci/pcireg.h>
79 #include <dev/pci/pcivar.h>
80 #include <dev/pci/pcidevs.h>
81
82 #include <dev/pci/if_etreg.h>
83
84 int et_match(device_t, cfdata_t, void *);
85 void et_attach(device_t, device_t, void *);
86 int et_detach(device_t, int flags);
87 int et_shutdown(device_t);
88
89 int et_miibus_readreg(device_t, int, int);
90 void et_miibus_writereg(device_t, int, int, int);
91 void et_miibus_statchg(struct ifnet *);
92
93 int et_init(struct ifnet *ifp);
94 int et_ioctl(struct ifnet *, u_long, void *);
95 void et_start(struct ifnet *);
96 void et_watchdog(struct ifnet *);
97
98 int et_intr(void *);
99 void et_enable_intrs(struct et_softc *, uint32_t);
100 void et_disable_intrs(struct et_softc *);
101 void et_rxeof(struct et_softc *);
102 void et_txeof(struct et_softc *);
103 void et_txtick(void *);
104
105 int et_dma_alloc(struct et_softc *);
106 void et_dma_free(struct et_softc *);
107 int et_dma_mem_create(struct et_softc *, bus_size_t,
108 void **, bus_addr_t *, bus_dmamap_t *, bus_dma_segment_t *);
109 void et_dma_mem_destroy(struct et_softc *, void *, bus_dmamap_t);
110 int et_dma_mbuf_create(struct et_softc *);
111 void et_dma_mbuf_destroy(struct et_softc *, int, const int[]);
112
113 int et_init_tx_ring(struct et_softc *);
114 int et_init_rx_ring(struct et_softc *);
115 void et_free_tx_ring(struct et_softc *);
116 void et_free_rx_ring(struct et_softc *);
117 int et_encap(struct et_softc *, struct mbuf **);
118 int et_newbuf(struct et_rxbuf_data *, int, int, int);
119 int et_newbuf_cluster(struct et_rxbuf_data *, int, int);
120 int et_newbuf_hdr(struct et_rxbuf_data *, int, int);
121
122 void et_stop(struct et_softc *);
123 int et_chip_init(struct et_softc *);
124 void et_chip_attach(struct et_softc *);
125 void et_init_mac(struct et_softc *);
126 void et_init_rxmac(struct et_softc *);
127 void et_init_txmac(struct et_softc *);
128 int et_init_rxdma(struct et_softc *);
129 int et_init_txdma(struct et_softc *);
130 int et_start_rxdma(struct et_softc *);
131 int et_start_txdma(struct et_softc *);
132 int et_stop_rxdma(struct et_softc *);
133 int et_stop_txdma(struct et_softc *);
134 int et_enable_txrx(struct et_softc *);
135 void et_reset(struct et_softc *);
136 int et_bus_config(struct et_softc *);
137 void et_get_eaddr(struct et_softc *, uint8_t[]);
138 void et_setmulti(struct et_softc *);
139 void et_tick(void *);
140
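/*
 * Interrupt moderation tunables.  The RX values are programmed into
 * ET_RX_INTR_NPKTS/ET_RX_INTR_DELAY by et_init_rxdma(): interrupt after
 * et_rx_intr_npkts packets or after et_rx_intr_delay (in 10us units).
 * et_encap() requests a TX completion interrupt roughly every
 * et_tx_intr_nsegs descriptors, and et_timer is the period of the
 * on-chip timer (re-armed in et_intr()) that backstops TX reclaim.
 */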
141 static int et_rx_intr_npkts = 32;
142 static int et_rx_intr_delay = 20; /* x10 usec */
143 static int et_tx_intr_nsegs = 128;
144 static uint32_t et_timer = 1000 * 1000 * 1000; /* nanosec */
145
146 struct et_bsize {
147 int bufsize;
148 et_newbuf_t newbuf;
149 };
150
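/*
 * Buffer strategy for the two RX rings: ring 0 is stocked with plain
 * header mbufs (MHLEN bytes), ring 1 with full clusters (MCLBYTES),
 * presumably so the chip can drop small frames into ring 0 and larger
 * ones into ring 1.  .bufsize is the size code that et_start_rxdma()
 * shifts into ET_RXDMA_CTRL; et_init() copies both fields into
 * sc_rx_data[].
 */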
151 static const struct et_bsize et_bufsize[ET_RX_NRING] = {
152 { .bufsize = 0, .newbuf = et_newbuf_hdr },
153 { .bufsize = 0, .newbuf = et_newbuf_cluster },
154 };
155
156 const struct et_product {
157 pci_vendor_id_t vendor;
158 pci_product_id_t product;
159 } et_devices[] = {
160 { PCI_VENDOR_LUCENT, PCI_PRODUCT_LUCENT_ET1310 },
161 { PCI_VENDOR_LUCENT, PCI_PRODUCT_LUCENT_ET1301 }
162 };
163
164 CFATTACH_DECL_NEW(et, sizeof(struct et_softc), et_match, et_attach, et_detach,
165 NULL);
166
167 int
168 et_match(device_t dev, cfdata_t match, void *aux)
169 {
170 struct pci_attach_args *pa = aux;
171 const struct et_product *ep;
172 int i;
173
174 for (i = 0; i < __arraycount(et_devices); i++) {
175 ep = &et_devices[i];
176 if (PCI_VENDOR(pa->pa_id) == ep->vendor &&
177 PCI_PRODUCT(pa->pa_id) == ep->product)
178 return 1;
179 }
180 return 0;
181 }
182
183 void
184 et_attach(device_t parent, device_t self, void *aux)
185 {
186 struct et_softc *sc = device_private(self);
187 struct pci_attach_args *pa = aux;
188 pci_chipset_tag_t pc = pa->pa_pc;
189 pci_intr_handle_t ih;
190 const char *intrstr;
191 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
192 pcireg_t memtype;
193 int error;
194 char intrbuf[PCI_INTRSTR_LEN];
195
196 pci_aprint_devinfo(pa, "Ethernet controller");
197
198 sc->sc_dev = self;
199
200 /*
201 * Initialize tunables
202 */
203 sc->sc_rx_intr_npkts = et_rx_intr_npkts;
204 sc->sc_rx_intr_delay = et_rx_intr_delay;
205 sc->sc_tx_intr_nsegs = et_tx_intr_nsegs;
206 sc->sc_timer = et_timer;
207
208 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, ET_PCIR_BAR);
209 if (pci_mapreg_map(pa, ET_PCIR_BAR, memtype, 0, &sc->sc_mem_bt,
210 &sc->sc_mem_bh, NULL, &sc->sc_mem_size)) {
211 aprint_error_dev(self, "could not map mem space\n");
212 return;
213 }
214
215 if (pci_intr_map(pa, &ih) != 0) {
216 aprint_error_dev(self, "could not map interrupt\n");
217 goto fail;
218 }
219
220 intrstr = pci_intr_string(pc, ih, intrbuf, sizeof(intrbuf));
221 sc->sc_irq_handle = pci_intr_establish(pc, ih, IPL_NET, et_intr, sc);
222 if (sc->sc_irq_handle == NULL) {
223 aprint_error_dev(self, "could not establish interrupt");
224 if (intrstr != NULL)
225 aprint_error(" at %s", intrstr);
226 aprint_error("\n");
227 goto fail;
228 }
229 aprint_normal_dev(self, "interrupting at %s\n", intrstr);
230
231 sc->sc_dmat = pa->pa_dmat;
232 sc->sc_pct = pa->pa_pc;
233 sc->sc_pcitag = pa->pa_tag;
234
235 error = et_bus_config(sc);
236 if (error)
237 goto fail;
238
239 et_get_eaddr(sc, sc->sc_enaddr);
240
241 aprint_normal_dev(self, "Ethernet address %s\n",
242 ether_sprintf(sc->sc_enaddr));
243
244 CSR_WRITE_4(sc, ET_PM,
245 ET_PM_SYSCLK_GATE | ET_PM_TXCLK_GATE | ET_PM_RXCLK_GATE);
246
247 et_reset(sc);
248
249 et_disable_intrs(sc);
250
251 error = et_dma_alloc(sc);
252 if (error)
253 goto fail;
254
255 ifp->if_softc = sc;
256 ifp->if_mtu = ETHERMTU;
257 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
258 ifp->if_init = et_init;
259 ifp->if_ioctl = et_ioctl;
260 ifp->if_start = et_start;
261 ifp->if_watchdog = et_watchdog;
262 IFQ_SET_MAXLEN(&ifp->if_snd, ET_TX_NDESC);
263 IFQ_SET_READY(&ifp->if_snd);
264 strlcpy(ifp->if_xname, device_xname(self), IFNAMSIZ);
265
266 et_chip_attach(sc);
267
268 sc->sc_miibus.mii_ifp = ifp;
269 sc->sc_miibus.mii_readreg = et_miibus_readreg;
270 sc->sc_miibus.mii_writereg = et_miibus_writereg;
271 sc->sc_miibus.mii_statchg = et_miibus_statchg;
272
273 sc->sc_ethercom.ec_mii = &sc->sc_miibus;
274 ifmedia_init(&sc->sc_miibus.mii_media, 0, ether_mediachange,
275 ether_mediastatus);
276 mii_attach(self, &sc->sc_miibus, 0xffffffff, MII_PHY_ANY,
277 MII_OFFSET_ANY, 0);
278 if (LIST_FIRST(&sc->sc_miibus.mii_phys) == NULL) {
279 aprint_error_dev(self, "no PHY found!\n");
280 ifmedia_add(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL,
281 0, NULL);
282 ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL);
283 } else
284 ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_AUTO);
285
286 if_attach(ifp);
287 ether_ifattach(ifp, sc->sc_enaddr);
288
289 callout_init(&sc->sc_tick, 0);
290 callout_setfunc(&sc->sc_tick, et_tick, sc);
291 callout_init(&sc->sc_txtick, 0);
292 callout_setfunc(&sc->sc_txtick, et_txtick, sc);
293
294 if (pmf_device_register(self, NULL, NULL))
295 pmf_class_network_register(self, ifp);
296 else
297 aprint_error_dev(self, "couldn't establish power handler\n");
298
299 return;
300
301 fail:
302 et_dma_free(sc);
303 if (sc->sc_irq_handle != NULL) {
304 pci_intr_disestablish(sc->sc_pct, sc->sc_irq_handle);
305 sc->sc_irq_handle = NULL;
306 }
307 if (sc->sc_mem_size) {
308 bus_space_unmap(sc->sc_mem_bt, sc->sc_mem_bh, sc->sc_mem_size);
309 sc->sc_mem_size = 0;
310 }
311 }
312
313 int
314 et_detach(device_t self, int flags)
315 {
316 struct et_softc *sc = device_private(self);
317 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
318 int s;
319
320 pmf_device_deregister(self);
321 s = splnet();
322 et_stop(sc);
323 splx(s);
324
325 mii_detach(&sc->sc_miibus, MII_PHY_ANY, MII_OFFSET_ANY);
326
327 /* Delete all remaining media. */
328 ifmedia_delete_instance(&sc->sc_miibus.mii_media, IFM_INST_ANY);
329
330 ether_ifdetach(ifp);
331 if_detach(ifp);
332 et_dma_free(sc);
333
334 if (sc->sc_irq_handle != NULL) {
335 pci_intr_disestablish(sc->sc_pct, sc->sc_irq_handle);
336 sc->sc_irq_handle = NULL;
337 }
338
339 if (sc->sc_mem_size) {
340 bus_space_unmap(sc->sc_mem_bt, sc->sc_mem_bh, sc->sc_mem_size);
341 sc->sc_mem_size = 0;
342 }
343
344 return 0;
345 }
346
347 int
348 et_shutdown(device_t self)
349 {
350 struct et_softc *sc = device_private(self);
351 int s;
352
353 s = splnet();
354 et_stop(sc);
355 splx(s);
356
357 return 0;
358 }
359
360 int
361 et_miibus_readreg(device_t dev, int phy, int reg)
362 {
363 struct et_softc *sc = device_private(dev);
364 uint32_t val;
365 int i, ret;
366
367 /* Stop any pending operations */
368 CSR_WRITE_4(sc, ET_MII_CMD, 0);
369
370 val = __SHIFTIN(phy, ET_MII_ADDR_PHY) |
371 __SHIFTIN(reg, ET_MII_ADDR_REG);
372 CSR_WRITE_4(sc, ET_MII_ADDR, val);
373
374 /* Start reading */
375 CSR_WRITE_4(sc, ET_MII_CMD, ET_MII_CMD_READ);
376
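/*
 * Poll ET_MII_IND until the chip clears both the BUSY and the INVALID
 * bit; 50 retries at 50us apiece bounds the wait to roughly 2.5ms
 * before the read is declared timed out.
 */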
377 #define NRETRY 50
378
379 for (i = 0; i < NRETRY; ++i) {
380 val = CSR_READ_4(sc, ET_MII_IND);
381 if ((val & (ET_MII_IND_BUSY | ET_MII_IND_INVALID)) == 0)
382 break;
383 DELAY(50);
384 }
385 if (i == NRETRY) {
386 aprint_error_dev(sc->sc_dev, "read phy %d, reg %d timed out\n",
387 phy, reg);
388 ret = 0;
389 goto back;
390 }
391
392 #undef NRETRY
393
394 val = CSR_READ_4(sc, ET_MII_STAT);
395 ret = __SHIFTOUT(val, ET_MII_STAT_VALUE);
396
397 back:
398 /* Make sure that the current operation is stopped */
399 CSR_WRITE_4(sc, ET_MII_CMD, 0);
400 return ret;
401 }
402
403 void
404 et_miibus_writereg(device_t dev, int phy, int reg, int val0)
405 {
406 struct et_softc *sc = device_private(dev);
407 uint32_t val;
408 int i;
409
410 /* Stop any pending operations */
411 CSR_WRITE_4(sc, ET_MII_CMD, 0);
412
413 val = __SHIFTIN(phy, ET_MII_ADDR_PHY) |
414 __SHIFTIN(reg, ET_MII_ADDR_REG);
415 CSR_WRITE_4(sc, ET_MII_ADDR, val);
416
417 /* Start writing */
418 CSR_WRITE_4(sc, ET_MII_CTRL, __SHIFTIN(val0, ET_MII_CTRL_VALUE));
419
420 #define NRETRY 100
421
422 for (i = 0; i < NRETRY; ++i) {
423 val = CSR_READ_4(sc, ET_MII_IND);
424 if ((val & ET_MII_IND_BUSY) == 0)
425 break;
426 DELAY(50);
427 }
428 if (i == NRETRY) {
429 aprint_error_dev(sc->sc_dev, "write phy %d, reg %d timed out\n",
430 phy, reg);
431 et_miibus_readreg(dev, phy, reg);
432 }
433
434 #undef NRETRY
435
436 /* Make sure that the current operation is stopped */
437 CSR_WRITE_4(sc, ET_MII_CMD, 0);
438 }
439
440 void
441 et_miibus_statchg(struct ifnet *ifp)
442 {
443 struct et_softc *sc = ifp->if_softc;
444 struct mii_data *mii = &sc->sc_miibus;
445 uint32_t cfg2, ctrl;
446
447 cfg2 = CSR_READ_4(sc, ET_MAC_CFG2);
448 cfg2 &= ~(ET_MAC_CFG2_MODE_MII | ET_MAC_CFG2_MODE_GMII |
449 ET_MAC_CFG2_FDX | ET_MAC_CFG2_BIGFRM);
450 cfg2 |= ET_MAC_CFG2_LENCHK | ET_MAC_CFG2_CRC | ET_MAC_CFG2_PADCRC |
451 __SHIFTIN(7, ET_MAC_CFG2_PREAMBLE_LEN);
452
453 ctrl = CSR_READ_4(sc, ET_MAC_CTRL);
454 ctrl &= ~(ET_MAC_CTRL_GHDX | ET_MAC_CTRL_MODE_MII);
455
456 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) {
457 cfg2 |= ET_MAC_CFG2_MODE_GMII;
458 } else {
459 cfg2 |= ET_MAC_CFG2_MODE_MII;
460 ctrl |= ET_MAC_CTRL_MODE_MII;
461 }
462
463 if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX)
464 cfg2 |= ET_MAC_CFG2_FDX;
465 else
466 ctrl |= ET_MAC_CTRL_GHDX;
467
468 CSR_WRITE_4(sc, ET_MAC_CTRL, ctrl);
469 CSR_WRITE_4(sc, ET_MAC_CFG2, cfg2);
470 }
471
472 void
473 et_stop(struct et_softc *sc)
474 {
475 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
476
477 callout_stop(&sc->sc_tick);
478 callout_stop(&sc->sc_txtick);
479
480 et_stop_rxdma(sc);
481 et_stop_txdma(sc);
482
483 et_disable_intrs(sc);
484
485 et_free_tx_ring(sc);
486 et_free_rx_ring(sc);
487
488 et_reset(sc);
489
490 sc->sc_tx = 0;
491 sc->sc_tx_intr = 0;
492
493 ifp->if_timer = 0;
494 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
495 }
496
497 int
498 et_bus_config(struct et_softc *sc)
499 {
500 uint32_t val; //, max_plsz;
501 // uint16_t ack_latency, replay_timer;
502
503 /*
504 * Test whether EEPROM is valid
505 * NOTE: Read twice to get the correct value
506 */
507 pci_conf_read(sc->sc_pct, sc->sc_pcitag, ET_PCIR_EEPROM_MISC);
508 val = pci_conf_read(sc->sc_pct, sc->sc_pcitag, ET_PCIR_EEPROM_MISC);
509
510 if (val & ET_PCIM_EEPROM_STATUS_ERROR) {
511 aprint_error_dev(sc->sc_dev, "EEPROM status error 0x%02x\n", val);
512 return ENXIO;
513 }
514
515 /* TODO: LED */
516 #if 0
517 /*
518 * Configure ACK latency and replay timer according to
519 * max payload size
520 */
521 val = pci_conf_read(sc->sc_pct, sc->sc_pcitag, ET_PCIR_DEVICE_CAPS);
522 max_plsz = val & ET_PCIM_DEVICE_CAPS_MAX_PLSZ;
523
524 switch (max_plsz) {
525 case ET_PCIV_DEVICE_CAPS_PLSZ_128:
526 ack_latency = ET_PCIV_ACK_LATENCY_128;
527 replay_timer = ET_PCIV_REPLAY_TIMER_128;
528 break;
529
530 case ET_PCIV_DEVICE_CAPS_PLSZ_256:
531 ack_latency = ET_PCIV_ACK_LATENCY_256;
532 replay_timer = ET_PCIV_REPLAY_TIMER_256;
533 break;
534
535 default:
536 ack_latency = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
537 ET_PCIR_ACK_LATENCY) >> 16;
538 replay_timer = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
539 ET_PCIR_REPLAY_TIMER) >> 16;
540 aprint_normal_dev(sc->sc_dev, "ack latency %u, replay timer %u\n",
541 ack_latency, replay_timer);
542 break;
543 }
544 if (ack_latency != 0) {
545 pci_conf_write(sc->sc_pct, sc->sc_pcitag,
546 ET_PCIR_ACK_LATENCY, ack_latency << 16);
547 pci_conf_write(sc->sc_pct, sc->sc_pcitag,
548 ET_PCIR_REPLAY_TIMER, replay_timer << 16);
549 }
550
551 /*
552 * Set L0s and L1 latency timer to 2us
553 */
554 val = ET_PCIV_L0S_LATENCY(2) | ET_PCIV_L1_LATENCY(2);
555 pci_conf_write(sc->sc_pct, sc->sc_pcitag, ET_PCIR_L0S_L1_LATENCY,
556 val << 24);
557
558 /*
559 * Set max read request size to 2048 bytes
560 */
561 val = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
562 ET_PCIR_DEVICE_CTRL) >> 16;
563 val &= ~ET_PCIM_DEVICE_CTRL_MAX_RRSZ;
564 val |= ET_PCIV_DEVICE_CTRL_RRSZ_2K;
565 pci_conf_write(sc->sc_pct, sc->sc_pcitag, ET_PCIR_DEVICE_CTRL,
566 val << 16);
567 #endif
568
569 return 0;
570 }
571
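/*
 * The station MAC address is recovered from PCI configuration space:
 * the first four bytes from ET_PCIR_MACADDR_LO and the remaining two
 * from ET_PCIR_MACADDR_HI, least significant byte first.
 */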
572 void
573 et_get_eaddr(struct et_softc *sc, uint8_t eaddr[])
574 {
575 uint32_t r;
576
577 r = pci_conf_read(sc->sc_pct, sc->sc_pcitag, ET_PCIR_MACADDR_LO);
578 eaddr[0] = r & 0xff;
579 eaddr[1] = (r >> 8) & 0xff;
580 eaddr[2] = (r >> 16) & 0xff;
581 eaddr[3] = (r >> 24) & 0xff;
582 r = pci_conf_read(sc->sc_pct, sc->sc_pcitag, ET_PCIR_MACADDR_HI);
583 eaddr[4] = r & 0xff;
584 eaddr[5] = (r >> 8) & 0xff;
585 }
586
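/*
 * Full controller reset: assert all of the MAC reset bits (including
 * SIM_RST and SOFT_RST), pulse the per-block soft resets in ET_SWRST
 * for the DMA engines, MACs, statistics block and memory controller,
 * then release the MAC by clearing ET_MAC_CFG1.
 */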
587 void
588 et_reset(struct et_softc *sc)
589 {
590 CSR_WRITE_4(sc, ET_MAC_CFG1,
591 ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
592 ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC |
593 ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST);
594
595 CSR_WRITE_4(sc, ET_SWRST,
596 ET_SWRST_TXDMA | ET_SWRST_RXDMA |
597 ET_SWRST_TXMAC | ET_SWRST_RXMAC |
598 ET_SWRST_MAC | ET_SWRST_MAC_STAT | ET_SWRST_MMC);
599
600 CSR_WRITE_4(sc, ET_MAC_CFG1,
601 ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
602 ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC);
603 CSR_WRITE_4(sc, ET_MAC_CFG1, 0);
604 }
605
606 void
607 et_disable_intrs(struct et_softc *sc)
608 {
609 CSR_WRITE_4(sc, ET_INTR_MASK, 0xffffffff);
610 }
611
612 void
613 et_enable_intrs(struct et_softc *sc, uint32_t intrs)
614 {
615 CSR_WRITE_4(sc, ET_INTR_MASK, ~intrs);
616 }
617
618 int
619 et_dma_alloc(struct et_softc *sc)
620 {
621 struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
622 struct et_txstatus_data *txsd = &sc->sc_tx_status;
623 struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
624 struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
625 int i, error;
626
627 /*
628 * Create TX ring DMA resources
629 */
630 error = et_dma_mem_create(sc, ET_TX_RING_SIZE,
631 (void **)&tx_ring->tr_desc, &tx_ring->tr_paddr, &tx_ring->tr_dmap,
632 &tx_ring->tr_seg);
633 if (error) {
634 aprint_error_dev(sc->sc_dev, "can't create TX ring DMA resources\n");
635 return error;
636 }
637
638 /*
639 * Create TX status DMA resources
640 */
641 error = et_dma_mem_create(sc, sizeof(uint32_t),
642 (void **)&txsd->txsd_status,
643 &txsd->txsd_paddr, &txsd->txsd_dmap, &txsd->txsd_seg);
644 if (error) {
645 aprint_error_dev(sc->sc_dev, "can't create TX status DMA resources\n");
646 return error;
647 }
648
649 /*
650 * Create DMA resources for RX rings
651 */
652 for (i = 0; i < ET_RX_NRING; ++i) {
653 static const uint32_t rx_ring_posreg[ET_RX_NRING] =
654 { ET_RX_RING0_POS, ET_RX_RING1_POS };
655
656 struct et_rxdesc_ring *rx_ring = &sc->sc_rx_ring[i];
657
658 error = et_dma_mem_create(sc, ET_RX_RING_SIZE,
659 (void **)&rx_ring->rr_desc,
660 &rx_ring->rr_paddr, &rx_ring->rr_dmap, &rx_ring->rr_seg);
661 if (error) {
662 aprint_error_dev(sc->sc_dev, "can't create DMA resources for "
663 "RX ring %d\n", i);
664 return error;
665 }
666 rx_ring->rr_posreg = rx_ring_posreg[i];
667 }
668
669 /*
670 * Create RX stat ring DMA resources
671 */
672 error = et_dma_mem_create(sc, ET_RXSTAT_RING_SIZE,
673 (void **)&rxst_ring->rsr_stat,
674 &rxst_ring->rsr_paddr, &rxst_ring->rsr_dmap, &rxst_ring->rsr_seg);
675 if (error) {
676 aprint_error_dev(sc->sc_dev, "can't create RX stat ring DMA resources\n");
677 return error;
678 }
679
680 /*
681 * Create RX status DMA resources
682 */
683 error = et_dma_mem_create(sc, sizeof(struct et_rxstatus),
684 (void **)&rxsd->rxsd_status,
685 &rxsd->rxsd_paddr, &rxsd->rxsd_dmap, &rxsd->rxsd_seg);
686 if (error) {
687 aprint_error_dev(sc->sc_dev, "can't create RX status DMA resources\n");
688 return error;
689 }
690
691 /*
692 * Create mbuf DMA maps
693 */
694 error = et_dma_mbuf_create(sc);
695 if (error)
696 return error;
697
698 return 0;
699 }
700
701 void
702 et_dma_free(struct et_softc *sc)
703 {
704 struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
705 struct et_txstatus_data *txsd = &sc->sc_tx_status;
706 struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
707 struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
708 int i, rx_done[ET_RX_NRING];
709
710 /*
711 * Destroy TX ring DMA resources
712 */
713 et_dma_mem_destroy(sc, tx_ring->tr_desc, tx_ring->tr_dmap);
714
715 /*
716 * Destroy TX status DMA resources
717 */
718 et_dma_mem_destroy(sc, txsd->txsd_status, txsd->txsd_dmap);
719
720 /*
721 * Destroy DMA resources for RX rings
722 */
723 for (i = 0; i < ET_RX_NRING; ++i) {
724 struct et_rxdesc_ring *rx_ring = &sc->sc_rx_ring[i];
725
726 et_dma_mem_destroy(sc, rx_ring->rr_desc, rx_ring->rr_dmap);
727 }
728
729 /*
730 * Destroy RX stat ring DMA resources
731 */
732 et_dma_mem_destroy(sc, rxst_ring->rsr_stat, rxst_ring->rsr_dmap);
733
734 /*
735 * Destroy RX status DMA resources
736 */
737 et_dma_mem_destroy(sc, rxsd->rxsd_status, rxsd->rxsd_dmap);
738
739 /*
740 * Destroy mbuf DMA maps
741 */
742 for (i = 0; i < ET_RX_NRING; ++i)
743 rx_done[i] = ET_RX_NDESC;
744 et_dma_mbuf_destroy(sc, ET_TX_NDESC, rx_done);
745 }
746
747 int
748 et_dma_mbuf_create(struct et_softc *sc)
749 {
750 struct et_txbuf_data *tbd = &sc->sc_tx_data;
751 int i, error, rx_done[ET_RX_NRING];
752
753 /*
754 * Create spare DMA map for RX mbufs
755 */
756 error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0,
757 BUS_DMA_NOWAIT, &sc->sc_mbuf_tmp_dmap);
758 if (error) {
759 aprint_error_dev(sc->sc_dev, "can't create spare mbuf DMA map\n");
760 return error;
761 }
762
763 /*
764 * Create DMA maps for RX mbufs
765 */
766 bzero(rx_done, sizeof(rx_done));
767 for (i = 0; i < ET_RX_NRING; ++i) {
768 struct et_rxbuf_data *rbd = &sc->sc_rx_data[i];
769 int j;
770
771 for (j = 0; j < ET_RX_NDESC; ++j) {
772 error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
773 MCLBYTES, 0, BUS_DMA_NOWAIT,
774 &rbd->rbd_buf[j].rb_dmap);
775 if (error) {
776 aprint_error_dev(sc->sc_dev, "can't create DMA map for RX buffer "
777 "%d in ring %d\n", j, i);
778 rx_done[i] = j;
779 et_dma_mbuf_destroy(sc, 0, rx_done);
780 return error;
781 }
782 }
783 rx_done[i] = ET_RX_NDESC;
784
785 rbd->rbd_softc = sc;
786 rbd->rbd_ring = &sc->sc_rx_ring[i];
787 }
788
789 /*
790 * Create DMA maps for TX mbufs
791 */
792 for (i = 0; i < ET_TX_NDESC; ++i) {
793 error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
794 0, BUS_DMA_NOWAIT, &tbd->tbd_buf[i].tb_dmap);
795 if (error) {
796 aprint_error_dev(sc->sc_dev, "can't create TX mbuf "
797 "DMA map %d\n", i);
798 et_dma_mbuf_destroy(sc, i, rx_done);
799 return error;
800 }
801 }
802
803 return 0;
804 }
805
806 void
807 et_dma_mbuf_destroy(struct et_softc *sc, int tx_done, const int rx_done[])
808 {
809 struct et_txbuf_data *tbd = &sc->sc_tx_data;
810 int i;
811
812 /*
813 * Destroy DMA maps for RX mbufs
814 */
815 for (i = 0; i < ET_RX_NRING; ++i) {
816 struct et_rxbuf_data *rbd = &sc->sc_rx_data[i];
817 int j;
818
819 for (j = 0; j < rx_done[i]; ++j) {
820 struct et_rxbuf *rb = &rbd->rbd_buf[j];
821
822 KASSERTMSG(rb->rb_mbuf == NULL,
823 "RX mbuf in %d RX ring is not freed yet\n", i);
824 bus_dmamap_destroy(sc->sc_dmat, rb->rb_dmap);
825 }
826 }
827
828 /*
829 * Destroy DMA maps for TX mbufs
830 */
831 for (i = 0; i < tx_done; ++i) {
832 struct et_txbuf *tb = &tbd->tbd_buf[i];
833
834 KASSERTMSG(tb->tb_mbuf == NULL, "TX mbuf is not freed yet\n");
835 bus_dmamap_destroy(sc->sc_dmat, tb->tb_dmap);
836 }
837
838 /*
839 * Destroy spare mbuf DMA map
840 */
841 bus_dmamap_destroy(sc->sc_dmat, sc->sc_mbuf_tmp_dmap);
842 }
843
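/*
 * Helper that allocates one physically contiguous, ET_ALIGN-aligned
 * DMA segment of the given size, maps it into kernel virtual memory and
 * loads it into a freshly created DMA map.  On success *addr holds the
 * zeroed KVA and *paddr the bus address to hand to the chip.
 */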
844 int
845 et_dma_mem_create(struct et_softc *sc, bus_size_t size,
846 void **addr, bus_addr_t *paddr, bus_dmamap_t *dmap, bus_dma_segment_t *seg)
847 {
848 int error, nsegs;
849
850 error = bus_dmamap_create(sc->sc_dmat, size, 1, size, 0, BUS_DMA_NOWAIT,
851 dmap);
852 if (error) {
853 aprint_error_dev(sc->sc_dev, "can't create DMA map\n");
854 return error;
855 }
856
857 error = bus_dmamem_alloc(sc->sc_dmat, size, ET_ALIGN, 0, seg,
858 1, &nsegs, BUS_DMA_WAITOK);
859 if (error) {
860 aprint_error_dev(sc->sc_dev, "can't allocate DMA mem\n");
861 return error;
862 }
863
864 error = bus_dmamem_map(sc->sc_dmat, seg, nsegs,
865 size, (void **)addr, BUS_DMA_NOWAIT);
866 if (error) {
867 aprint_error_dev(sc->sc_dev, "can't map DMA mem\n");
868 return (error);
869 }
870
871 error = bus_dmamap_load(sc->sc_dmat, *dmap, *addr, size, NULL,
872 BUS_DMA_WAITOK);
873 if (error) {
874 aprint_error_dev(sc->sc_dev, "can't load DMA mem\n");
875 bus_dmamem_free(sc->sc_dmat, seg, 1);
876 return error;
877 }
878
879 memset(*addr, 0, size);
880
881 *paddr = (*dmap)->dm_segs[0].ds_addr;
882
883 return 0;
884 }
885
886 void
887 et_dma_mem_destroy(struct et_softc *sc, void *addr, bus_dmamap_t dmap)
888 {
889 bus_dmamap_unload(sc->sc_dmat, dmap);
890 bus_dmamem_free(sc->sc_dmat, (bus_dma_segment_t *)&addr, 1);
891 }
892
893 void
894 et_chip_attach(struct et_softc *sc)
895 {
896 uint32_t val;
897
898 /*
899 * Perform minimal initialization
900 */
901
902 /* Disable loopback */
903 CSR_WRITE_4(sc, ET_LOOPBACK, 0);
904
905 /* Reset MAC */
906 CSR_WRITE_4(sc, ET_MAC_CFG1,
907 ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
908 ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC |
909 ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST);
910
911 /*
912 * Setup half duplex mode
913 */
914 val = __SHIFTIN(10, ET_MAC_HDX_ALT_BEB_TRUNC) |
915 __SHIFTIN(15, ET_MAC_HDX_REXMIT_MAX) |
916 __SHIFTIN(55, ET_MAC_HDX_COLLWIN) |
917 ET_MAC_HDX_EXC_DEFER;
918 CSR_WRITE_4(sc, ET_MAC_HDX, val);
919
920 /* Clear MAC control */
921 CSR_WRITE_4(sc, ET_MAC_CTRL, 0);
922
923 /* Reset MII */
924 CSR_WRITE_4(sc, ET_MII_CFG, ET_MII_CFG_CLKRST);
925
926 /* Bring MAC out of reset state */
927 CSR_WRITE_4(sc, ET_MAC_CFG1, 0);
928
929 /* Enable memory controllers */
930 CSR_WRITE_4(sc, ET_MMC_CTRL, ET_MMC_CTRL_ENABLE);
931 }
932
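/*
 * Interrupt handler.  All sources are masked (via ET_INTR_MASK) while
 * the handler runs; RX and TX completions are serviced, and the chip
 * timer interrupt doubles as a TX reclaim backstop, after which the
 * timer is re-armed with sc_timer.
 */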
933 int
934 et_intr(void *xsc)
935 {
936 struct et_softc *sc = xsc;
937 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
938 uint32_t intrs;
939
940 if ((ifp->if_flags & IFF_RUNNING) == 0)
941 return (0);
942
943 intrs = CSR_READ_4(sc, ET_INTR_STATUS);
944 if (intrs == 0 || intrs == 0xffffffff)
945 return (0);
946
947 et_disable_intrs(sc);
948 intrs &= ET_INTRS;
949 if (intrs == 0) /* Not interested */
950 goto back;
951
952 if (intrs & ET_INTR_RXEOF)
953 et_rxeof(sc);
954 if (intrs & (ET_INTR_TXEOF | ET_INTR_TIMER))
955 et_txeof(sc);
956 if (intrs & ET_INTR_TIMER)
957 CSR_WRITE_4(sc, ET_TIMER, sc->sc_timer);
958 back:
959 et_enable_intrs(sc, ET_INTRS);
960
961 return (1);
962 }
963
964 int
965 et_init(struct ifnet *ifp)
966 {
967 struct et_softc *sc = ifp->if_softc;
968 int error, i, s;
969
970 if (ifp->if_flags & IFF_RUNNING)
971 return 0;
972
973 s = splnet();
974
975 et_stop(sc);
976
977 for (i = 0; i < ET_RX_NRING; ++i) {
978 sc->sc_rx_data[i].rbd_bufsize = et_bufsize[i].bufsize;
979 sc->sc_rx_data[i].rbd_newbuf = et_bufsize[i].newbuf;
980 }
981
982 error = et_init_tx_ring(sc);
983 if (error)
984 goto back;
985
986 error = et_init_rx_ring(sc);
987 if (error)
988 goto back;
989
990 error = et_chip_init(sc);
991 if (error)
992 goto back;
993
994 error = et_enable_txrx(sc);
995 if (error)
996 goto back;
997
998 error = et_start_rxdma(sc);
999 if (error)
1000 goto back;
1001
1002 error = et_start_txdma(sc);
1003 if (error)
1004 goto back;
1005
1006 et_enable_intrs(sc, ET_INTRS);
1007
1008 callout_schedule(&sc->sc_tick, hz);
1009
1010 CSR_WRITE_4(sc, ET_TIMER, sc->sc_timer);
1011
1012 ifp->if_flags |= IFF_RUNNING;
1013 ifp->if_flags &= ~IFF_OACTIVE;
1014 back:
1015 if (error)
1016 et_stop(sc);
1017
1018 splx(s);
1019
1020 return error;
1021 }
1022
1023 int
1024 et_ioctl(struct ifnet *ifp, u_long cmd, void *data)
1025 {
1026 struct et_softc *sc = ifp->if_softc;
1027 struct ifreq *ifr = (struct ifreq *)data;
1028 int s, error = 0;
1029
1030 s = splnet();
1031
1032 switch (cmd) {
1033 case SIOCSIFFLAGS:
1034 if (ifp->if_flags & IFF_UP) {
1035 /*
1036 * If only the PROMISC or ALLMULTI flag changes, then
1037 * don't do a full re-init of the chip, just update
1038 * the Rx filter.
1039 */
1040 if ((ifp->if_flags & IFF_RUNNING) &&
1041 ((ifp->if_flags ^ sc->sc_if_flags) &
1042 (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
1043 et_setmulti(sc);
1044 } else {
1045 if (!(ifp->if_flags & IFF_RUNNING))
1046 et_init(ifp);
1047 }
1048 } else {
1049 if (ifp->if_flags & IFF_RUNNING)
1050 et_stop(sc);
1051 }
1052 sc->sc_if_flags = ifp->if_flags;
1053 break;
1054 case SIOCSIFMEDIA:
1055 case SIOCGIFMEDIA:
1056 error = ifmedia_ioctl(ifp, ifr, &sc->sc_miibus.mii_media, cmd);
1057 break;
1058 default:
1059 error = ether_ioctl(ifp, cmd, data);
1060 if (error == ENETRESET) {
1061 if (ifp->if_flags & IFF_RUNNING)
1062 et_setmulti(sc);
1063 error = 0;
1064 }
1065 break;
1066
1067 }
1068
1069 splx(s);
1070
1071 return error;
1072 }
1073
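/*
 * Transmit start routine: drain the send queue, stopping early (and
 * setting IFF_OACTIVE) once fewer than ET_NSEG_SPARE descriptors remain
 * or et_encap() fails.  If anything was handed to the chip, schedule
 * the TX reclaim callout and arm a 5 second watchdog.
 */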
1074 void
1075 et_start(struct ifnet *ifp)
1076 {
1077 struct et_softc *sc = ifp->if_softc;
1078 struct et_txbuf_data *tbd = &sc->sc_tx_data;
1079 int trans;
1080 struct mbuf *m;
1081
1082 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
1083 return;
1084
1085 trans = 0;
1086 for (;;) {
1087 IFQ_DEQUEUE(&ifp->if_snd, m);
1088 if (m == NULL)
1089 break;
1090
1091 if ((tbd->tbd_used + ET_NSEG_SPARE) > ET_TX_NDESC) {
1092 ifp->if_flags |= IFF_OACTIVE;
1093 break;
1094 }
1095
1096 if (et_encap(sc, &m)) {
1097 ifp->if_oerrors++;
1098 ifp->if_flags |= IFF_OACTIVE;
1099 break;
1100 }
1101
1102 trans = 1;
1103
1104 bpf_mtap(ifp, m);
1105 }
1106
1107 if (trans) {
1108 callout_schedule(&sc->sc_txtick, hz);
1109 ifp->if_timer = 5;
1110 }
1111 }
1112
1113 void
1114 et_watchdog(struct ifnet *ifp)
1115 {
1116 struct et_softc *sc = ifp->if_softc;
1117 aprint_error_dev(sc->sc_dev, "watchdog timed out\n");
1118
1119 ifp->if_flags &= ~IFF_RUNNING;
1120 et_init(ifp);
1121 et_start(ifp);
1122 }
1123
1124 int
1125 et_stop_rxdma(struct et_softc *sc)
1126 {
1127 CSR_WRITE_4(sc, ET_RXDMA_CTRL,
1128 ET_RXDMA_CTRL_HALT | ET_RXDMA_CTRL_RING1_ENABLE);
1129
1130 DELAY(5);
1131 if ((CSR_READ_4(sc, ET_RXDMA_CTRL) & ET_RXDMA_CTRL_HALTED) == 0) {
1132 aprint_error_dev(sc->sc_dev, "can't stop RX DMA engine\n");
1133 return ETIMEDOUT;
1134 }
1135 return 0;
1136 }
1137
1138 int
1139 et_stop_txdma(struct et_softc *sc)
1140 {
1141 CSR_WRITE_4(sc, ET_TXDMA_CTRL,
1142 ET_TXDMA_CTRL_HALT | ET_TXDMA_CTRL_SINGLE_EPKT);
1143 return 0;
1144 }
1145
1146 void
1147 et_free_tx_ring(struct et_softc *sc)
1148 {
1149 struct et_txbuf_data *tbd = &sc->sc_tx_data;
1150 struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
1151 int i;
1152
1153 for (i = 0; i < ET_TX_NDESC; ++i) {
1154 struct et_txbuf *tb = &tbd->tbd_buf[i];
1155
1156 if (tb->tb_mbuf != NULL) {
1157 bus_dmamap_unload(sc->sc_dmat, tb->tb_dmap);
1158 m_freem(tb->tb_mbuf);
1159 tb->tb_mbuf = NULL;
1160 }
1161 }
1162
1163 bzero(tx_ring->tr_desc, ET_TX_RING_SIZE);
1164 bus_dmamap_sync(sc->sc_dmat, tx_ring->tr_dmap, 0,
1165 tx_ring->tr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);
1166 }
1167
1168 void
1169 et_free_rx_ring(struct et_softc *sc)
1170 {
1171 int n;
1172
1173 for (n = 0; n < ET_RX_NRING; ++n) {
1174 struct et_rxbuf_data *rbd = &sc->sc_rx_data[n];
1175 struct et_rxdesc_ring *rx_ring = &sc->sc_rx_ring[n];
1176 int i;
1177
1178 for (i = 0; i < ET_RX_NDESC; ++i) {
1179 struct et_rxbuf *rb = &rbd->rbd_buf[i];
1180
1181 if (rb->rb_mbuf != NULL) {
1182 bus_dmamap_unload(sc->sc_dmat, rb->rb_dmap);
1183 m_freem(rb->rb_mbuf);
1184 rb->rb_mbuf = NULL;
1185 }
1186 }
1187
1188 bzero(rx_ring->rr_desc, ET_RX_RING_SIZE);
1189 bus_dmamap_sync(sc->sc_dmat, rx_ring->rr_dmap, 0,
1190 rx_ring->rr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);
1191 }
1192 }
1193
1194 void
1195 et_setmulti(struct et_softc *sc)
1196 {
1197 struct ethercom *ec = &sc->sc_ethercom;
1198 struct ifnet *ifp = &ec->ec_if;
1199 uint32_t hash[4] = { 0, 0, 0, 0 };
1200 uint32_t rxmac_ctrl, pktfilt;
1201 struct ether_multi *enm;
1202 struct ether_multistep step;
1203 uint8_t addr[ETHER_ADDR_LEN];
1204 int i, count;
1205
1206 pktfilt = CSR_READ_4(sc, ET_PKTFILT);
1207 rxmac_ctrl = CSR_READ_4(sc, ET_RXMAC_CTRL);
1208
1209 pktfilt &= ~(ET_PKTFILT_BCAST | ET_PKTFILT_MCAST | ET_PKTFILT_UCAST);
1210 if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) {
1211 rxmac_ctrl |= ET_RXMAC_CTRL_NO_PKTFILT;
1212 goto back;
1213 }
1214
1215 bcopy(etherbroadcastaddr, addr, ETHER_ADDR_LEN);
1216
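/*
 * Build the 128-bit multicast hash filter: the top 7 bits (bits 29-23)
 * of the big-endian CRC32 of each address select one bit, spread over
 * the four 32-bit ET_MULTI_HASH words.
 */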
1217 count = 0;
1218 ETHER_FIRST_MULTI(step, ec, enm);
1219 while (enm != NULL) {
1220 uint32_t *hp, h;
1221
1222 for (i = 0; i < ETHER_ADDR_LEN; i++) {
1223 addr[i] &= enm->enm_addrlo[i];
1224 }
1225
1226 h = ether_crc32_be(addr, ETHER_ADDR_LEN);
1228 h = (h & 0x3f800000) >> 23;
1229
1230 hp = &hash[0];
1231 if (h >= 32 && h < 64) {
1232 h -= 32;
1233 hp = &hash[1];
1234 } else if (h >= 64 && h < 96) {
1235 h -= 64;
1236 hp = &hash[2];
1237 } else if (h >= 96) {
1238 h -= 96;
1239 hp = &hash[3];
1240 }
1241 *hp |= (1 << h);
1242
1243 ++count;
1244 ETHER_NEXT_MULTI(step, enm);
1245 }
1246
1247 for (i = 0; i < 4; ++i)
1248 CSR_WRITE_4(sc, ET_MULTI_HASH + (i * 4), hash[i]);
1249
1250 if (count > 0)
1251 pktfilt |= ET_PKTFILT_MCAST;
1252 rxmac_ctrl &= ~ET_RXMAC_CTRL_NO_PKTFILT;
1253 back:
1254 CSR_WRITE_4(sc, ET_PKTFILT, pktfilt);
1255 CSR_WRITE_4(sc, ET_RXMAC_CTRL, rxmac_ctrl);
1256 }
1257
1258 int
1259 et_chip_init(struct et_softc *sc)
1260 {
1261 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1262 uint32_t rxq_end;
1263 int error;
1264
1265 /*
1266 * Split internal memory between TX and RX according to MTU
1267 */
1268 if (ifp->if_mtu < 2048)
1269 rxq_end = 0x2bc;
1270 else if (ifp->if_mtu < 8192)
1271 rxq_end = 0x1ff;
1272 else
1273 rxq_end = 0x1b3;
1274 CSR_WRITE_4(sc, ET_RXQ_START, 0);
1275 CSR_WRITE_4(sc, ET_RXQ_END, rxq_end);
1276 CSR_WRITE_4(sc, ET_TXQ_START, rxq_end + 1);
1277 CSR_WRITE_4(sc, ET_TXQ_END, ET_INTERN_MEM_END);
1278
1279 /* No loopback */
1280 CSR_WRITE_4(sc, ET_LOOPBACK, 0);
1281
1282 /* Clear MSI configure */
1283 CSR_WRITE_4(sc, ET_MSI_CFG, 0);
1284
1285 /* Disable timer */
1286 CSR_WRITE_4(sc, ET_TIMER, 0);
1287
1288 /* Initialize MAC */
1289 et_init_mac(sc);
1290
1291 /* Enable memory controllers */
1292 CSR_WRITE_4(sc, ET_MMC_CTRL, ET_MMC_CTRL_ENABLE);
1293
1294 /* Initialize RX MAC */
1295 et_init_rxmac(sc);
1296
1297 /* Initialize TX MAC */
1298 et_init_txmac(sc);
1299
1300 /* Initialize RX DMA engine */
1301 error = et_init_rxdma(sc);
1302 if (error)
1303 return error;
1304
1305 /* Initialize TX DMA engine */
1306 error = et_init_txdma(sc);
1307 if (error)
1308 return error;
1309
1310 return 0;
1311 }
1312
1313 int
1314 et_init_tx_ring(struct et_softc *sc)
1315 {
1316 struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
1317 struct et_txstatus_data *txsd = &sc->sc_tx_status;
1318 struct et_txbuf_data *tbd = &sc->sc_tx_data;
1319
1320 bzero(tx_ring->tr_desc, ET_TX_RING_SIZE);
1321 bus_dmamap_sync(sc->sc_dmat, tx_ring->tr_dmap, 0,
1322 tx_ring->tr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);
1323
1324 tbd->tbd_start_index = 0;
1325 tbd->tbd_start_wrap = 0;
1326 tbd->tbd_used = 0;
1327
1328 bzero(txsd->txsd_status, sizeof(uint32_t));
1329 bus_dmamap_sync(sc->sc_dmat, txsd->txsd_dmap, 0,
1330 txsd->txsd_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);
1331 return 0;
1332 }
1333
1334 int
1335 et_init_rx_ring(struct et_softc *sc)
1336 {
1337 struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
1338 struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
1339 int n;
1340
1341 for (n = 0; n < ET_RX_NRING; ++n) {
1342 struct et_rxbuf_data *rbd = &sc->sc_rx_data[n];
1343 int i, error;
1344
1345 for (i = 0; i < ET_RX_NDESC; ++i) {
1346 error = rbd->rbd_newbuf(rbd, i, 1);
1347 if (error) {
1348 aprint_error_dev(sc->sc_dev, "newbuf failed for ring %d "
1349 "buf %d: error %d\n", n, i, error);
1350 return error;
1351 }
1352 }
1353 }
1354
1355 bzero(rxsd->rxsd_status, sizeof(struct et_rxstatus));
1356 bus_dmamap_sync(sc->sc_dmat, rxsd->rxsd_dmap, 0,
1357 rxsd->rxsd_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);
1358
1359 bzero(rxst_ring->rsr_stat, ET_RXSTAT_RING_SIZE);
1360 bus_dmamap_sync(sc->sc_dmat, rxst_ring->rsr_dmap, 0,
1361 rxst_ring->rsr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);
1362
1363 return 0;
1364 }
1365
1366 int
1367 et_init_rxdma(struct et_softc *sc)
1368 {
1369 struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
1370 struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
1371 struct et_rxdesc_ring *rx_ring;
1372 int error;
1373
1374 error = et_stop_rxdma(sc);
1375 if (error) {
1376 aprint_error_dev(sc->sc_dev, "can't init RX DMA engine\n");
1377 return error;
1378 }
1379
1380 /*
1381 * Install RX status
1382 */
1383 CSR_WRITE_4(sc, ET_RX_STATUS_HI, ET_ADDR_HI(rxsd->rxsd_paddr));
1384 CSR_WRITE_4(sc, ET_RX_STATUS_LO, ET_ADDR_LO(rxsd->rxsd_paddr));
1385
1386 /*
1387 * Install RX stat ring
1388 */
1389 CSR_WRITE_4(sc, ET_RXSTAT_HI, ET_ADDR_HI(rxst_ring->rsr_paddr));
1390 CSR_WRITE_4(sc, ET_RXSTAT_LO, ET_ADDR_LO(rxst_ring->rsr_paddr));
1391 CSR_WRITE_4(sc, ET_RXSTAT_CNT, ET_RX_NSTAT - 1);
1392 CSR_WRITE_4(sc, ET_RXSTAT_POS, 0);
1393 CSR_WRITE_4(sc, ET_RXSTAT_MINCNT, ((ET_RX_NSTAT * 15) / 100) - 1);
1394
1395 /* Match ET_RXSTAT_POS */
1396 rxst_ring->rsr_index = 0;
1397 rxst_ring->rsr_wrap = 0;
1398
1399 /*
1400 * Install the 2nd RX descriptor ring
1401 */
1402 rx_ring = &sc->sc_rx_ring[1];
1403 CSR_WRITE_4(sc, ET_RX_RING1_HI, ET_ADDR_HI(rx_ring->rr_paddr));
1404 CSR_WRITE_4(sc, ET_RX_RING1_LO, ET_ADDR_LO(rx_ring->rr_paddr));
1405 CSR_WRITE_4(sc, ET_RX_RING1_CNT, ET_RX_NDESC - 1);
1406 CSR_WRITE_4(sc, ET_RX_RING1_POS, ET_RX_RING1_POS_WRAP);
1407 CSR_WRITE_4(sc, ET_RX_RING1_MINCNT, ((ET_RX_NDESC * 15) / 100) - 1);
1408
1409 /* Match ET_RX_RING1_POS */
1410 rx_ring->rr_index = 0;
1411 rx_ring->rr_wrap = 1;
1412
1413 /*
1414 * Install the 1st RX descriptor ring
1415 */
1416 rx_ring = &sc->sc_rx_ring[0];
1417 CSR_WRITE_4(sc, ET_RX_RING0_HI, ET_ADDR_HI(rx_ring->rr_paddr));
1418 CSR_WRITE_4(sc, ET_RX_RING0_LO, ET_ADDR_LO(rx_ring->rr_paddr));
1419 CSR_WRITE_4(sc, ET_RX_RING0_CNT, ET_RX_NDESC - 1);
1420 CSR_WRITE_4(sc, ET_RX_RING0_POS, ET_RX_RING0_POS_WRAP);
1421 CSR_WRITE_4(sc, ET_RX_RING0_MINCNT, ((ET_RX_NDESC * 15) / 100) - 1);
1422
1423 /* Match ET_RX_RING0_POS */
1424 rx_ring->rr_index = 0;
1425 rx_ring->rr_wrap = 1;
1426
1427 /*
1428 * RX intr moderation
1429 */
1430 CSR_WRITE_4(sc, ET_RX_INTR_NPKTS, sc->sc_rx_intr_npkts);
1431 CSR_WRITE_4(sc, ET_RX_INTR_DELAY, sc->sc_rx_intr_delay);
1432
1433 return 0;
1434 }
1435
1436 int
1437 et_init_txdma(struct et_softc *sc)
1438 {
1439 struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
1440 struct et_txstatus_data *txsd = &sc->sc_tx_status;
1441 int error;
1442
1443 error = et_stop_txdma(sc);
1444 if (error) {
1445 aprint_error_dev(sc->sc_dev, "can't init TX DMA engine\n");
1446 return error;
1447 }
1448
1449 /*
1450 * Install TX descriptor ring
1451 */
1452 CSR_WRITE_4(sc, ET_TX_RING_HI, ET_ADDR_HI(tx_ring->tr_paddr));
1453 CSR_WRITE_4(sc, ET_TX_RING_LO, ET_ADDR_LO(tx_ring->tr_paddr));
1454 CSR_WRITE_4(sc, ET_TX_RING_CNT, ET_TX_NDESC - 1);
1455
1456 /*
1457 * Install TX status
1458 */
1459 CSR_WRITE_4(sc, ET_TX_STATUS_HI, ET_ADDR_HI(txsd->txsd_paddr));
1460 CSR_WRITE_4(sc, ET_TX_STATUS_LO, ET_ADDR_LO(txsd->txsd_paddr));
1461
1462 CSR_WRITE_4(sc, ET_TX_READY_POS, 0);
1463
1464 /* Match ET_TX_READY_POS */
1465 tx_ring->tr_ready_index = 0;
1466 tx_ring->tr_ready_wrap = 0;
1467
1468 return 0;
1469 }
1470
1471 void
1472 et_init_mac(struct et_softc *sc)
1473 {
1474 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1475 const uint8_t *eaddr = CLLADDR(ifp->if_sadl);
1476 uint32_t val;
1477
1478 /* Reset MAC */
1479 CSR_WRITE_4(sc, ET_MAC_CFG1,
1480 ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
1481 ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC |
1482 ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST);
1483
1484 /*
1485 * Setup inter packet gap
1486 */
1487 val = __SHIFTIN(56, ET_IPG_NONB2B_1) |
1488 __SHIFTIN(88, ET_IPG_NONB2B_2) |
1489 __SHIFTIN(80, ET_IPG_MINIFG) |
1490 __SHIFTIN(96, ET_IPG_B2B);
1491 CSR_WRITE_4(sc, ET_IPG, val);
1492
1493 /*
1494 * Setup half duplex mode
1495 */
1496 val = __SHIFTIN(10, ET_MAC_HDX_ALT_BEB_TRUNC) |
1497 __SHIFTIN(15, ET_MAC_HDX_REXMIT_MAX) |
1498 __SHIFTIN(55, ET_MAC_HDX_COLLWIN) |
1499 ET_MAC_HDX_EXC_DEFER;
1500 CSR_WRITE_4(sc, ET_MAC_HDX, val);
1501
1502 /* Clear MAC control */
1503 CSR_WRITE_4(sc, ET_MAC_CTRL, 0);
1504
1505 /* Reset MII */
1506 CSR_WRITE_4(sc, ET_MII_CFG, ET_MII_CFG_CLKRST);
1507
1508 /*
1509 * Set MAC address
1510 */
1511 val = eaddr[2] | (eaddr[3] << 8) | (eaddr[4] << 16) | (eaddr[5] << 24);
1512 CSR_WRITE_4(sc, ET_MAC_ADDR1, val);
1513 val = (eaddr[0] << 16) | (eaddr[1] << 24);
1514 CSR_WRITE_4(sc, ET_MAC_ADDR2, val);
1515
1516 /* Set max frame length */
1517 CSR_WRITE_4(sc, ET_MAX_FRMLEN,
1518 ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN + ifp->if_mtu + ETHER_CRC_LEN);
1519
1520 /* Bring MAC out of reset state */
1521 CSR_WRITE_4(sc, ET_MAC_CFG1, 0);
1522 }
1523
1524 void
1525 et_init_rxmac(struct et_softc *sc)
1526 {
1527 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1528 const uint8_t *eaddr = CLLADDR(ifp->if_sadl);
1529 uint32_t val;
1530 int i;
1531
1532 /* Disable RX MAC and WOL */
1533 CSR_WRITE_4(sc, ET_RXMAC_CTRL, ET_RXMAC_CTRL_WOL_DISABLE);
1534
1535 /*
1536 * Clear all WOL related registers
1537 */
1538 for (i = 0; i < 3; ++i)
1539 CSR_WRITE_4(sc, ET_WOL_CRC + (i * 4), 0);
1540 for (i = 0; i < 20; ++i)
1541 CSR_WRITE_4(sc, ET_WOL_MASK + (i * 4), 0);
1542
1543 /*
1544 * Set WOL source address. XXX is this necessary?
1545 */
1546 val = (eaddr[2] << 24) | (eaddr[3] << 16) | (eaddr[4] << 8) | eaddr[5];
1547 CSR_WRITE_4(sc, ET_WOL_SA_LO, val);
1548 val = (eaddr[0] << 8) | eaddr[1];
1549 CSR_WRITE_4(sc, ET_WOL_SA_HI, val);
1550
1551 /* Clear packet filters */
1552 CSR_WRITE_4(sc, ET_PKTFILT, 0);
1553
1554 /* No ucast filtering */
1555 CSR_WRITE_4(sc, ET_UCAST_FILTADDR1, 0);
1556 CSR_WRITE_4(sc, ET_UCAST_FILTADDR2, 0);
1557 CSR_WRITE_4(sc, ET_UCAST_FILTADDR3, 0);
1558
1559 if (ifp->if_mtu > 8192) {
1560 /*
1561 * In order to transmit jumbo packets greater than 8k,
1562 * the FIFO between RX MAC and RX DMA needs to be reduced
1563 * in size to (16k - MTU). In order to implement this, we
1564 * must use "cut through" mode in the RX MAC, which chops
1565 * packets down into segments which are (max_size * 16).
1566 * In this case we selected 256 bytes, since this is the
1567 * size of the PCI-Express TLP's that the 1310 uses.
1568 */
1569 val = __SHIFTIN(16, ET_RXMAC_MC_SEGSZ_MAX) |
1570 ET_RXMAC_MC_SEGSZ_ENABLE;
1571 } else {
1572 val = 0;
1573 }
1574 CSR_WRITE_4(sc, ET_RXMAC_MC_SEGSZ, val);
1575
1576 CSR_WRITE_4(sc, ET_RXMAC_MC_WATERMARK, 0);
1577
1578 /* Initialize RX MAC management register */
1579 CSR_WRITE_4(sc, ET_RXMAC_MGT, 0);
1580
1581 CSR_WRITE_4(sc, ET_RXMAC_SPACE_AVL, 0);
1582
1583 CSR_WRITE_4(sc, ET_RXMAC_MGT,
1584 ET_RXMAC_MGT_PASS_ECRC |
1585 ET_RXMAC_MGT_PASS_ELEN |
1586 ET_RXMAC_MGT_PASS_ETRUNC |
1587 ET_RXMAC_MGT_CHECK_PKT);
1588
1589 /*
1590 * Configure runt filtering (may not work on certain chip generations)
1591 */
1592 val = __SHIFTIN(ETHER_MIN_LEN, ET_PKTFILT_MINLEN) | ET_PKTFILT_FRAG;
1593 CSR_WRITE_4(sc, ET_PKTFILT, val);
1594
1595 /* Enable RX MAC but leave WOL disabled */
1596 CSR_WRITE_4(sc, ET_RXMAC_CTRL,
1597 ET_RXMAC_CTRL_WOL_DISABLE | ET_RXMAC_CTRL_ENABLE);
1598
1599 /*
1600 * Setup multicast hash and allmulti/promisc mode
1601 */
1602 et_setmulti(sc);
1603 }
1604
1605 void
1606 et_init_txmac(struct et_softc *sc)
1607 {
1608 /* Disable TX MAC and FC(?) */
1609 CSR_WRITE_4(sc, ET_TXMAC_CTRL, ET_TXMAC_CTRL_FC_DISABLE);
1610
1611 /* No flow control yet */
1612 CSR_WRITE_4(sc, ET_TXMAC_FLOWCTRL, 0);
1613
1614 /* Enable TX MAC but leave FC(?) disabled */
1615 CSR_WRITE_4(sc, ET_TXMAC_CTRL,
1616 ET_TXMAC_CTRL_ENABLE | ET_TXMAC_CTRL_FC_DISABLE);
1617 }
1618
1619 int
1620 et_start_rxdma(struct et_softc *sc)
1621 {
1622 uint32_t val = 0;
1623
1624 val |= __SHIFTIN(sc->sc_rx_data[0].rbd_bufsize,
1625 ET_RXDMA_CTRL_RING0_SIZE) |
1626 ET_RXDMA_CTRL_RING0_ENABLE;
1627 val |= __SHIFTIN(sc->sc_rx_data[1].rbd_bufsize,
1628 ET_RXDMA_CTRL_RING1_SIZE) |
1629 ET_RXDMA_CTRL_RING1_ENABLE;
1630
1631 CSR_WRITE_4(sc, ET_RXDMA_CTRL, val);
1632
1633 DELAY(5);
1634
1635 if (CSR_READ_4(sc, ET_RXDMA_CTRL) & ET_RXDMA_CTRL_HALTED) {
1636 aprint_error_dev(sc->sc_dev, "can't start RX DMA engine\n");
1637 return ETIMEDOUT;
1638 }
1639 return 0;
1640 }
1641
1642 int
1643 et_start_txdma(struct et_softc *sc)
1644 {
1645 CSR_WRITE_4(sc, ET_TXDMA_CTRL, ET_TXDMA_CTRL_SINGLE_EPKT);
1646 return 0;
1647 }
1648
1649 int
1650 et_enable_txrx(struct et_softc *sc)
1651 {
1652 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1653 uint32_t val;
1654 int i, rc = 0;
1655
1656 val = CSR_READ_4(sc, ET_MAC_CFG1);
1657 val |= ET_MAC_CFG1_TXEN | ET_MAC_CFG1_RXEN;
1658 val &= ~(ET_MAC_CFG1_TXFLOW | ET_MAC_CFG1_RXFLOW |
1659 ET_MAC_CFG1_LOOPBACK);
1660 CSR_WRITE_4(sc, ET_MAC_CFG1, val);
1661
1662 if ((rc = ether_mediachange(ifp)) != 0)
1663 goto out;
1664
1665 #define NRETRY 100
1666
1667 for (i = 0; i < NRETRY; ++i) {
1668 val = CSR_READ_4(sc, ET_MAC_CFG1);
1669 if ((val & (ET_MAC_CFG1_SYNC_TXEN | ET_MAC_CFG1_SYNC_RXEN)) ==
1670 (ET_MAC_CFG1_SYNC_TXEN | ET_MAC_CFG1_SYNC_RXEN))
1671 break;
1672
1673 DELAY(10);
1674 }
1675 if (i == NRETRY) {
1676 aprint_error_dev(sc->sc_dev, "can't enable RX/TX\n");
1677 return ETIMEDOUT;
1678 }
1679
1680 #undef NRETRY
1681 return 0;
1682 out:
1683 return rc;
1684 }
1685
1686 void
1687 et_rxeof(struct et_softc *sc)
1688 {
1689 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1690 struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
1691 struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
1692 uint32_t rxs_stat_ring;
1693 int rxst_wrap, rxst_index;
1694
1695 bus_dmamap_sync(sc->sc_dmat, rxsd->rxsd_dmap, 0,
1696 rxsd->rxsd_dmap->dm_mapsize, BUS_DMASYNC_POSTREAD);
1697 bus_dmamap_sync(sc->sc_dmat, rxst_ring->rsr_dmap, 0,
1698 rxst_ring->rsr_dmap->dm_mapsize, BUS_DMASYNC_POSTREAD);
1699
1700 rxs_stat_ring = rxsd->rxsd_status->rxs_stat_ring;
1701 rxst_wrap = (rxs_stat_ring & ET_RXS_STATRING_WRAP) ? 1 : 0;
1702 rxst_index = __SHIFTOUT(rxs_stat_ring, ET_RXS_STATRING_INDEX);
1703
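/*
 * Hardware and driver each track the stat ring position as an index
 * plus a wrap bit; the ring is fully drained only when both the index
 * and the wrap bit match, which is what distinguishes a completely
 * full ring from an empty one.
 */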
1704 while (rxst_index != rxst_ring->rsr_index ||
1705 rxst_wrap != rxst_ring->rsr_wrap) {
1706 struct et_rxbuf_data *rbd;
1707 struct et_rxdesc_ring *rx_ring;
1708 struct et_rxstat *st;
1709 struct et_rxbuf *rb;
1710 struct mbuf *m;
1711 int buflen, buf_idx, ring_idx;
1712 uint32_t rxstat_pos, rxring_pos;
1713
1714 KASSERT(rxst_ring->rsr_index < ET_RX_NSTAT);
1715 st = &rxst_ring->rsr_stat[rxst_ring->rsr_index];
1716
1717 buflen = __SHIFTOUT(st->rxst_info2, ET_RXST_INFO2_LEN);
1718 buf_idx = __SHIFTOUT(st->rxst_info2, ET_RXST_INFO2_BUFIDX);
1719 ring_idx = __SHIFTOUT(st->rxst_info2, ET_RXST_INFO2_RINGIDX);
1720
1721 if (++rxst_ring->rsr_index == ET_RX_NSTAT) {
1722 rxst_ring->rsr_index = 0;
1723 rxst_ring->rsr_wrap ^= 1;
1724 }
1725 rxstat_pos = __SHIFTIN(rxst_ring->rsr_index,
1726 ET_RXSTAT_POS_INDEX);
1727 if (rxst_ring->rsr_wrap)
1728 rxstat_pos |= ET_RXSTAT_POS_WRAP;
1729 CSR_WRITE_4(sc, ET_RXSTAT_POS, rxstat_pos);
1730
1731 if (ring_idx >= ET_RX_NRING) {
1732 ifp->if_ierrors++;
1733 aprint_error_dev(sc->sc_dev, "invalid ring index %d\n",
1734 ring_idx);
1735 continue;
1736 }
1737 if (buf_idx >= ET_RX_NDESC) {
1738 ifp->if_ierrors++;
1739 aprint_error_dev(sc->sc_dev, "invalid buf index %d\n",
1740 buf_idx);
1741 continue;
1742 }
1743
1744 rbd = &sc->sc_rx_data[ring_idx];
1745 rb = &rbd->rbd_buf[buf_idx];
1746 m = rb->rb_mbuf;
1747 bus_dmamap_sync(sc->sc_dmat, rb->rb_dmap, 0,
1748 rb->rb_dmap->dm_mapsize, BUS_DMASYNC_POSTREAD);
1749
1750 if (rbd->rbd_newbuf(rbd, buf_idx, 0) == 0) {
1751 if (buflen < ETHER_CRC_LEN) {
1752 m_freem(m);
1753 ifp->if_ierrors++;
1754 } else {
1755 m->m_pkthdr.len = m->m_len = buflen -
1756 ETHER_CRC_LEN;
1757 m_set_rcvif(m, ifp);
1758
1759 bpf_mtap(ifp, m);
1760
1761 ifp->if_ipackets++;
1762 if_percpuq_enqueue(ifp->if_percpuq, m);
1763 }
1764 } else {
1765 ifp->if_ierrors++;
1766 }
1767
1768 rx_ring = &sc->sc_rx_ring[ring_idx];
1769
1770 if (buf_idx != rx_ring->rr_index) {
1771 aprint_error_dev(sc->sc_dev, "WARNING!! ring %d, "
1772 "buf_idx %d, rr_idx %d\n",
1773 ring_idx, buf_idx, rx_ring->rr_index);
1774 }
1775
1776 KASSERT(rx_ring->rr_index < ET_RX_NDESC);
1777 if (++rx_ring->rr_index == ET_RX_NDESC) {
1778 rx_ring->rr_index = 0;
1779 rx_ring->rr_wrap ^= 1;
1780 }
1781 rxring_pos = __SHIFTIN(rx_ring->rr_index, ET_RX_RING_POS_INDEX);
1782 if (rx_ring->rr_wrap)
1783 rxring_pos |= ET_RX_RING_POS_WRAP;
1784 CSR_WRITE_4(sc, rx_ring->rr_posreg, rxring_pos);
1785 }
1786 }
1787
1788 int
1789 et_encap(struct et_softc *sc, struct mbuf **m0)
1790 {
1791 struct mbuf *m = *m0;
1792 struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
1793 struct et_txbuf_data *tbd = &sc->sc_tx_data;
1794 struct et_txdesc *td;
1795 bus_dmamap_t map;
1796 int error, maxsegs, first_idx, last_idx, i;
1797 uint32_t tx_ready_pos, last_td_ctrl2;
1798
1799 maxsegs = ET_TX_NDESC - tbd->tbd_used;
1800 if (maxsegs > ET_NSEG_MAX)
1801 maxsegs = ET_NSEG_MAX;
1802 KASSERTMSG(maxsegs >= ET_NSEG_SPARE,
1803 "not enough spare TX desc (%d)\n", maxsegs);
1804
1805 KASSERT(tx_ring->tr_ready_index < ET_TX_NDESC);
1806 first_idx = tx_ring->tr_ready_index;
1807 map = tbd->tbd_buf[first_idx].tb_dmap;
1808
1809 error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
1810 BUS_DMA_NOWAIT);
1811 if (!error && map->dm_nsegs == 0) {
1812 bus_dmamap_unload(sc->sc_dmat, map);
1813 error = EFBIG;
1814 }
1815 if (error && error != EFBIG) {
1816 aprint_error_dev(sc->sc_dev, "can't load TX mbuf\n");
1817 goto back;
1818 }
1819 if (error) { /* error == EFBIG */
1820 struct mbuf *m_new;
1821
1822 error = 0;
1823
1824 MGETHDR(m_new, M_DONTWAIT, MT_DATA);
1825 if (m_new == NULL) {
1826 aprint_error_dev(sc->sc_dev, "can't defrag TX mbuf\n");
1827 error = ENOBUFS;
1828 goto back;
1829 }
1830
1831 M_COPY_PKTHDR(m_new, m);
1832 if (m->m_pkthdr.len > MHLEN) {
1833 MCLGET(m_new, M_DONTWAIT);
1834 if (!(m_new->m_flags & M_EXT)) {
1835 m_freem(m_new);
1836 error = ENOBUFS;
1837 }
1838 }
1839
1840 if (error) {
1841 aprint_error_dev(sc->sc_dev, "can't defrag TX buffer\n");
1842 goto back;
1843 }
1844
1845 m_copydata(m, 0, m->m_pkthdr.len, mtod(m_new, void *));
1846 m_freem(m);
1847 m_new->m_len = m_new->m_pkthdr.len;
1848 *m0 = m = m_new;
1849
1850 error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
1851 BUS_DMA_NOWAIT);
1852 if (error || map->dm_nsegs == 0) {
1853 if (map->dm_nsegs == 0) {
1854 bus_dmamap_unload(sc->sc_dmat, map);
1855 error = EFBIG;
1856 }
1857 aprint_error_dev(sc->sc_dev, "can't load defragged TX mbuf\n");
1858 goto back;
1859 }
1860 }
1861
1862 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
1863 BUS_DMASYNC_PREWRITE);
1864
1865 last_td_ctrl2 = ET_TDCTRL2_LAST_FRAG;
1866 sc->sc_tx += map->dm_nsegs;
1867 if (sc->sc_tx / sc->sc_tx_intr_nsegs != sc->sc_tx_intr) {
1868 sc->sc_tx_intr = sc->sc_tx / sc->sc_tx_intr_nsegs;
1869 last_td_ctrl2 |= ET_TDCTRL2_INTR;
1870 }
1871
1872 last_idx = -1;
1873 for (i = 0; i < map->dm_nsegs; ++i) {
1874 int idx;
1875
1876 idx = (first_idx + i) % ET_TX_NDESC;
1877 td = &tx_ring->tr_desc[idx];
1878 td->td_addr_hi = ET_ADDR_HI(map->dm_segs[i].ds_addr);
1879 td->td_addr_lo = ET_ADDR_LO(map->dm_segs[i].ds_addr);
1880 td->td_ctrl1 =
1881 __SHIFTIN(map->dm_segs[i].ds_len, ET_TDCTRL1_LEN);
1882
1883 if (i == map->dm_nsegs - 1) { /* Last frag */
1884 td->td_ctrl2 = last_td_ctrl2;
1885 last_idx = idx;
1886 }
1887
1888 KASSERT(tx_ring->tr_ready_index < ET_TX_NDESC);
1889 if (++tx_ring->tr_ready_index == ET_TX_NDESC) {
1890 tx_ring->tr_ready_index = 0;
1891 tx_ring->tr_ready_wrap ^= 1;
1892 }
1893 }
1894 td = &tx_ring->tr_desc[first_idx];
1895 td->td_ctrl2 |= ET_TDCTRL2_FIRST_FRAG; /* First frag */
1896
1897 KASSERT(last_idx >= 0);
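/*
 * The map that was actually loaded came from the first descriptor's
 * slot, so park it (together with the mbuf) at the last descriptor:
 * et_txeof() will then unload and free them only once the final
 * fragment of this frame has been reaped.
 */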
1898 tbd->tbd_buf[first_idx].tb_dmap = tbd->tbd_buf[last_idx].tb_dmap;
1899 tbd->tbd_buf[last_idx].tb_dmap = map;
1900 tbd->tbd_buf[last_idx].tb_mbuf = m;
1901
1902 tbd->tbd_used += map->dm_nsegs;
1903 KASSERT(tbd->tbd_used <= ET_TX_NDESC);
1904
1905 bus_dmamap_sync(sc->sc_dmat, tx_ring->tr_dmap, 0,
1906 tx_ring->tr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);
1907
1908
1909 tx_ready_pos = __SHIFTIN(tx_ring->tr_ready_index,
1910 ET_TX_READY_POS_INDEX);
1911 if (tx_ring->tr_ready_wrap)
1912 tx_ready_pos |= ET_TX_READY_POS_WRAP;
1913 CSR_WRITE_4(sc, ET_TX_READY_POS, tx_ready_pos);
1914
1915 error = 0;
1916 back:
1917 if (error) {
1918 m_freem(m);
1919 *m0 = NULL;
1920 }
1921 return error;
1922 }
1923
1924 void
1925 et_txeof(struct et_softc *sc)
1926 {
1927 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1928 struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
1929 struct et_txbuf_data *tbd = &sc->sc_tx_data;
1930 uint32_t tx_done;
1931 int end, wrap;
1932
1933 if (tbd->tbd_used == 0)
1934 return;
1935
1936 tx_done = CSR_READ_4(sc, ET_TX_DONE_POS);
1937 end = __SHIFTOUT(tx_done, ET_TX_DONE_POS_INDEX);
1938 wrap = (tx_done & ET_TX_DONE_POS_WRAP) ? 1 : 0;
1939
1940 while (tbd->tbd_start_index != end || tbd->tbd_start_wrap != wrap) {
1941 struct et_txbuf *tb;
1942
1943 KASSERT(tbd->tbd_start_index < ET_TX_NDESC);
1944 tb = &tbd->tbd_buf[tbd->tbd_start_index];
1945
1946 bzero(&tx_ring->tr_desc[tbd->tbd_start_index],
1947 sizeof(struct et_txdesc));
1948 bus_dmamap_sync(sc->sc_dmat, tx_ring->tr_dmap, 0,
1949 tx_ring->tr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);
1950
1951 if (tb->tb_mbuf != NULL) {
1952 bus_dmamap_unload(sc->sc_dmat, tb->tb_dmap);
1953 m_freem(tb->tb_mbuf);
1954 tb->tb_mbuf = NULL;
1955 ifp->if_opackets++;
1956 }
1957
1958 if (++tbd->tbd_start_index == ET_TX_NDESC) {
1959 tbd->tbd_start_index = 0;
1960 tbd->tbd_start_wrap ^= 1;
1961 }
1962
1963 KASSERT(tbd->tbd_used > 0);
1964 tbd->tbd_used--;
1965 }
1966
1967 if (tbd->tbd_used == 0) {
1968 callout_stop(&sc->sc_txtick);
1969 ifp->if_timer = 0;
1970 }
1971 if (tbd->tbd_used + ET_NSEG_SPARE <= ET_TX_NDESC)
1972 ifp->if_flags &= ~IFF_OACTIVE;
1973
1974 et_start(ifp);
1975 }
1976
1977 void
1978 et_txtick(void *xsc)
1979 {
1980 struct et_softc *sc = xsc;
1981 int s;
1982
1983 s = splnet();
1984 et_txeof(sc);
1985 splx(s);
1986 }
1987
1988 void
1989 et_tick(void *xsc)
1990 {
1991 struct et_softc *sc = xsc;
1992 int s;
1993
1994 s = splnet();
1995 mii_tick(&sc->sc_miibus);
1996 callout_schedule(&sc->sc_tick, hz);
1997 splx(s);
1998 }
1999
2000 int
2001 et_newbuf_cluster(struct et_rxbuf_data *rbd, int buf_idx, int init)
2002 {
2003 return et_newbuf(rbd, buf_idx, init, MCLBYTES);
2004 }
2005
2006 int
2007 et_newbuf_hdr(struct et_rxbuf_data *rbd, int buf_idx, int init)
2008 {
2009 return et_newbuf(rbd, buf_idx, init, MHLEN);
2010 }
2011
2012 int
2013 et_newbuf(struct et_rxbuf_data *rbd, int buf_idx, int init, int len0)
2014 {
2015 struct et_softc *sc = rbd->rbd_softc;
2016 struct et_rxdesc_ring *rx_ring;
2017 struct et_rxdesc *desc;
2018 struct et_rxbuf *rb;
2019 struct mbuf *m;
2020 bus_dmamap_t dmap;
2021 int error, len;
2022
2023 KASSERT(buf_idx < ET_RX_NDESC);
2024 rb = &rbd->rbd_buf[buf_idx];
2025
2026 if (len0 >= MINCLSIZE) {
2027 MGETHDR(m, init ? M_WAITOK : M_DONTWAIT, MT_DATA);
2028 if (m == NULL)
2029 return (ENOBUFS);
2030 MCLGET(m, init ? M_WAITOK : M_DONTWAIT);
if ((m->m_flags & M_EXT) == 0) {
/* cluster allocation failed; take the ENOBUFS path below */
m_freem(m);
m = NULL;
}
2031 len = MCLBYTES;
2032 } else {
2033 MGETHDR(m, init ? M_WAITOK : M_DONTWAIT, MT_DATA);
2034 len = MHLEN;
2035 }
2036
2037 if (m == NULL) {
2038 error = ENOBUFS;
2039
2040 /* XXX for debug */
2041 aprint_error_dev(sc->sc_dev, "mbuf allocation failed, size %d\n", len0);
2042 if (init) {
2043 return error;
2044 } else {
2045 goto back;
2046 }
2047 }
2048 m->m_len = m->m_pkthdr.len = len;
2049
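/*
 * Load the replacement mbuf through the spare map first: if the load
 * fails, the ring slot's current buffer and mapping remain intact and
 * the descriptor is simply recycled at "back".
 */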
2050 /*
2051 * Try to load the RX mbuf into the temporary DMA map
2052 */
2053 error = bus_dmamap_load_mbuf(sc->sc_dmat, sc->sc_mbuf_tmp_dmap, m,
2054 init ? BUS_DMA_WAITOK : BUS_DMA_NOWAIT);
2055 if (error) {
2056 m_freem(m);
2057
2058 /* XXX for debug */
2059 aprint_error_dev(sc->sc_dev, "can't load RX mbuf\n");
2060 if (init) {
2061 return error;
2062 } else {
2063 goto back;
2064 }
2065 }
2066
2067 if (!init)
2068 bus_dmamap_unload(sc->sc_dmat, rb->rb_dmap);
2069 rb->rb_mbuf = m;
2070
2071 /*
2072 * Swap RX buf's DMA map with the loaded temporary one
2073 */
2074 dmap = rb->rb_dmap;
2075 rb->rb_dmap = sc->sc_mbuf_tmp_dmap;
2076 rb->rb_paddr = rb->rb_dmap->dm_segs[0].ds_addr;
2077 sc->sc_mbuf_tmp_dmap = dmap;
2078
2079 error = 0;
2080 back:
2081 rx_ring = rbd->rbd_ring;
2082 desc = &rx_ring->rr_desc[buf_idx];
2083
2084 desc->rd_addr_hi = ET_ADDR_HI(rb->rb_paddr);
2085 desc->rd_addr_lo = ET_ADDR_LO(rb->rb_paddr);
2086 desc->rd_ctrl = __SHIFTIN(buf_idx, ET_RDCTRL_BUFIDX);
2087
2088 bus_dmamap_sync(sc->sc_dmat, rx_ring->rr_dmap, 0,
2089 rx_ring->rr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);
2090 return error;
2091 }
2092