/*	$OpenBSD: hme.c,v 1.83 2020/12/12 11:48:52 jan Exp $	*/
/*	$NetBSD: hme.c,v 1.21 2001/07/07 15:59:37 thorpej Exp $	*/

/*-
 * Copyright (c) 1999 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Paul Kranenburg.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * HME Ethernet module driver.
 */

#include "bpfilter.h"

#undef HMEDEBUG

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/syslog.h>
#include <sys/socket.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/ioctl.h>
#include <sys/errno.h>

#include <net/if.h>
#include <net/if_media.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <machine/bus.h>

#include <dev/ic/hmereg.h>
#include <dev/ic/hmevar.h>

struct cfdriver hme_cd = {
	NULL, "hme", DV_IFNET
};

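/*
 * Offset, in bytes, from the start of a receive buffer to the first
 * byte of the packet; two bytes of padding leave the IP header 32-bit
 * aligned behind the 14-byte Ethernet header.
 */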
#define HME_RX_OFFSET 2

void hme_start(struct ifnet *);
void hme_stop(struct hme_softc *, int);
int hme_ioctl(struct ifnet *, u_long, caddr_t);
void hme_tick(void *);
void hme_watchdog(struct ifnet *);
void hme_init(struct hme_softc *);
void hme_meminit(struct hme_softc *);
void hme_mifinit(struct hme_softc *);
void hme_reset(struct hme_softc *);
void hme_iff(struct hme_softc *);
void hme_fill_rx_ring(struct hme_softc *);
int hme_newbuf(struct hme_softc *, struct hme_sxd *);

/* MII methods & callbacks */
static int hme_mii_readreg(struct device *, int, int);
static void hme_mii_writereg(struct device *, int, int, int);
static void hme_mii_statchg(struct device *);

int hme_mediachange(struct ifnet *);
void hme_mediastatus(struct ifnet *, struct ifmediareq *);

int hme_eint(struct hme_softc *, u_int);
int hme_rint(struct hme_softc *);
int hme_tint(struct hme_softc *);

void
hme_config(struct hme_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct mii_data *mii = &sc->sc_mii;
	struct mii_softc *child;
	bus_dma_tag_t dmatag = sc->sc_dmatag;
	bus_dma_segment_t seg;
	bus_size_t size;
	int rseg, error, i;

	/*
	 * HME common initialization.
	 *
	 * hme_softc fields that must be initialized by the front-end:
	 *
	 * the bus tag:
	 *	sc_bustag
	 *
	 * the dma bus tag:
	 *	sc_dmatag
	 *
	 * the bus handles:
	 *	sc_seb		(Shared Ethernet Block registers)
	 *	sc_erx		(Receiver Unit registers)
	 *	sc_etx		(Transmitter Unit registers)
	 *	sc_mac		(MAC registers)
	 *	sc_mif		(Management Interface registers)
	 *
	 * the maximum bus burst size:
	 *	sc_burst
	 *
	 * the local Ethernet address:
	 *	sc_arpcom.ac_enaddr
	 *
	 */

	/* Make sure the chip is stopped. */
	hme_stop(sc, 0);

	for (i = 0; i < HME_TX_RING_SIZE; i++) {
		if (bus_dmamap_create(sc->sc_dmatag, MCLBYTES, HME_TX_NSEGS,
		    MCLBYTES, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &sc->sc_txd[i].sd_map) != 0) {
			sc->sc_txd[i].sd_map = NULL;
			goto fail;
		}
	}
	for (i = 0; i < HME_RX_RING_SIZE; i++) {
		if (bus_dmamap_create(sc->sc_dmatag, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &sc->sc_rxd[i].sd_map) != 0) {
			sc->sc_rxd[i].sd_map = NULL;
			goto fail;
		}
	}
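	/*
	 * A spare receive map lets hme_newbuf() load a fresh cluster
	 * before giving up a ring slot's old one, so a failed buffer
	 * allocation never leaves a descriptor without a loaded map.
	 */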
	if (bus_dmamap_create(sc->sc_dmatag, MCLBYTES, 1, MCLBYTES, 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &sc->sc_rxmap_spare) != 0) {
		sc->sc_rxmap_spare = NULL;
		goto fail;
	}

	/*
	 * Allocate DMA capable memory
	 * Buffer descriptors must be aligned on a 2048 byte boundary;
	 * take this into account when calculating the size. Note that
	 * the maximum number of descriptors (256) occupies 2048 bytes,
	 * so we allocate that much regardless of the number of descriptors.
	 */
	size = (HME_XD_SIZE * HME_RX_RING_MAX) +	/* RX descriptors */
	    (HME_XD_SIZE * HME_TX_RING_MAX);		/* TX descriptors */

	/* Allocate DMA buffer */
	if ((error = bus_dmamem_alloc(dmatag, size, 2048, 0, &seg, 1, &rseg,
	    BUS_DMA_NOWAIT)) != 0) {
		printf("\n%s: DMA buffer alloc error %d\n",
		    sc->sc_dev.dv_xname, error);
		return;
	}

	/* Map DMA memory in CPU addressable space */
	if ((error = bus_dmamem_map(dmatag, &seg, rseg, size,
	    &sc->sc_rb.rb_membase, BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
		printf("\n%s: DMA buffer map error %d\n",
		    sc->sc_dev.dv_xname, error);
		bus_dmamem_free(dmatag, &seg, rseg);
		return;
	}

	if ((error = bus_dmamap_create(dmatag, size, 1, size, 0,
	    BUS_DMA_NOWAIT, &sc->sc_dmamap)) != 0) {
		printf("\n%s: DMA map create error %d\n",
		    sc->sc_dev.dv_xname, error);
		return;
	}

	/* Load the buffer */
	if ((error = bus_dmamap_load(dmatag, sc->sc_dmamap,
	    sc->sc_rb.rb_membase, size, NULL,
	    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
		printf("\n%s: DMA buffer map load error %d\n",
		    sc->sc_dev.dv_xname, error);
		bus_dmamem_free(dmatag, &seg, rseg);
		return;
	}
	sc->sc_rb.rb_dmabase = sc->sc_dmamap->dm_segs[0].ds_addr;

	printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));

	/* Initialize ifnet structure. */
	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, sizeof ifp->if_xname);
	ifp->if_softc = sc;
	ifp->if_start = hme_start;
	ifp->if_ioctl = hme_ioctl;
	ifp->if_watchdog = hme_watchdog;
	ifp->if_flags =
	    IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_capabilities = IFCAP_VLAN_MTU;

	/* Initialize ifmedia structures and MII info */
	mii->mii_ifp = ifp;
	mii->mii_readreg = hme_mii_readreg;
	mii->mii_writereg = hme_mii_writereg;
	mii->mii_statchg = hme_mii_statchg;

	ifmedia_init(&mii->mii_media, IFM_IMASK,
	    hme_mediachange, hme_mediastatus);

	hme_mifinit(sc);

	if (sc->sc_tcvr == -1)
		mii_attach(&sc->sc_dev, mii, 0xffffffff, MII_PHY_ANY,
		    MII_OFFSET_ANY, 0);
	else
		mii_attach(&sc->sc_dev, mii, 0xffffffff, sc->sc_tcvr,
		    MII_OFFSET_ANY, 0);

	child = LIST_FIRST(&mii->mii_phys);
	if (child == NULL) {
		/* No PHY attached */
		ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_MANUAL, 0, NULL);
		ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_MANUAL);
	} else {
		/*
		 * Walk along the list of attached MII devices and
		 * establish an `MII instance' to `phy number'
		 * mapping. We'll use this mapping in media change
		 * requests to determine which phy to use to program
		 * the MIF configuration register.
		 */
		for (; child != NULL; child = LIST_NEXT(child, mii_list)) {
			/*
			 * Note: we support just two PHYs: the built-in
			 * internal device and an external on the MII
			 * connector.
			 */
			if (child->mii_phy > 1 || child->mii_inst > 1) {
				printf("%s: cannot accommodate MII device %s"
				    " at phy %d, instance %lld\n",
				    sc->sc_dev.dv_xname,
				    child->mii_dev.dv_xname,
				    child->mii_phy, child->mii_inst);
				continue;
			}

			sc->sc_phys[child->mii_inst] = child->mii_phy;
		}

		/*
		 * XXX - we can really do the following ONLY if the
		 * phy indeed has the auto negotiation capability!!
		 */
		ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_AUTO);
	}

	/* Attach the interface. */
	if_attach(ifp);
	ether_ifattach(ifp);

	timeout_set(&sc->sc_tick_ch, hme_tick, sc);
	return;

fail:
	if (sc->sc_rxmap_spare != NULL)
		bus_dmamap_destroy(sc->sc_dmatag, sc->sc_rxmap_spare);
	for (i = 0; i < HME_TX_RING_SIZE; i++)
		if (sc->sc_txd[i].sd_map != NULL)
			bus_dmamap_destroy(sc->sc_dmatag, sc->sc_txd[i].sd_map);
	for (i = 0; i < HME_RX_RING_SIZE; i++)
		if (sc->sc_rxd[i].sd_map != NULL)
			bus_dmamap_destroy(sc->sc_dmatag, sc->sc_rxd[i].sd_map);
}

void
hme_unconfig(struct hme_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	int i;

	hme_stop(sc, 1);

	bus_dmamap_destroy(sc->sc_dmatag, sc->sc_rxmap_spare);
	for (i = 0; i < HME_TX_RING_SIZE; i++)
		if (sc->sc_txd[i].sd_map != NULL)
			bus_dmamap_destroy(sc->sc_dmatag, sc->sc_txd[i].sd_map);
	for (i = 0; i < HME_RX_RING_SIZE; i++)
		if (sc->sc_rxd[i].sd_map != NULL)
			bus_dmamap_destroy(sc->sc_dmatag, sc->sc_rxd[i].sd_map);

	/* Detach all PHYs */
	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);

	/* Delete all remaining media. */
	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);

	ether_ifdetach(ifp);
	if_detach(ifp);
}

void
hme_tick(void *arg)
{
	struct hme_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mac = sc->sc_mac;
	int s;

	s = splnet();
	/*
	 * Unload collision counters
	 */
	ifp->if_collisions +=
	    bus_space_read_4(t, mac, HME_MACI_NCCNT) +
	    bus_space_read_4(t, mac, HME_MACI_FCCNT) +
	    bus_space_read_4(t, mac, HME_MACI_EXCNT) +
	    bus_space_read_4(t, mac, HME_MACI_LTCNT);

	/*
	 * then clear the hardware counters.
	 */
	bus_space_write_4(t, mac, HME_MACI_NCCNT, 0);
	bus_space_write_4(t, mac, HME_MACI_FCCNT, 0);
	bus_space_write_4(t, mac, HME_MACI_EXCNT, 0);
	bus_space_write_4(t, mac, HME_MACI_LTCNT, 0);

	/*
	 * If buffer allocation fails, the receive ring may become
	 * empty. There is no receive interrupt to recover from that.
	 */
	if (if_rxr_inuse(&sc->sc_rx_ring) == 0)
		hme_fill_rx_ring(sc);

	mii_tick(&sc->sc_mii);
	splx(s);

	timeout_add_sec(&sc->sc_tick_ch, 1);
}

void
hme_reset(struct hme_softc *sc)
{
	int s;

	s = splnet();
	hme_init(sc);
	splx(s);
}

void
hme_stop(struct hme_softc *sc, int softonly)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t seb = sc->sc_seb;
	int n;

	timeout_del(&sc->sc_tick_ch);

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);
	ifp->if_timer = 0;

	if (!softonly) {
		mii_down(&sc->sc_mii);

		/* Mask all interrupts */
		bus_space_write_4(t, seb, HME_SEBI_IMASK, 0xffffffff);

		/* Reset transmitter and receiver */
		bus_space_write_4(t, seb, HME_SEBI_RESET,
		    (HME_SEB_RESET_ETX | HME_SEB_RESET_ERX));

		for (n = 0; n < 20; n++) {
			u_int32_t v = bus_space_read_4(t, seb, HME_SEBI_RESET);
			if ((v & (HME_SEB_RESET_ETX | HME_SEB_RESET_ERX)) == 0)
				break;
			DELAY(20);
		}
		if (n >= 20)
			printf("%s: hme_stop: reset failed\n",
			    sc->sc_dev.dv_xname);
	}

	for (n = 0; n < HME_TX_RING_SIZE; n++) {
		if (sc->sc_txd[n].sd_mbuf != NULL) {
			bus_dmamap_sync(sc->sc_dmatag, sc->sc_txd[n].sd_map,
			    0, sc->sc_txd[n].sd_map->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmatag, sc->sc_txd[n].sd_map);
			m_freem(sc->sc_txd[n].sd_mbuf);
			sc->sc_txd[n].sd_mbuf = NULL;
		}
	}
	sc->sc_tx_prod = sc->sc_tx_cons = sc->sc_tx_cnt = 0;

	for (n = 0; n < HME_RX_RING_SIZE; n++) {
		if (sc->sc_rxd[n].sd_mbuf != NULL) {
			bus_dmamap_sync(sc->sc_dmatag, sc->sc_rxd[n].sd_map,
			    0, sc->sc_rxd[n].sd_map->dm_mapsize,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmatag, sc->sc_rxd[n].sd_map);
			m_freem(sc->sc_rxd[n].sd_mbuf);
			sc->sc_rxd[n].sd_mbuf = NULL;
		}
	}
	sc->sc_rx_prod = sc->sc_rx_cons = 0;
}

void
hme_meminit(struct hme_softc *sc)
{
	bus_addr_t dma;
	caddr_t p;
	unsigned int i;
	struct hme_ring *hr = &sc->sc_rb;

	p = hr->rb_membase;
	dma = hr->rb_dmabase;

	/*
	 * Allocate transmit descriptors
	 */
	hr->rb_txd = p;
	hr->rb_txddma = dma;
	p += HME_TX_RING_SIZE * HME_XD_SIZE;
	dma += HME_TX_RING_SIZE * HME_XD_SIZE;
	/* We have reserved descriptor space until the next 2048 byte boundary. */
	dma = (bus_addr_t)roundup((u_long)dma, 2048);
	p = (caddr_t)roundup((u_long)p, 2048);

	/*
	 * Allocate receive descriptors
	 */
	hr->rb_rxd = p;
	hr->rb_rxddma = dma;
	p += HME_RX_RING_SIZE * HME_XD_SIZE;
	dma += HME_RX_RING_SIZE * HME_XD_SIZE;
	/* Again move forward to the next 2048 byte boundary. */
	dma = (bus_addr_t)roundup((u_long)dma, 2048);
	p = (caddr_t)roundup((u_long)p, 2048);

	/*
	 * Initialize transmit descriptors
	 */
	for (i = 0; i < HME_TX_RING_SIZE; i++) {
		HME_XD_SETADDR(sc->sc_pci, hr->rb_txd, i, 0);
		HME_XD_SETFLAGS(sc->sc_pci, hr->rb_txd, i, 0);
		sc->sc_txd[i].sd_mbuf = NULL;
	}

	/*
	 * Initialize receive descriptors
	 */
	for (i = 0; i < HME_RX_RING_SIZE; i++) {
		HME_XD_SETADDR(sc->sc_pci, hr->rb_rxd, i, 0);
		HME_XD_SETFLAGS(sc->sc_pci, hr->rb_rxd, i, 0);
		sc->sc_rxd[i].sd_mbuf = NULL;
	}

	if_rxr_init(&sc->sc_rx_ring, 2, HME_RX_RING_SIZE);
	hme_fill_rx_ring(sc);
}

/*
 * Initialization of interface; set up initialization block
 * and transmit/receive descriptor rings.
 */
void
hme_init(struct hme_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t seb = sc->sc_seb;
	bus_space_handle_t etx = sc->sc_etx;
	bus_space_handle_t erx = sc->sc_erx;
	bus_space_handle_t mac = sc->sc_mac;
	u_int8_t *ea;
	u_int32_t v;

	/*
	 * Initialization sequence. The numbered steps below correspond
	 * to the sequence outlined in section 6.3.5.1 in the Ethernet
	 * Channel Engine manual (part of the PCIO manual).
	 * See also the STP2002-STQ document from Sun Microsystems.
	 */

	/* step 1 & 2. Reset the Ethernet Channel */
	hme_stop(sc, 0);

	/* Re-initialize the MIF */
	hme_mifinit(sc);

	/* step 3. Setup data structures in host memory */
	hme_meminit(sc);

	/* step 4. TX MAC registers & counters */
	bus_space_write_4(t, mac, HME_MACI_NCCNT, 0);
	bus_space_write_4(t, mac, HME_MACI_FCCNT, 0);
	bus_space_write_4(t, mac, HME_MACI_EXCNT, 0);
	bus_space_write_4(t, mac, HME_MACI_LTCNT, 0);
	bus_space_write_4(t, mac, HME_MACI_TXSIZE,
	    ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN);

	/* Load station MAC address */
	ea = sc->sc_arpcom.ac_enaddr;
	bus_space_write_4(t, mac, HME_MACI_MACADDR0, (ea[0] << 8) | ea[1]);
	bus_space_write_4(t, mac, HME_MACI_MACADDR1, (ea[2] << 8) | ea[3]);
	bus_space_write_4(t, mac, HME_MACI_MACADDR2, (ea[4] << 8) | ea[5]);

	/*
	 * Init seed for backoff
	 * (source suggested by manual: low 10 bits of MAC address)
	 */
	v = ((ea[4] << 8) | ea[5]) & 0x3fff;
	bus_space_write_4(t, mac, HME_MACI_RANDSEED, v);


	/* Note: Accepting power-on default for other MAC registers here.. */


	/* step 5. RX MAC registers & counters */
	hme_iff(sc);

	/* step 6 & 7. Program Descriptor Ring Base Addresses */
	bus_space_write_4(t, etx, HME_ETXI_RING, sc->sc_rb.rb_txddma);
	bus_space_write_4(t, etx, HME_ETXI_RSIZE, HME_TX_RING_SIZE);

	bus_space_write_4(t, erx, HME_ERXI_RING, sc->sc_rb.rb_rxddma);
	bus_space_write_4(t, mac, HME_MACI_RXSIZE,
	    ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN);

	/* step 8. Global Configuration & Interrupt Mask */
	bus_space_write_4(t, seb, HME_SEBI_IMASK,
	    ~(HME_SEB_STAT_HOSTTOTX | HME_SEB_STAT_RXTOHOST |
	      HME_SEB_STAT_TXALL | HME_SEB_STAT_TXPERR |
	      HME_SEB_STAT_RCNTEXP | HME_SEB_STAT_ALL_ERRORS));

	switch (sc->sc_burst) {
	default:
		v = 0;
		break;
	case 16:
		v = HME_SEB_CFG_BURST16;
		break;
	case 32:
		v = HME_SEB_CFG_BURST32;
		break;
	case 64:
		v = HME_SEB_CFG_BURST64;
		break;
	}
	bus_space_write_4(t, seb, HME_SEBI_CFG, v);

	/* step 9. ETX Configuration: use mostly default values */

	/* Enable DMA */
	v = bus_space_read_4(t, etx, HME_ETXI_CFG);
	v |= HME_ETX_CFG_DMAENABLE;
	bus_space_write_4(t, etx, HME_ETXI_CFG, v);

	/* Transmit Descriptor ring size: in increments of 16 */
	bus_space_write_4(t, etx, HME_ETXI_RSIZE, HME_TX_RING_SIZE / 16 - 1);

	/* step 10. ERX Configuration */
	v = bus_space_read_4(t, erx, HME_ERXI_CFG);
	v &= ~HME_ERX_CFG_RINGSIZE256;
#if HME_RX_RING_SIZE == 32
	v |= HME_ERX_CFG_RINGSIZE32;
#elif HME_RX_RING_SIZE == 64
	v |= HME_ERX_CFG_RINGSIZE64;
#elif HME_RX_RING_SIZE == 128
	v |= HME_ERX_CFG_RINGSIZE128;
#elif HME_RX_RING_SIZE == 256
	v |= HME_ERX_CFG_RINGSIZE256;
#else
# error "RX ring size must be 32, 64, 128, or 256"
#endif
	/* Enable DMA, and set the receive first byte offset */
	v |= HME_ERX_CFG_DMAENABLE | (HME_RX_OFFSET << 3);
	bus_space_write_4(t, erx, HME_ERXI_CFG, v);

	/* step 11. XIF Configuration */
	v = bus_space_read_4(t, mac, HME_MACI_XIF);
	v |= HME_MAC_XIF_OE;
	bus_space_write_4(t, mac, HME_MACI_XIF, v);

	/* step 12. RX_MAC Configuration Register */
	v = bus_space_read_4(t, mac, HME_MACI_RXCFG);
	v |= HME_MAC_RXCFG_ENABLE;
	bus_space_write_4(t, mac, HME_MACI_RXCFG, v);

	/* step 13. TX_MAC Configuration Register */
	v = bus_space_read_4(t, mac, HME_MACI_TXCFG);
	v |= (HME_MAC_TXCFG_ENABLE | HME_MAC_TXCFG_DGIVEUP);
	bus_space_write_4(t, mac, HME_MACI_TXCFG, v);

	/* Set the current media. */
	mii_mediachg(&sc->sc_mii);

	/* Start the one second timer. */
	timeout_add_sec(&sc->sc_tick_ch, 1);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	hme_start(ifp);
}

void
hme_start(struct ifnet *ifp)
{
	struct hme_softc *sc = (struct hme_softc *)ifp->if_softc;
	struct hme_ring *hr = &sc->sc_rb;
	struct mbuf *m;
	u_int32_t flags;
	bus_dmamap_t map;
	u_int32_t frag, cur, i;
	int error;

	if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd))
		return;

	while (sc->sc_txd[sc->sc_tx_prod].sd_mbuf == NULL) {
		m = ifq_deq_begin(&ifp->if_snd);
		if (m == NULL)
			break;

		/*
		 * Encapsulate this packet and start it going...
		 * or fail...
		 */

		cur = frag = sc->sc_tx_prod;
		map = sc->sc_txd[cur].sd_map;

		error = bus_dmamap_load_mbuf(sc->sc_dmatag, map, m,
		    BUS_DMA_NOWAIT);
		if (error != 0 && error != EFBIG)
			goto drop;
		if (error != 0) {
			/* Too many fragments, linearize. */
			if (m_defrag(m, M_DONTWAIT))
				goto drop;
			error = bus_dmamap_load_mbuf(sc->sc_dmatag, map, m,
			    BUS_DMA_NOWAIT);
			if (error != 0)
				goto drop;
		}

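		/*
		 * Leave a little slack in the ring: if loading this
		 * chain would bring the number of free descriptors
		 * below 5, back out and wait for hme_tint() to
		 * reclaim some.
		 */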
		if ((HME_TX_RING_SIZE - (sc->sc_tx_cnt + map->dm_nsegs)) < 5) {
			bus_dmamap_unload(sc->sc_dmatag, map);
			ifq_deq_rollback(&ifp->if_snd, m);
			ifq_set_oactive(&ifp->if_snd);
			break;
		}

		/* We are now committed to transmitting the packet. */
		ifq_deq_commit(&ifp->if_snd, m);

#if NBPFILTER > 0
		/*
		 * If BPF is listening on this interface, let it see the
		 * packet before we commit it to the wire.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif

		bus_dmamap_sync(sc->sc_dmatag, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

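		/*
		 * Build the descriptor chain. Every descriptor except
		 * the first is handed to the chip (HME_XD_OWN) right
		 * away; the first one is flipped last, below, so the
		 * hardware never sees a partially built chain.
		 */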
		for (i = 0; i < map->dm_nsegs; i++) {
			flags = HME_XD_ENCODE_TSIZE(map->dm_segs[i].ds_len);
			if (i == 0)
				flags |= HME_XD_SOP;
			else
				flags |= HME_XD_OWN;

			HME_XD_SETADDR(sc->sc_pci, hr->rb_txd, frag,
			    map->dm_segs[i].ds_addr);
			HME_XD_SETFLAGS(sc->sc_pci, hr->rb_txd, frag, flags);

			cur = frag;
			if (++frag == HME_TX_RING_SIZE)
				frag = 0;
		}

		/* Set end of packet on last descriptor. */
		flags = HME_XD_GETFLAGS(sc->sc_pci, hr->rb_txd, cur);
		flags |= HME_XD_EOP;
		HME_XD_SETFLAGS(sc->sc_pci, hr->rb_txd, cur, flags);

		sc->sc_tx_cnt += map->dm_nsegs;
		sc->sc_txd[sc->sc_tx_prod].sd_map = sc->sc_txd[cur].sd_map;
		sc->sc_txd[cur].sd_map = map;
		sc->sc_txd[cur].sd_mbuf = m;

		/* Give first frame over to the hardware. */
		flags = HME_XD_GETFLAGS(sc->sc_pci, hr->rb_txd, sc->sc_tx_prod);
		flags |= HME_XD_OWN;
		HME_XD_SETFLAGS(sc->sc_pci, hr->rb_txd, sc->sc_tx_prod, flags);

		bus_space_write_4(sc->sc_bustag, sc->sc_etx, HME_ETXI_PENDING,
		    HME_ETX_TP_DMAWAKEUP);
		sc->sc_tx_prod = frag;

		ifp->if_timer = 5;
	}

	return;

drop:
	ifq_deq_commit(&ifp->if_snd, m);
	m_freem(m);
	ifp->if_oerrors++;
}

/*
 * Transmit interrupt.
 */
int
hme_tint(struct hme_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	unsigned int ri, txflags;
	struct hme_sxd *sd;
	int cnt = sc->sc_tx_cnt;

	/* Fetch current position in the transmit ring */
	ri = sc->sc_tx_cons;
	sd = &sc->sc_txd[ri];

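	/*
	 * Walk the ring from the consumer index, freeing every
	 * completed frame; stop at the first descriptor the chip
	 * still owns.
	 */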
	for (;;) {
		if (cnt <= 0)
			break;

		txflags = HME_XD_GETFLAGS(sc->sc_pci, sc->sc_rb.rb_txd, ri);

		if (txflags & HME_XD_OWN)
			break;

		ifq_clr_oactive(&ifp->if_snd);

		if (sd->sd_mbuf != NULL) {
			bus_dmamap_sync(sc->sc_dmatag, sd->sd_map,
			    0, sd->sd_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmatag, sd->sd_map);
			m_freem(sd->sd_mbuf);
			sd->sd_mbuf = NULL;
		}

		if (++ri == HME_TX_RING_SIZE) {
			ri = 0;
			sd = sc->sc_txd;
		} else
			sd++;

		--cnt;
	}

	sc->sc_tx_cnt = cnt;
	ifp->if_timer = cnt > 0 ? 5 : 0;

	/* Update ring */
	sc->sc_tx_cons = ri;

	hme_start(ifp);

	return (1);
}

/*
 * Receive interrupt.
 */
int
hme_rint(struct hme_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct mbuf *m;
	struct hme_sxd *sd;
	unsigned int ri, len;
	u_int32_t flags;

	ri = sc->sc_rx_cons;
	sd = &sc->sc_rxd[ri];

	/*
	 * Process all buffers with valid data.
	 */
	while (if_rxr_inuse(&sc->sc_rx_ring) > 0) {
		flags = HME_XD_GETFLAGS(sc->sc_pci, sc->sc_rb.rb_rxd, ri);
		if (flags & HME_XD_OWN)
			break;

		bus_dmamap_sync(sc->sc_dmatag, sd->sd_map,
		    0, sd->sd_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmatag, sd->sd_map);

		m = sd->sd_mbuf;
		sd->sd_mbuf = NULL;

		if (++ri == HME_RX_RING_SIZE) {
			ri = 0;
			sd = sc->sc_rxd;
		} else
			sd++;

		if_rxr_put(&sc->sc_rx_ring, 1);

		if (flags & HME_XD_OFL) {
			ifp->if_ierrors++;
			printf("%s: buffer overflow, ri=%d; flags=0x%x\n",
			    sc->sc_dev.dv_xname, ri, flags);
			m_freem(m);
			continue;
		}

		len = HME_XD_DECODE_RSIZE(flags);
		m->m_pkthdr.len = m->m_len = len;

		ml_enqueue(&ml, m);
	}

	if (ifiq_input(&ifp->if_rcv, &ml))
		if_rxr_livelocked(&sc->sc_rx_ring);

	sc->sc_rx_cons = ri;
	hme_fill_rx_ring(sc);
	return (1);
}

int
hme_eint(struct hme_softc *sc, u_int status)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;

	if (status & HME_SEB_STAT_MIFIRQ) {
		printf("%s: XXXlink status changed\n", sc->sc_dev.dv_xname);
		status &= ~HME_SEB_STAT_MIFIRQ;
	}

	if (status & HME_SEB_STAT_DTIMEXP) {
		ifp->if_oerrors++;
		status &= ~HME_SEB_STAT_DTIMEXP;
	}

	if (status & HME_SEB_STAT_NORXD) {
		ifp->if_ierrors++;
		status &= ~HME_SEB_STAT_NORXD;
	}

	status &= ~(HME_SEB_STAT_RXTOHOST | HME_SEB_STAT_GOTFRAME |
	    HME_SEB_STAT_SENTFRAME | HME_SEB_STAT_HOSTTOTX |
	    HME_SEB_STAT_TXALL);

	if (status == 0)
		return (1);

#ifdef HMEDEBUG
	printf("%s: status=%b\n", sc->sc_dev.dv_xname, status,
	    HME_SEB_STAT_BITS);
#endif
	return (1);
}

int
hme_intr(void *v)
{
	struct hme_softc *sc = (struct hme_softc *)v;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t seb = sc->sc_seb;
	u_int32_t status;
	int r = 0;

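	/*
	 * An all-ones status likely means the device is no longer
	 * responding (e.g. it has been detached), so don't claim
	 * the interrupt.
	 */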
	status = bus_space_read_4(t, seb, HME_SEBI_STAT);
	if (status == 0xffffffff)
		return (0);

	if ((status & HME_SEB_STAT_ALL_ERRORS) != 0)
		r |= hme_eint(sc, status);

	if ((status & (HME_SEB_STAT_TXALL | HME_SEB_STAT_HOSTTOTX)) != 0)
		r |= hme_tint(sc);

	if ((status & HME_SEB_STAT_RXTOHOST) != 0)
		r |= hme_rint(sc);

	return (r);
}


void
hme_watchdog(struct ifnet *ifp)
{
	struct hme_softc *sc = ifp->if_softc;

	log(LOG_ERR, "%s: device timeout\n", sc->sc_dev.dv_xname);
	ifp->if_oerrors++;

	hme_reset(sc);
}

/*
 * Initialize the MII Management Interface
 */
void
hme_mifinit(struct hme_softc *sc)
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mif = sc->sc_mif;
	bus_space_handle_t mac = sc->sc_mac;
	int phy;
	u_int32_t v;

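	/*
	 * Check which transceivers the MIF reports attached: MDI1
	 * means an external PHY on the MII connector, MDI0 the
	 * on-board PHY. Prefer the external one when both are present.
	 */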
	v = bus_space_read_4(t, mif, HME_MIFI_CFG);
	phy = HME_PHYAD_EXTERNAL;
	if (v & HME_MIF_CFG_MDI1)
		phy = sc->sc_tcvr = HME_PHYAD_EXTERNAL;
	else if (v & HME_MIF_CFG_MDI0)
		phy = sc->sc_tcvr = HME_PHYAD_INTERNAL;
	else
		sc->sc_tcvr = -1;

	/* Configure the MIF in frame mode, no poll, current phy select */
	v = 0;
	if (phy == HME_PHYAD_EXTERNAL)
		v |= HME_MIF_CFG_PHY;
	bus_space_write_4(t, mif, HME_MIFI_CFG, v);

	/* If an external transceiver is selected, enable its MII drivers */
	v = bus_space_read_4(t, mac, HME_MACI_XIF);
	v &= ~HME_MAC_XIF_MIIENABLE;
	if (phy == HME_PHYAD_EXTERNAL)
		v |= HME_MAC_XIF_MIIENABLE;
	bus_space_write_4(t, mac, HME_MACI_XIF, v);
}

/*
 * MII interface
 */
static int
hme_mii_readreg(struct device *self, int phy, int reg)
{
	struct hme_softc *sc = (struct hme_softc *)self;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mif = sc->sc_mif;
	bus_space_handle_t mac = sc->sc_mac;
	u_int32_t v, xif_cfg, mifi_cfg;
	int n;

	if (phy != HME_PHYAD_EXTERNAL && phy != HME_PHYAD_INTERNAL)
		return (0);

	/* Select the desired PHY in the MIF configuration register */
	v = mifi_cfg = bus_space_read_4(t, mif, HME_MIFI_CFG);
	v &= ~HME_MIF_CFG_PHY;
	if (phy == HME_PHYAD_EXTERNAL)
		v |= HME_MIF_CFG_PHY;
	bus_space_write_4(t, mif, HME_MIFI_CFG, v);

	/* Enable MII drivers on external transceiver */
	v = xif_cfg = bus_space_read_4(t, mac, HME_MACI_XIF);
	if (phy == HME_PHYAD_EXTERNAL)
		v |= HME_MAC_XIF_MIIENABLE;
	else
		v &= ~HME_MAC_XIF_MIIENABLE;
	bus_space_write_4(t, mac, HME_MACI_XIF, v);

	/* Construct the frame command */
	v = (MII_COMMAND_START << HME_MIF_FO_ST_SHIFT) |
	    HME_MIF_FO_TAMSB |
	    (MII_COMMAND_READ << HME_MIF_FO_OPC_SHIFT) |
	    (phy << HME_MIF_FO_PHYAD_SHIFT) |
	    (reg << HME_MIF_FO_REGAD_SHIFT);

	bus_space_write_4(t, mif, HME_MIFI_FO, v);
	for (n = 0; n < 100; n++) {
		DELAY(1);
		v = bus_space_read_4(t, mif, HME_MIFI_FO);
		if (v & HME_MIF_FO_TALSB) {
			v &= HME_MIF_FO_DATA;
			goto out;
		}
	}

	v = 0;
	printf("%s: mii_read timeout\n", sc->sc_dev.dv_xname);

out:
	/* Restore MIFI_CFG register */
	bus_space_write_4(t, mif, HME_MIFI_CFG, mifi_cfg);
	/* Restore XIF register */
	bus_space_write_4(t, mac, HME_MACI_XIF, xif_cfg);
	return (v);
}

static void
hme_mii_writereg(struct device *self, int phy, int reg, int val)
{
	struct hme_softc *sc = (void *)self;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mif = sc->sc_mif;
	bus_space_handle_t mac = sc->sc_mac;
	u_int32_t v, xif_cfg, mifi_cfg;
	int n;

	/* We can at most have two PHYs */
	if (phy != HME_PHYAD_EXTERNAL && phy != HME_PHYAD_INTERNAL)
		return;

	/* Select the desired PHY in the MIF configuration register */
	v = mifi_cfg = bus_space_read_4(t, mif, HME_MIFI_CFG);
	v &= ~HME_MIF_CFG_PHY;
	if (phy == HME_PHYAD_EXTERNAL)
		v |= HME_MIF_CFG_PHY;
	bus_space_write_4(t, mif, HME_MIFI_CFG, v);

	/* Enable MII drivers on external transceiver */
	v = xif_cfg = bus_space_read_4(t, mac, HME_MACI_XIF);
	if (phy == HME_PHYAD_EXTERNAL)
		v |= HME_MAC_XIF_MIIENABLE;
	else
		v &= ~HME_MAC_XIF_MIIENABLE;
	bus_space_write_4(t, mac, HME_MACI_XIF, v);

	/* Construct the frame command */
	v = (MII_COMMAND_START << HME_MIF_FO_ST_SHIFT) |
	    HME_MIF_FO_TAMSB |
	    (MII_COMMAND_WRITE << HME_MIF_FO_OPC_SHIFT) |
	    (phy << HME_MIF_FO_PHYAD_SHIFT) |
	    (reg << HME_MIF_FO_REGAD_SHIFT) |
	    (val & HME_MIF_FO_DATA);

	bus_space_write_4(t, mif, HME_MIFI_FO, v);
	for (n = 0; n < 100; n++) {
		DELAY(1);
		v = bus_space_read_4(t, mif, HME_MIFI_FO);
		if (v & HME_MIF_FO_TALSB)
			goto out;
	}

	printf("%s: mii_write timeout\n", sc->sc_dev.dv_xname);
out:
	/* Restore MIFI_CFG register */
	bus_space_write_4(t, mif, HME_MIFI_CFG, mifi_cfg);
	/* Restore XIF register */
	bus_space_write_4(t, mac, HME_MACI_XIF, xif_cfg);
}

static void
hme_mii_statchg(struct device *dev)
{
	struct hme_softc *sc = (void *)dev;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mac = sc->sc_mac;
	u_int32_t v;

#ifdef HMEDEBUG
	if (sc->sc_debug)
		printf("hme_mii_statchg: status change\n");
#endif

	/*
	 * Set the MAC Full Duplex bit appropriately.
	 * Apparently the hme chip is SIMPLEX if working in full duplex
	 * mode, but not otherwise.
	 */
	v = bus_space_read_4(t, mac, HME_MACI_TXCFG);
	if ((IFM_OPTIONS(sc->sc_mii.mii_media_active) & IFM_FDX) != 0) {
		v |= HME_MAC_TXCFG_FULLDPLX;
		sc->sc_arpcom.ac_if.if_flags |= IFF_SIMPLEX;
	} else {
		v &= ~HME_MAC_TXCFG_FULLDPLX;
		sc->sc_arpcom.ac_if.if_flags &= ~IFF_SIMPLEX;
	}
	bus_space_write_4(t, mac, HME_MACI_TXCFG, v);
}

int
hme_mediachange(struct ifnet *ifp)
{
	struct hme_softc *sc = ifp->if_softc;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mif = sc->sc_mif;
	bus_space_handle_t mac = sc->sc_mac;
	uint64_t instance = IFM_INST(sc->sc_mii.mii_media.ifm_cur->ifm_media);
	int phy = sc->sc_phys[instance];
	u_int32_t v;

#ifdef HMEDEBUG
	if (sc->sc_debug)
		printf("hme_mediachange: phy = %d\n", phy);
#endif
	if (IFM_TYPE(sc->sc_media.ifm_media) != IFM_ETHER)
		return (EINVAL);

	/* Select the current PHY in the MIF configuration register */
	v = bus_space_read_4(t, mif, HME_MIFI_CFG);
	v &= ~HME_MIF_CFG_PHY;
	if (phy == HME_PHYAD_EXTERNAL)
		v |= HME_MIF_CFG_PHY;
	bus_space_write_4(t, mif, HME_MIFI_CFG, v);

	/* If an external transceiver is selected, enable its MII drivers */
	v = bus_space_read_4(t, mac, HME_MACI_XIF);
	v &= ~HME_MAC_XIF_MIIENABLE;
	if (phy == HME_PHYAD_EXTERNAL)
		v |= HME_MAC_XIF_MIIENABLE;
	bus_space_write_4(t, mac, HME_MACI_XIF, v);

	return (mii_mediachg(&sc->sc_mii));
}

void
hme_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct hme_softc *sc = ifp->if_softc;

	if ((ifp->if_flags & IFF_UP) == 0)
		return;

	mii_pollstat(&sc->sc_mii);
	ifmr->ifm_active = sc->sc_mii.mii_media_active;
	ifmr->ifm_status = sc->sc_mii.mii_media_status;
}

/*
 * Process an ioctl request.
 */
int
hme_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct hme_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			hme_init(sc);
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				hme_init(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				hme_stop(sc, 0);
		}
#ifdef HMEDEBUG
		sc->sc_debug = (ifp->if_flags & IFF_DEBUG) != 0 ? 1 : 0;
#endif
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
		break;

	case SIOCGIFRXR:
		error = if_rxr_ioctl((struct if_rxrinfo *)ifr->ifr_data,
		    NULL, MCLBYTES, &sc->sc_rx_ring);
		break;

	default:
		error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data);
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			hme_iff(sc);
		error = 0;
	}

	splx(s);
	return (error);
}

void
hme_iff(struct hme_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct arpcom *ac = &sc->sc_arpcom;
	struct ether_multi *enm;
	struct ether_multistep step;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mac = sc->sc_mac;
	u_int32_t hash[4];
	u_int32_t rxcfg, crc;

	rxcfg = bus_space_read_4(t, mac, HME_MACI_RXCFG);
	rxcfg &= ~(HME_MAC_RXCFG_HENABLE | HME_MAC_RXCFG_PMISC);
	ifp->if_flags &= ~IFF_ALLMULTI;
	/* Clear hash table */
	hash[0] = hash[1] = hash[2] = hash[3] = 0;

	if (ifp->if_flags & IFF_PROMISC) {
		ifp->if_flags |= IFF_ALLMULTI;
		rxcfg |= HME_MAC_RXCFG_PMISC;
	} else if (ac->ac_multirangecnt > 0) {
		ifp->if_flags |= IFF_ALLMULTI;
		rxcfg |= HME_MAC_RXCFG_HENABLE;
		hash[0] = hash[1] = hash[2] = hash[3] = 0xffff;
	} else {
		rxcfg |= HME_MAC_RXCFG_HENABLE;

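		/*
		 * The hash filter is a 64-bit table split across four
		 * 16-bit registers; the top 6 bits of the little-endian
		 * CRC of each multicast address select the bit to set.
		 */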
		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			crc = ether_crc32_le(enm->enm_addrlo,
			    ETHER_ADDR_LEN) >> 26;

			/* Set the corresponding bit in the filter. */
			hash[crc >> 4] |= 1 << (crc & 0xf);

			ETHER_NEXT_MULTI(step, enm);
		}
	}

	/* Now load the hash table into the chip */
	bus_space_write_4(t, mac, HME_MACI_HASHTAB0, hash[0]);
	bus_space_write_4(t, mac, HME_MACI_HASHTAB1, hash[1]);
	bus_space_write_4(t, mac, HME_MACI_HASHTAB2, hash[2]);
	bus_space_write_4(t, mac, HME_MACI_HASHTAB3, hash[3]);
	bus_space_write_4(t, mac, HME_MACI_RXCFG, rxcfg);
}

void
hme_fill_rx_ring(struct hme_softc *sc)
{
	struct hme_sxd *sd;
	u_int slots;

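	/*
	 * Ask the rxr accounting how many slots we may fill, and hand
	 * back any we could not use so its count stays balanced.
	 */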
	for (slots = if_rxr_get(&sc->sc_rx_ring, HME_RX_RING_SIZE);
	    slots > 0; slots--) {
		if (hme_newbuf(sc, &sc->sc_rxd[sc->sc_rx_prod]))
			break;

		sd = &sc->sc_rxd[sc->sc_rx_prod];
		HME_XD_SETADDR(sc->sc_pci, sc->sc_rb.rb_rxd, sc->sc_rx_prod,
		    sd->sd_map->dm_segs[0].ds_addr);
		HME_XD_SETFLAGS(sc->sc_pci, sc->sc_rb.rb_rxd, sc->sc_rx_prod,
		    HME_XD_OWN | HME_XD_ENCODE_RSIZE(HME_RX_PKTSIZE));

		if (++sc->sc_rx_prod == HME_RX_RING_SIZE)
			sc->sc_rx_prod = 0;
	}
	if_rxr_put(&sc->sc_rx_ring, slots);
}

int
hme_newbuf(struct hme_softc *sc, struct hme_sxd *d)
{
	struct mbuf *m;
	bus_dmamap_t map;

	/*
	 * All operations should be on local variables and/or rx spare map
	 * until we're sure everything is a success.
	 */

	m = MCLGETL(NULL, M_DONTWAIT, MCLBYTES);
	if (!m)
		return (ENOBUFS);

	if (bus_dmamap_load(sc->sc_dmatag, sc->sc_rxmap_spare,
	    mtod(m, caddr_t), MCLBYTES - HME_RX_OFFSET, NULL,
	    BUS_DMA_NOWAIT) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}

	/*
	 * At this point we have a new buffer loaded into the spare map.
	 * Just need to clear out the old mbuf/map and put the new one
	 * in place.
	 */

	map = d->sd_map;
	d->sd_map = sc->sc_rxmap_spare;
	sc->sc_rxmap_spare = map;

	bus_dmamap_sync(sc->sc_dmatag, d->sd_map, 0, d->sd_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	m->m_data += HME_RX_OFFSET;
	d->sd_mbuf = m;
	return (0);
}
