1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
3 *
4 * Copyright (C) 2001 Eduardo Horvath.
5 * Copyright (c) 2001-2003 Thomas Moestl
6 * Copyright (c) 2007 Marius Strobl <marius@FreeBSD.org>
7 * All rights reserved.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND
19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE
22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 * SUCH DAMAGE.
29 *
30 * from: NetBSD: gem.c,v 1.21 2002/06/01 23:50:58 lukem Exp
31 */
32
33 #include <sys/cdefs.h>
34 /*
35 * Driver for Apple GMAC, Sun ERI and Sun GEM Ethernet controllers
36 */
37
38 #if 0
39 #define GEM_DEBUG
40 #endif
41
42 #if 0 /* XXX: In case of emergency, re-enable this. */
43 #define GEM_RINT_TIMEOUT
44 #endif
45
46 #include <sys/param.h>
47 #include <sys/systm.h>
48 #include <sys/bus.h>
49 #include <sys/callout.h>
50 #include <sys/endian.h>
51 #include <sys/mbuf.h>
52 #include <sys/malloc.h>
53 #include <sys/kernel.h>
54 #include <sys/lock.h>
55 #include <sys/module.h>
56 #include <sys/mutex.h>
57 #include <sys/socket.h>
58 #include <sys/sockio.h>
59 #include <sys/rman.h>
60
61 #include <net/bpf.h>
62 #include <net/ethernet.h>
63 #include <net/if.h>
64 #include <net/if_var.h>
65 #include <net/if_arp.h>
66 #include <net/if_dl.h>
67 #include <net/if_media.h>
68 #include <net/if_types.h>
69 #include <net/if_vlan_var.h>
70
71 #include <netinet/in.h>
72 #include <netinet/in_systm.h>
73 #include <netinet/ip.h>
74 #include <netinet/tcp.h>
75 #include <netinet/udp.h>
76
77 #include <machine/bus.h>
78
79 #include <dev/mii/mii.h>
80 #include <dev/mii/miivar.h>
81
82 #include <dev/gem/if_gemreg.h>
83 #include <dev/gem/if_gemvar.h>
84
85 CTASSERT(powerof2(GEM_NRXDESC) && GEM_NRXDESC >= 32 && GEM_NRXDESC <= 8192);
86 CTASSERT(powerof2(GEM_NTXDESC) && GEM_NTXDESC >= 32 && GEM_NTXDESC <= 8192);
87
88 #define GEM_TRIES 10000
89
90 /*
91 * The hardware supports basic TCP/UDP checksum offloading. However,
92 * the hardware doesn't compensate the checksum of UDP datagrams, which
93 * can yield 0x0. As a safeguard, UDP checksum offload is disabled
94 * by default. It can be reactivated by setting the special link option
95 * link0 with ifconfig(8).
96 */
97 #define GEM_CSUM_FEATURES (CSUM_TCP)
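/*
 * Illustrative note (not in the original source): per RFC 768 a UDP
 * checksum that computes to 0x0000 must be transmitted as 0xffff,
 * because 0x0000 on the wire means "no checksum". A transmitter that
 * stores the raw one's-complement result uncompensated can therefore
 * emit 0x0000, which receivers treat as checksum disabled; hence
 * TCP-only offload (CSUM_TCP) is the safe default here.
 */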
98
99 static int gem_add_rxbuf(struct gem_softc *sc, int idx);
100 static int gem_bitwait(struct gem_softc *sc, bus_addr_t r, uint32_t clr,
101 uint32_t set);
102 static void gem_cddma_callback(void *xsc, bus_dma_segment_t *segs,
103 int nsegs, int error);
104 static int gem_disable_rx(struct gem_softc *sc);
105 static int gem_disable_tx(struct gem_softc *sc);
106 static void gem_eint(struct gem_softc *sc, u_int status);
107 static void gem_init(void *xsc);
108 static void gem_init_locked(struct gem_softc *sc);
109 static void gem_init_regs(struct gem_softc *sc);
110 static int gem_ioctl(if_t ifp, u_long cmd, caddr_t data);
111 static int gem_load_txmbuf(struct gem_softc *sc, struct mbuf **m_head);
112 static int gem_meminit(struct gem_softc *sc);
113 static void gem_mifinit(struct gem_softc *sc);
114 static void gem_reset(struct gem_softc *sc);
115 static int gem_reset_rx(struct gem_softc *sc);
116 static void gem_reset_rxdma(struct gem_softc *sc);
117 static int gem_reset_tx(struct gem_softc *sc);
118 static u_int gem_ringsize(u_int sz);
119 static void gem_rint(struct gem_softc *sc);
120 #ifdef GEM_RINT_TIMEOUT
121 static void gem_rint_timeout(void *arg);
122 #endif
123 static inline void gem_rxcksum(struct mbuf *m, uint64_t flags);
124 static void gem_rxdrain(struct gem_softc *sc);
125 static void gem_setladrf(struct gem_softc *sc);
126 static void gem_start(if_t ifp);
127 static void gem_start_locked(if_t ifp);
128 static void gem_stop(if_t ifp, int disable);
129 static void gem_tick(void *arg);
130 static void gem_tint(struct gem_softc *sc);
131 static inline void gem_txkick(struct gem_softc *sc);
132 static int gem_watchdog(struct gem_softc *sc);
133
134 DRIVER_MODULE(miibus, gem, miibus_driver, 0, 0);
135 MODULE_DEPEND(gem, miibus, 1, 1, 1);
136
137 #ifdef GEM_DEBUG
138 #include <sys/ktr.h>
139 #define KTR_GEM KTR_SPARE2
140 #endif
141
142 int
143 gem_attach(struct gem_softc *sc)
144 {
145 struct gem_txsoft *txs;
146 if_t ifp;
147 int error, i, phy;
148 uint32_t v;
149
150 if (bootverbose)
151 device_printf(sc->sc_dev, "flags=0x%x\n", sc->sc_flags);
152
153 /* Set up ifnet structure. */
154 ifp = sc->sc_ifp = if_alloc(IFT_ETHER);
155 if (ifp == NULL)
156 return (ENOSPC);
157 sc->sc_csum_features = GEM_CSUM_FEATURES;
158 if_setsoftc(ifp, sc);
159 if_initname(ifp, device_get_name(sc->sc_dev),
160 device_get_unit(sc->sc_dev));
161 if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
162 if_setstartfn(ifp, gem_start);
163 if_setioctlfn(ifp, gem_ioctl);
164 if_setinitfn(ifp, gem_init);
165 if_setsendqlen(ifp, GEM_TXQUEUELEN);
166 if_setsendqready(ifp);
167
168 callout_init_mtx(&sc->sc_tick_ch, &sc->sc_mtx, 0);
169 #ifdef GEM_RINT_TIMEOUT
170 callout_init_mtx(&sc->sc_rx_ch, &sc->sc_mtx, 0);
171 #endif
172
173 /* Make sure the chip is stopped. */
174 gem_reset(sc);
175
176 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0,
177 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
178 BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0, NULL,
179 NULL, &sc->sc_pdmatag);
180 if (error != 0)
181 goto fail_ifnet;
182
183 error = bus_dma_tag_create(sc->sc_pdmatag, 1, 0,
184 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES,
185 1, MCLBYTES, BUS_DMA_ALLOCNOW, NULL, NULL, &sc->sc_rdmatag);
186 if (error != 0)
187 goto fail_ptag;
188
189 error = bus_dma_tag_create(sc->sc_pdmatag, 1, 0,
190 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
191 MCLBYTES * GEM_NTXSEGS, GEM_NTXSEGS, MCLBYTES,
192 BUS_DMA_ALLOCNOW, NULL, NULL, &sc->sc_tdmatag);
193 if (error != 0)
194 goto fail_rtag;
195
196 error = bus_dma_tag_create(sc->sc_pdmatag, PAGE_SIZE, 0,
197 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
198 sizeof(struct gem_control_data), 1,
199 sizeof(struct gem_control_data), 0,
200 NULL, NULL, &sc->sc_cdmatag);
201 if (error != 0)
202 goto fail_ttag;
203
204 /*
205 * Allocate the control data structures, create and load the
206 * DMA map for it.
207 */
208 if ((error = bus_dmamem_alloc(sc->sc_cdmatag,
209 (void **)&sc->sc_control_data,
210 BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
211 &sc->sc_cddmamap)) != 0) {
212 device_printf(sc->sc_dev,
213 "unable to allocate control data, error = %d\n", error);
214 goto fail_ctag;
215 }
216
217 sc->sc_cddma = 0;
218 if ((error = bus_dmamap_load(sc->sc_cdmatag, sc->sc_cddmamap,
219 sc->sc_control_data, sizeof(struct gem_control_data),
220 gem_cddma_callback, sc, 0)) != 0 || sc->sc_cddma == 0) {
221 device_printf(sc->sc_dev,
222 "unable to load control data DMA map, error = %d\n",
223 error);
224 goto fail_cmem;
225 }
226
227 /*
228 * Initialize the transmit job descriptors.
229 */
230 STAILQ_INIT(&sc->sc_txfreeq);
231 STAILQ_INIT(&sc->sc_txdirtyq);
232
233 /*
234 * Create the transmit buffer DMA maps.
235 */
236 error = ENOMEM;
237 for (i = 0; i < GEM_TXQUEUELEN; i++) {
238 txs = &sc->sc_txsoft[i];
239 txs->txs_mbuf = NULL;
240 txs->txs_ndescs = 0;
241 if ((error = bus_dmamap_create(sc->sc_tdmatag, 0,
242 &txs->txs_dmamap)) != 0) {
243 device_printf(sc->sc_dev,
244 "unable to create TX DMA map %d, error = %d\n",
245 i, error);
246 goto fail_txd;
247 }
248 STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
249 }
250
251 /*
252 * Create the receive buffer DMA maps.
253 */
254 for (i = 0; i < GEM_NRXDESC; i++) {
255 if ((error = bus_dmamap_create(sc->sc_rdmatag, 0,
256 &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
257 device_printf(sc->sc_dev,
258 "unable to create RX DMA map %d, error = %d\n",
259 i, error);
260 goto fail_rxd;
261 }
262 sc->sc_rxsoft[i].rxs_mbuf = NULL;
263 }
264
265 /* Bypass probing PHYs if we already know for sure that a SERDES is used. */
266 if ((sc->sc_flags & GEM_SERDES) != 0)
267 goto serdes;
268
269 GEM_WRITE_4(sc, GEM_MII_DATAPATH_MODE, GEM_MII_DATAPATH_MII);
270 GEM_BARRIER(sc, GEM_MII_DATAPATH_MODE, 4,
271 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
272
273 gem_mifinit(sc);
274
275 /*
276 * Look for an external PHY.
277 */
278 error = ENXIO;
279 v = GEM_READ_4(sc, GEM_MIF_CONFIG);
280 if ((v & GEM_MIF_CONFIG_MDI1) != 0) {
281 v |= GEM_MIF_CONFIG_PHY_SEL;
282 GEM_WRITE_4(sc, GEM_MIF_CONFIG, v);
283 GEM_BARRIER(sc, GEM_MIF_CONFIG, 4,
284 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
285 error = mii_attach(sc->sc_dev, &sc->sc_miibus, ifp,
286 gem_mediachange, gem_mediastatus, BMSR_DEFCAPMASK,
287 MII_PHY_ANY, MII_OFFSET_ANY, MIIF_DOPAUSE);
288 }
289
290 /*
291 * Fall back on an internal PHY if no external PHY was found.
292 * Note that with Apple (K2) GMACs GEM_MIF_CONFIG_MDI0 can't be
293 * trusted when the firmware has powered down the chip.
294 */
295 if (error != 0 &&
296 ((v & GEM_MIF_CONFIG_MDI0) != 0 || GEM_IS_APPLE(sc))) {
297 v &= ~GEM_MIF_CONFIG_PHY_SEL;
298 GEM_WRITE_4(sc, GEM_MIF_CONFIG, v);
299 GEM_BARRIER(sc, GEM_MIF_CONFIG, 4,
300 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
301 switch (sc->sc_variant) {
302 case GEM_APPLE_K2_GMAC:
303 phy = GEM_PHYAD_INTERNAL;
304 break;
305 case GEM_APPLE_GMAC:
306 phy = GEM_PHYAD_EXTERNAL;
307 break;
308 default:
309 phy = MII_PHY_ANY;
310 break;
311 }
312 error = mii_attach(sc->sc_dev, &sc->sc_miibus, ifp,
313 gem_mediachange, gem_mediastatus, BMSR_DEFCAPMASK, phy,
314 MII_OFFSET_ANY, MIIF_DOPAUSE);
315 }
316
317 /*
318 * Try the external PCS SERDES if we didn't find any PHYs.
319 */
320 if (error != 0 && sc->sc_variant == GEM_SUN_GEM) {
321 serdes:
322 GEM_WRITE_4(sc, GEM_MII_DATAPATH_MODE,
323 GEM_MII_DATAPATH_SERDES);
324 GEM_BARRIER(sc, GEM_MII_DATAPATH_MODE, 4,
325 BUS_SPACE_BARRIER_WRITE);
326 GEM_WRITE_4(sc, GEM_MII_SLINK_CONTROL,
327 GEM_MII_SLINK_LOOPBACK | GEM_MII_SLINK_EN_SYNC_D);
328 GEM_BARRIER(sc, GEM_MII_SLINK_CONTROL, 4,
329 BUS_SPACE_BARRIER_WRITE);
330 GEM_WRITE_4(sc, GEM_MII_CONFIG, GEM_MII_CONFIG_ENABLE);
331 GEM_BARRIER(sc, GEM_MII_CONFIG, 4,
332 BUS_SPACE_BARRIER_WRITE);
333 sc->sc_flags |= GEM_SERDES;
334 error = mii_attach(sc->sc_dev, &sc->sc_miibus, ifp,
335 gem_mediachange, gem_mediastatus, BMSR_DEFCAPMASK,
336 GEM_PHYAD_EXTERNAL, MII_OFFSET_ANY, MIIF_DOPAUSE);
337 }
338 if (error != 0) {
339 device_printf(sc->sc_dev, "attaching PHYs failed\n");
340 goto fail_rxd;
341 }
342 sc->sc_mii = device_get_softc(sc->sc_miibus);
343
344 /*
345 * From this point forward, the attachment cannot fail. A failure
346 * before this point releases all resources that may have been
347 * allocated.
348 */
349
350 /* Get RX FIFO size. */
351 sc->sc_rxfifosize = 64 *
352 GEM_READ_4(sc, GEM_RX_FIFO_SIZE);
353
354 /* Get TX FIFO size. */
355 v = GEM_READ_4(sc, GEM_TX_FIFO_SIZE);
356 device_printf(sc->sc_dev, "%ukB RX FIFO, %ukB TX FIFO\n",
357 sc->sc_rxfifosize / 1024, v / 16);
358
359 /* Attach the interface. */
360 ether_ifattach(ifp, sc->sc_enaddr);
361
362 /*
363 * Tell the upper layer(s) we support long frames/checksum offloads.
364 */
365 if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));
366 if_setcapabilitiesbit(ifp, IFCAP_VLAN_MTU | IFCAP_HWCSUM, 0);
367 if_sethwassistbits(ifp, sc->sc_csum_features, 0);
368 if_setcapenablebit(ifp, IFCAP_VLAN_MTU | IFCAP_HWCSUM, 0);
369
370 return (0);
371
372 /*
373 * Free any resources we've allocated during the failed attach
374 * attempt. Do this in reverse order and fall through.
375 */
376 fail_rxd:
377 for (i = 0; i < GEM_NRXDESC; i++)
378 if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
379 bus_dmamap_destroy(sc->sc_rdmatag,
380 sc->sc_rxsoft[i].rxs_dmamap);
381 fail_txd:
382 for (i = 0; i < GEM_TXQUEUELEN; i++)
383 if (sc->sc_txsoft[i].txs_dmamap != NULL)
384 bus_dmamap_destroy(sc->sc_tdmatag,
385 sc->sc_txsoft[i].txs_dmamap);
386 bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cddmamap);
387 fail_cmem:
388 bus_dmamem_free(sc->sc_cdmatag, sc->sc_control_data,
389 sc->sc_cddmamap);
390 fail_ctag:
391 bus_dma_tag_destroy(sc->sc_cdmatag);
392 fail_ttag:
393 bus_dma_tag_destroy(sc->sc_tdmatag);
394 fail_rtag:
395 bus_dma_tag_destroy(sc->sc_rdmatag);
396 fail_ptag:
397 bus_dma_tag_destroy(sc->sc_pdmatag);
398 fail_ifnet:
399 if_free(ifp);
400 return (error);
401 }
402
403 void
404 gem_detach(struct gem_softc *sc)
405 {
406 if_t ifp = sc->sc_ifp;
407 int i;
408
409 ether_ifdetach(ifp);
410 GEM_LOCK(sc);
411 gem_stop(ifp, 1);
412 GEM_UNLOCK(sc);
413 callout_drain(&sc->sc_tick_ch);
414 #ifdef GEM_RINT_TIMEOUT
415 callout_drain(&sc->sc_rx_ch);
416 #endif
417 if_free(ifp);
418 device_delete_child(sc->sc_dev, sc->sc_miibus);
419
420 for (i = 0; i < GEM_NRXDESC; i++)
421 if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
422 bus_dmamap_destroy(sc->sc_rdmatag,
423 sc->sc_rxsoft[i].rxs_dmamap);
424 for (i = 0; i < GEM_TXQUEUELEN; i++)
425 if (sc->sc_txsoft[i].txs_dmamap != NULL)
426 bus_dmamap_destroy(sc->sc_tdmatag,
427 sc->sc_txsoft[i].txs_dmamap);
428 GEM_CDSYNC(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
429 bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cddmamap);
430 bus_dmamem_free(sc->sc_cdmatag, sc->sc_control_data,
431 sc->sc_cddmamap);
432 bus_dma_tag_destroy(sc->sc_cdmatag);
433 bus_dma_tag_destroy(sc->sc_tdmatag);
434 bus_dma_tag_destroy(sc->sc_rdmatag);
435 bus_dma_tag_destroy(sc->sc_pdmatag);
436 }
437
438 void
439 gem_suspend(struct gem_softc *sc)
440 {
441 if_t ifp = sc->sc_ifp;
442
443 GEM_LOCK(sc);
444 gem_stop(ifp, 0);
445 GEM_UNLOCK(sc);
446 }
447
448 void
449 gem_resume(struct gem_softc *sc)
450 {
451 if_t ifp = sc->sc_ifp;
452
453 GEM_LOCK(sc);
454 /*
455 * On resume all registers have to be initialized again like
456 * after power-on.
457 */
458 sc->sc_flags &= ~GEM_INITED;
459 if (if_getflags(ifp) & IFF_UP)
460 gem_init_locked(sc);
461 GEM_UNLOCK(sc);
462 }
463
464 static inline void
465 gem_rxcksum(struct mbuf *m, uint64_t flags)
466 {
467 struct ether_header *eh;
468 struct ip *ip;
469 struct udphdr *uh;
470 uint16_t *opts;
471 int32_t hlen, len, pktlen;
472 uint32_t temp32;
473 uint16_t cksum;
474
475 pktlen = m->m_pkthdr.len;
476 if (pktlen < sizeof(struct ether_header) + sizeof(struct ip))
477 return;
478 eh = mtod(m, struct ether_header *);
479 if (eh->ether_type != htons(ETHERTYPE_IP))
480 return;
481 ip = (struct ip *)(eh + 1);
482 if (ip->ip_v != IPVERSION)
483 return;
484
485 hlen = ip->ip_hl << 2;
486 pktlen -= sizeof(struct ether_header);
487 if (hlen < sizeof(struct ip))
488 return;
489 if (ntohs(ip->ip_len) < hlen)
490 return;
491 if (ntohs(ip->ip_len) != pktlen)
492 return;
493 if (ip->ip_off & htons(IP_MF | IP_OFFMASK))
494 return; /* Cannot handle fragmented packet. */
495
496 switch (ip->ip_p) {
497 case IPPROTO_TCP:
498 if (pktlen < (hlen + sizeof(struct tcphdr)))
499 return;
500 break;
501 case IPPROTO_UDP:
502 if (pktlen < (hlen + sizeof(struct udphdr)))
503 return;
504 uh = (struct udphdr *)((uint8_t *)ip + hlen);
505 if (uh->uh_sum == 0)
506 return; /* no checksum */
507 break;
508 default:
509 return;
510 }
511
512 cksum = ~(flags & GEM_RD_CHECKSUM);
513 /* checksum fixup for IP options */
514 len = hlen - sizeof(struct ip);
515 if (len > 0) {
516 opts = (uint16_t *)(ip + 1);
517 for (; len > 0; len -= sizeof(uint16_t), opts++) {
518 temp32 = cksum - *opts;
519 temp32 = (temp32 >> 16) + (temp32 & 65535);
520 cksum = temp32 & 65535;
521 }
522 }
523 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID;
524 m->m_pkthdr.csum_data = cksum;
525 }
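/*
 * Worked example for the fixup loop above (illustrative): the hardware
 * sum starts at the fixed offset ETHER_HDR_LEN + sizeof(struct ip), so
 * any IP option words land inside the summed area and must be
 * subtracted back out. With cksum = 0x0010 and an option word of
 * 0x0101, temp32 = 0x0010 - 0x0101 = 0xffffff0f; folding gives
 * 0xffff + 0xff0f = 0x1ff0e, kept as 0xff0e, which equals the
 * one's-complement difference 0x0010 + ~0x0101 = 0x0010 + 0xfefe = 0xff0e.
 */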
526
527 static void
528 gem_cddma_callback(void *xsc, bus_dma_segment_t *segs, int nsegs, int error)
529 {
530 struct gem_softc *sc = xsc;
531
532 if (error != 0)
533 return;
534 if (nsegs != 1)
535 panic("%s: bad control buffer segment count", __func__);
536 sc->sc_cddma = segs[0].ds_addr;
537 }
538
539 static void
540 gem_tick(void *arg)
541 {
542 struct gem_softc *sc = arg;
543 if_t ifp = sc->sc_ifp;
544 uint32_t v;
545
546 GEM_LOCK_ASSERT(sc, MA_OWNED);
547
548 /*
549 * Unload collision and error counters.
550 */
551 if_inc_counter(ifp, IFCOUNTER_COLLISIONS,
552 GEM_READ_4(sc, GEM_MAC_NORM_COLL_CNT) +
553 GEM_READ_4(sc, GEM_MAC_FIRST_COLL_CNT));
554 v = GEM_READ_4(sc, GEM_MAC_EXCESS_COLL_CNT) +
555 GEM_READ_4(sc, GEM_MAC_LATE_COLL_CNT);
556 if_inc_counter(ifp, IFCOUNTER_COLLISIONS, v);
557 if_inc_counter(ifp, IFCOUNTER_OERRORS, v);
558 if_inc_counter(ifp, IFCOUNTER_IERRORS,
559 GEM_READ_4(sc, GEM_MAC_RX_LEN_ERR_CNT) +
560 GEM_READ_4(sc, GEM_MAC_RX_ALIGN_ERR) +
561 GEM_READ_4(sc, GEM_MAC_RX_CRC_ERR_CNT) +
562 GEM_READ_4(sc, GEM_MAC_RX_CODE_VIOL));
563
564 /*
565 * Then clear the hardware counters.
566 */
567 GEM_WRITE_4(sc, GEM_MAC_NORM_COLL_CNT, 0);
568 GEM_WRITE_4(sc, GEM_MAC_FIRST_COLL_CNT, 0);
569 GEM_WRITE_4(sc, GEM_MAC_EXCESS_COLL_CNT, 0);
570 GEM_WRITE_4(sc, GEM_MAC_LATE_COLL_CNT, 0);
571 GEM_WRITE_4(sc, GEM_MAC_RX_LEN_ERR_CNT, 0);
572 GEM_WRITE_4(sc, GEM_MAC_RX_ALIGN_ERR, 0);
573 GEM_WRITE_4(sc, GEM_MAC_RX_CRC_ERR_CNT, 0);
574 GEM_WRITE_4(sc, GEM_MAC_RX_CODE_VIOL, 0);
575
576 mii_tick(sc->sc_mii);
577
578 if (gem_watchdog(sc) == EJUSTRETURN)
579 return;
580
581 callout_reset(&sc->sc_tick_ch, hz, gem_tick, sc);
582 }
583
584 static int
585 gem_bitwait(struct gem_softc *sc, bus_addr_t r, uint32_t clr, uint32_t set)
586 {
587 int i;
588 uint32_t reg;
589
590 for (i = GEM_TRIES; i--; DELAY(100)) {
591 reg = GEM_READ_4(sc, r);
592 if ((reg & clr) == 0 && (reg & set) == set)
593 return (1);
594 }
595 return (0);
596 }
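/*
 * Usage note (not in the original source): with GEM_TRIES = 10000 and a
 * 100us DELAY per iteration, gem_bitwait() spins for at most roughly one
 * second (10000 * 100us) before reporting failure.
 */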
597
598 static void
599 gem_reset(struct gem_softc *sc)
600 {
601
602 #ifdef GEM_DEBUG
603 CTR2(KTR_GEM, "%s: %s", device_get_name(sc->sc_dev), __func__);
604 #endif
605 gem_reset_rx(sc);
606 gem_reset_tx(sc);
607
608 /* Do a full reset. */
609 GEM_WRITE_4(sc, GEM_RESET, GEM_RESET_RX | GEM_RESET_TX);
610 GEM_BARRIER(sc, GEM_RESET, 4,
611 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
612 if (!gem_bitwait(sc, GEM_RESET, GEM_RESET_RX | GEM_RESET_TX, 0))
613 device_printf(sc->sc_dev, "cannot reset device\n");
614 }
615
616 static void
617 gem_rxdrain(struct gem_softc *sc)
618 {
619 struct gem_rxsoft *rxs;
620 int i;
621
622 for (i = 0; i < GEM_NRXDESC; i++) {
623 rxs = &sc->sc_rxsoft[i];
624 if (rxs->rxs_mbuf != NULL) {
625 bus_dmamap_sync(sc->sc_rdmatag, rxs->rxs_dmamap,
626 BUS_DMASYNC_POSTREAD);
627 bus_dmamap_unload(sc->sc_rdmatag, rxs->rxs_dmamap);
628 m_freem(rxs->rxs_mbuf);
629 rxs->rxs_mbuf = NULL;
630 }
631 }
632 }
633
634 static void
635 gem_stop(if_t ifp, int disable)
636 {
637 struct gem_softc *sc = if_getsoftc(ifp);
638 struct gem_txsoft *txs;
639
640 #ifdef GEM_DEBUG
641 CTR2(KTR_GEM, "%s: %s", device_get_name(sc->sc_dev), __func__);
642 #endif
643
644 callout_stop(&sc->sc_tick_ch);
645 #ifdef GEM_RINT_TIMEOUT
646 callout_stop(&sc->sc_rx_ch);
647 #endif
648
649 gem_reset_tx(sc);
650 gem_reset_rx(sc);
651
652 /*
653 * Release any queued transmit buffers.
654 */
655 while ((txs = STAILQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
656 STAILQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);
657 if (txs->txs_ndescs != 0) {
658 bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap,
659 BUS_DMASYNC_POSTWRITE);
660 bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap);
661 if (txs->txs_mbuf != NULL) {
662 m_freem(txs->txs_mbuf);
663 txs->txs_mbuf = NULL;
664 }
665 }
666 STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
667 }
668
669 if (disable)
670 gem_rxdrain(sc);
671
672 /*
673 * Mark the interface down and cancel the watchdog timer.
674 */
675 if_setdrvflagbits(ifp, 0, (IFF_DRV_RUNNING | IFF_DRV_OACTIVE));
676 sc->sc_flags &= ~GEM_LINK;
677 sc->sc_wdog_timer = 0;
678 }
679
680 static int
681 gem_reset_rx(struct gem_softc *sc)
682 {
683
684 /*
685 * Resetting while DMA is in progress can cause a bus hang, so we
686 * disable DMA first.
687 */
688 (void)gem_disable_rx(sc);
689 GEM_WRITE_4(sc, GEM_RX_CONFIG, 0);
690 GEM_BARRIER(sc, GEM_RX_CONFIG, 4,
691 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
692 if (!gem_bitwait(sc, GEM_RX_CONFIG, GEM_RX_CONFIG_RXDMA_EN, 0))
693 device_printf(sc->sc_dev, "cannot disable RX DMA\n");
694
695 /* Wait 5ms extra. */
696 DELAY(5000);
697
698 /* Reset the ERX. */
699 GEM_WRITE_4(sc, GEM_RESET, GEM_RESET_RX);
700 GEM_BARRIER(sc, GEM_RESET, 4,
701 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
702 if (!gem_bitwait(sc, GEM_RESET, GEM_RESET_RX, 0)) {
703 device_printf(sc->sc_dev, "cannot reset receiver\n");
704 return (1);
705 }
706
707 /* Finally, reset RX MAC. */
708 GEM_WRITE_4(sc, GEM_MAC_RXRESET, 1);
709 GEM_BARRIER(sc, GEM_MAC_RXRESET, 4,
710 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
711 if (!gem_bitwait(sc, GEM_MAC_RXRESET, 1, 0)) {
712 device_printf(sc->sc_dev, "cannot reset RX MAC\n");
713 return (1);
714 }
715
716 return (0);
717 }
718
719 /*
720 * Reset the receiver DMA engine.
721 *
722 * Intended to be used in case of GEM_INTR_RX_TAG_ERR, GEM_MAC_RX_OVERFLOW
723 * etc. in order to reset only the receiver DMA engine and not do a full
724 * reset, which amongst other things also downs the link and clears the FIFOs.
725 */
726 static void
727 gem_reset_rxdma(struct gem_softc *sc)
728 {
729 int i;
730
731 if (gem_reset_rx(sc) != 0) {
732 if_setdrvflagbits(sc->sc_ifp, 0, IFF_DRV_RUNNING);
733 return (gem_init_locked(sc));
734 }
735 for (i = 0; i < GEM_NRXDESC; i++)
736 if (sc->sc_rxsoft[i].rxs_mbuf != NULL)
737 GEM_UPDATE_RXDESC(sc, i);
738 sc->sc_rxptr = 0;
739 GEM_CDSYNC(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
740
741 /* NOTE: we use only 32-bit DMA addresses here. */
742 GEM_WRITE_4(sc, GEM_RX_RING_PTR_HI, 0);
743 GEM_WRITE_4(sc, GEM_RX_RING_PTR_LO, GEM_CDRXADDR(sc, 0));
744 GEM_WRITE_4(sc, GEM_RX_KICK, GEM_NRXDESC - 4);
745 GEM_WRITE_4(sc, GEM_RX_CONFIG,
746 gem_ringsize(GEM_NRXDESC /* XXX */) |
747 ((ETHER_HDR_LEN + sizeof(struct ip)) <<
748 GEM_RX_CONFIG_CXM_START_SHFT) |
749 (GEM_THRSH_1024 << GEM_RX_CONFIG_FIFO_THRS_SHIFT) |
750 (ETHER_ALIGN << GEM_RX_CONFIG_FBOFF_SHFT));
751 GEM_WRITE_4(sc, GEM_RX_BLANKING,
752 ((6 * (sc->sc_flags & GEM_PCI66) != 0 ? 2 : 1) <<
753 GEM_RX_BLANKING_TIME_SHIFT) | 6);
754 GEM_WRITE_4(sc, GEM_RX_PAUSE_THRESH,
755 (3 * sc->sc_rxfifosize / 256) |
756 ((sc->sc_rxfifosize / 256) << 12));
757 GEM_WRITE_4(sc, GEM_RX_CONFIG,
758 GEM_READ_4(sc, GEM_RX_CONFIG) | GEM_RX_CONFIG_RXDMA_EN);
759 GEM_WRITE_4(sc, GEM_MAC_RX_MASK,
760 GEM_MAC_RX_DONE | GEM_MAC_RX_FRAME_CNT);
761 /*
762 * Clear the RX filter and reprogram it. This will also set the
763 * current RX MAC configuration and enable it.
764 */
765 gem_setladrf(sc);
766 }
767
768 static int
769 gem_reset_tx(struct gem_softc *sc)
770 {
771
772 /*
773 * Resetting while DMA is in progress can cause a bus hang, so we
774 * disable DMA first.
775 */
776 (void)gem_disable_tx(sc);
777 GEM_WRITE_4(sc, GEM_TX_CONFIG, 0);
778 GEM_BARRIER(sc, GEM_TX_CONFIG, 4,
779 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
780 if (!gem_bitwait(sc, GEM_TX_CONFIG, GEM_TX_CONFIG_TXDMA_EN, 0))
781 device_printf(sc->sc_dev, "cannot disable TX DMA\n");
782
783 /* Wait 5ms extra. */
784 DELAY(5000);
785
786 /* Finally, reset the ETX. */
787 GEM_WRITE_4(sc, GEM_RESET, GEM_RESET_TX);
788 GEM_BARRIER(sc, GEM_RESET, 4,
789 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
790 if (!gem_bitwait(sc, GEM_RESET, GEM_RESET_TX, 0)) {
791 device_printf(sc->sc_dev, "cannot reset transmitter\n");
792 return (1);
793 }
794 return (0);
795 }
796
797 static int
798 gem_disable_rx(struct gem_softc *sc)
799 {
800
801 GEM_WRITE_4(sc, GEM_MAC_RX_CONFIG,
802 GEM_READ_4(sc, GEM_MAC_RX_CONFIG) & ~GEM_MAC_RX_ENABLE);
803 GEM_BARRIER(sc, GEM_MAC_RX_CONFIG, 4,
804 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
805 if (gem_bitwait(sc, GEM_MAC_RX_CONFIG, GEM_MAC_RX_ENABLE, 0))
806 return (1);
807 device_printf(sc->sc_dev, "cannot disable RX MAC\n");
808 return (0);
809 }
810
811 static int
812 gem_disable_tx(struct gem_softc *sc)
813 {
814
815 GEM_WRITE_4(sc, GEM_MAC_TX_CONFIG,
816 GEM_READ_4(sc, GEM_MAC_TX_CONFIG) & ~GEM_MAC_TX_ENABLE);
817 GEM_BARRIER(sc, GEM_MAC_TX_CONFIG, 4,
818 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
819 if (gem_bitwait(sc, GEM_MAC_TX_CONFIG, GEM_MAC_TX_ENABLE, 0))
820 return (1);
821 device_printf(sc->sc_dev, "cannot disable TX MAC\n");
822 return (0);
823 }
824
825 static int
826 gem_meminit(struct gem_softc *sc)
827 {
828 struct gem_rxsoft *rxs;
829 int error, i;
830
831 GEM_LOCK_ASSERT(sc, MA_OWNED);
832
833 /*
834 * Initialize the transmit descriptor ring.
835 */
836 for (i = 0; i < GEM_NTXDESC; i++) {
837 sc->sc_txdescs[i].gd_flags = 0;
838 sc->sc_txdescs[i].gd_addr = 0;
839 }
840 sc->sc_txfree = GEM_MAXTXFREE;
841 sc->sc_txnext = 0;
842 sc->sc_txwin = 0;
843
844 /*
845 * Initialize the receive descriptor and receive job
846 * descriptor rings.
847 */
848 for (i = 0; i < GEM_NRXDESC; i++) {
849 rxs = &sc->sc_rxsoft[i];
850 if (rxs->rxs_mbuf == NULL) {
851 if ((error = gem_add_rxbuf(sc, i)) != 0) {
852 device_printf(sc->sc_dev,
853 "unable to allocate or map RX buffer %d, "
854 "error = %d\n", i, error);
855 /*
856 * XXX we should attempt to run with fewer
857 * receive buffers instead of just failing.
858 */
859 gem_rxdrain(sc);
860 return (1);
861 }
862 } else
863 GEM_INIT_RXDESC(sc, i);
864 }
865 sc->sc_rxptr = 0;
866
867 GEM_CDSYNC(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
868
869 return (0);
870 }
871
872 static u_int
873 gem_ringsize(u_int sz)
874 {
875
876 switch (sz) {
877 case 32:
878 return (GEM_RING_SZ_32);
879 case 64:
880 return (GEM_RING_SZ_64);
881 case 128:
882 return (GEM_RING_SZ_128);
883 case 256:
884 return (GEM_RING_SZ_256);
885 case 512:
886 return (GEM_RING_SZ_512);
887 case 1024:
888 return (GEM_RING_SZ_1024);
889 case 2048:
890 return (GEM_RING_SZ_2048);
891 case 4096:
892 return (GEM_RING_SZ_4096);
893 case 8192:
894 return (GEM_RING_SZ_8192);
895 default:
896 printf("%s: invalid ring size %u\n", __func__, sz);
897 return (GEM_RING_SZ_32);
898 }
899 }
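/*
 * Note (not in the original source): the CTASSERTs at the top of this
 * file guarantee that GEM_NRXDESC and GEM_NTXDESC are powers of two in
 * [32, 8192], so for those callers the default case above is unreachable
 * and merely guards against future misuse.
 */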
900
901 static void
902 gem_init(void *xsc)
903 {
904 struct gem_softc *sc = xsc;
905
906 GEM_LOCK(sc);
907 gem_init_locked(sc);
908 GEM_UNLOCK(sc);
909 }
910
911 /*
912 * Initialization of interface; set up initialization block
913 * and transmit/receive descriptor rings.
914 */
915 static void
916 gem_init_locked(struct gem_softc *sc)
917 {
918 if_t ifp = sc->sc_ifp;
919 uint32_t v;
920
921 GEM_LOCK_ASSERT(sc, MA_OWNED);
922
923 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
924 return;
925
926 #ifdef GEM_DEBUG
927 CTR2(KTR_GEM, "%s: %s: calling stop", device_get_name(sc->sc_dev),
928 __func__);
929 #endif
930 /*
931 * Initialization sequence. The numbered steps below correspond
932 * to the sequence outlined in section 6.3.5.1 in the Ethernet
933 * Channel Engine manual (part of the PCIO manual).
934 * See also the STP2002-STQ document from Sun Microsystems.
935 */
936
937 /* step 1 & 2. Reset the Ethernet Channel. */
938 gem_stop(ifp, 0);
939 gem_reset(sc);
940 #ifdef GEM_DEBUG
941 CTR2(KTR_GEM, "%s: %s: restarting", device_get_name(sc->sc_dev),
942 __func__);
943 #endif
944
945 if ((sc->sc_flags & GEM_SERDES) == 0)
946 /* Re-initialize the MIF. */
947 gem_mifinit(sc);
948
949 /* step 3. Setup data structures in host memory. */
950 if (gem_meminit(sc) != 0)
951 return;
952
953 /* step 4. TX MAC registers & counters */
954 gem_init_regs(sc);
955
956 /* step 5. RX MAC registers & counters */
957
958 /* step 6 & 7. Program Descriptor Ring Base Addresses. */
959 /* NOTE: we use only 32-bit DMA addresses here. */
960 GEM_WRITE_4(sc, GEM_TX_RING_PTR_HI, 0);
961 GEM_WRITE_4(sc, GEM_TX_RING_PTR_LO, GEM_CDTXADDR(sc, 0));
962
963 GEM_WRITE_4(sc, GEM_RX_RING_PTR_HI, 0);
964 GEM_WRITE_4(sc, GEM_RX_RING_PTR_LO, GEM_CDRXADDR(sc, 0));
965 #ifdef GEM_DEBUG
966 CTR3(KTR_GEM, "loading RX ring %lx, TX ring %lx, cddma %lx",
967 GEM_CDRXADDR(sc, 0), GEM_CDTXADDR(sc, 0), sc->sc_cddma);
968 #endif
969
970 /* step 8. Global Configuration & Interrupt Mask */
971
972 /*
973 * Set the internal arbitration to "infinite" bursts of the
974 * maximum length of 31 * 64 bytes so DMA transfers aren't
975 * split up in cache line size chunks. This greatly improves
976 * RX performance.
977 * Enable silicon bug workarounds for the Apple variants.
978 */
979 GEM_WRITE_4(sc, GEM_CONFIG,
980 GEM_CONFIG_TXDMA_LIMIT | GEM_CONFIG_RXDMA_LIMIT |
981 GEM_CONFIG_BURST_INF | (GEM_IS_APPLE(sc) ?
982 GEM_CONFIG_RONPAULBIT | GEM_CONFIG_BUG2FIX : 0));
983
984 GEM_WRITE_4(sc, GEM_INTMASK,
985 ~(GEM_INTR_TX_INTME | GEM_INTR_TX_EMPTY | GEM_INTR_RX_DONE |
986 GEM_INTR_RX_NOBUF | GEM_INTR_RX_TAG_ERR | GEM_INTR_PERR |
987 GEM_INTR_BERR
988 #ifdef GEM_DEBUG
989 | GEM_INTR_PCS | GEM_INTR_MIF
990 #endif
991 ));
992 GEM_WRITE_4(sc, GEM_MAC_RX_MASK,
993 GEM_MAC_RX_DONE | GEM_MAC_RX_FRAME_CNT);
994 GEM_WRITE_4(sc, GEM_MAC_TX_MASK,
995 GEM_MAC_TX_XMIT_DONE | GEM_MAC_TX_DEFER_EXP |
996 GEM_MAC_TX_PEAK_EXP);
997 #ifdef GEM_DEBUG
998 GEM_WRITE_4(sc, GEM_MAC_CONTROL_MASK,
999 ~(GEM_MAC_PAUSED | GEM_MAC_PAUSE | GEM_MAC_RESUME));
1000 #else
1001 GEM_WRITE_4(sc, GEM_MAC_CONTROL_MASK,
1002 GEM_MAC_PAUSED | GEM_MAC_PAUSE | GEM_MAC_RESUME);
1003 #endif
1004
1005 /* step 9. ETX Configuration: use mostly default values. */
1006
1007 /* Enable DMA. */
1008 v = gem_ringsize(GEM_NTXDESC);
1009 /* Set TX FIFO threshold and enable DMA. */
1010 v |= (0x4ff << 10) & GEM_TX_CONFIG_TXFIFO_TH;
1011 GEM_WRITE_4(sc, GEM_TX_CONFIG, v | GEM_TX_CONFIG_TXDMA_EN);
1012
1013 /* step 10. ERX Configuration */
1014
1015 /* Encode Receive Descriptor ring size. */
1016 v = gem_ringsize(GEM_NRXDESC /* XXX */);
1017 /* RX TCP/UDP checksum offset */
1018 v |= ((ETHER_HDR_LEN + sizeof(struct ip)) <<
1019 GEM_RX_CONFIG_CXM_START_SHFT);
1020 /* Set RX FIFO threshold, set first byte offset and enable DMA. */
1021 GEM_WRITE_4(sc, GEM_RX_CONFIG,
1022 v | (GEM_THRSH_1024 << GEM_RX_CONFIG_FIFO_THRS_SHIFT) |
1023 (ETHER_ALIGN << GEM_RX_CONFIG_FBOFF_SHFT) |
1024 GEM_RX_CONFIG_RXDMA_EN);
1025
1026 GEM_WRITE_4(sc, GEM_RX_BLANKING,
1027 ((6 * (sc->sc_flags & GEM_PCI66) != 0 ? 2 : 1) <<
1028 GEM_RX_BLANKING_TIME_SHIFT) | 6);
1029
1030 /*
1031 * The following value is for an OFF Threshold of about 3/4 full
1032 * and an ON Threshold of 1/4 full.
1033 */
1034 GEM_WRITE_4(sc, GEM_RX_PAUSE_THRESH,
1035 (3 * sc->sc_rxfifosize / 256) |
1036 ((sc->sc_rxfifosize / 256) << 12));
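/*
 * Worked example (illustrative): for a 16kB RX FIFO, sc_rxfifosize =
 * 16384, so the OFF threshold is 3 * 16384 / 256 = 192 and the ON
 * threshold is 16384 / 256 = 64. Assuming the register counts in
 * 64-byte units (consistent with the 3/4 and 1/4 figures above), that
 * corresponds to 12kB and 4kB respectively.
 */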
1037
1038 /* step 11. Configure Media. */
1039
1040 /* step 12. RX_MAC Configuration Register */
1041 v = GEM_READ_4(sc, GEM_MAC_RX_CONFIG);
1042 v &= ~GEM_MAC_RX_ENABLE;
1043 v |= GEM_MAC_RX_STRIP_CRC;
1044 sc->sc_mac_rxcfg = v;
1045 /*
1046 * Clear the RX filter and reprogram it. This will also set the
1047 * current RX MAC configuration and enable it.
1048 */
1049 gem_setladrf(sc);
1050
1051 /* step 13. TX_MAC Configuration Register */
1052 v = GEM_READ_4(sc, GEM_MAC_TX_CONFIG);
1053 v |= GEM_MAC_TX_ENABLE;
1054 (void)gem_disable_tx(sc);
1055 GEM_WRITE_4(sc, GEM_MAC_TX_CONFIG, v);
1056
1057 /* step 14. Issue Transmit Pending command. */
1058
1059 /* step 15. Give the receiver a swift kick. */
1060 GEM_WRITE_4(sc, GEM_RX_KICK, GEM_NRXDESC - 4);
1061
1062 if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
1063 if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
1064
1065 mii_mediachg(sc->sc_mii);
1066
1067 /* Start the one second timer. */
1068 sc->sc_wdog_timer = 0;
1069 callout_reset(&sc->sc_tick_ch, hz, gem_tick, sc);
1070 }
1071
1072 static int
1073 gem_load_txmbuf(struct gem_softc *sc, struct mbuf **m_head)
1074 {
1075 bus_dma_segment_t txsegs[GEM_NTXSEGS];
1076 struct gem_txsoft *txs;
1077 struct ip *ip;
1078 struct mbuf *m;
1079 uint64_t cflags, flags;
1080 int error, nexttx, nsegs, offset, seg;
1081
1082 GEM_LOCK_ASSERT(sc, MA_OWNED);
1083
1084 /* Get a work queue entry. */
1085 if ((txs = STAILQ_FIRST(&sc->sc_txfreeq)) == NULL) {
1086 /* Ran out of descriptors. */
1087 return (ENOBUFS);
1088 }
1089
1090 cflags = 0;
1091 if (((*m_head)->m_pkthdr.csum_flags & sc->sc_csum_features) != 0) {
1092 if (M_WRITABLE(*m_head) == 0) {
1093 m = m_dup(*m_head, M_NOWAIT);
1094 m_freem(*m_head);
1095 *m_head = m;
1096 if (m == NULL)
1097 return (ENOBUFS);
1098 }
1099 offset = sizeof(struct ether_header);
1100 m = m_pullup(*m_head, offset + sizeof(struct ip));
1101 if (m == NULL) {
1102 *m_head = NULL;
1103 return (ENOBUFS);
1104 }
1105 ip = (struct ip *)(mtod(m, caddr_t) + offset);
1106 offset += (ip->ip_hl << 2);
1107 cflags = offset << GEM_TD_CXSUM_STARTSHFT |
1108 ((offset + m->m_pkthdr.csum_data) <<
1109 GEM_TD_CXSUM_STUFFSHFT) | GEM_TD_CXSUM_ENABLE;
1110 *m_head = m;
1111 }
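/*
 * Worked example (illustrative): for a TCP segment without IP options,
 * offset is sizeof(struct ether_header) + 20 = 34, which becomes the
 * checksum start offset, and csum_data is the offset of th_sum within
 * the TCP header (16), so the stuff offset becomes 34 + 16 = 50.
 */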
1112
1113 error = bus_dmamap_load_mbuf_sg(sc->sc_tdmatag, txs->txs_dmamap,
1114 *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT);
1115 if (error == EFBIG) {
1116 m = m_collapse(*m_head, M_NOWAIT, GEM_NTXSEGS);
1117 if (m == NULL) {
1118 m_freem(*m_head);
1119 *m_head = NULL;
1120 return (ENOBUFS);
1121 }
1122 *m_head = m;
1123 error = bus_dmamap_load_mbuf_sg(sc->sc_tdmatag,
1124 txs->txs_dmamap, *m_head, txsegs, &nsegs,
1125 BUS_DMA_NOWAIT);
1126 if (error != 0) {
1127 m_freem(*m_head);
1128 *m_head = NULL;
1129 return (error);
1130 }
1131 } else if (error != 0)
1132 return (error);
1133 /* If nsegs is wrong then the stack is corrupt. */
1134 KASSERT(nsegs <= GEM_NTXSEGS,
1135 ("%s: too many DMA segments (%d)", __func__, nsegs));
1136 if (nsegs == 0) {
1137 m_freem(*m_head);
1138 *m_head = NULL;
1139 return (EIO);
1140 }
1141
1142 /*
1143 * Ensure we have enough descriptors free to describe
1144 * the packet. Note, we always reserve one descriptor
1145 * at the end of the ring as a termination point, in
1146 * order to prevent wrap-around.
1147 */
1148 if (nsegs > sc->sc_txfree - 1) {
1149 txs->txs_ndescs = 0;
1150 bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap);
1151 return (ENOBUFS);
1152 }
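/*
 * Illustrative note: because one descriptor is always held back as the
 * termination point, a packet needing nsegs segments requires
 * sc_txfree >= nsegs + 1; e.g. with 64 descriptors free, a 64-segment
 * mapping is refused and at most 63 segments can be queued.
 */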
1153
1154 txs->txs_ndescs = nsegs;
1155 txs->txs_firstdesc = sc->sc_txnext;
1156 nexttx = txs->txs_firstdesc;
1157 for (seg = 0; seg < nsegs; seg++, nexttx = GEM_NEXTTX(nexttx)) {
1158 #ifdef GEM_DEBUG
1159 CTR6(KTR_GEM,
1160 "%s: mapping seg %d (txd %d), len %lx, addr %#lx (%#lx)",
1161 __func__, seg, nexttx, txsegs[seg].ds_len,
1162 txsegs[seg].ds_addr, htole64(txsegs[seg].ds_addr));
1163 #endif
1164 sc->sc_txdescs[nexttx].gd_addr = htole64(txsegs[seg].ds_addr);
1165 KASSERT(txsegs[seg].ds_len < GEM_TD_BUFSIZE,
1166 ("%s: segment size too large!", __func__));
1167 flags = txsegs[seg].ds_len & GEM_TD_BUFSIZE;
1168 sc->sc_txdescs[nexttx].gd_flags = htole64(flags | cflags);
1169 txs->txs_lastdesc = nexttx;
1170 }
1171
1172 /* Set EOP on the last descriptor. */
1173 #ifdef GEM_DEBUG
1174 CTR3(KTR_GEM, "%s: end of packet at segment %d, TX %d",
1175 __func__, seg, nexttx);
1176 #endif
1177 sc->sc_txdescs[txs->txs_lastdesc].gd_flags |=
1178 htole64(GEM_TD_END_OF_PACKET);
1179
1180 /* Lastly set SOP on the first descriptor. */
1181 #ifdef GEM_DEBUG
1182 CTR3(KTR_GEM, "%s: start of packet at segment %d, TX %d",
1183 __func__, seg, nexttx);
1184 #endif
1185 if (++sc->sc_txwin > GEM_NTXSEGS * 2 / 3) {
1186 sc->sc_txwin = 0;
1187 sc->sc_txdescs[txs->txs_firstdesc].gd_flags |=
1188 htole64(GEM_TD_INTERRUPT_ME | GEM_TD_START_OF_PACKET);
1189 } else
1190 sc->sc_txdescs[txs->txs_firstdesc].gd_flags |=
1191 htole64(GEM_TD_START_OF_PACKET);
1192
1193 /* Sync the DMA map. */
1194 bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap,
1195 BUS_DMASYNC_PREWRITE);
1196
1197 #ifdef GEM_DEBUG
1198 CTR4(KTR_GEM, "%s: setting firstdesc=%d, lastdesc=%d, ndescs=%d",
1199 __func__, txs->txs_firstdesc, txs->txs_lastdesc,
1200 txs->txs_ndescs);
1201 #endif
1202 STAILQ_REMOVE_HEAD(&sc->sc_txfreeq, txs_q);
1203 STAILQ_INSERT_TAIL(&sc->sc_txdirtyq, txs, txs_q);
1204 txs->txs_mbuf = *m_head;
1205
1206 sc->sc_txnext = GEM_NEXTTX(txs->txs_lastdesc);
1207 sc->sc_txfree -= txs->txs_ndescs;
1208
1209 return (0);
1210 }
1211
1212 static void
1213 gem_init_regs(struct gem_softc *sc)
1214 {
1215 const u_char *laddr = if_getlladdr(sc->sc_ifp);
1216
1217 GEM_LOCK_ASSERT(sc, MA_OWNED);
1218
1219 /* These registers are not cleared on reset. */
1220 if ((sc->sc_flags & GEM_INITED) == 0) {
1221 /* magic values */
1222 GEM_WRITE_4(sc, GEM_MAC_IPG0, 0);
1223 GEM_WRITE_4(sc, GEM_MAC_IPG1, 8);
1224 GEM_WRITE_4(sc, GEM_MAC_IPG2, 4);
1225
1226 /* min frame length */
1227 GEM_WRITE_4(sc, GEM_MAC_MAC_MIN_FRAME, ETHER_MIN_LEN);
1228 /* max frame length and max burst size */
1229 GEM_WRITE_4(sc, GEM_MAC_MAC_MAX_FRAME,
1230 (ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN) | (0x2000 << 16));
1231
1232 /* more magic values */
1233 GEM_WRITE_4(sc, GEM_MAC_PREAMBLE_LEN, 0x7);
1234 GEM_WRITE_4(sc, GEM_MAC_JAM_SIZE, 0x4);
1235 GEM_WRITE_4(sc, GEM_MAC_ATTEMPT_LIMIT, 0x10);
1236 GEM_WRITE_4(sc, GEM_MAC_CONTROL_TYPE, 0x8808);
1237
1238 /* random number seed */
1239 GEM_WRITE_4(sc, GEM_MAC_RANDOM_SEED,
1240 ((laddr[5] << 8) | laddr[4]) & 0x3ff);
1241
1242 /* secondary MAC address: 0:0:0:0:0:0 */
1243 GEM_WRITE_4(sc, GEM_MAC_ADDR3, 0);
1244 GEM_WRITE_4(sc, GEM_MAC_ADDR4, 0);
1245 GEM_WRITE_4(sc, GEM_MAC_ADDR5, 0);
1246
1247 /* MAC control address: 01:80:c2:00:00:01 */
1248 GEM_WRITE_4(sc, GEM_MAC_ADDR6, 0x0001);
1249 GEM_WRITE_4(sc, GEM_MAC_ADDR7, 0xc200);
1250 GEM_WRITE_4(sc, GEM_MAC_ADDR8, 0x0180);
1251
1252 /* MAC filter address: 0:0:0:0:0:0 */
1253 GEM_WRITE_4(sc, GEM_MAC_ADDR_FILTER0, 0);
1254 GEM_WRITE_4(sc, GEM_MAC_ADDR_FILTER1, 0);
1255 GEM_WRITE_4(sc, GEM_MAC_ADDR_FILTER2, 0);
1256 GEM_WRITE_4(sc, GEM_MAC_ADR_FLT_MASK1_2, 0);
1257 GEM_WRITE_4(sc, GEM_MAC_ADR_FLT_MASK0, 0);
1258
1259 sc->sc_flags |= GEM_INITED;
1260 }
1261
1262 /* Counters need to be zeroed. */
1263 GEM_WRITE_4(sc, GEM_MAC_NORM_COLL_CNT, 0);
1264 GEM_WRITE_4(sc, GEM_MAC_FIRST_COLL_CNT, 0);
1265 GEM_WRITE_4(sc, GEM_MAC_EXCESS_COLL_CNT, 0);
1266 GEM_WRITE_4(sc, GEM_MAC_LATE_COLL_CNT, 0);
1267 GEM_WRITE_4(sc, GEM_MAC_DEFER_TMR_CNT, 0);
1268 GEM_WRITE_4(sc, GEM_MAC_PEAK_ATTEMPTS, 0);
1269 GEM_WRITE_4(sc, GEM_MAC_RX_FRAME_COUNT, 0);
1270 GEM_WRITE_4(sc, GEM_MAC_RX_LEN_ERR_CNT, 0);
1271 GEM_WRITE_4(sc, GEM_MAC_RX_ALIGN_ERR, 0);
1272 GEM_WRITE_4(sc, GEM_MAC_RX_CRC_ERR_CNT, 0);
1273 GEM_WRITE_4(sc, GEM_MAC_RX_CODE_VIOL, 0);
1274
1275 /* Set XOFF PAUSE time. */
1276 GEM_WRITE_4(sc, GEM_MAC_SEND_PAUSE_CMD, 0x1BF0);
1277
1278 /* Set the station address. */
1279 GEM_WRITE_4(sc, GEM_MAC_ADDR0, (laddr[4] << 8) | laddr[5]);
1280 GEM_WRITE_4(sc, GEM_MAC_ADDR1, (laddr[2] << 8) | laddr[3]);
1281 GEM_WRITE_4(sc, GEM_MAC_ADDR2, (laddr[0] << 8) | laddr[1]);
1282
1283 /* Enable MII outputs. */
1284 GEM_WRITE_4(sc, GEM_MAC_XIF_CONFIG, GEM_MAC_XIF_TX_MII_ENA);
1285 }
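/*
 * Worked example for the station address packing above (illustrative):
 * for laddr = 00:11:22:33:44:55, GEM_MAC_ADDR0 = 0x4455, GEM_MAC_ADDR1 =
 * 0x2233 and GEM_MAC_ADDR2 = 0x0011, i.e. 16 bits per register with the
 * least significant word first. The random number seed likewise becomes
 * ((0x55 << 8) | 0x44) & 0x3ff = 0x144.
 */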
1286
1287 static void
1288 gem_start(if_t ifp)
1289 {
1290 struct gem_softc *sc = if_getsoftc(ifp);
1291
1292 GEM_LOCK(sc);
1293 gem_start_locked(ifp);
1294 GEM_UNLOCK(sc);
1295 }
1296
1297 static inline void
1298 gem_txkick(struct gem_softc *sc)
1299 {
1300
1301 /*
1302 * Update the TX kick register. This register has to point to the
1303 * descriptor after the last valid one and for optimum performance
1304 * should be incremented in multiples of 4 (the DMA engine fetches/
1305 * updates descriptors in batches of 4).
1306 */
1307 #ifdef GEM_DEBUG
1308 CTR3(KTR_GEM, "%s: %s: kicking TX %d",
1309 device_get_name(sc->sc_dev), __func__, sc->sc_txnext);
1310 #endif
1311 GEM_CDSYNC(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1312 GEM_WRITE_4(sc, GEM_TX_KICK, sc->sc_txnext);
1313 }
1314
1315 static void
1316 gem_start_locked(if_t ifp)
1317 {
1318 struct gem_softc *sc = if_getsoftc(ifp);
1319 struct mbuf *m;
1320 int kicked, ntx;
1321
1322 GEM_LOCK_ASSERT(sc, MA_OWNED);
1323
1324 if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
1325 IFF_DRV_RUNNING || (sc->sc_flags & GEM_LINK) == 0)
1326 return;
1327
1328 #ifdef GEM_DEBUG
1329 CTR4(KTR_GEM, "%s: %s: txfree %d, txnext %d",
1330 device_get_name(sc->sc_dev), __func__, sc->sc_txfree,
1331 sc->sc_txnext);
1332 #endif
1333 ntx = 0;
1334 kicked = 0;
1335 for (; !if_sendq_empty(ifp) && sc->sc_txfree > 1;) {
1336 m = if_dequeue(ifp);
1337 if (m == NULL)
1338 break;
1339 if (gem_load_txmbuf(sc, &m) != 0) {
1340 if (m == NULL)
1341 break;
1342 if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
1343 if_sendq_prepend(ifp, m);
1344 break;
1345 }
1346 if ((sc->sc_txnext % 4) == 0) {
1347 gem_txkick(sc);
1348 kicked = 1;
1349 } else
1350 kicked = 0;
1351 ntx++;
1352 BPF_MTAP(ifp, m);
1353 }
1354
1355 if (ntx > 0) {
1356 if (kicked == 0)
1357 gem_txkick(sc);
1358 #ifdef GEM_DEBUG
1359 CTR2(KTR_GEM, "%s: packets enqueued, OWN on %d",
1360 device_get_name(sc->sc_dev), sc->sc_txnext);
1361 #endif
1362
1363 /* Set a watchdog timer in case the chip flakes out. */
1364 sc->sc_wdog_timer = 5;
1365 #ifdef GEM_DEBUG
1366 CTR3(KTR_GEM, "%s: %s: watchdog %d",
1367 device_get_name(sc->sc_dev), __func__,
1368 sc->sc_wdog_timer);
1369 #endif
1370 }
1371 }
1372
1373 static void
1374 gem_tint(struct gem_softc *sc)
1375 {
1376 if_t ifp = sc->sc_ifp;
1377 struct gem_txsoft *txs;
1378 int progress;
1379 uint32_t txlast;
1380 #ifdef GEM_DEBUG
1381 int i;
1382
1383 GEM_LOCK_ASSERT(sc, MA_OWNED);
1384
1385 CTR2(KTR_GEM, "%s: %s", device_get_name(sc->sc_dev), __func__);
1386 #endif
1387
1388 /*
1389 * Go through our TX list and free mbufs for those
1390 * frames that have been transmitted.
1391 */
1392 progress = 0;
1393 GEM_CDSYNC(sc, BUS_DMASYNC_POSTREAD);
1394 while ((txs = STAILQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
1395 #ifdef GEM_DEBUG
1396 if ((if_getflags(ifp) & IFF_DEBUG) != 0) {
1397 printf(" txsoft %p transmit chain:\n", txs);
1398 for (i = txs->txs_firstdesc;; i = GEM_NEXTTX(i)) {
1399 printf("descriptor %d: ", i);
1400 printf("gd_flags: 0x%016llx\t",
1401 (long long)le64toh(
1402 sc->sc_txdescs[i].gd_flags));
1403 printf("gd_addr: 0x%016llx\n",
1404 (long long)le64toh(
1405 sc->sc_txdescs[i].gd_addr));
1406 if (i == txs->txs_lastdesc)
1407 break;
1408 }
1409 }
1410 #endif
1411
1412 /*
1413 * In theory, we could harvest some descriptors before
1414 * the ring is empty, but that's a bit complicated.
1415 *
1416 * GEM_TX_COMPLETION points to the last descriptor
1417 * processed + 1.
1418 */
1419 txlast = GEM_READ_4(sc, GEM_TX_COMPLETION);
1420 #ifdef GEM_DEBUG
1421 CTR4(KTR_GEM, "%s: txs->txs_firstdesc = %d, "
1422 "txs->txs_lastdesc = %d, txlast = %d",
1423 __func__, txs->txs_firstdesc, txs->txs_lastdesc, txlast);
1424 #endif
1425 if (txs->txs_firstdesc <= txs->txs_lastdesc) {
1426 if ((txlast >= txs->txs_firstdesc) &&
1427 (txlast <= txs->txs_lastdesc))
1428 break;
1429 } else {
1430 /* Ick -- this command wraps. */
1431 if ((txlast >= txs->txs_firstdesc) ||
1432 (txlast <= txs->txs_lastdesc))
1433 break;
1434 }
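/*
 * Worked example for the wrapped case above (illustrative, assuming a
 * 1024-entry TX ring): for a frame occupying descriptors 1020..2,
 * txlast = 1021 (>= firstdesc) or txlast = 1 (<= lastdesc) means the
 * hardware is still inside the frame, so harvesting stops; txlast = 3
 * means descriptor 2 has been processed and the frame can be reclaimed.
 */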
1435
1436 #ifdef GEM_DEBUG
1437 CTR1(KTR_GEM, "%s: releasing a descriptor", __func__);
1438 #endif
1439 STAILQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);
1440
1441 sc->sc_txfree += txs->txs_ndescs;
1442
1443 bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap,
1444 BUS_DMASYNC_POSTWRITE);
1445 bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap);
1446 if (txs->txs_mbuf != NULL) {
1447 m_freem(txs->txs_mbuf);
1448 txs->txs_mbuf = NULL;
1449 }
1450
1451 STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
1452
1453 if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
1454 progress = 1;
1455 }
1456
1457 #ifdef GEM_DEBUG
1458 CTR4(KTR_GEM, "%s: GEM_TX_STATE_MACHINE %x GEM_TX_DATA_PTR %llx "
1459 "GEM_TX_COMPLETION %x",
1460 __func__, GEM_READ_4(sc, GEM_TX_STATE_MACHINE),
1461 ((long long)GEM_READ_4(sc, GEM_TX_DATA_PTR_HI) << 32) |
1462 GEM_READ_4(sc, GEM_TX_DATA_PTR_LO),
1463 GEM_READ_4(sc, GEM_TX_COMPLETION));
1464 #endif
1465
1466 if (progress) {
1467 if (sc->sc_txfree == GEM_NTXDESC - 1)
1468 sc->sc_txwin = 0;
1469
1470 /*
1471 * We freed some descriptors, so reset IFF_DRV_OACTIVE
1472 * and restart.
1473 */
1474 if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
1475 if (STAILQ_EMPTY(&sc->sc_txdirtyq))
1476 sc->sc_wdog_timer = 0;
1477 gem_start_locked(ifp);
1478 }
1479
1480 #ifdef GEM_DEBUG
1481 CTR3(KTR_GEM, "%s: %s: watchdog %d",
1482 device_get_name(sc->sc_dev), __func__, sc->sc_wdog_timer);
1483 #endif
1484 }
1485
1486 #ifdef GEM_RINT_TIMEOUT
1487 static void
1488 gem_rint_timeout(void *arg)
1489 {
1490 struct gem_softc *sc = arg;
1491
1492 GEM_LOCK_ASSERT(sc, MA_OWNED);
1493
1494 gem_rint(sc);
1495 }
1496 #endif
1497
1498 static void
1499 gem_rint(struct gem_softc *sc)
1500 {
1501 if_t ifp = sc->sc_ifp;
1502 struct mbuf *m;
1503 uint64_t rxstat;
1504 uint32_t rxcomp;
1505
1506 GEM_LOCK_ASSERT(sc, MA_OWNED);
1507
1508 #ifdef GEM_RINT_TIMEOUT
1509 callout_stop(&sc->sc_rx_ch);
1510 #endif
1511 #ifdef GEM_DEBUG
1512 CTR2(KTR_GEM, "%s: %s", device_get_name(sc->sc_dev), __func__);
1513 #endif
1514
1515 /*
1516 * Read the completion register once. This limits
1517 * how long the following loop can execute.
1518 */
1519 rxcomp = GEM_READ_4(sc, GEM_RX_COMPLETION);
1520 #ifdef GEM_DEBUG
1521 CTR3(KTR_GEM, "%s: sc->sc_rxptr %d, complete %d",
1522 __func__, sc->sc_rxptr, rxcomp);
1523 #endif
1524 GEM_CDSYNC(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1525 for (; sc->sc_rxptr != rxcomp;) {
1526 m = sc->sc_rxsoft[sc->sc_rxptr].rxs_mbuf;
1527 rxstat = le64toh(sc->sc_rxdescs[sc->sc_rxptr].gd_flags);
1528
1529 if (rxstat & GEM_RD_OWN) {
1530 #ifdef GEM_RINT_TIMEOUT
1531 /*
1532 * The descriptor is still marked as owned, although
1533 * it is supposed to have completed. This has been
1534 * observed on some machines. Just exiting here
1535 * might leave the packet sitting around until another
1536 * one arrives to trigger a new interrupt, which is
1537 * generally undesirable, so set up a timeout.
1538 */
1539 callout_reset(&sc->sc_rx_ch, GEM_RXOWN_TICKS,
1540 gem_rint_timeout, sc);
1541 #endif
1542 m = NULL;
1543 goto kickit;
1544 }
1545
1546 if (rxstat & GEM_RD_BAD_CRC) {
1547 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
1548 device_printf(sc->sc_dev, "receive error: CRC error\n");
1549 GEM_INIT_RXDESC(sc, sc->sc_rxptr);
1550 m = NULL;
1551 goto kickit;
1552 }
1553
1554 #ifdef GEM_DEBUG
1555 if ((if_getflags(ifp) & IFF_DEBUG) != 0) {
1556 printf(" rxsoft %p descriptor %d: ",
1557 &sc->sc_rxsoft[sc->sc_rxptr], sc->sc_rxptr);
1558 printf("gd_flags: 0x%016llx\t",
1559 (long long)le64toh(
1560 sc->sc_rxdescs[sc->sc_rxptr].gd_flags));
1561 printf("gd_addr: 0x%016llx\n",
1562 (long long)le64toh(
1563 sc->sc_rxdescs[sc->sc_rxptr].gd_addr));
1564 }
1565 #endif
1566
1567 /*
1568 * Allocate a new mbuf cluster. If that fails, we are
1569 * out of memory, and must drop the packet and recycle
1570 * the buffer that's already attached to this descriptor.
1571 */
1572 if (gem_add_rxbuf(sc, sc->sc_rxptr) != 0) {
1573 if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
1574 GEM_INIT_RXDESC(sc, sc->sc_rxptr);
1575 m = NULL;
1576 }
1577
1578 kickit:
1579 /*
1580 * Update the RX kick register. This register has to point
1581 * to the descriptor after the last valid one (before the
1582 * current batch) and for optimum performance should be
1583 * incremented in multiples of 4 (the DMA engine fetches/
1584 * updates descriptors in batches of 4).
1585 */
1586 sc->sc_rxptr = GEM_NEXTRX(sc->sc_rxptr);
1587 if ((sc->sc_rxptr % 4) == 0) {
1588 GEM_CDSYNC(sc,
1589 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1590 GEM_WRITE_4(sc, GEM_RX_KICK,
1591 (sc->sc_rxptr + GEM_NRXDESC - 4) &
1592 GEM_NRXDESC_MASK);
1593 }
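/*
 * Illustrative note (assuming GEM_NRXDESC = 256): once sc_rxptr has
 * advanced to 8, the kick value is (8 + 256 - 4) & 255 = 4, i.e. the
 * hardware is always kept four descriptors behind the software pointer
 * as a guard gap, matching the batch size noted above.
 */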
1594
1595 if (m == NULL) {
1596 if (rxstat & GEM_RD_OWN)
1597 break;
1598 continue;
1599 }
1600
1601 if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
1602 m->m_data += ETHER_ALIGN; /* first byte offset */
1603 m->m_pkthdr.rcvif = ifp;
1604 m->m_pkthdr.len = m->m_len = GEM_RD_BUFLEN(rxstat);
1605
1606 if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0)
1607 gem_rxcksum(m, rxstat);
1608
1609 /* Pass it on. */
1610 GEM_UNLOCK(sc);
1611 if_input(ifp, m);
1612 GEM_LOCK(sc);
1613 }
1614
1615 #ifdef GEM_DEBUG
1616 CTR3(KTR_GEM, "%s: done sc->sc_rxptr %d, complete %d", __func__,
1617 sc->sc_rxptr, GEM_READ_4(sc, GEM_RX_COMPLETION));
1618 #endif
1619 }
1620
1621 static int
1622 gem_add_rxbuf(struct gem_softc *sc, int idx)
1623 {
1624 struct gem_rxsoft *rxs = &sc->sc_rxsoft[idx];
1625 struct mbuf *m;
1626 bus_dma_segment_t segs[1];
1627 int error, nsegs;
1628
1629 GEM_LOCK_ASSERT(sc, MA_OWNED);
1630
1631 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
1632 if (m == NULL)
1633 return (ENOBUFS);
1634 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
1635
1636 #ifdef GEM_DEBUG
1637 /* Bzero the packet to check DMA. */
1638 memset(m->m_ext.ext_buf, 0, m->m_ext.ext_size);
1639 #endif
1640
1641 if (rxs->rxs_mbuf != NULL) {
1642 bus_dmamap_sync(sc->sc_rdmatag, rxs->rxs_dmamap,
1643 BUS_DMASYNC_POSTREAD);
1644 bus_dmamap_unload(sc->sc_rdmatag, rxs->rxs_dmamap);
1645 }
1646
1647 error = bus_dmamap_load_mbuf_sg(sc->sc_rdmatag, rxs->rxs_dmamap,
1648 m, segs, &nsegs, BUS_DMA_NOWAIT);
1649 if (error != 0) {
1650 device_printf(sc->sc_dev,
1651 "cannot load RS DMA map %d, error = %d\n", idx, error);
1652 m_freem(m);
1653 return (error);
1654 }
1655 /* If nsegs is wrong then the stack is corrupt. */
1656 KASSERT(nsegs == 1,
1657 ("%s: too many DMA segments (%d)", __func__, nsegs));
1658 rxs->rxs_mbuf = m;
1659 rxs->rxs_paddr = segs[0].ds_addr;
1660
1661 bus_dmamap_sync(sc->sc_rdmatag, rxs->rxs_dmamap,
1662 BUS_DMASYNC_PREREAD);
1663
1664 GEM_INIT_RXDESC(sc, idx);
1665
1666 return (0);
1667 }
1668
1669 static void
1670 gem_eint(struct gem_softc *sc, u_int status)
1671 {
1672
1673 if_inc_counter(sc->sc_ifp, IFCOUNTER_IERRORS, 1);
1674 if ((status & GEM_INTR_RX_TAG_ERR) != 0) {
1675 gem_reset_rxdma(sc);
1676 return;
1677 }
1678
1679 device_printf(sc->sc_dev, "%s: status 0x%x", __func__, status);
1680 if ((status & GEM_INTR_BERR) != 0) {
1681 printf(", PCI bus error 0x%x",
1682 GEM_READ_4(sc, GEM_PCI_ERROR_STATUS));
1683 }
1684 printf("\n");
1685 }
1686
1687 void
1688 gem_intr(void *v)
1689 {
1690 struct gem_softc *sc = v;
1691 uint32_t status, status2;
1692
1693 GEM_LOCK(sc);
1694 status = GEM_READ_4(sc, GEM_STATUS);
1695
1696 #ifdef GEM_DEBUG
1697 CTR4(KTR_GEM, "%s: %s: cplt %x, status %x",
1698 device_get_name(sc->sc_dev), __func__,
1699 (status >> GEM_STATUS_TX_COMPLETION_SHFT), (u_int)status);
1700
1701 /*
1702 * PCS interrupts must be cleared, otherwise no traffic is passed!
1703 */
1704 if ((status & GEM_INTR_PCS) != 0) {
1705 status2 =
1706 GEM_READ_4(sc, GEM_MII_INTERRUP_STATUS) |
1707 GEM_READ_4(sc, GEM_MII_INTERRUP_STATUS);
1708 if ((status2 & GEM_MII_INTERRUP_LINK) != 0)
1709 device_printf(sc->sc_dev,
1710 "%s: PCS link status changed\n", __func__);
1711 }
1712 if ((status & GEM_MAC_CONTROL_STATUS) != 0) {
1713 status2 = GEM_READ_4(sc, GEM_MAC_CONTROL_STATUS);
1714 if ((status2 & GEM_MAC_PAUSED) != 0)
1715 device_printf(sc->sc_dev,
1716 "%s: PAUSE received (PAUSE time %d slots)\n",
1717 __func__, GEM_MAC_PAUSE_TIME(status2));
1718 if ((status2 & GEM_MAC_PAUSE) != 0)
1719 device_printf(sc->sc_dev,
1720 "%s: transited to PAUSE state\n", __func__);
1721 if ((status2 & GEM_MAC_RESUME) != 0)
1722 device_printf(sc->sc_dev,
1723 "%s: transited to non-PAUSE state\n", __func__);
1724 }
1725 if ((status & GEM_INTR_MIF) != 0)
1726 device_printf(sc->sc_dev, "%s: MIF interrupt\n", __func__);
1727 #endif
1728
1729 if (__predict_false(status &
1730 (GEM_INTR_RX_TAG_ERR | GEM_INTR_PERR | GEM_INTR_BERR)) != 0)
1731 gem_eint(sc, status);
1732
1733 if ((status & (GEM_INTR_RX_DONE | GEM_INTR_RX_NOBUF)) != 0)
1734 gem_rint(sc);
1735
1736 if ((status & (GEM_INTR_TX_EMPTY | GEM_INTR_TX_INTME)) != 0)
1737 gem_tint(sc);
1738
1739 if (__predict_false((status & GEM_INTR_TX_MAC) != 0)) {
1740 status2 = GEM_READ_4(sc, GEM_MAC_TX_STATUS);
1741 if ((status2 &
1742 ~(GEM_MAC_TX_XMIT_DONE | GEM_MAC_TX_DEFER_EXP |
1743 GEM_MAC_TX_PEAK_EXP)) != 0)
1744 device_printf(sc->sc_dev,
1745 "MAC TX fault, status %x\n", status2);
1746 if ((status2 &
1747 (GEM_MAC_TX_UNDERRUN | GEM_MAC_TX_PKT_TOO_LONG)) != 0) {
1748 if_inc_counter(sc->sc_ifp, IFCOUNTER_OERRORS, 1);
1749 if_setdrvflagbits(sc->sc_ifp, 0, IFF_DRV_RUNNING);
1750 gem_init_locked(sc);
1751 }
1752 }
1753 if (__predict_false((status & GEM_INTR_RX_MAC) != 0)) {
1754 status2 = GEM_READ_4(sc, GEM_MAC_RX_STATUS);
1755 /*
1756 * At least with GEM_SUN_GEM revisions, GEM_MAC_RX_OVERFLOW interrupts
1757 * happen often due to a silicon bug, so handle them silently.
1758 * Moreover, it's likely that the receiver has hung, so we
1759 * reset it.
1760 */
1761 if ((status2 & GEM_MAC_RX_OVERFLOW) != 0) {
1762 if_inc_counter(sc->sc_ifp, IFCOUNTER_IERRORS, 1);
1763 gem_reset_rxdma(sc);
1764 } else if ((status2 &
1765 ~(GEM_MAC_RX_DONE | GEM_MAC_RX_FRAME_CNT)) != 0)
1766 device_printf(sc->sc_dev,
1767 "MAC RX fault, status %x\n", status2);
1768 }
1769 GEM_UNLOCK(sc);
1770 }
1771
1772 static int
1773 gem_watchdog(struct gem_softc *sc)
1774 {
1775 if_t ifp = sc->sc_ifp;
1776
1777 GEM_LOCK_ASSERT(sc, MA_OWNED);
1778
1779 #ifdef GEM_DEBUG
1780 CTR4(KTR_GEM,
1781 "%s: GEM_RX_CONFIG %x GEM_MAC_RX_STATUS %x GEM_MAC_RX_CONFIG %x",
1782 __func__, GEM_READ_4(sc, GEM_RX_CONFIG),
1783 GEM_READ_4(sc, GEM_MAC_RX_STATUS),
1784 GEM_READ_4(sc, GEM_MAC_RX_CONFIG));
1785 CTR4(KTR_GEM,
1786 "%s: GEM_TX_CONFIG %x GEM_MAC_TX_STATUS %x GEM_MAC_TX_CONFIG %x",
1787 __func__, GEM_READ_4(sc, GEM_TX_CONFIG),
1788 GEM_READ_4(sc, GEM_MAC_TX_STATUS),
1789 GEM_READ_4(sc, GEM_MAC_TX_CONFIG));
1790 #endif
1791
1792 if (sc->sc_wdog_timer == 0 || --sc->sc_wdog_timer != 0)
1793 return (0);
1794
1795 if ((sc->sc_flags & GEM_LINK) != 0)
1796 device_printf(sc->sc_dev, "device timeout\n");
1797 else if (bootverbose)
1798 device_printf(sc->sc_dev, "device timeout (no link)\n");
1799 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
1800
1801 /* Try to get more packets going. */
1802 if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
1803 gem_init_locked(sc);
1804 gem_start_locked(ifp);
1805 return (EJUSTRETURN);
1806 }
1807
1808 static void
1809 gem_mifinit(struct gem_softc *sc)
1810 {
1811
1812 /* Configure the MIF in frame mode. */
1813 GEM_WRITE_4(sc, GEM_MIF_CONFIG,
1814 GEM_READ_4(sc, GEM_MIF_CONFIG) & ~GEM_MIF_CONFIG_BB_ENA);
1815 GEM_BARRIER(sc, GEM_MIF_CONFIG, 4,
1816 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
1817 }
1818
1819 /*
1820 * MII interface
1821 *
1822 * The MII interface supports at least three different operating modes:
1823 *
1824 * Bitbang mode is implemented using data, clock and output enable registers.
1825 *
1826 * Frame mode is implemented by loading a complete frame into the frame
1827 * register and polling the valid bit for completion.
1828 *
1829 * Polling mode uses the frame register but completion is indicated by
1830 * an interrupt.
1831 *
1832 */
1833 int
1834 gem_mii_readreg(device_t dev, int phy, int reg)
1835 {
1836 struct gem_softc *sc;
1837 int n;
1838 uint32_t v;
1839
1840 #ifdef GEM_DEBUG_PHY
1841 printf("%s: phy %d reg %d\n", __func__, phy, reg);
1842 #endif
1843
1844 sc = device_get_softc(dev);
1845 if ((sc->sc_flags & GEM_SERDES) != 0) {
1846 switch (reg) {
1847 case MII_BMCR:
1848 reg = GEM_MII_CONTROL;
1849 break;
1850 case MII_BMSR:
1851 reg = GEM_MII_STATUS;
1852 break;
1853 case MII_PHYIDR1:
1854 case MII_PHYIDR2:
1855 return (0);
1856 case MII_ANAR:
1857 reg = GEM_MII_ANAR;
1858 break;
1859 case MII_ANLPAR:
1860 reg = GEM_MII_ANLPAR;
1861 break;
1862 case MII_EXTSR:
1863 return (EXTSR_1000XFDX | EXTSR_1000XHDX);
1864 default:
1865 device_printf(sc->sc_dev,
1866 "%s: unhandled register %d\n", __func__, reg);
1867 return (0);
1868 }
1869 return (GEM_READ_4(sc, reg));
1870 }
1871
1872 /* Construct the frame command. */
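	/*
	 * The MIF signals completion of the frame-mode cycle by setting
	 * the turnaround bit GEM_MIF_FRAME_TA0, which the loop below polls.
	 */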
	v = GEM_MIF_FRAME_READ |
	    (phy << GEM_MIF_PHY_SHIFT) |
	    (reg << GEM_MIF_REG_SHIFT);

	GEM_WRITE_4(sc, GEM_MIF_FRAME, v);
	GEM_BARRIER(sc, GEM_MIF_FRAME, 4,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
	for (n = 0; n < 100; n++) {
		DELAY(1);
		v = GEM_READ_4(sc, GEM_MIF_FRAME);
		if (v & GEM_MIF_FRAME_TA0)
			return (v & GEM_MIF_FRAME_DATA);
	}

	device_printf(sc->sc_dev, "%s: timed out\n", __func__);
	return (0);
}

int
gem_mii_writereg(device_t dev, int phy, int reg, int val)
{
	struct gem_softc *sc;
	int n;
	uint32_t v;

#ifdef GEM_DEBUG_PHY
	printf("%s: phy %d reg %d val %x\n", __func__, phy, reg, val);
#endif

	sc = device_get_softc(dev);
	if ((sc->sc_flags & GEM_SERDES) != 0) {
		switch (reg) {
		case MII_BMSR:
			reg = GEM_MII_STATUS;
			break;
		case MII_BMCR:
			reg = GEM_MII_CONTROL;
			if ((val & GEM_MII_CONTROL_RESET) == 0)
				break;
			GEM_WRITE_4(sc, GEM_MII_CONTROL, val);
			GEM_BARRIER(sc, GEM_MII_CONTROL, 4,
			    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
			if (!gem_bitwait(sc, GEM_MII_CONTROL,
			    GEM_MII_CONTROL_RESET, 0))
				device_printf(sc->sc_dev,
				    "cannot reset PCS\n");
			/* FALLTHROUGH */
		case MII_ANAR:
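			/*
			 * Reprogram the PCS: disable it, load the new
			 * advertisement, set up the serial link and
			 * finally enable the PCS again.
			 */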
			GEM_WRITE_4(sc, GEM_MII_CONFIG, 0);
			GEM_BARRIER(sc, GEM_MII_CONFIG, 4,
			    BUS_SPACE_BARRIER_WRITE);
			GEM_WRITE_4(sc, GEM_MII_ANAR, val);
			GEM_BARRIER(sc, GEM_MII_ANAR, 4,
			    BUS_SPACE_BARRIER_WRITE);
			GEM_WRITE_4(sc, GEM_MII_SLINK_CONTROL,
			    GEM_MII_SLINK_LOOPBACK | GEM_MII_SLINK_EN_SYNC_D);
			GEM_BARRIER(sc, GEM_MII_SLINK_CONTROL, 4,
			    BUS_SPACE_BARRIER_WRITE);
			GEM_WRITE_4(sc, GEM_MII_CONFIG,
			    GEM_MII_CONFIG_ENABLE);
			GEM_BARRIER(sc, GEM_MII_CONFIG, 4,
			    BUS_SPACE_BARRIER_WRITE);
			return (0);
		case MII_ANLPAR:
			reg = GEM_MII_ANLPAR;
			break;
		default:
			device_printf(sc->sc_dev,
			    "%s: unhandled register %d\n", __func__, reg);
			return (0);
		}
		GEM_WRITE_4(sc, reg, val);
		GEM_BARRIER(sc, reg, 4,
		    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
		return (0);
	}

	/* Construct the frame command. */
	v = GEM_MIF_FRAME_WRITE |
	    (phy << GEM_MIF_PHY_SHIFT) |
	    (reg << GEM_MIF_REG_SHIFT) |
	    (val & GEM_MIF_FRAME_DATA);

	GEM_WRITE_4(sc, GEM_MIF_FRAME, v);
	GEM_BARRIER(sc, GEM_MIF_FRAME, 4,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
	for (n = 0; n < 100; n++) {
		DELAY(1);
		v = GEM_READ_4(sc, GEM_MIF_FRAME);
		if (v & GEM_MIF_FRAME_TA0)
			return (1);
	}

	device_printf(sc->sc_dev, "%s: timed out\n", __func__);
	return (0);
}

void
gem_mii_statchg(device_t dev)
{
	struct gem_softc *sc;
	int gigabit;
	uint32_t rxcfg, txcfg, v;

	sc = device_get_softc(dev);

	GEM_LOCK_ASSERT(sc, MA_OWNED);

#ifdef GEM_DEBUG
	if ((if_getflags(sc->sc_ifp) & IFF_DEBUG) != 0)
		device_printf(sc->sc_dev, "%s: status change\n", __func__);
#endif

	if ((sc->sc_mii->mii_media_status & IFM_ACTIVE) != 0 &&
	    IFM_SUBTYPE(sc->sc_mii->mii_media_active) != IFM_NONE)
		sc->sc_flags |= GEM_LINK;
	else
		sc->sc_flags &= ~GEM_LINK;

	switch (IFM_SUBTYPE(sc->sc_mii->mii_media_active)) {
	case IFM_1000_SX:
	case IFM_1000_LX:
	case IFM_1000_CX:
	case IFM_1000_T:
		gigabit = 1;
		break;
	default:
		gigabit = 0;
	}

	/*
	 * The configuration done here corresponds to the steps F) and
	 * G) and as far as enabling of RX and TX MAC goes also step H)
	 * of the initialization sequence outlined in section 3.2.1 of
	 * the GEM Gigabit Ethernet ASIC Specification.
	 */

	rxcfg = sc->sc_mac_rxcfg;
	rxcfg &= ~GEM_MAC_RX_CARR_EXTEND;
	txcfg = GEM_MAC_TX_ENA_IPG0 | GEM_MAC_TX_NGU | GEM_MAC_TX_NGU_LIMIT;
	if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0)
		txcfg |= GEM_MAC_TX_IGN_CARRIER | GEM_MAC_TX_IGN_COLLIS;
	else if (gigabit != 0) {
		rxcfg |= GEM_MAC_RX_CARR_EXTEND;
		txcfg |= GEM_MAC_TX_CARR_EXTEND;
	}
	(void)gem_disable_tx(sc);
	GEM_WRITE_4(sc, GEM_MAC_TX_CONFIG, txcfg);
	(void)gem_disable_rx(sc);
	GEM_WRITE_4(sc, GEM_MAC_RX_CONFIG, rxcfg);

	v = GEM_READ_4(sc, GEM_MAC_CONTROL_CONFIG) &
	    ~(GEM_MAC_CC_RX_PAUSE | GEM_MAC_CC_TX_PAUSE);
	if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) &
	    IFM_ETH_RXPAUSE) != 0)
		v |= GEM_MAC_CC_RX_PAUSE;
	if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) &
	    IFM_ETH_TXPAUSE) != 0)
		v |= GEM_MAC_CC_TX_PAUSE;
	GEM_WRITE_4(sc, GEM_MAC_CONTROL_CONFIG, v);

	if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) == 0 &&
	    gigabit != 0)
		GEM_WRITE_4(sc, GEM_MAC_SLOT_TIME,
		    GEM_MAC_SLOT_TIME_CARR_EXTEND);
	else
		GEM_WRITE_4(sc, GEM_MAC_SLOT_TIME,
		    GEM_MAC_SLOT_TIME_NORMAL);

	/* XIF Configuration */
	v = GEM_MAC_XIF_LINK_LED;
	v |= GEM_MAC_XIF_TX_MII_ENA;
	if ((sc->sc_flags & GEM_SERDES) == 0) {
		if ((GEM_READ_4(sc, GEM_MIF_CONFIG) &
		    GEM_MIF_CONFIG_PHY_SEL) != 0) {
			/* External MII needs echo disable if half duplex. */
			if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) &
			    IFM_FDX) == 0)
				v |= GEM_MAC_XIF_ECHO_DISABL;
		} else
			/*
			 * Internal MII needs buffer enable.
			 * XXX buffer enable only makes sense for an
			 * external PHY.
			 */
			v |= GEM_MAC_XIF_MII_BUF_ENA;
	}
	if (gigabit != 0)
		v |= GEM_MAC_XIF_GMII_MODE;
	if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0)
		v |= GEM_MAC_XIF_FDPLX_LED;
	GEM_WRITE_4(sc, GEM_MAC_XIF_CONFIG, v);

	sc->sc_mac_rxcfg = rxcfg;
	if ((if_getdrvflags(sc->sc_ifp) & IFF_DRV_RUNNING) != 0 &&
	    (sc->sc_flags & GEM_LINK) != 0) {
		GEM_WRITE_4(sc, GEM_MAC_TX_CONFIG,
		    txcfg | GEM_MAC_TX_ENABLE);
		GEM_WRITE_4(sc, GEM_MAC_RX_CONFIG,
		    rxcfg | GEM_MAC_RX_ENABLE);
	}
}

int
gem_mediachange(if_t ifp)
{
	struct gem_softc *sc = if_getsoftc(ifp);
	int error;

	/* XXX add support for serial media. */

	GEM_LOCK(sc);
	error = mii_mediachg(sc->sc_mii);
	GEM_UNLOCK(sc);
	return (error);
}

void
gem_mediastatus(if_t ifp, struct ifmediareq *ifmr)
{
	struct gem_softc *sc = if_getsoftc(ifp);

	GEM_LOCK(sc);
	if ((if_getflags(ifp) & IFF_UP) == 0) {
		GEM_UNLOCK(sc);
		return;
	}

	mii_pollstat(sc->sc_mii);
	ifmr->ifm_active = sc->sc_mii->mii_media_active;
	ifmr->ifm_status = sc->sc_mii->mii_media_status;
	GEM_UNLOCK(sc);
}

static int
gem_ioctl(if_t ifp, u_long cmd, caddr_t data)
{
	struct gem_softc *sc = if_getsoftc(ifp);
	struct ifreq *ifr = (struct ifreq *)data;
	int error;

	error = 0;
	switch (cmd) {
	case SIOCSIFFLAGS:
		GEM_LOCK(sc);
		if ((if_getflags(ifp) & IFF_UP) != 0) {
			if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0 &&
			    ((if_getflags(ifp) ^ sc->sc_ifflags) &
			    (IFF_ALLMULTI | IFF_PROMISC)) != 0)
				gem_setladrf(sc);
			else
				gem_init_locked(sc);
		} else if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
			gem_stop(ifp, 0);
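		/*
		 * IFF_LINK0 (set with "ifconfig gemN link0") opts back
		 * into hardware UDP transmit checksumming, which is
		 * disabled by default; see GEM_CSUM_FEATURES above.
		 */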
		if ((if_getflags(ifp) & IFF_LINK0) != 0)
			sc->sc_csum_features |= CSUM_UDP;
		else
			sc->sc_csum_features &= ~CSUM_UDP;
		if ((if_getcapenable(ifp) & IFCAP_TXCSUM) != 0)
			if_sethwassist(ifp, sc->sc_csum_features);
		sc->sc_ifflags = if_getflags(ifp);
		GEM_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		GEM_LOCK(sc);
		if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
			gem_setladrf(sc);
		GEM_UNLOCK(sc);
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii->mii_media, cmd);
		break;
	case SIOCSIFCAP:
		GEM_LOCK(sc);
		if_setcapenable(ifp, ifr->ifr_reqcap);
		if ((if_getcapenable(ifp) & IFCAP_TXCSUM) != 0)
			if_sethwassist(ifp, sc->sc_csum_features);
		else
			if_sethwassist(ifp, 0);
		GEM_UNLOCK(sc);
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (error);
}

static u_int
gem_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
{
	uint32_t crc, *hash = arg;

	crc = ether_crc32_le(LLADDR(sdl), ETHER_ADDR_LEN);
	/* We just want the 8 most significant bits. */
	crc >>= 24;
	/* Set the corresponding bit in the filter. */
	hash[crc >> 4] |= 1 << (15 - (crc & 15));

	return (1);
}

static void
gem_setladrf(struct gem_softc *sc)
{
	if_t ifp = sc->sc_ifp;
	int i;
	uint32_t hash[16];
	uint32_t v;

	GEM_LOCK_ASSERT(sc, MA_OWNED);

	/*
	 * Turn off the RX MAC and the hash filter as required by the Sun GEM
	 * programming restrictions.
	 */
	v = sc->sc_mac_rxcfg & ~GEM_MAC_RX_HASH_FILTER;
	GEM_WRITE_4(sc, GEM_MAC_RX_CONFIG, v);
	GEM_BARRIER(sc, GEM_MAC_RX_CONFIG, 4,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
	if (!gem_bitwait(sc, GEM_MAC_RX_CONFIG, GEM_MAC_RX_HASH_FILTER |
	    GEM_MAC_RX_ENABLE, 0))
		device_printf(sc->sc_dev,
		    "cannot disable RX MAC or hash filter\n");

	v &= ~(GEM_MAC_RX_PROMISCUOUS | GEM_MAC_RX_PROMISC_GRP);
	if ((if_getflags(ifp) & IFF_PROMISC) != 0) {
		v |= GEM_MAC_RX_PROMISCUOUS;
		goto chipit;
	}
	if ((if_getflags(ifp) & IFF_ALLMULTI) != 0) {
		v |= GEM_MAC_RX_PROMISC_GRP;
		goto chipit;
	}

	/*
	 * Set up the multicast address filter by passing all multicast
	 * addresses through a CRC generator and then using the high
	 * order 8 bits as an index into the 256 bit logical address
	 * filter.  The high order 4 bits select the word, while the
	 * other 4 bits select the bit within the word (where bit 0
	 * is the MSB).
	 */
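	/*
	 * Worked example with a hypothetical CRC: if crc >> 24 were 0xb6,
	 * the high nibble 0xb would select hash[0xb] and the low nibble
	 * 0x6 would set bit (15 - 6) = 9, i.e. hash[0xb] |= 1 << 9, as
	 * computed by gem_hash_maddr() above.
	 */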

	memset(hash, 0, sizeof(hash));
	if_foreach_llmaddr(ifp, gem_hash_maddr, hash);

	v |= GEM_MAC_RX_HASH_FILTER;

	/* Now load the hash table into the chip (if we are using it). */
	for (i = 0; i < 16; i++)
		GEM_WRITE_4(sc,
		    GEM_MAC_HASH0 + i * (GEM_MAC_HASH1 - GEM_MAC_HASH0),
		    hash[i]);

 chipit:
	sc->sc_mac_rxcfg = v;
	GEM_WRITE_4(sc, GEM_MAC_RX_CONFIG, v | GEM_MAC_RX_ENABLE);
}