1 /* $NetBSD: if_dge.c,v 1.63 2021/12/31 14:25:23 riastradh Exp $ */
2
3 /*
4 * Copyright (c) 2004, SUNET, Swedish University Computer Network.
5 * All rights reserved.
6 *
7 * Written by Anders Magnusson for SUNET, Swedish University Computer Network.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 * must display the following acknowledgement:
19 * This product includes software developed for the NetBSD Project by
20 * SUNET, Swedish University Computer Network.
21 * 4. The name of SUNET may not be used to endorse or promote products
22 * derived from this software without specific prior written permission.
23 *
24 * THIS SOFTWARE IS PROVIDED BY SUNET ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
26 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
27  * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL SUNET
28 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37 /*
38 * Copyright (c) 2001, 2002, 2003 Wasabi Systems, Inc.
39 * All rights reserved.
40 *
41 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
42 *
43 * Redistribution and use in source and binary forms, with or without
44 * modification, are permitted provided that the following conditions
45 * are met:
46 * 1. Redistributions of source code must retain the above copyright
47 * notice, this list of conditions and the following disclaimer.
48 * 2. Redistributions in binary form must reproduce the above copyright
49 * notice, this list of conditions and the following disclaimer in the
50 * documentation and/or other materials provided with the distribution.
51 * 3. All advertising materials mentioning features or use of this software
52 * must display the following acknowledgement:
53 * This product includes software developed for the NetBSD Project by
54 * Wasabi Systems, Inc.
55 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
56 * or promote products derived from this software without specific prior
57 * written permission.
58 *
59 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
60 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
61 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
62 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
63 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
64 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
65 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
66 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
67 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
68 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
69 * POSSIBILITY OF SUCH DAMAGE.
70 */
71
72 /*
73 * Device driver for the Intel 82597EX Ten Gigabit Ethernet controller.
74 *
75 * TODO (in no specific order):
76 * HW VLAN support.
77 * TSE offloading (needs kernel changes...)
78 * RAIDC (receive interrupt delay adaptation)
79 * Use memory > 4GB.
80 */
81
82 #include <sys/cdefs.h>
83 __KERNEL_RCSID(0, "$NetBSD: if_dge.c,v 1.63 2021/12/31 14:25:23 riastradh Exp $");
84
85
86
87 #include <sys/param.h>
88 #include <sys/systm.h>
89 #include <sys/callout.h>
90 #include <sys/mbuf.h>
91 #include <sys/malloc.h>
92 #include <sys/kernel.h>
93 #include <sys/socket.h>
94 #include <sys/ioctl.h>
95 #include <sys/errno.h>
96 #include <sys/device.h>
97 #include <sys/queue.h>
98 #include <sys/rndsource.h>
99
100 #include <net/if.h>
101 #include <net/if_dl.h>
102 #include <net/if_media.h>
103 #include <net/if_ether.h>
104 #include <net/bpf.h>
105
106 #include <netinet/in.h> /* XXX for struct ip */
107 #include <netinet/in_systm.h> /* XXX for struct ip */
108 #include <netinet/ip.h> /* XXX for struct ip */
109 #include <netinet/tcp.h> /* XXX for struct tcphdr */
110
111 #include <sys/bus.h>
112 #include <sys/intr.h>
113 #include <machine/endian.h>
114
115 #include <dev/mii/mii.h>
116 #include <dev/mii/miivar.h>
117 #include <dev/mii/mii_bitbang.h>
118
119 #include <dev/pci/pcireg.h>
120 #include <dev/pci/pcivar.h>
121 #include <dev/pci/pcidevs.h>
122
123 #include <dev/pci/if_dgereg.h>
124
125 /*
126 * The receive engine may sometimes become off-by-one when writing back
127 * chained descriptors. Avoid this by allocating a large chunk of
128  * memory and use it instead (to avoid chained descriptors).
129 * This only happens with chained descriptors under heavy load.
130 */
131 #define DGE_OFFBYONE_RXBUG
132
133 #define DGE_EVENT_COUNTERS
134 #define DGE_DEBUG
135
136 #ifdef DGE_DEBUG
137 #define DGE_DEBUG_LINK 0x01
138 #define DGE_DEBUG_TX 0x02
139 #define DGE_DEBUG_RX 0x04
140 #define DGE_DEBUG_CKSUM 0x08
141 int dge_debug = 0;
142
143 #define DPRINTF(x, y) if (dge_debug & (x)) printf y
144 #else
145 #define DPRINTF(x, y) /* nothing */
146 #endif /* DGE_DEBUG */
147
148 /*
149 * Transmit descriptor list size. We allow up to 100 DMA segments per
150  * packet (Intel reports jumbo frame packets with as
151 * many as 80 DMA segments when using 16k buffers).
152 */
153 #define DGE_NTXSEGS 100
154 #define DGE_IFQUEUELEN 20000
155 #define DGE_TXQUEUELEN 2048
156 #define DGE_TXQUEUELEN_MASK (DGE_TXQUEUELEN - 1)
157 #define DGE_TXQUEUE_GC (DGE_TXQUEUELEN / 8)
158 #define DGE_NTXDESC 1024
159 #define DGE_NTXDESC_MASK (DGE_NTXDESC - 1)
160 #define DGE_NEXTTX(x) (((x) + 1) & DGE_NTXDESC_MASK)
161 #define DGE_NEXTTXS(x) (((x) + 1) & DGE_TXQUEUELEN_MASK)
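
/*
 * Worked example: with DGE_NTXDESC == 1024 the mask is 0x3ff, so
 * DGE_NEXTTX(1023) == 0; the bitwise AND implements the ring
 * wraparound without a divide.  Both ring sizes (and DGE_TXQUEUELEN)
 * must therefore stay powers of two.
 */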
162
163 /*
164 * Receive descriptor list size.
165  * Each buffer is of size MCLBYTES, and for jumbo packets buffers may
166 * be chained. Due to the nature of the card (high-speed), keep this
167 * ring large. With 2k buffers the ring can store 400 jumbo packets,
168 * which at full speed will be received in just under 3ms.
169 */
170 #define DGE_NRXDESC 2048
171 #define DGE_NRXDESC_MASK (DGE_NRXDESC - 1)
172 #define DGE_NEXTRX(x) (((x) + 1) & DGE_NRXDESC_MASK)
173 /*
174  * Number of descriptors between the head and the written descriptors.
175  * This is to work around two errata.
176 */
177 #define DGE_RXSPACE 10
178 #define DGE_PREVRX(x) (((x) - DGE_RXSPACE) & DGE_NRXDESC_MASK)
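
/*
 * dge_rxintr() refills descriptor (i - DGE_RXSPACE) while it is
 * processing descriptor i, so the tail pointer written to RDT always
 * trails the hardware head by DGE_RXSPACE slots (see DGE_INIT_RXDESC
 * and the RDH/RDT setup in dge_init()).
 */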
179 /*
180 * Receive descriptor fetch thresholds. These are values recommended
181 * by Intel, do not touch them unless you know what you are doing.
182 */
183 #define RXDCTL_PTHRESH_VAL 128
184 #define RXDCTL_HTHRESH_VAL 16
185 #define RXDCTL_WTHRESH_VAL 16
186
187
188 /*
189 * Tweakable parameters; default values.
190 */
191 #define FCRTH 0x30000 /* Send XOFF water mark */
192 #define FCRTL 0x28000 /* Send XON water mark */
193 #define RDTR 0x20 /* Interrupt delay after receive, .8192us units */
194 #define TIDV 0x20 /* Interrupt delay after send, .8192us units */
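
/*
 * Worked example: RDTR == 0x20 means 32 * 0.8192us, i.e. the chip
 * waits roughly 26us after a receive before raising the interrupt,
 * batching back-to-back packets into a single interrupt.
 */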
195
196 /*
197 * Control structures are DMA'd to the i82597 chip. We allocate them in
198  * a single clump that maps to a single DMA segment to make several things
199 * easier.
200 */
201 struct dge_control_data {
202 /*
203 * The transmit descriptors.
204 */
205 struct dge_tdes wcd_txdescs[DGE_NTXDESC];
206
207 /*
208 * The receive descriptors.
209 */
210 struct dge_rdes wcd_rxdescs[DGE_NRXDESC];
211 };
212
213 #define DGE_CDOFF(x) offsetof(struct dge_control_data, x)
214 #define DGE_CDTXOFF(x) DGE_CDOFF(wcd_txdescs[(x)])
215 #define DGE_CDRXOFF(x) DGE_CDOFF(wcd_rxdescs[(x)])
216
217 /*
218  * The DGE interface has a higher max MTU size than normal jumbo frames.
219 */
220 #define DGE_MAX_MTU 16288 /* Max MTU size for this interface */
221
222 /*
223 * Software state for transmit jobs.
224 */
225 struct dge_txsoft {
226 struct mbuf *txs_mbuf; /* head of our mbuf chain */
227 bus_dmamap_t txs_dmamap; /* our DMA map */
228 int txs_firstdesc; /* first descriptor in packet */
229 int txs_lastdesc; /* last descriptor in packet */
230 int txs_ndesc; /* # of descriptors used */
231 };
232
233 /*
234 * Software state for receive buffers. Each descriptor gets a
235 * 2k (MCLBYTES) buffer and a DMA map. For packets which fill
236 * more than one buffer, we chain them together.
237 */
238 struct dge_rxsoft {
239 struct mbuf *rxs_mbuf; /* head of our mbuf chain */
240 bus_dmamap_t rxs_dmamap; /* our DMA map */
241 };
242
243 /*
244 * Software state per device.
245 */
246 struct dge_softc {
247 device_t sc_dev; /* generic device information */
248 bus_space_tag_t sc_st; /* bus space tag */
249 bus_space_handle_t sc_sh; /* bus space handle */
250 bus_dma_tag_t sc_dmat; /* bus DMA tag */
251 struct ethercom sc_ethercom; /* ethernet common data */
252
253 int sc_flags; /* flags; see below */
254 int sc_bus_speed; /* PCI/PCIX bus speed */
255 int sc_pcix_offset; /* PCIX capability register offset */
256
257 const struct dge_product *sc_dgep; /* Pointer to the dge_product entry */
258 pci_chipset_tag_t sc_pc;
259 pcitag_t sc_pt;
260 int sc_mmrbc; /* Max PCIX memory read byte count */
261
262 void *sc_ih; /* interrupt cookie */
263
264 struct ifmedia sc_media;
265
266 bus_dmamap_t sc_cddmamap; /* control data DMA map */
267 #define sc_cddma sc_cddmamap->dm_segs[0].ds_addr
268
269 int sc_align_tweak;
270
271 /*
272 * Software state for the transmit and receive descriptors.
273 */
274 struct dge_txsoft sc_txsoft[DGE_TXQUEUELEN];
275 struct dge_rxsoft sc_rxsoft[DGE_NRXDESC];
276
277 /*
278 * Control data structures.
279 */
280 struct dge_control_data *sc_control_data;
281 #define sc_txdescs sc_control_data->wcd_txdescs
282 #define sc_rxdescs sc_control_data->wcd_rxdescs
283
284 #ifdef DGE_EVENT_COUNTERS
285 /* Event counters. */
286 struct evcnt sc_ev_txsstall; /* Tx stalled due to no txs */
287 struct evcnt sc_ev_txdstall; /* Tx stalled due to no txd */
288 struct evcnt sc_ev_txforceintr; /* Tx interrupts forced */
289 struct evcnt sc_ev_txdw; /* Tx descriptor interrupts */
290 struct evcnt sc_ev_txqe; /* Tx queue empty interrupts */
291 struct evcnt sc_ev_rxintr; /* Rx interrupts */
292 struct evcnt sc_ev_linkintr; /* Link interrupts */
293
294 struct evcnt sc_ev_rxipsum; /* IP checksums checked in-bound */
295 struct evcnt sc_ev_rxtusum; /* TCP/UDP cksums checked in-bound */
296 struct evcnt sc_ev_txipsum; /* IP checksums comp. out-bound */
297 struct evcnt sc_ev_txtusum; /* TCP/UDP cksums comp. out-bound */
298
299 struct evcnt sc_ev_txctx_init; /* Tx cksum context cache initialized */
300 struct evcnt sc_ev_txctx_hit; /* Tx cksum context cache hit */
301 struct evcnt sc_ev_txctx_miss; /* Tx cksum context cache miss */
302
303 struct evcnt sc_ev_txseg[DGE_NTXSEGS]; /* Tx packets w/ N segments */
304 struct evcnt sc_ev_txdrop; /* Tx packets dropped (too many segs) */
305 #endif /* DGE_EVENT_COUNTERS */
306
307 int sc_txfree; /* number of free Tx descriptors */
308 int sc_txnext; /* next ready Tx descriptor */
309
310 int sc_txsfree; /* number of free Tx jobs */
311 int sc_txsnext; /* next free Tx job */
312 int sc_txsdirty; /* dirty Tx jobs */
313
314 uint32_t sc_txctx_ipcs; /* cached Tx IP cksum ctx */
315 uint32_t sc_txctx_tucs; /* cached Tx TCP/UDP cksum ctx */
316
317 int sc_rxptr; /* next ready Rx descriptor/queue ent */
318 int sc_rxdiscard;
319 int sc_rxlen;
320 struct mbuf *sc_rxhead;
321 struct mbuf *sc_rxtail;
322 struct mbuf **sc_rxtailp;
323
324 uint32_t sc_ctrl0; /* prototype CTRL0 register */
325 uint32_t sc_icr; /* prototype interrupt bits */
326 uint32_t sc_tctl; /* prototype TCTL register */
327 uint32_t sc_rctl; /* prototype RCTL register */
328
329 int sc_mchash_type; /* multicast filter offset */
330
331 uint16_t sc_eeprom[EEPROM_SIZE];
332
333 krndsource_t rnd_source; /* random source */
334 #ifdef DGE_OFFBYONE_RXBUG
335 void *sc_bugbuf;
336 SLIST_HEAD(, rxbugentry) sc_buglist;
337 bus_dmamap_t sc_bugmap;
338 struct rxbugentry *sc_entry;
339 #endif
340 };
341
342 #define DGE_RXCHAIN_RESET(sc) \
343 do { \
344 (sc)->sc_rxtailp = &(sc)->sc_rxhead; \
345 *(sc)->sc_rxtailp = NULL; \
346 (sc)->sc_rxlen = 0; \
347 } while (/*CONSTCOND*/0)
348
349 #define DGE_RXCHAIN_LINK(sc, m) \
350 do { \
351 *(sc)->sc_rxtailp = (sc)->sc_rxtail = (m); \
352 (sc)->sc_rxtailp = &(m)->m_next; \
353 } while (/*CONSTCOND*/0)
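
/*
 * sc_rxtailp always points at the final m_next slot of the chain, so
 * DGE_RXCHAIN_LINK() appends an mbuf in O(1) without walking the
 * chain; DGE_RXCHAIN_RESET() re-arms it at sc_rxhead for the next
 * packet.
 */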
354
355 /* sc_flags */
356 #define DGE_F_BUS64 0x20 /* bus is 64-bit */
357 #define DGE_F_PCIX 0x40 /* bus is PCI-X */
358
359 #ifdef DGE_EVENT_COUNTERS
360 #define DGE_EVCNT_INCR(ev) (ev)->ev_count++
361 #else
362 #define DGE_EVCNT_INCR(ev) /* nothing */
363 #endif
364
365 #define CSR_READ(sc, reg) \
366 bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
367 #define CSR_WRITE(sc, reg, val) \
368 bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
369
370 #define DGE_CDTXADDR(sc, x) ((sc)->sc_cddma + DGE_CDTXOFF((x)))
371 #define DGE_CDRXADDR(sc, x) ((sc)->sc_cddma + DGE_CDRXOFF((x)))
372
373 #define DGE_CDTXSYNC(sc, x, n, ops) \
374 do { \
375 int __x, __n; \
376 \
377 __x = (x); \
378 __n = (n); \
379 \
380 /* If it will wrap around, sync to the end of the ring. */ \
381 if ((__x + __n) > DGE_NTXDESC) { \
382 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
383 DGE_CDTXOFF(__x), sizeof(struct dge_tdes) * \
384 (DGE_NTXDESC - __x), (ops)); \
385 __n -= (DGE_NTXDESC - __x); \
386 __x = 0; \
387 } \
388 \
389 /* Now sync whatever is left. */ \
390 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
391 DGE_CDTXOFF(__x), sizeof(struct dge_tdes) * __n, (ops)); \
392 } while (/*CONSTCOND*/0)
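
/*
 * Worked example: DGE_CDTXSYNC(sc, 1020, 8, ops) wraps, so it is
 * split into one sync covering descriptors 1020..1023 and a second
 * covering 0..3.
 */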
393
394 #define DGE_CDRXSYNC(sc, x, ops) \
395 do { \
396 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
397 DGE_CDRXOFF((x)), sizeof(struct dge_rdes), (ops)); \
398 } while (/*CONSTCOND*/0)
399
400 #ifdef DGE_OFFBYONE_RXBUG
401 #define DGE_INIT_RXDESC(sc, x) \
402 do { \
403 struct dge_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)]; \
404 struct dge_rdes *__rxd = &(sc)->sc_rxdescs[(x)]; \
405 struct mbuf *__m = __rxs->rxs_mbuf; \
406 const bus_addr_t __rxaddr = sc->sc_bugmap->dm_segs[0].ds_addr + \
407 (mtod((__m), char *) - (char *)sc->sc_bugbuf); \
408 \
409 __rxd->dr_baddrl = htole32(__rxaddr); \
410 __rxd->dr_baddrh = htole32(((uint64_t)__rxaddr) >> 32); \
411 __rxd->dr_len = 0; \
412 __rxd->dr_cksum = 0; \
413 __rxd->dr_status = 0; \
414 __rxd->dr_errors = 0; \
415 __rxd->dr_special = 0; \
416 DGE_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); \
417 \
418 CSR_WRITE((sc), DGE_RDT, (x)); \
419 } while (/*CONSTCOND*/0)
420 #else
421 #define DGE_INIT_RXDESC(sc, x) \
422 do { \
423 struct dge_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)]; \
424 struct dge_rdes *__rxd = &(sc)->sc_rxdescs[(x)]; \
425 struct mbuf *__m = __rxs->rxs_mbuf; \
426 \
427 /* \
428 * Note: We scoot the packet forward 2 bytes in the buffer \
429 * so that the payload after the Ethernet header is aligned \
430 * to a 4-byte boundary. \
431 * \
432 * XXX BRAINDAMAGE ALERT! \
433 * The stupid chip uses the same size for every buffer, which \
434 * is set in the Receive Control register. We are using the 2K \
435 * size option, but what we REALLY want is (2K - 2)! For this \
436 * reason, we can't "scoot" packets longer than the standard \
437 * Ethernet MTU. On strict-alignment platforms, if the total \
438 * size exceeds (2K - 2) we set align_tweak to 0 and let \
439 * the upper layer copy the headers. \
440 */ \
441 __m->m_data = __m->m_ext.ext_buf + (sc)->sc_align_tweak; \
442 \
443 const bus_addr_t __rxaddr = \
444 __rxs->rxs_dmamap->dm_segs[0].ds_addr + \
445 (sc)->sc_align_tweak; \
446 \
447 __rxd->dr_baddrl = htole32(__rxaddr); \
448 __rxd->dr_baddrh = htole32(((uint64_t)__rxaddr) >> 32); \
449 __rxd->dr_len = 0; \
450 __rxd->dr_cksum = 0; \
451 __rxd->dr_status = 0; \
452 __rxd->dr_errors = 0; \
453 __rxd->dr_special = 0; \
454 DGE_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); \
455 \
456 CSR_WRITE((sc), DGE_RDT, (x)); \
457 } while (/*CONSTCOND*/0)
458 #endif
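
/*
 * The 2-byte "scoot" works because ETHER_HDR_LEN is 14: starting the
 * frame at offset 2 makes the payload after the Ethernet header land
 * on a 4-byte boundary (2 + 14 == 16), which is what strict-alignment
 * platforms need for the IP header.
 */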
459
460 #ifdef DGE_OFFBYONE_RXBUG
461 /*
462 * Allocation constants. Much memory may be used for this.
463 */
464 #ifndef DGE_BUFFER_SIZE
465 #define DGE_BUFFER_SIZE DGE_MAX_MTU
466 #endif
467 #define DGE_NBUFFERS (4*DGE_NRXDESC)
468 #define DGE_RXMEM (DGE_NBUFFERS*DGE_BUFFER_SIZE)
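
/*
 * Worked example of the cost: with the defaults this is
 * 4 * 2048 buffers of 16288 bytes each, i.e. roughly 127MB of
 * wired-down receive memory.
 */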
469
470 struct rxbugentry {
471 SLIST_ENTRY(rxbugentry) rb_entry;
472 int rb_slot;
473 };
474
475 static int
476 dge_alloc_rcvmem(struct dge_softc *sc)
477 {
478 char *kva;
479 bus_dma_segment_t seg;
480 int i, rseg, state, error;
481 struct rxbugentry *entry;
482
483 state = error = 0;
484
485 if (bus_dmamem_alloc(sc->sc_dmat, DGE_RXMEM, PAGE_SIZE, 0,
486 &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
487 aprint_error_dev(sc->sc_dev, "can't alloc rx buffers\n");
488 return ENOBUFS;
489 }
490
491 state = 1;
492 if (bus_dmamem_map(sc->sc_dmat, &seg, rseg, DGE_RXMEM, (void **)&kva,
493 BUS_DMA_NOWAIT)) {
494 aprint_error_dev(sc->sc_dev,
495 "can't map DMA buffers (%d bytes)\n", (int)DGE_RXMEM);
496 error = ENOBUFS;
497 goto out;
498 }
499
500 state = 2;
501 if (bus_dmamap_create(sc->sc_dmat, DGE_RXMEM, 1, DGE_RXMEM, 0,
502 BUS_DMA_NOWAIT, &sc->sc_bugmap)) {
503 aprint_error_dev(sc->sc_dev, "can't create DMA map\n");
504 error = ENOBUFS;
505 goto out;
506 }
507
508 state = 3;
509 if (bus_dmamap_load(sc->sc_dmat, sc->sc_bugmap,
510 kva, DGE_RXMEM, NULL, BUS_DMA_NOWAIT)) {
511 aprint_error_dev(sc->sc_dev, "can't load DMA map\n");
512 error = ENOBUFS;
513 goto out;
514 }
515
516 state = 4;
517 sc->sc_bugbuf = (void *)kva;
518 SLIST_INIT(&sc->sc_buglist);
519
520 /*
521 * Now divide it up into DGE_BUFFER_SIZE pieces and save the addresses
522 * in an array.
523 */
524 entry = malloc(sizeof(*entry) * DGE_NBUFFERS, M_DEVBUF, M_WAITOK);
525 sc->sc_entry = entry;
526 for (i = 0; i < DGE_NBUFFERS; i++) {
527 entry[i].rb_slot = i;
528 SLIST_INSERT_HEAD(&sc->sc_buglist, &entry[i], rb_entry);
529 }
530 out:
531 if (error != 0) {
532 switch (state) {
533 case 4:
534 bus_dmamap_unload(sc->sc_dmat, sc->sc_bugmap);
535 /* FALLTHROUGH */
536 case 3:
537 bus_dmamap_destroy(sc->sc_dmat, sc->sc_bugmap);
538 /* FALLTHROUGH */
539 case 2:
540 bus_dmamem_unmap(sc->sc_dmat, kva, DGE_RXMEM);
541 /* FALLTHROUGH */
542 case 1:
543 bus_dmamem_free(sc->sc_dmat, &seg, rseg);
544 break;
545 default:
546 break;
547 }
548 }
549
550 return error;
551 }
552
553 /*
554 * Allocate a jumbo buffer.
555 */
556 static void *
557 dge_getbuf(struct dge_softc *sc)
558 {
559 struct rxbugentry *entry;
560
561 entry = SLIST_FIRST(&sc->sc_buglist);
562
563 if (entry == NULL) {
564 printf("%s: no free RX buffers\n", device_xname(sc->sc_dev));
565 return NULL;
566 }
567
568 SLIST_REMOVE_HEAD(&sc->sc_buglist, rb_entry);
569 return (char *)sc->sc_bugbuf + entry->rb_slot * DGE_BUFFER_SIZE;
570 }
571
572 /*
573 * Release a jumbo buffer.
574 */
575 static void
576 dge_freebuf(struct mbuf *m, void *buf, size_t size, void *arg)
577 {
578 struct rxbugentry *entry;
579 struct dge_softc *sc;
580 int i, s;
581
582 /* Extract the softc struct pointer. */
583 sc = (struct dge_softc *)arg;
584
585 if (sc == NULL)
586 panic("dge_freebuf: can't find softc pointer!");
587
588 /* calculate the slot this buffer belongs to */
589
590 i = ((char *)buf - (char *)sc->sc_bugbuf) / DGE_BUFFER_SIZE;
591
592 if ((i < 0) || (i >= DGE_NBUFFERS))
593 panic("dge_freebuf: asked to free buffer %d!", i);
594
595 s = splvm();
596 entry = sc->sc_entry + i;
597 SLIST_INSERT_HEAD(&sc->sc_buglist, entry, rb_entry);
598
599 if (__predict_true(m != NULL))
600 pool_cache_put(mb_cache, m);
601 splx(s);
602 }
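
/*
 * dge_getbuf() and dge_freebuf() pair up through external mbuf
 * storage.  A minimal sketch of how the receive path is expected to
 * attach a slot (assuming the standard MEXTADD() macro; the real code
 * is in dge_add_rxbuf()):
 *
 *	char *buf;
 *
 *	if ((buf = dge_getbuf(sc)) == NULL)
 *		return ENOBUFS;
 *	MEXTADD(m, buf, DGE_BUFFER_SIZE, M_DEVBUF, dge_freebuf, sc);
 *
 * dge_freebuf() then returns the slot to sc_buglist when the last
 * reference to the mbuf is dropped.
 */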
603 #endif
604
605 static void dge_start(struct ifnet *);
606 static void dge_watchdog(struct ifnet *);
607 static int dge_ioctl(struct ifnet *, u_long, void *);
608 static int dge_init(struct ifnet *);
609 static void dge_stop(struct ifnet *, int);
610
611 static bool dge_shutdown(device_t, int);
612
613 static void dge_reset(struct dge_softc *);
614 static void dge_rxdrain(struct dge_softc *);
615 static int dge_add_rxbuf(struct dge_softc *, int);
616
617 static void dge_set_filter(struct dge_softc *);
618
619 static int dge_intr(void *);
620 static void dge_txintr(struct dge_softc *);
621 static void dge_rxintr(struct dge_softc *);
622 static void dge_linkintr(struct dge_softc *, uint32_t);
623
624 static int dge_match(device_t, cfdata_t, void *);
625 static void dge_attach(device_t, device_t, void *);
626
627 static int dge_read_eeprom(struct dge_softc *sc);
628 static int dge_eeprom_clockin(struct dge_softc *sc);
629 static void dge_eeprom_clockout(struct dge_softc *sc, int bit);
630 static uint16_t dge_eeprom_word(struct dge_softc *sc, int addr);
631 static int dge_xgmii_mediachange(struct ifnet *);
632 static void dge_xgmii_mediastatus(struct ifnet *, struct ifmediareq *);
633 static void dge_xgmii_reset(struct dge_softc *);
634 static void dge_xgmii_writereg(struct dge_softc *, int, int, int);
635
636
637 CFATTACH_DECL_NEW(dge, sizeof(struct dge_softc),
638 dge_match, dge_attach, NULL, NULL);
639
640 #ifdef DGE_EVENT_COUNTERS
641 #if DGE_NTXSEGS > 100
642 #error Update dge_txseg_evcnt_names
643 #endif
644 static char (*dge_txseg_evcnt_names)[DGE_NTXSEGS][8 /* "txseg00" + \0 */];
645 #endif /* DGE_EVENT_COUNTERS */
646
647 /*
648 * Devices supported by this driver.
649 */
650 struct dge_product {
651 const char *name;
652 int flags;
653 #define DGEP_F_10G_LR 0x01
654 #define DGEP_F_10G_SR 0x02
655 };
656
657 static const struct dge_product i82597EX_lr = {
658 .name = "Intel i82597EX 10GbE-LR Ethernet",
659 .flags = DGEP_F_10G_LR
660 };
661
662 static const struct dge_product i82597EX_sr = {
663 .name = "Intel i82597EX 10GbE-SR Ethernet",
664 .flags = DGEP_F_10G_SR
665 };
666
667 static const struct device_compatible_entry compat_data[] = {
668 { .id = PCI_ID_CODE(PCI_VENDOR_INTEL,
669 PCI_PRODUCT_INTEL_82597EX),
670 .data = &i82597EX_lr },
671
672 { .id = PCI_ID_CODE(PCI_VENDOR_INTEL,
673 PCI_PRODUCT_INTEL_82597EX_SR),
674 .data = &i82597EX_sr },
675
676 PCI_COMPAT_EOL
677 };
678
679 static int
680 dge_match(device_t parent, cfdata_t cf, void *aux)
681 {
682 struct pci_attach_args *pa = aux;
683
684 return pci_compatible_match(pa, compat_data);
685 }
686
687 static void
688 dge_attach(device_t parent, device_t self, void *aux)
689 {
690 struct dge_softc *sc = device_private(self);
691 struct pci_attach_args *pa = aux;
692 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
693 pci_chipset_tag_t pc = pa->pa_pc;
694 pci_intr_handle_t ih;
695 const char *intrstr = NULL;
696 bus_dma_segment_t seg;
697 int i, rseg, error;
698 uint8_t enaddr[ETHER_ADDR_LEN];
699 pcireg_t preg, memtype;
700 uint32_t reg;
701 char intrbuf[PCI_INTRSTR_LEN];
702 const struct device_compatible_entry *dce;
703 const struct dge_product *dgep;
704
705 dce = pci_compatible_lookup(pa, compat_data);
706 KASSERT(dce != NULL);
707 sc->sc_dgep = dgep = dce->data;
708
709 sc->sc_dev = self;
710 sc->sc_pc = pa->pa_pc;
711 sc->sc_pt = pa->pa_tag;
712
713 if (pci_dma64_available(pa))
714 sc->sc_dmat = pa->pa_dmat64;
715 else
716 sc->sc_dmat = pa->pa_dmat;
717
718 pci_aprint_devinfo_fancy(pa, "Ethernet controller",
719 dgep->name, 1);
720
721 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, DGE_PCI_BAR);
722 if (pci_mapreg_map(pa, DGE_PCI_BAR, memtype, 0,
723 &sc->sc_st, &sc->sc_sh, NULL, NULL)) {
724 aprint_error_dev(sc->sc_dev,
725 "unable to map device registers\n");
726 return;
727 }
728
729 /* Enable bus mastering */
730 preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
731 preg |= PCI_COMMAND_MASTER_ENABLE;
732 pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
733
734 /*
735 * Map and establish our interrupt.
736 */
737 if (pci_intr_map(pa, &ih)) {
738 aprint_error_dev(sc->sc_dev, "unable to map interrupt\n");
739 return;
740 }
741 intrstr = pci_intr_string(pc, ih, intrbuf, sizeof(intrbuf));
742 sc->sc_ih = pci_intr_establish_xname(pc, ih, IPL_NET, dge_intr, sc,
743 device_xname(self));
744 if (sc->sc_ih == NULL) {
745 aprint_error_dev(sc->sc_dev, "unable to establish interrupt");
746 if (intrstr != NULL)
747 aprint_error(" at %s", intrstr);
748 aprint_error("\n");
749 return;
750 }
751 aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
752
753 /*
754 * Determine a few things about the bus we're connected to.
755 */
756 reg = CSR_READ(sc, DGE_STATUS);
757 if (reg & STATUS_BUS64)
758 sc->sc_flags |= DGE_F_BUS64;
759
760 sc->sc_flags |= DGE_F_PCIX;
761 if (pci_get_capability(pa->pa_pc, pa->pa_tag,
762 PCI_CAP_PCIX,
763 &sc->sc_pcix_offset, NULL) == 0)
764 aprint_error_dev(sc->sc_dev, "unable to find PCIX "
765 "capability\n");
766
767 if (sc->sc_flags & DGE_F_PCIX) {
768 switch (reg & STATUS_PCIX_MSK) {
769 case STATUS_PCIX_66:
770 sc->sc_bus_speed = 66;
771 break;
772 case STATUS_PCIX_100:
773 sc->sc_bus_speed = 100;
774 break;
775 case STATUS_PCIX_133:
776 sc->sc_bus_speed = 133;
777 break;
778 default:
779 aprint_error_dev(sc->sc_dev,
780 "unknown PCIXSPD %d; assuming 66MHz\n",
781 reg & STATUS_PCIX_MSK);
782 sc->sc_bus_speed = 66;
783 }
784 } else
785 sc->sc_bus_speed = (reg & STATUS_BUS64) ? 66 : 33;
786 aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
787 (sc->sc_flags & DGE_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
788 (sc->sc_flags & DGE_F_PCIX) ? "PCIX" : "PCI");
789
790 /*
791 * Allocate the control data structures, and create and load the
792 * DMA map for it.
793 */
794 if ((error = bus_dmamem_alloc(sc->sc_dmat,
795 sizeof(struct dge_control_data), PAGE_SIZE, 0, &seg, 1, &rseg,
796 0)) != 0) {
797 aprint_error_dev(sc->sc_dev,
798 "unable to allocate control data, error = %d\n",
799 error);
800 goto fail_0;
801 }
802
803 if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
804 sizeof(struct dge_control_data), (void **)&sc->sc_control_data,
805 0)) != 0) {
806 aprint_error_dev(sc->sc_dev,
807 "unable to map control data, error = %d\n", error);
808 goto fail_1;
809 }
810
811 if ((error = bus_dmamap_create(sc->sc_dmat,
812 sizeof(struct dge_control_data), 1,
813 sizeof(struct dge_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
814 aprint_error_dev(sc->sc_dev, "unable to create control data "
815 "DMA map, error = %d\n", error);
816 goto fail_2;
817 }
818
819 if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
820 sc->sc_control_data, sizeof(struct dge_control_data), NULL,
821 0)) != 0) {
822 aprint_error_dev(sc->sc_dev,
823 "unable to load control data DMA map, error = %d\n",
824 error);
825 goto fail_3;
826 }
827
828 #ifdef DGE_OFFBYONE_RXBUG
829 if (dge_alloc_rcvmem(sc) != 0)
830 return; /* Already complained */
831 #endif
832 /*
833 * Create the transmit buffer DMA maps.
834 */
835 for (i = 0; i < DGE_TXQUEUELEN; i++) {
836 if ((error = bus_dmamap_create(sc->sc_dmat, DGE_MAX_MTU,
837 DGE_NTXSEGS, MCLBYTES, 0, 0,
838 &sc->sc_txsoft[i].txs_dmamap)) != 0) {
839 aprint_error_dev(sc->sc_dev, "unable to create Tx DMA map %d, "
840 "error = %d\n", i, error);
841 goto fail_4;
842 }
843 }
844
845 /*
846 * Create the receive buffer DMA maps.
847 */
848 for (i = 0; i < DGE_NRXDESC; i++) {
849 #ifdef DGE_OFFBYONE_RXBUG
850 if ((error = bus_dmamap_create(sc->sc_dmat, DGE_BUFFER_SIZE, 1,
851 DGE_BUFFER_SIZE, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
852 #else
853 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
854 MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
855 #endif
856 aprint_error_dev(sc->sc_dev, "unable to create Rx DMA "
857 "map %d, error = %d\n", i, error);
858 goto fail_5;
859 }
860 sc->sc_rxsoft[i].rxs_mbuf = NULL;
861 }
862
863 /*
864 * Set bits in ctrl0 register.
865 	 * Should we get the software-defined pins out of the EEPROM?
866 */
867 sc->sc_ctrl0 |= CTRL0_RPE | CTRL0_TPE; /* XON/XOFF */
868 sc->sc_ctrl0 |= CTRL0_SDP3_DIR | CTRL0_SDP2_DIR | CTRL0_SDP1_DIR |
869 CTRL0_SDP0_DIR | CTRL0_SDP3 | CTRL0_SDP2 | CTRL0_SDP0;
870
871 /*
872 * Reset the chip to a known state.
873 */
874 dge_reset(sc);
875
876 /*
877 * Reset the PHY.
878 */
879 dge_xgmii_reset(sc);
880
881 /*
882 * Read in EEPROM data.
883 */
884 if (dge_read_eeprom(sc)) {
885 aprint_error_dev(sc->sc_dev, "couldn't read EEPROM\n");
886 return;
887 }
888
889 /*
890 * Get the ethernet address.
891 */
892 enaddr[0] = sc->sc_eeprom[EE_ADDR01] & 0377;
893 enaddr[1] = sc->sc_eeprom[EE_ADDR01] >> 8;
894 enaddr[2] = sc->sc_eeprom[EE_ADDR23] & 0377;
895 enaddr[3] = sc->sc_eeprom[EE_ADDR23] >> 8;
896 enaddr[4] = sc->sc_eeprom[EE_ADDR45] & 0377;
897 enaddr[5] = sc->sc_eeprom[EE_ADDR45] >> 8;
898
899 aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
900 ether_sprintf(enaddr));
901
902 /*
903 * Setup media stuff.
904 */
905 sc->sc_ethercom.ec_ifmedia = &sc->sc_media;
906 ifmedia_init(&sc->sc_media, IFM_IMASK, dge_xgmii_mediachange,
907 dge_xgmii_mediastatus);
908 if (dgep->flags & DGEP_F_10G_SR) {
909 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_10G_SR, 0, NULL);
910 ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_10G_SR);
911 } else { /* XXX default is LR */
912 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_10G_LR, 0, NULL);
913 ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_10G_LR);
914 }
915
916 ifp = &sc->sc_ethercom.ec_if;
917 strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
918 ifp->if_softc = sc;
919 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
920 ifp->if_ioctl = dge_ioctl;
921 ifp->if_start = dge_start;
922 ifp->if_watchdog = dge_watchdog;
923 ifp->if_init = dge_init;
924 ifp->if_stop = dge_stop;
925 IFQ_SET_MAXLEN(&ifp->if_snd, uimax(DGE_IFQUEUELEN, IFQ_MAXLEN));
926 IFQ_SET_READY(&ifp->if_snd);
927
928 sc->sc_ethercom.ec_capabilities |=
929 ETHERCAP_JUMBO_MTU | ETHERCAP_VLAN_MTU;
930
931 /*
932 	 * We can perform IPv4, TCPv4 and UDPv4 checksums in- and out-bound.
933 */
934 ifp->if_capabilities |=
935 IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
936 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
937 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx;
938
939 /*
940 * Attach the interface.
941 */
942 if_attach(ifp);
943 if_deferred_start_init(ifp, NULL);
944 ether_ifattach(ifp, enaddr);
945 rnd_attach_source(&sc->rnd_source, device_xname(sc->sc_dev),
946 RND_TYPE_NET, RND_FLAG_DEFAULT);
947
948 #ifdef DGE_EVENT_COUNTERS
949 	/* Set up the segment event counter names */
950 if (dge_txseg_evcnt_names == NULL) {
951 dge_txseg_evcnt_names =
952 malloc(sizeof(*dge_txseg_evcnt_names), M_DEVBUF, M_WAITOK);
953 for (i = 0; i < DGE_NTXSEGS; i++)
954 snprintf((*dge_txseg_evcnt_names)[i],
955 sizeof((*dge_txseg_evcnt_names)[i]), "txseg%d", i);
956 }
957
958 /* Attach event counters. */
959 evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
960 NULL, device_xname(sc->sc_dev), "txsstall");
961 evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
962 NULL, device_xname(sc->sc_dev), "txdstall");
963 evcnt_attach_dynamic(&sc->sc_ev_txforceintr, EVCNT_TYPE_MISC,
964 NULL, device_xname(sc->sc_dev), "txforceintr");
965 evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
966 NULL, device_xname(sc->sc_dev), "txdw");
967 evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
968 NULL, device_xname(sc->sc_dev), "txqe");
969 evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
970 NULL, device_xname(sc->sc_dev), "rxintr");
971 evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
972 NULL, device_xname(sc->sc_dev), "linkintr");
973
974 evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
975 NULL, device_xname(sc->sc_dev), "rxipsum");
976 evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
977 NULL, device_xname(sc->sc_dev), "rxtusum");
978 evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
979 NULL, device_xname(sc->sc_dev), "txipsum");
980 evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
981 NULL, device_xname(sc->sc_dev), "txtusum");
982
983 evcnt_attach_dynamic(&sc->sc_ev_txctx_init, EVCNT_TYPE_MISC,
984 NULL, device_xname(sc->sc_dev), "txctx init");
985 evcnt_attach_dynamic(&sc->sc_ev_txctx_hit, EVCNT_TYPE_MISC,
986 NULL, device_xname(sc->sc_dev), "txctx hit");
987 evcnt_attach_dynamic(&sc->sc_ev_txctx_miss, EVCNT_TYPE_MISC,
988 NULL, device_xname(sc->sc_dev), "txctx miss");
989
990 for (i = 0; i < DGE_NTXSEGS; i++)
991 evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
992 NULL, device_xname(sc->sc_dev), (*dge_txseg_evcnt_names)[i]);
993
994 evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
995 NULL, device_xname(sc->sc_dev), "txdrop");
996
997 #endif /* DGE_EVENT_COUNTERS */
998
999 /*
1000 * Make sure the interface is shutdown during reboot.
1001 */
1002 if (pmf_device_register1(self, NULL, NULL, dge_shutdown))
1003 pmf_class_network_register(self, ifp);
1004 else
1005 aprint_error_dev(self, "couldn't establish power handler\n");
1006
1007 return;
1008
1009 /*
1010 * Free any resources we've allocated during the failed attach
1011 * attempt. Do this in reverse order and fall through.
1012 */
1013 fail_5:
1014 for (i = 0; i < DGE_NRXDESC; i++) {
1015 if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
1016 bus_dmamap_destroy(sc->sc_dmat,
1017 sc->sc_rxsoft[i].rxs_dmamap);
1018 }
1019 fail_4:
1020 for (i = 0; i < DGE_TXQUEUELEN; i++) {
1021 if (sc->sc_txsoft[i].txs_dmamap != NULL)
1022 bus_dmamap_destroy(sc->sc_dmat,
1023 sc->sc_txsoft[i].txs_dmamap);
1024 }
1025 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
1026 fail_3:
1027 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
1028 fail_2:
1029 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
1030 sizeof(struct dge_control_data));
1031 fail_1:
1032 bus_dmamem_free(sc->sc_dmat, &seg, rseg);
1033 fail_0:
1034 return;
1035 }
1036
1037 /*
1038 * dge_shutdown:
1039 *
1040 * Make sure the interface is stopped at reboot time.
1041 */
1042 static bool
1043 dge_shutdown(device_t self, int howto)
1044 {
1045 struct dge_softc *sc;
1046
1047 sc = device_private(self);
1048 dge_stop(&sc->sc_ethercom.ec_if, 1);
1049
1050 return true;
1051 }
1052
1053 /*
1054 * dge_tx_cksum:
1055 *
1056 * Set up TCP/IP checksumming parameters for the
1057 * specified packet.
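 *
 *	The chip holds a single checksum context at a time, so we cache
 *	the last IPCS/TUCS words written (sc_txctx_ipcs/sc_txctx_tucs)
 *	and only spend a context descriptor when they change.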
1058 */
1059 static int
1060 dge_tx_cksum(struct dge_softc *sc, struct dge_txsoft *txs, uint8_t *fieldsp)
1061 {
1062 struct mbuf *m0 = txs->txs_mbuf;
1063 struct dge_ctdes *t;
1064 uint32_t ipcs, tucs;
1065 struct ether_header *eh;
1066 int offset, iphl;
1067 uint8_t fields = 0;
1068
1069 /*
1070 * XXX It would be nice if the mbuf pkthdr had offset
1071 * fields for the protocol headers.
1072 */
1073
1074 eh = mtod(m0, struct ether_header *);
1075 	switch (ntohs(eh->ether_type)) {
1076 case ETHERTYPE_IP:
1077 offset = ETHER_HDR_LEN;
1078 break;
1079
1080 case ETHERTYPE_VLAN:
1081 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
1082 break;
1083
1084 default:
1085 /*
1086 * Don't support this protocol or encapsulation.
1087 */
1088 *fieldsp = 0;
1089 return 0;
1090 }
1091
1092 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
1093
1094 /*
1095 * NOTE: Even if we're not using the IP or TCP/UDP checksum
1096 * offload feature, if we load the context descriptor, we
1097 * MUST provide valid values for IPCSS and TUCSS fields.
1098 */
1099
1100 if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
1101 DGE_EVCNT_INCR(&sc->sc_ev_txipsum);
1102 fields |= TDESC_POPTS_IXSM;
1103 ipcs = DGE_TCPIP_IPCSS(offset) |
1104 DGE_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
1105 DGE_TCPIP_IPCSE(offset + iphl - 1);
1106 } else if (__predict_true(sc->sc_txctx_ipcs != 0xffffffff)) {
1107 /* Use the cached value. */
1108 ipcs = sc->sc_txctx_ipcs;
1109 } else {
1110 /* Just initialize it to the likely value anyway. */
1111 ipcs = DGE_TCPIP_IPCSS(offset) |
1112 DGE_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
1113 DGE_TCPIP_IPCSE(offset + iphl - 1);
1114 }
1115 DPRINTF(DGE_DEBUG_CKSUM,
1116 ("%s: CKSUM: offset %d ipcs 0x%x\n",
1117 device_xname(sc->sc_dev), offset, ipcs));
1118
1119 offset += iphl;
1120
1121 if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_UDPv4)) {
1122 DGE_EVCNT_INCR(&sc->sc_ev_txtusum);
1123 fields |= TDESC_POPTS_TXSM;
1124 tucs = DGE_TCPIP_TUCSS(offset) |
1125 DGE_TCPIP_TUCSO(offset + M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
1126 DGE_TCPIP_TUCSE(0) /* rest of packet */;
1127 } else if (__predict_true(sc->sc_txctx_tucs != 0xffffffff)) {
1128 /* Use the cached value. */
1129 tucs = sc->sc_txctx_tucs;
1130 } else {
1131 /* Just initialize it to a valid TCP context. */
1132 tucs = DGE_TCPIP_TUCSS(offset) |
1133 DGE_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
1134 DGE_TCPIP_TUCSE(0) /* rest of packet */;
1135 }
1136
1137 DPRINTF(DGE_DEBUG_CKSUM,
1138 ("%s: CKSUM: offset %d tucs 0x%x\n",
1139 device_xname(sc->sc_dev), offset, tucs));
1140
1141 if (sc->sc_txctx_ipcs == ipcs &&
1142 sc->sc_txctx_tucs == tucs) {
1143 /* Cached context is fine. */
1144 DGE_EVCNT_INCR(&sc->sc_ev_txctx_hit);
1145 } else {
1146 /* Fill in the context descriptor. */
1147 #ifdef DGE_EVENT_COUNTERS
1148 if (sc->sc_txctx_ipcs == 0xffffffff &&
1149 sc->sc_txctx_tucs == 0xffffffff)
1150 DGE_EVCNT_INCR(&sc->sc_ev_txctx_init);
1151 else
1152 DGE_EVCNT_INCR(&sc->sc_ev_txctx_miss);
1153 #endif
1154 t = (struct dge_ctdes *)&sc->sc_txdescs[sc->sc_txnext];
1155 t->dc_tcpip_ipcs = htole32(ipcs);
1156 t->dc_tcpip_tucs = htole32(tucs);
1157 t->dc_tcpip_cmdlen = htole32(TDESC_DTYP_CTD);
1158 t->dc_tcpip_seg = 0;
1159 DGE_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
1160
1161 sc->sc_txctx_ipcs = ipcs;
1162 sc->sc_txctx_tucs = tucs;
1163
1164 sc->sc_txnext = DGE_NEXTTX(sc->sc_txnext);
1165 txs->txs_ndesc++;
1166 }
1167
1168 *fieldsp = fields;
1169
1170 return 0;
1171 }
1172
1173 /*
1174 * dge_start: [ifnet interface function]
1175 *
1176 * Start packet transmission on the interface.
1177 */
1178 static void
1179 dge_start(struct ifnet *ifp)
1180 {
1181 struct dge_softc *sc = ifp->if_softc;
1182 struct mbuf *m0;
1183 struct dge_txsoft *txs;
1184 bus_dmamap_t dmamap;
1185 int error, nexttx, lasttx = -1, ofree, seg;
1186 uint32_t cksumcmd;
1187 uint8_t cksumfields;
1188
1189 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
1190 return;
1191
1192 /*
1193 * Remember the previous number of free descriptors.
1194 */
1195 ofree = sc->sc_txfree;
1196
1197 /*
1198 * Loop through the send queue, setting up transmit descriptors
1199 * until we drain the queue, or use up all available transmit
1200 * descriptors.
1201 */
1202 for (;;) {
1203 /* Grab a packet off the queue. */
1204 IFQ_POLL(&ifp->if_snd, m0);
1205 if (m0 == NULL)
1206 break;
1207
1208 DPRINTF(DGE_DEBUG_TX,
1209 ("%s: TX: have packet to transmit: %p\n",
1210 device_xname(sc->sc_dev), m0));
1211
1212 /* Get a work queue entry. */
1213 if (sc->sc_txsfree < DGE_TXQUEUE_GC) {
1214 dge_txintr(sc);
1215 if (sc->sc_txsfree == 0) {
1216 DPRINTF(DGE_DEBUG_TX,
1217 ("%s: TX: no free job descriptors\n",
1218 device_xname(sc->sc_dev)));
1219 DGE_EVCNT_INCR(&sc->sc_ev_txsstall);
1220 break;
1221 }
1222 }
1223
1224 txs = &sc->sc_txsoft[sc->sc_txsnext];
1225 dmamap = txs->txs_dmamap;
1226
1227 /*
1228 * Load the DMA map. If this fails, the packet either
1229 * didn't fit in the allotted number of segments, or we
1230 * were short on resources. For the too-many-segments
1231 * case, we simply report an error and drop the packet,
1232 * since we can't sanely copy a jumbo packet to a single
1233 * buffer.
1234 */
1235 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
1236 BUS_DMA_WRITE | BUS_DMA_NOWAIT);
1237 if (error) {
1238 if (error == EFBIG) {
1239 DGE_EVCNT_INCR(&sc->sc_ev_txdrop);
1240 printf("%s: Tx packet consumes too many "
1241 "DMA segments, dropping...\n",
1242 device_xname(sc->sc_dev));
1243 IFQ_DEQUEUE(&ifp->if_snd, m0);
1244 m_freem(m0);
1245 continue;
1246 }
1247 /*
1248 * Short on resources, just stop for now.
1249 */
1250 DPRINTF(DGE_DEBUG_TX,
1251 ("%s: TX: dmamap load failed: %d\n",
1252 device_xname(sc->sc_dev), error));
1253 break;
1254 }
1255
1256 /*
1257 * Ensure we have enough descriptors free to describe
1258 * the packet. Note, we always reserve one descriptor
1259 * at the end of the ring due to the semantics of the
1260 * TDT register, plus one more in the event we need
1261 * to re-load checksum offload context.
1262 */
1263 if (dmamap->dm_nsegs > (sc->sc_txfree - 2)) {
1264 /*
1265 * Not enough free descriptors to transmit this
1266 * packet. We haven't committed anything yet,
1267 * so just unload the DMA map, put the packet
1268 		 * back on the queue, and punt. Notify the upper
1269 * layer that there are no more slots left.
1270 */
1271 DPRINTF(DGE_DEBUG_TX,
1272 ("%s: TX: need %d descriptors, have %d\n",
1273 device_xname(sc->sc_dev), dmamap->dm_nsegs,
1274 sc->sc_txfree - 1));
1275 ifp->if_flags |= IFF_OACTIVE;
1276 bus_dmamap_unload(sc->sc_dmat, dmamap);
1277 DGE_EVCNT_INCR(&sc->sc_ev_txdstall);
1278 break;
1279 }
1280
1281 IFQ_DEQUEUE(&ifp->if_snd, m0);
1282
1283 /*
1284 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
1285 */
1286
1287 /* Sync the DMA map. */
1288 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
1289 BUS_DMASYNC_PREWRITE);
1290
1291 DPRINTF(DGE_DEBUG_TX,
1292 ("%s: TX: packet has %d DMA segments\n",
1293 device_xname(sc->sc_dev), dmamap->dm_nsegs));
1294
1295 DGE_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
1296
1297 /*
1298 * Store a pointer to the packet so that we can free it
1299 * later.
1300 *
1301 		 * Initially, we take the number of descriptors the
1302 		 * packet uses to be the number of DMA segments. This may be
1303 * incremented by 1 if we do checksum offload (a descriptor
1304 * is used to set the checksum context).
1305 */
1306 txs->txs_mbuf = m0;
1307 txs->txs_firstdesc = sc->sc_txnext;
1308 txs->txs_ndesc = dmamap->dm_nsegs;
1309
1310 /*
1311 * Set up checksum offload parameters for
1312 * this packet.
1313 */
1314 if (m0->m_pkthdr.csum_flags &
1315 (M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4)) {
1316 if (dge_tx_cksum(sc, txs, &cksumfields) != 0) {
1317 /* Error message already displayed. */
1318 bus_dmamap_unload(sc->sc_dmat, dmamap);
1319 continue;
1320 }
1321 } else {
1322 cksumfields = 0;
1323 }
1324
1325 cksumcmd = TDESC_DCMD_IDE | TDESC_DTYP_DATA;
1326
1327 /*
1328 * Initialize the transmit descriptor.
1329 */
1330 for (nexttx = sc->sc_txnext, seg = 0;
1331 seg < dmamap->dm_nsegs;
1332 seg++, nexttx = DGE_NEXTTX(nexttx)) {
1333 sc->sc_txdescs[nexttx].dt_baddrh =
1334 htole32(((uint64_t)dmamap->dm_segs[seg].ds_addr) >> 32);
1335 sc->sc_txdescs[nexttx].dt_baddrl =
1336 htole32(dmamap->dm_segs[seg].ds_addr);
1337 sc->sc_txdescs[nexttx].dt_ctl =
1338 htole32(cksumcmd | dmamap->dm_segs[seg].ds_len);
1339 sc->sc_txdescs[nexttx].dt_status = 0;
1340 sc->sc_txdescs[nexttx].dt_popts = cksumfields;
1341 sc->sc_txdescs[nexttx].dt_vlan = 0;
1342 lasttx = nexttx;
1343
1344 DPRINTF(DGE_DEBUG_TX,
1345 ("%s: TX: desc %d: high 0x%08lx, low 0x%08lx, len 0x%04lx\n",
1346 device_xname(sc->sc_dev), nexttx,
1347 (unsigned long)(((uint64_t)dmamap->dm_segs[seg].ds_addr) >> 32),
1348 (unsigned long)((uint32_t)dmamap->dm_segs[seg].ds_addr),
1349 (unsigned long)dmamap->dm_segs[seg].ds_len));
1350 }
1351
1352 KASSERT(lasttx != -1);
1353
1354 /*
1355 * Set up the command byte on the last descriptor of
1356 * the packet. If we're in the interrupt delay window,
1357 * delay the interrupt.
1358 */
1359 sc->sc_txdescs[lasttx].dt_ctl |=
1360 htole32(TDESC_DCMD_EOP | TDESC_DCMD_RS);
1361
1362 txs->txs_lastdesc = lasttx;
1363
1364 DPRINTF(DGE_DEBUG_TX,
1365 ("%s: TX: desc %d: cmdlen 0x%08x\n", device_xname(sc->sc_dev),
1366 lasttx, le32toh(sc->sc_txdescs[lasttx].dt_ctl)));
1367
1368 /* Sync the descriptors we're using. */
1369 DGE_CDTXSYNC(sc, sc->sc_txnext, dmamap->dm_nsegs,
1370 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1371
1372 /* Give the packet to the chip. */
1373 CSR_WRITE(sc, DGE_TDT, nexttx);
1374
1375 DPRINTF(DGE_DEBUG_TX,
1376 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
1377
1378 DPRINTF(DGE_DEBUG_TX,
1379 ("%s: TX: finished transmitting packet, job %d\n",
1380 device_xname(sc->sc_dev), sc->sc_txsnext));
1381
1382 /* Advance the tx pointer. */
1383 sc->sc_txfree -= txs->txs_ndesc;
1384 sc->sc_txnext = nexttx;
1385
1386 sc->sc_txsfree--;
1387 sc->sc_txsnext = DGE_NEXTTXS(sc->sc_txsnext);
1388
1389 /* Pass the packet to any BPF listeners. */
1390 bpf_mtap(ifp, m0, BPF_D_OUT);
1391 }
1392
1393 if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
1394 /* No more slots; notify upper layer. */
1395 ifp->if_flags |= IFF_OACTIVE;
1396 }
1397
1398 if (sc->sc_txfree != ofree) {
1399 /* Set a watchdog timer in case the chip flakes out. */
1400 ifp->if_timer = 5;
1401 }
1402 }
1403
1404 /*
1405 * dge_watchdog: [ifnet interface function]
1406 *
1407 * Watchdog timer handler.
1408 */
1409 static void
1410 dge_watchdog(struct ifnet *ifp)
1411 {
1412 struct dge_softc *sc = ifp->if_softc;
1413
1414 /*
1415 * Since we're using delayed interrupts, sweep up
1416 * before we report an error.
1417 */
1418 dge_txintr(sc);
1419
1420 if (sc->sc_txfree != DGE_NTXDESC) {
1421 printf("%s: device timeout (txfree %d txsfree %d txnext %d)\n",
1422 device_xname(sc->sc_dev), sc->sc_txfree, sc->sc_txsfree,
1423 sc->sc_txnext);
1424 if_statinc(ifp, if_oerrors);
1425
1426 /* Reset the interface. */
1427 (void) dge_init(ifp);
1428 }
1429
1430 /* Try to get more packets going. */
1431 dge_start(ifp);
1432 }
1433
1434 /*
1435 * dge_ioctl: [ifnet interface function]
1436 *
1437 * Handle control requests from the operator.
1438 */
1439 static int
1440 dge_ioctl(struct ifnet *ifp, u_long cmd, void *data)
1441 {
1442 struct dge_softc *sc = ifp->if_softc;
1443 struct ifreq *ifr = (struct ifreq *) data;
1444 pcireg_t preg;
1445 int s, error, mmrbc;
1446
1447 s = splnet();
1448
1449 switch (cmd) {
1450 case SIOCSIFMTU:
1451 if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > DGE_MAX_MTU)
1452 error = EINVAL;
1453 else if ((error = ifioctl_common(ifp, cmd, data)) != ENETRESET)
1454 break;
1455 else if (ifp->if_flags & IFF_UP)
1456 error = if_init(ifp);
1457 else
1458 error = 0;
1459 break;
1460
1461 case SIOCSIFFLAGS:
1462 if ((error = ifioctl_common(ifp, cmd, data)) != 0)
1463 break;
1464 /* extract link flags */
1465 if ((ifp->if_flags & IFF_LINK0) == 0 &&
1466 (ifp->if_flags & IFF_LINK1) == 0)
1467 mmrbc = PCIX_MMRBC_512;
1468 else if ((ifp->if_flags & IFF_LINK0) == 0 &&
1469 (ifp->if_flags & IFF_LINK1) != 0)
1470 mmrbc = PCIX_MMRBC_1024;
1471 else if ((ifp->if_flags & IFF_LINK0) != 0 &&
1472 (ifp->if_flags & IFF_LINK1) == 0)
1473 mmrbc = PCIX_MMRBC_2048;
1474 else
1475 mmrbc = PCIX_MMRBC_4096;
1476 if (mmrbc != sc->sc_mmrbc) {
1477 preg = pci_conf_read(sc->sc_pc, sc->sc_pt,DGE_PCIX_CMD);
1478 preg &= ~PCIX_MMRBC_MSK;
1479 preg |= mmrbc;
1480 pci_conf_write(sc->sc_pc, sc->sc_pt,DGE_PCIX_CMD, preg);
1481 sc->sc_mmrbc = mmrbc;
1482 }
1483 /* FALLTHROUGH */
1484 default:
1485 if ((error = ether_ioctl(ifp, cmd, data)) != ENETRESET)
1486 break;
1487
1488 error = 0;
1489
1490 if (cmd == SIOCSIFCAP)
1491 error = if_init(ifp);
1492 else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
1493 ;
1494 else if (ifp->if_flags & IFF_RUNNING) {
1495 /*
1496 * Multicast list has changed; set the hardware filter
1497 * accordingly.
1498 */
1499 dge_set_filter(sc);
1500 }
1501 break;
1502 }
1503
1504 /* Try to get more packets going. */
1505 dge_start(ifp);
1506
1507 splx(s);
1508 return error;
1509 }
1510
1511 /*
1512 * dge_intr:
1513 *
1514 * Interrupt service routine.
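 *
 *	Reading ICR acknowledges the interrupt causes, so we loop,
 *	re-reading it, until no enabled cause bits remain; a receive
 *	overrun (ICR_RXO) ends the loop and reinitializes the interface.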
1515 */
1516 static int
1517 dge_intr(void *arg)
1518 {
1519 struct dge_softc *sc = arg;
1520 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1521 uint32_t icr;
1522 int wantinit, handled = 0;
1523
1524 for (wantinit = 0; wantinit == 0;) {
1525 icr = CSR_READ(sc, DGE_ICR);
1526 if ((icr & sc->sc_icr) == 0)
1527 break;
1528
1529 rnd_add_uint32(&sc->rnd_source, icr);
1530
1531 handled = 1;
1532
1533 #if defined(DGE_DEBUG) || defined(DGE_EVENT_COUNTERS)
1534 if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
1535 DPRINTF(DGE_DEBUG_RX,
1536 ("%s: RX: got Rx intr 0x%08x\n",
1537 device_xname(sc->sc_dev),
1538 icr & (ICR_RXDMT0 | ICR_RXT0)));
1539 DGE_EVCNT_INCR(&sc->sc_ev_rxintr);
1540 }
1541 #endif
1542 dge_rxintr(sc);
1543
1544 #if defined(DGE_DEBUG) || defined(DGE_EVENT_COUNTERS)
1545 if (icr & ICR_TXDW) {
1546 DPRINTF(DGE_DEBUG_TX,
1547 ("%s: TX: got TXDW interrupt\n",
1548 device_xname(sc->sc_dev)));
1549 DGE_EVCNT_INCR(&sc->sc_ev_txdw);
1550 }
1551 if (icr & ICR_TXQE)
1552 DGE_EVCNT_INCR(&sc->sc_ev_txqe);
1553 #endif
1554 dge_txintr(sc);
1555
1556 if (icr & (ICR_LSC | ICR_RXSEQ)) {
1557 DGE_EVCNT_INCR(&sc->sc_ev_linkintr);
1558 dge_linkintr(sc, icr);
1559 }
1560
1561 if (icr & ICR_RXO) {
1562 printf("%s: Receive overrun\n",
1563 device_xname(sc->sc_dev));
1564 wantinit = 1;
1565 }
1566 }
1567
1568 if (handled) {
1569 if (wantinit)
1570 dge_init(ifp);
1571
1572 /* Try to get more packets going. */
1573 if_schedule_deferred_start(ifp);
1574 }
1575
1576 return handled;
1577 }
1578
1579 /*
1580 * dge_txintr:
1581 *
1582 * Helper; handle transmit interrupts.
1583 */
1584 static void
1585 dge_txintr(struct dge_softc *sc)
1586 {
1587 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1588 struct dge_txsoft *txs;
1589 uint8_t status;
1590 int i;
1591
1592 ifp->if_flags &= ~IFF_OACTIVE;
1593
1594 /*
1595 * Go through the Tx list and free mbufs for those
1596 * frames which have been transmitted.
1597 */
1598 for (i = sc->sc_txsdirty; sc->sc_txsfree != DGE_TXQUEUELEN;
1599 i = DGE_NEXTTXS(i), sc->sc_txsfree++) {
1600 txs = &sc->sc_txsoft[i];
1601
1602 DPRINTF(DGE_DEBUG_TX,
1603 ("%s: TX: checking job %d\n", device_xname(sc->sc_dev), i));
1604
1605 DGE_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_dmamap->dm_nsegs,
1606 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1607
1608 status =
1609 sc->sc_txdescs[txs->txs_lastdesc].dt_status;
1610 if ((status & TDESC_STA_DD) == 0) {
1611 DGE_CDTXSYNC(sc, txs->txs_lastdesc, 1,
1612 BUS_DMASYNC_PREREAD);
1613 break;
1614 }
1615
1616 DPRINTF(DGE_DEBUG_TX,
1617 ("%s: TX: job %d done: descs %d..%d\n",
1618 device_xname(sc->sc_dev), i, txs->txs_firstdesc,
1619 txs->txs_lastdesc));
1620
1621 if_statinc(ifp, if_opackets);
1622 sc->sc_txfree += txs->txs_ndesc;
1623 bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
1624 0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1625 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
1626 m_freem(txs->txs_mbuf);
1627 txs->txs_mbuf = NULL;
1628 }
1629
1630 /* Update the dirty transmit buffer pointer. */
1631 sc->sc_txsdirty = i;
1632 DPRINTF(DGE_DEBUG_TX,
1633 ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
1634
1635 /*
1636 * If there are no more pending transmissions, cancel the watchdog
1637 * timer.
1638 */
1639 if (sc->sc_txsfree == DGE_TXQUEUELEN)
1640 ifp->if_timer = 0;
1641 }
1642
1643 /*
1644 * dge_rxintr:
1645 *
1646 * Helper; handle receive interrupts.
1647 */
1648 static void
1649 dge_rxintr(struct dge_softc *sc)
1650 {
1651 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1652 struct dge_rxsoft *rxs;
1653 struct mbuf *m;
1654 int i, len;
1655 uint8_t status, errors;
1656
1657 for (i = sc->sc_rxptr;; i = DGE_NEXTRX(i)) {
1658 rxs = &sc->sc_rxsoft[i];
1659
1660 DPRINTF(DGE_DEBUG_RX,
1661 ("%s: RX: checking descriptor %d\n",
1662 device_xname(sc->sc_dev), i));
1663
1664 DGE_CDRXSYNC(sc, i,
1665 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1666
1667 status = sc->sc_rxdescs[i].dr_status;
1668 errors = sc->sc_rxdescs[i].dr_errors;
1669 len = le16toh(sc->sc_rxdescs[i].dr_len);
1670
1671 if ((status & RDESC_STS_DD) == 0) {
1672 /* We have processed all of the receive descriptors. */
1673 DGE_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD);
1674 break;
1675 }
1676
1677 if (__predict_false(sc->sc_rxdiscard)) {
1678 DPRINTF(DGE_DEBUG_RX,
1679 ("%s: RX: discarding contents of descriptor %d\n",
1680 device_xname(sc->sc_dev), i));
1681 DGE_INIT_RXDESC(sc, i);
1682 if (status & RDESC_STS_EOP) {
1683 /* Reset our state. */
1684 DPRINTF(DGE_DEBUG_RX,
1685 ("%s: RX: resetting rxdiscard -> 0\n",
1686 device_xname(sc->sc_dev)));
1687 sc->sc_rxdiscard = 0;
1688 }
1689 continue;
1690 }
1691
1692 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
1693 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
1694
1695 m = rxs->rxs_mbuf;
1696
1697 /*
1698 * Add a new receive buffer to the ring.
1699 */
1700 if (dge_add_rxbuf(sc, i) != 0) {
1701 /*
1702 * Failed, throw away what we've done so
1703 * far, and discard the rest of the packet.
1704 */
1705 if_statinc(ifp, if_ierrors);
1706 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
1707 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
1708 DGE_INIT_RXDESC(sc, i);
1709 if ((status & RDESC_STS_EOP) == 0)
1710 sc->sc_rxdiscard = 1;
1711 if (sc->sc_rxhead != NULL)
1712 m_freem(sc->sc_rxhead);
1713 DGE_RXCHAIN_RESET(sc);
1714 DPRINTF(DGE_DEBUG_RX,
1715 ("%s: RX: Rx buffer allocation failed, "
1716 "dropping packet%s\n", device_xname(sc->sc_dev),
1717 sc->sc_rxdiscard ? " (discard)" : ""));
1718 continue;
1719 }
1720 DGE_INIT_RXDESC(sc, DGE_PREVRX(i)); /* Write the descriptor */
1721
1722 DGE_RXCHAIN_LINK(sc, m);
1723
1724 m->m_len = len;
1725
1726 DPRINTF(DGE_DEBUG_RX,
1727 ("%s: RX: buffer at %p len %d\n",
1728 device_xname(sc->sc_dev), m->m_data, len));
1729
1730 /*
1731 * If this is not the end of the packet, keep
1732 * looking.
1733 */
1734 if ((status & RDESC_STS_EOP) == 0) {
1735 sc->sc_rxlen += len;
1736 DPRINTF(DGE_DEBUG_RX,
1737 ("%s: RX: not yet EOP, rxlen -> %d\n",
1738 device_xname(sc->sc_dev), sc->sc_rxlen));
1739 continue;
1740 }
1741
1742 /*
1743 * Okay, we have the entire packet now...
1744 */
1745 *sc->sc_rxtailp = NULL;
1746 m = sc->sc_rxhead;
1747 len += sc->sc_rxlen;
1748
1749 DGE_RXCHAIN_RESET(sc);
1750
1751 DPRINTF(DGE_DEBUG_RX,
1752 ("%s: RX: have entire packet, len -> %d\n",
1753 device_xname(sc->sc_dev), len));
1754
1755 /*
1756 * If an error occurred, update stats and drop the packet.
1757 */
1758 if (errors & (RDESC_ERR_CE | RDESC_ERR_SE | RDESC_ERR_P |
1759 RDESC_ERR_RXE)) {
1760 if_statinc(ifp, if_ierrors);
1761 if (errors & RDESC_ERR_SE)
1762 printf("%s: symbol error\n",
1763 device_xname(sc->sc_dev));
1764 else if (errors & RDESC_ERR_P)
1765 printf("%s: parity error\n",
1766 device_xname(sc->sc_dev));
1767 else if (errors & RDESC_ERR_CE)
1768 printf("%s: CRC error\n",
1769 device_xname(sc->sc_dev));
1770 m_freem(m);
1771 continue;
1772 }
1773
1774 /*
1775 * No errors. Receive the packet.
1776 */
1777 m_set_rcvif(m, ifp);
1778 m->m_pkthdr.len = len;
1779
1780 /*
1781 * Set up checksum info for this packet.
1782 */
1783 if (status & RDESC_STS_IPCS) {
1784 DGE_EVCNT_INCR(&sc->sc_ev_rxipsum);
1785 m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
1786 if (errors & RDESC_ERR_IPE)
1787 m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
1788 }
1789 if (status & RDESC_STS_TCPCS) {
1790 /*
1791 * Note: we don't know if this was TCP or UDP,
1792 * so we just set both bits, and expect the
1793 * upper layers to deal.
1794 */
1795 DGE_EVCNT_INCR(&sc->sc_ev_rxtusum);
1796 m->m_pkthdr.csum_flags |= M_CSUM_TCPv4 | M_CSUM_UDPv4;
1797 if (errors & RDESC_ERR_TCPE)
1798 m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
1799 }
1800
1801 /* Pass it on. */
1802 if_percpuq_enqueue(ifp->if_percpuq, m);
1803 }
1804
1805 /* Update the receive pointer. */
1806 sc->sc_rxptr = i;
1807
1808 DPRINTF(DGE_DEBUG_RX,
1809 ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
1810 }

/*
 * dge_linkintr:
 *
 *	Helper; handle link interrupts.
 */
static void
dge_linkintr(struct dge_softc *sc, uint32_t icr)
{
	uint32_t status;

	if (icr & ICR_LSC) {
		status = CSR_READ(sc, DGE_STATUS);
		if (status & STATUS_LINKUP) {
			DPRINTF(DGE_DEBUG_LINK, ("%s: LINK: LSC -> up\n",
			    device_xname(sc->sc_dev)));
		} else {
			DPRINTF(DGE_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
			    device_xname(sc->sc_dev)));
		}
	} else if (icr & ICR_RXSEQ) {
		DPRINTF(DGE_DEBUG_LINK,
		    ("%s: LINK: Receive sequence error\n",
		    device_xname(sc->sc_dev)));
	}
	/* XXX - fix errata */
}

/*
 * dge_reset:
 *
 *	Reset the i82597 chip.
 */
static void
dge_reset(struct dge_softc *sc)
{
	int i;

	/*
	 * Do a chip reset.
	 */
	CSR_WRITE(sc, DGE_CTRL0, CTRL0_RST | sc->sc_ctrl0);

	delay(10000);

	for (i = 0; i < 1000; i++) {
		if ((CSR_READ(sc, DGE_CTRL0) & CTRL0_RST) == 0)
			break;
		delay(20);
	}

	if (CSR_READ(sc, DGE_CTRL0) & CTRL0_RST)
		printf("%s: WARNING: reset failed to complete\n",
		    device_xname(sc->sc_dev));
	/*
	 * Reset the EEPROM logic.
	 * This will cause the chip to reread its default values,
	 * which doesn't happen otherwise (errata).
	 */
	CSR_WRITE(sc, DGE_CTRL1, CTRL1_EE_RST);
	delay(10000);
}
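
/*
 * The reset above is an instance of the usual bounded-poll pattern:
 * set the self-clearing bit, then spin with short delays until the
 * hardware deasserts it or we give up.  A generic sketch of the
 * pattern (dge_poll_clear() is a hypothetical helper, not part of
 * this driver):
 */
#if 0
static int
dge_poll_clear(struct dge_softc *sc, int reg, uint32_t bit, int tries,
    int usec)
{

	while (tries-- > 0) {
		if ((CSR_READ(sc, reg) & bit) == 0)
			return 0;		/* bit cleared; success */
		delay(usec);
	}
	return ETIMEDOUT;			/* bit stuck; caller warns */
}
#endif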

/*
 * dge_init:		[ifnet interface function]
 *
 *	Initialize the interface.  Must be called at splnet().
 */
static int
dge_init(struct ifnet *ifp)
{
	struct dge_softc *sc = ifp->if_softc;
	struct dge_rxsoft *rxs;
	int i, error = 0;
	uint32_t reg;

	/*
	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
	 * There is a small but measurable benefit to avoiding the adjustment
	 * of the descriptor so that the headers are aligned, for normal mtu,
	 * on such platforms.  One possibility is that the DMA itself is
	 * slightly more efficient if the front of the entire packet (instead
	 * of the front of the headers) is aligned.
	 *
	 * Note we must always set align_tweak to 0 if we are using
	 * jumbo frames.
	 */
#ifdef __NO_STRICT_ALIGNMENT
	sc->sc_align_tweak = 0;
#else
	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
		sc->sc_align_tweak = 0;
	else
		sc->sc_align_tweak = 2;
#endif /* __NO_STRICT_ALIGNMENT */
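
	/*
	 * Worked example of the 2-byte tweak: an Ethernet header is
	 * 14 bytes, so a frame that is DMA'd to a 4-byte-aligned
	 * address leaves the IP header only 2-byte aligned.  Starting
	 * the DMA 2 bytes into the buffer shifts the IP header onto a
	 * 4-byte boundary, which strict-alignment CPUs require:
	 *
	 *	buffer + 0:	2 bytes of padding (the tweak)
	 *	buffer + 2:	14-byte Ethernet header
	 *	buffer + 16:	IP header, now 4-byte aligned
	 */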

	/* Cancel any pending I/O. */
	dge_stop(ifp, 0);

	/* Reset the chip to a known state. */
	dge_reset(sc);

	/* Initialize the transmit descriptor ring. */
	memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
	DGE_CDTXSYNC(sc, 0, DGE_NTXDESC,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	sc->sc_txfree = DGE_NTXDESC;
	sc->sc_txnext = 0;

	sc->sc_txctx_ipcs = 0xffffffff;
	sc->sc_txctx_tucs = 0xffffffff;

	CSR_WRITE(sc, DGE_TDBAH, ((uint64_t)DGE_CDTXADDR(sc, 0)) >> 32);
	CSR_WRITE(sc, DGE_TDBAL, DGE_CDTXADDR(sc, 0));
	CSR_WRITE(sc, DGE_TDLEN, sizeof(sc->sc_txdescs));
	CSR_WRITE(sc, DGE_TDH, 0);
	CSR_WRITE(sc, DGE_TDT, 0);
	CSR_WRITE(sc, DGE_TIDV, TIDV);

#if 0
	CSR_WRITE(sc, DGE_TXDCTL, TXDCTL_PTHRESH(0) |
	    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
#endif
	CSR_WRITE(sc, DGE_RXDCTL,
	    RXDCTL_PTHRESH(RXDCTL_PTHRESH_VAL) |
	    RXDCTL_HTHRESH(RXDCTL_HTHRESH_VAL) |
	    RXDCTL_WTHRESH(RXDCTL_WTHRESH_VAL));

	/* Initialize the transmit job descriptors. */
	for (i = 0; i < DGE_TXQUEUELEN; i++)
		sc->sc_txsoft[i].txs_mbuf = NULL;
	sc->sc_txsfree = DGE_TXQUEUELEN;
	sc->sc_txsnext = 0;
	sc->sc_txsdirty = 0;

	/*
	 * Initialize the receive descriptor and receive job
	 * descriptor rings.
	 */
	CSR_WRITE(sc, DGE_RDBAH, ((uint64_t)DGE_CDRXADDR(sc, 0)) >> 32);
	CSR_WRITE(sc, DGE_RDBAL, DGE_CDRXADDR(sc, 0));
	CSR_WRITE(sc, DGE_RDLEN, sizeof(sc->sc_rxdescs));
	CSR_WRITE(sc, DGE_RDH, DGE_RXSPACE);
	CSR_WRITE(sc, DGE_RDT, 0);
	CSR_WRITE(sc, DGE_RDTR, RDTR | 0x80000000);
	CSR_WRITE(sc, DGE_FCRTL, FCRTL | FCRTL_XONE);
	CSR_WRITE(sc, DGE_FCRTH, FCRTH);

	for (i = 0; i < DGE_NRXDESC; i++) {
		rxs = &sc->sc_rxsoft[i];
		if (rxs->rxs_mbuf == NULL) {
			if ((error = dge_add_rxbuf(sc, i)) != 0) {
				printf("%s: unable to allocate or map rx "
				    "buffer %d, error = %d\n",
				    device_xname(sc->sc_dev), i, error);
				/*
				 * XXX Should attempt to run with fewer receive
				 * XXX buffers instead of just failing.
				 */
				dge_rxdrain(sc);
				goto out;
			}
		}
		DGE_INIT_RXDESC(sc, i);
	}
	sc->sc_rxptr = DGE_RXSPACE;
	sc->sc_rxdiscard = 0;
	DGE_RXCHAIN_RESET(sc);

	if (sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) {
		sc->sc_ctrl0 |= CTRL0_JFE;
		CSR_WRITE(sc, DGE_MFS, ETHER_MAX_LEN_JUMBO << 16);
	}

	/* Write the control registers. */
	CSR_WRITE(sc, DGE_CTRL0, sc->sc_ctrl0);

	/*
	 * Set up checksum offload parameters.
	 */
	reg = CSR_READ(sc, DGE_RXCSUM);
	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
		reg |= RXCSUM_IPOFL;
	else
		reg &= ~RXCSUM_IPOFL;
	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
	else {
		reg &= ~RXCSUM_TUOFL;
		if ((ifp->if_capenable & IFCAP_CSUM_IPv4_Rx) == 0)
			reg &= ~RXCSUM_IPOFL;
	}
	CSR_WRITE(sc, DGE_RXCSUM, reg);

	/*
	 * Set up the interrupt registers.
	 */
	CSR_WRITE(sc, DGE_IMC, 0xffffffffU);
	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
	    ICR_RXO | ICR_RXT0;

	CSR_WRITE(sc, DGE_IMS, sc->sc_icr);

	/*
	 * Set up the transmit control register.
	 */
	sc->sc_tctl = TCTL_TCE | TCTL_TPDE | TCTL_TXEN;
	CSR_WRITE(sc, DGE_TCTL, sc->sc_tctl);

	/*
	 * Set up the receive control register; we actually program
	 * the register when we set the receive filter.  Use multicast
	 * address offset type 0.
	 */
	sc->sc_mchash_type = 0;

	sc->sc_rctl = RCTL_RXEN | RCTL_RDMTS_12 | RCTL_RPDA_MC |
	    RCTL_CFF | RCTL_SECRC | RCTL_MO(sc->sc_mchash_type);

#ifdef DGE_OFFBYONE_RXBUG
	sc->sc_rctl |= RCTL_BSIZE_16k;
#else
	switch (MCLBYTES) {
	case 2048:
		sc->sc_rctl |= RCTL_BSIZE_2k;
		break;
	case 4096:
		sc->sc_rctl |= RCTL_BSIZE_4k;
		break;
	case 8192:
		sc->sc_rctl |= RCTL_BSIZE_8k;
		break;
	case 16384:
		sc->sc_rctl |= RCTL_BSIZE_16k;
		break;
	default:
		panic("dge_init: MCLBYTES %d unsupported", MCLBYTES);
	}
#endif

	/* Set the receive filter; this also programs RCTL. */
	dge_set_filter(sc);

	/* ...all done! */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

 out:
	if (error)
		printf("%s: interface not running\n", device_xname(sc->sc_dev));
	return error;
}

/*
 * dge_rxdrain:
 *
 *	Drain the receive queue.
 */
static void
dge_rxdrain(struct dge_softc *sc)
{
	struct dge_rxsoft *rxs;
	int i;

	for (i = 0; i < DGE_NRXDESC; i++) {
		rxs = &sc->sc_rxsoft[i];
		if (rxs->rxs_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
			m_freem(rxs->rxs_mbuf);
			rxs->rxs_mbuf = NULL;
		}
	}
}

/*
 * dge_stop:		[ifnet interface function]
 *
 *	Stop transmission on the interface.
 */
static void
dge_stop(struct ifnet *ifp, int disable)
{
	struct dge_softc *sc = ifp->if_softc;
	struct dge_txsoft *txs;
	int i;

	/* Stop the transmit and receive processes. */
	CSR_WRITE(sc, DGE_TCTL, 0);
	CSR_WRITE(sc, DGE_RCTL, 0);

	/* Release any queued transmit buffers. */
	for (i = 0; i < DGE_TXQUEUELEN; i++) {
		txs = &sc->sc_txsoft[i];
		if (txs->txs_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
			m_freem(txs->txs_mbuf);
			txs->txs_mbuf = NULL;
		}
	}

	/* Mark the interface as down and cancel the watchdog timer. */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;

	if (disable)
		dge_rxdrain(sc);
}

/*
 * dge_add_rxbuf:
 *
 *	Add a receive buffer to the indicated descriptor.
 */
static int
dge_add_rxbuf(struct dge_softc *sc, int idx)
{
	struct dge_rxsoft *rxs = &sc->sc_rxsoft[idx];
	struct mbuf *m;
	int error;
#ifdef DGE_OFFBYONE_RXBUG
	void *buf;
#endif

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return ENOBUFS;

#ifdef DGE_OFFBYONE_RXBUG
	if ((buf = dge_getbuf(sc)) == NULL) {
		m_freem(m);	/* don't leak the header mbuf */
		return ENOBUFS;
	}

	m->m_len = m->m_pkthdr.len = DGE_BUFFER_SIZE;
	MEXTADD(m, buf, DGE_BUFFER_SIZE, M_DEVBUF, dge_freebuf, sc);
	m->m_flags |= M_EXT_RW;

	if (rxs->rxs_mbuf != NULL)
		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
	rxs->rxs_mbuf = m;

	error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap, buf,
	    DGE_BUFFER_SIZE, NULL, BUS_DMA_READ | BUS_DMA_NOWAIT);
#else
	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return ENOBUFS;
	}

	if (rxs->rxs_mbuf != NULL)
		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);

	rxs->rxs_mbuf = m;

	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
	    BUS_DMA_READ | BUS_DMA_NOWAIT);
#endif
	if (error) {
		printf("%s: unable to load rx DMA map %d, error = %d\n",
		    device_xname(sc->sc_dev), idx, error);
		panic("dge_add_rxbuf");	/* XXX XXX XXX */
	}
	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);

	return 0;
}

/*
 * dge_set_ral:
 *
 *	Set an entry in the receive address list.
 */
static void
dge_set_ral(struct dge_softc *sc, const uint8_t *enaddr, int idx)
{
	uint32_t ral_lo, ral_hi;

	if (enaddr != NULL) {
		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
		    (enaddr[3] << 24);
		ral_hi = enaddr[4] | (enaddr[5] << 8);
		ral_hi |= RAH_AV;
	} else {
		ral_lo = 0;
		ral_hi = 0;
	}
	CSR_WRITE(sc, RA_ADDR(DGE_RAL, idx), ral_lo);
	CSR_WRITE(sc, RA_ADDR(DGE_RAH, idx), ral_hi);
}
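
/*
 * Packing example: for a (made-up) station address 00:11:22:33:44:55,
 * the little-endian packing above yields ral_lo = 0x33221100 and
 * ral_hi = 0x00005544 | RAH_AV; the address-valid bit is what arms
 * the slot for receive filtering.
 */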

/*
 * dge_mchash:
 *
 *	Compute the hash of the multicast address for the 4096-bit
 *	multicast filter.
 */
static uint32_t
dge_mchash(struct dge_softc *sc, const uint8_t *enaddr)
{
	static const int lo_shift[4] = { 4, 3, 2, 0 };
	static const int hi_shift[4] = { 4, 5, 6, 8 };
	uint32_t hash;

	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);

	return (hash & 0xfff);
}
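
/*
 * Worked example with hash type 0 (lo_shift 4, hi_shift 4): for the
 * all-hosts group 01:00:5e:00:00:01, enaddr[4] = 0x00 and
 * enaddr[5] = 0x01, so hash = (0x00 >> 4) | (0x01 << 4) = 0x010.
 * dge_set_filter() then sets bit (hash & 0x1f) = 16 in MTA word
 * (hash >> 5) = 0.
 */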

/*
 * dge_set_filter:
 *
 *	Set up the receive filter.
 */
static void
dge_set_filter(struct dge_softc *sc)
{
	struct ethercom *ec = &sc->sc_ethercom;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint32_t hash, reg, bit;
	int i;

	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);

	if (ifp->if_flags & IFF_BROADCAST)
		sc->sc_rctl |= RCTL_BAM;
	if (ifp->if_flags & IFF_PROMISC) {
		sc->sc_rctl |= RCTL_UPE;
		goto allmulti;
	}

	/*
	 * Set the station address in the first RAL slot, and
	 * clear the remaining slots.
	 */
	dge_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
	for (i = 1; i < RA_TABSIZE; i++)
		dge_set_ral(sc, NULL, i);

	/* Clear out the multicast table. */
	for (i = 0; i < MC_TABSIZE; i++)
		CSR_WRITE(sc, DGE_MTA + (i << 2), 0);

	ETHER_LOCK(ec);
	ETHER_FIRST_MULTI(step, ec, enm);
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			/*
			 * We must listen to a range of multicast addresses.
			 * For now, just accept all multicasts, rather than
			 * trying to set only those filter bits needed to match
			 * the range.  (At this time, the only use of address
			 * ranges is for IP multicast routing, for which the
			 * range is big enough to require all bits set.)
			 */
			ETHER_UNLOCK(ec);
			goto allmulti;
		}

		hash = dge_mchash(sc, enm->enm_addrlo);

		reg = (hash >> 5) & 0x7f;
		bit = hash & 0x1f;

		hash = CSR_READ(sc, DGE_MTA + (reg << 2));
		hash |= 1U << bit;

		CSR_WRITE(sc, DGE_MTA + (reg << 2), hash);

		ETHER_NEXT_MULTI(step, enm);
	}
	ETHER_UNLOCK(ec);

	ifp->if_flags &= ~IFF_ALLMULTI;
	goto setit;

 allmulti:
	ifp->if_flags |= IFF_ALLMULTI;
	sc->sc_rctl |= RCTL_MPE;

 setit:
	CSR_WRITE(sc, DGE_RCTL, sc->sc_rctl);
}

/*
 * Read in the EEPROM info and verify checksum.
 */
int
dge_read_eeprom(struct dge_softc *sc)
{
	uint16_t cksum;
	int i;

	cksum = 0;
	for (i = 0; i < EEPROM_SIZE; i++) {
		sc->sc_eeprom[i] = dge_eeprom_word(sc, i);
		cksum += sc->sc_eeprom[i];
	}
	return cksum != EEPROM_CKSUM;
}
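
/*
 * Callers are expected to treat a nonzero return as an attach-time
 * failure, since the rest of the driver pulls its configuration
 * (e.g. the station address) out of the cached sc_eeprom[] words.
 * A minimal usage sketch (the error handling shown is illustrative,
 * not the driver's actual attach code):
 */
#if 0
	if (dge_read_eeprom(sc)) {
		aprint_error_dev(sc->sc_dev, "EEPROM checksum mismatch\n");
		return;
	}
#endif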

/*
 * Read a 16-bit word from address addr in the serial EEPROM.
 */
uint16_t
dge_eeprom_word(struct dge_softc *sc, int addr)
{
	uint32_t reg;
	uint16_t rval = 0;
	int i;

	reg = CSR_READ(sc, DGE_EECD) & ~(EECD_SK | EECD_DI | EECD_CS);

	/* Lower clock pulse (and data in to chip) */
	CSR_WRITE(sc, DGE_EECD, reg);
	/* Select chip */
	CSR_WRITE(sc, DGE_EECD, reg | EECD_CS);

	/* Send read command */
	dge_eeprom_clockout(sc, 1);
	dge_eeprom_clockout(sc, 1);
	dge_eeprom_clockout(sc, 0);

	/* Send address */
	for (i = 5; i >= 0; i--)
		dge_eeprom_clockout(sc, (addr >> i) & 1);

	/* Read data */
	for (i = 0; i < 16; i++) {
		rval <<= 1;
		rval |= dge_eeprom_clockin(sc);
	}

	/* Deselect chip */
	CSR_WRITE(sc, DGE_EECD, reg);

	return rval;
}
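
/*
 * On the wire this is a standard Microwire READ transaction: with the
 * chip selected, the 1-1-0 bits above form the start bit plus the
 * READ opcode (10), the 6-bit address then goes out MSB first, and
 * 16 data bits are clocked back in MSB first.  For addr = 5 the
 * clocked-out stream is therefore 1 1 0 0 0 0 1 0 1.
 */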

/*
 * Clock out a single bit to the EEPROM.
 */
void
dge_eeprom_clockout(struct dge_softc *sc, int bit)
{
	int reg;

	reg = CSR_READ(sc, DGE_EECD) & ~(EECD_DI | EECD_SK);
	if (bit)
		reg |= EECD_DI;

	CSR_WRITE(sc, DGE_EECD, reg);
	delay(2);
	CSR_WRITE(sc, DGE_EECD, reg | EECD_SK);
	delay(2);
	CSR_WRITE(sc, DGE_EECD, reg);
	delay(2);
}

/*
 * Clock in a single bit from EEPROM.
 */
int
dge_eeprom_clockin(struct dge_softc *sc)
{
	int reg, rv;

	reg = CSR_READ(sc, DGE_EECD) & ~(EECD_DI | EECD_DO | EECD_SK);

	CSR_WRITE(sc, DGE_EECD, reg | EECD_SK);	/* Raise clock */
	delay(2);
	rv = (CSR_READ(sc, DGE_EECD) & EECD_DO) != 0; /* Get bit */
	CSR_WRITE(sc, DGE_EECD, reg);		/* Lower clock */
	delay(2);

	return rv;
}

static void
dge_xgmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct dge_softc *sc = ifp->if_softc;

	ifmr->ifm_status = IFM_AVALID;
	if (sc->sc_dgep->flags & DGEP_F_10G_SR) {
		ifmr->ifm_active = IFM_ETHER | IFM_10G_SR;
	} else {
		ifmr->ifm_active = IFM_ETHER | IFM_10G_LR;
	}

	if (CSR_READ(sc, DGE_STATUS) & STATUS_LINKUP)
		ifmr->ifm_status |= IFM_ACTIVE;
}

static inline int
phwait(struct dge_softc *sc, int p, int r, int d, int type)
{
	int i, mdic;

	CSR_WRITE(sc, DGE_MDIO,
	    MDIO_PHY(p) | MDIO_REG(r) | MDIO_DEV(d) | type | MDIO_CMD);
	for (i = 0; i < 10; i++) {
		delay(10);
		if (((mdic = CSR_READ(sc, DGE_MDIO)) & MDIO_CMD) == 0)
			break;
	}
	return mdic;
}

static void
dge_xgmii_writereg(struct dge_softc *sc, int phy, int reg, int val)
{

	CSR_WRITE(sc, DGE_MDIRW, val);
	if (phwait(sc, phy, reg, 1, MDIO_ADDR) & MDIO_CMD) {
		printf("%s: address cycle timeout; phy %d reg %d\n",
		    device_xname(sc->sc_dev), phy, reg);
		return;
	}
	if (phwait(sc, phy, reg, 1, MDIO_WRITE) & MDIO_CMD) {
		printf("%s: write cycle timeout; phy %d reg %d\n",
		    device_xname(sc->sc_dev), phy, reg);
		return;
	}
}
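
/*
 * Reads would follow the same two-phase MDIO pattern as the write
 * above: an address cycle, then a data cycle, then the result is
 * pulled out of the data register.  A sketch of a read counterpart
 * (dge_xgmii_readreg() and the MDIO_READ command encoding are
 * hypothetical here; this driver only implements the write path):
 */
#if 0
static int
dge_xgmii_readreg(struct dge_softc *sc, int phy, int reg)
{

	if (phwait(sc, phy, reg, 1, MDIO_ADDR) & MDIO_CMD) {
		printf("%s: address cycle timeout; phy %d reg %d\n",
		    device_xname(sc->sc_dev), phy, reg);
		return 0;
	}
	if (phwait(sc, phy, reg, 1, MDIO_READ) & MDIO_CMD) {
		printf("%s: read cycle timeout; phy %d reg %d\n",
		    device_xname(sc->sc_dev), phy, reg);
		return 0;
	}
	return CSR_READ(sc, DGE_MDIRW) & 0xffff;
}
#endif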

static void
dge_xgmii_reset(struct dge_softc *sc)
{
	dge_xgmii_writereg(sc, 0, 0, BMCR_RESET);
}

static int
dge_xgmii_mediachange(struct ifnet *ifp)
{
	return 0;
}