1 /* $OpenBSD: if_cad.c,v 1.14 2024/03/24 22:34:06 patrick Exp $ */
2
3 /*
4 * Copyright (c) 2021-2022 Visa Hankala
5 *
6 * Permission to use, copy, modify, and/or distribute this software for any
7 * purpose with or without fee is hereby granted, provided that the above
8 * copyright notice and this permission notice appear in all copies.
9 *
10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 */
18
19 /*
20 * Driver for Cadence 10/100/Gigabit Ethernet device.
21 */
22
23 #include "bpfilter.h"
24 #include "kstat.h"
25
26 #include <sys/param.h>
27 #include <sys/systm.h>
28 #include <sys/atomic.h>
29 #include <sys/device.h>
30 #include <sys/ioctl.h>
31 #include <sys/mutex.h>
32 #include <sys/kstat.h>
33 #include <sys/rwlock.h>
34 #include <sys/task.h>
35 #include <sys/timeout.h>
36
37 #include <net/if.h>
38 #include <net/if_media.h>
39 #include <netinet/in.h>
40 #include <netinet/ip.h>
41 #include <netinet/if_ether.h>
42
43 #if NBPFILTER > 0
44 #include <net/bpf.h>
45 #endif
46
47 #include <dev/mii/mii.h>
48 #include <dev/mii/miivar.h>
49 #include <dev/mii/miidevs.h>
50
51 #include <machine/bus.h>
52 #include <machine/fdt.h>
53
54 #include <dev/ofw/fdt.h>
55 #include <dev/ofw/openfirm.h>
56 #include <dev/ofw/ofw_clock.h>
57 #include <dev/ofw/ofw_gpio.h>
58
59 #define GEM_NETCTL 0x0000
60 #define GEM_NETCTL_DPRAM (1 << 18)
61 #define GEM_NETCTL_STARTTX (1 << 9)
62 #define GEM_NETCTL_STATCLR (1 << 5)
63 #define GEM_NETCTL_MDEN (1 << 4)
64 #define GEM_NETCTL_TXEN (1 << 3)
65 #define GEM_NETCTL_RXEN (1 << 2)
66 #define GEM_NETCFG 0x0004
67 #define GEM_NETCFG_SGMIIEN (1 << 27)
68 #define GEM_NETCFG_RXCSUMEN (1 << 24)
69 #define GEM_NETCFG_MDCCLKDIV_MASK (0x7 << 18)
70 #define GEM_NETCFG_MDCCLKDIV_SHIFT 18
71 #define GEM_NETCFG_FCSREM (1 << 17)
72 #define GEM_NETCFG_RXOFFS_MASK (0x3 << 14)
73 #define GEM_NETCFG_RXOFFS_SHIFT 14
74 #define GEM_NETCFG_PCSSEL (1 << 11)
75 #define GEM_NETCFG_1000 (1 << 10)
76 #define GEM_NETCFG_1536RXEN (1 << 8)
77 #define GEM_NETCFG_UCASTHASHEN (1 << 7)
78 #define GEM_NETCFG_MCASTHASHEN (1 << 6)
79 #define GEM_NETCFG_BCASTDI (1 << 5)
80 #define GEM_NETCFG_COPYALL (1 << 4)
81 #define GEM_NETCFG_FDEN (1 << 1)
82 #define GEM_NETCFG_100 (1 << 0)
83 #define GEM_NETSR 0x0008
84 #define GEM_NETSR_PHY_MGMT_IDLE (1 << 2)
85 #define GEM_DMACR 0x0010
86 #define GEM_DMACR_DMA64 (1 << 30)
87 #define GEM_DMACR_AHBDISC (1 << 24)
88 #define GEM_DMACR_RXBUF_MASK (0xff << 16)
89 #define GEM_DMACR_RXBUF_SHIFT 16
90 #define GEM_DMACR_TXCSUMEN (1 << 11)
91 #define GEM_DMACR_TXSIZE (1 << 10)
92 #define GEM_DMACR_RXSIZE_MASK (0x3 << 8)
93 #define GEM_DMACR_RXSIZE_8K (0x3 << 8)
94 #define GEM_DMACR_ES_PDATA (1 << 7)
95 #define GEM_DMACR_ES_DESCR (1 << 6)
96 #define GEM_DMACR_BLEN_MASK (0x1f << 0)
97 #define GEM_DMACR_BLEN_16 (0x10 << 0)
98 #define GEM_TXSR 0x0014
99 #define GEM_TXSR_TXGO (1 << 3)
100 #define GEM_RXQBASE 0x0018
101 #define GEM_TXQBASE 0x001c
102 #define GEM_RXSR 0x0020
103 #define GEM_RXSR_RXOVR (1 << 2)
104 #define GEM_ISR 0x0024
105 #define GEM_IER 0x0028
106 #define GEM_IDR 0x002c
107 #define GEM_IXR_HRESP (1 << 11)
108 #define GEM_IXR_RXOVR (1 << 10)
109 #define GEM_IXR_TXDONE (1 << 7)
110 #define GEM_IXR_TXURUN (1 << 6)
111 #define GEM_IXR_RETRY (1 << 5)
112 #define GEM_IXR_TXUSED (1 << 3)
113 #define GEM_IXR_RXUSED (1 << 2)
114 #define GEM_IXR_RXDONE (1 << 1)
115 #define GEM_PHYMNTNC 0x0034
116 #define GEM_PHYMNTNC_CLAUSE_22 (1 << 30)
117 #define GEM_PHYMNTNC_OP_READ (0x2 << 28)
118 #define GEM_PHYMNTNC_OP_WRITE (0x1 << 28)
119 #define GEM_PHYMNTNC_ADDR_MASK (0x1f << 23)
120 #define GEM_PHYMNTNC_ADDR_SHIFT 23
121 #define GEM_PHYMNTNC_REG_MASK (0x1f << 18)
122 #define GEM_PHYMNTNC_REG_SHIFT 18
123 #define GEM_PHYMNTNC_MUST_10 (0x2 << 16)
124 #define GEM_PHYMNTNC_DATA_MASK 0xffff
125 #define GEM_HASHL 0x0080
126 #define GEM_HASHH 0x0084
127 #define GEM_LADDRL(i) (0x0088 + (i) * 8)
128 #define GEM_LADDRH(i) (0x008c + (i) * 8)
129 #define GEM_LADDRNUM 4
130 #define GEM_MID 0x00fc
131 #define GEM_MID_VERSION_MASK (0xfff << 16)
132 #define GEM_MID_VERSION_SHIFT 16
133 #define GEM_OCTTXL 0x0100
134 #define GEM_OCTTXH 0x0104
135 #define GEM_TXCNT 0x0108
136 #define GEM_TXBCCNT 0x010c
137 #define GEM_TXMCCNT 0x0110
138 #define GEM_TXPAUSECNT 0x0114
139 #define GEM_TX64CNT 0x0118
140 #define GEM_TX65CNT 0x011c
141 #define GEM_TX128CNT 0x0120
142 #define GEM_TX256CNT 0x0124
143 #define GEM_TX512CNT 0x0128
144 #define GEM_TX1024CNT 0x012c
145 #define GEM_TXURUNCNT 0x0134
146 #define GEM_SNGLCOLLCNT 0x0138
147 #define GEM_MULTICOLLCNT 0x013c
148 #define GEM_EXCESSCOLLCNT 0x0140
149 #define GEM_LATECOLLCNT 0x0144
150 #define GEM_TXDEFERCNT 0x0148
151 #define GEM_TXCSENSECNT 0x014c
152 #define GEM_OCTRXL 0x0150
153 #define GEM_OCTRXH 0x0154
154 #define GEM_RXCNT 0x0158
155 #define GEM_RXBROADCNT 0x015c
156 #define GEM_RXMULTICNT 0x0160
157 #define GEM_RXPAUSECNT 0x0164
158 #define GEM_RX64CNT 0x0168
159 #define GEM_RX65CNT 0x016c
160 #define GEM_RX128CNT 0x0170
161 #define GEM_RX256CNT 0x0174
162 #define GEM_RX512CNT 0x0178
163 #define GEM_RX1024CNT 0x017c
164 #define GEM_RXUNDRCNT 0x0184
165 #define GEM_RXOVRCNT 0x0188
166 #define GEM_RXJABCNT 0x018c
167 #define GEM_RXFCSCNT 0x0190
168 #define GEM_RXLENGTHCNT 0x0194
169 #define GEM_RXSYMBCNT 0x0198
170 #define GEM_RXALIGNCNT 0x019c
171 #define GEM_RXRESERRCNT 0x01a0
172 #define GEM_RXORCNT 0x01a4
173 #define GEM_RXIPCCNT 0x01a8
174 #define GEM_RXTCPCCNT 0x01ac
175 #define GEM_RXUDPCCNT 0x01b0
176 #define GEM_CFG6 0x0294
177 #define GEM_CFG6_DMA64 (1 << 23)
178 #define GEM_CFG6_PRIQ_MASK(x) ((x) & 0xffff)
179 #define GEM_CFG8 0x029c
180 #define GEM_CFG8_NUM_TYPE1_SCR(x) (((x) >> 24) & 0xff)
181 #define GEM_CFG8_NUM_TYPE2_SCR(x) (((x) >> 16) & 0xff)
182 #define GEM_TXQ1BASE(i) (0x0440 + (i) * 4)
183 #define GEM_TXQ1BASE_DISABLE (1 << 0)
184 #define GEM_RXQ1BASE(i) (0x0480 + (i) * 4)
185 #define GEM_RXQ1BASE_DISABLE (1 << 0)
186 #define GEM_TXQBASEHI 0x04c8
187 #define GEM_RXQBASEHI 0x04d4
188 #define GEM_SCR_TYPE1(i) (0x0500 + (i) * 4)
189 #define GEM_SCR_TYPE2(i) (0x0540 + (i) * 4)
190 #define GEM_RXQ8BASE(i) (0x05c0 + (i) * 4)
191 #define GEM_RXQ8BASE_DISABLE (1 << 0)
192
193 #define GEM_MAX_PRIQ 16
194
195 #define GEM_CLK_TX "tx_clk"
196
197 struct cad_buf {
198 bus_dmamap_t bf_map;
199 struct mbuf *bf_m;
200 };
201
202 struct cad_dmamem {
203 bus_dmamap_t cdm_map;
204 bus_dma_segment_t cdm_seg;
205 size_t cdm_size;
206 caddr_t cdm_kva;
207 };
208
209 struct cad_desc32 {
210 uint32_t d_addr;
211 uint32_t d_status;
212 };
213
214 struct cad_desc64 {
215 uint32_t d_addrlo;
216 uint32_t d_status;
217 uint32_t d_addrhi;
218 uint32_t d_unused;
219 };
220
221 #define GEM_RXD_ADDR_WRAP (1 << 1)
222 #define GEM_RXD_ADDR_USED (1 << 0)
223
224 #define GEM_RXD_BCAST (1U << 31)
225 #define GEM_RXD_MCAST (1 << 30)
226 #define GEM_RXD_UCAST (1 << 29)
227 #define GEM_RXD_SPEC (1 << 27)
228 #define GEM_RXD_SPEC_MASK (0x3 << 25)
229 #define GEM_RXD_CSUM_MASK (0x3 << 22)
230 #define GEM_RXD_CSUM_UDP_OK (0x3 << 22)
231 #define GEM_RXD_CSUM_TCP_OK (0x2 << 22)
232 #define GEM_RXD_CSUM_IP_OK (0x1 << 22)
233 #define GEM_RXD_VLANTAG (1 << 21)
234 #define GEM_RXD_PRIOTAG (1 << 20)
235 #define GEM_RXD_CFI (1 << 16)
236 #define GEM_RXD_EOF (1 << 15)
237 #define GEM_RXD_SOF (1 << 14)
238 #define GEM_RXD_BADFCS (1 << 13)
239 #define GEM_RXD_LEN_MASK 0x1fff
240
241 #define GEM_TXD_USED (1U << 31)
242 #define GEM_TXD_WRAP (1 << 30)
243 #define GEM_TXD_RLIMIT (1 << 29)
244 #define GEM_TXD_CORRUPT (1 << 27)
245 #define GEM_TXD_LCOLL (1 << 26)
246 #define GEM_TXD_CSUMERR_MASK (0x7 << 20)
247 #define GEM_TXD_NOFCS (1 << 16)
248 #define GEM_TXD_LAST (1 << 15)
249 #define GEM_TXD_LEN_MASK 0x3fff
250
251 #define CAD_NRXDESC 256
252
253 #define CAD_NTXDESC 256
254 #define CAD_NTXSEGS 16
255
256 enum cad_phy_mode {
257 CAD_PHY_MODE_GMII,
258 CAD_PHY_MODE_RGMII,
259 CAD_PHY_MODE_RGMII_ID,
260 CAD_PHY_MODE_RGMII_RXID,
261 CAD_PHY_MODE_RGMII_TXID,
262 CAD_PHY_MODE_SGMII,
263 };
264
265 struct cad_softc {
266 struct device sc_dev;
267 struct arpcom sc_ac;
268
269 bus_dma_tag_t sc_dmat;
270 bus_space_tag_t sc_iot;
271 bus_space_handle_t sc_ioh;
272 void *sc_ih;
273 int sc_node;
274 int sc_phy_loc;
275 enum cad_phy_mode sc_phy_mode;
276 unsigned char sc_rxhang_erratum;
277 unsigned char sc_rxdone;
278 unsigned char sc_dma64;
279 size_t sc_descsize;
280 uint32_t sc_qmask;
281 uint8_t sc_ntype1scr;
282 uint8_t sc_ntype2scr;
283
284 struct mii_data sc_mii;
285 #define sc_media sc_mii.mii_media
286 struct timeout sc_tick;
287
288 struct cad_dmamem *sc_txring;
289 struct cad_buf *sc_txbuf;
290 caddr_t sc_txdesc;
291 unsigned int sc_tx_prod;
292 unsigned int sc_tx_cons;
293
294 struct if_rxring sc_rx_ring;
295 struct cad_dmamem *sc_rxring;
296 struct cad_buf *sc_rxbuf;
297 caddr_t sc_rxdesc;
298 unsigned int sc_rx_prod;
299 unsigned int sc_rx_cons;
300 uint32_t sc_netctl;
301
302 struct rwlock sc_cfg_lock;
303 struct task sc_statchg_task;
304 uint32_t sc_tx_freq;
305
306 struct mutex sc_kstat_mtx;
307 struct kstat *sc_kstat;
308 };
309
310 #define HREAD4(sc, reg) \
311 (bus_space_read_4((sc)->sc_iot, (sc)->sc_ioh, (reg)))
312 #define HWRITE4(sc, reg, val) \
313 bus_space_write_4((sc)->sc_iot, (sc)->sc_ioh, (reg), (val))
314
315 int cad_match(struct device *, void *, void *);
316 void cad_attach(struct device *, struct device *, void *);
317
318 int cad_ioctl(struct ifnet *, u_long, caddr_t);
319 void cad_start(struct ifqueue *);
320 void cad_watchdog(struct ifnet *);
321
322 void cad_reset(struct cad_softc *);
323 int cad_up(struct cad_softc *);
324 void cad_down(struct cad_softc *);
325 void cad_iff(struct cad_softc *);
326 int cad_intr(void *);
327 void cad_tick(void *);
328 void cad_statchg_task(void *);
329
330 int cad_media_change(struct ifnet *);
331 void cad_media_status(struct ifnet *, struct ifmediareq *);
332 int cad_mii_readreg(struct device *, int, int);
333 void cad_mii_writereg(struct device *, int, int, int);
334 void cad_mii_statchg(struct device *);
335
336 struct cad_dmamem *cad_dmamem_alloc(struct cad_softc *, bus_size_t, bus_size_t);
337 void cad_dmamem_free(struct cad_softc *, struct cad_dmamem *);
338 void cad_rxfill(struct cad_softc *);
339 void cad_rxeof(struct cad_softc *);
340 void cad_txeof(struct cad_softc *);
341 unsigned int cad_encap(struct cad_softc *, struct mbuf *);
342 struct mbuf *cad_alloc_mbuf(struct cad_softc *, bus_dmamap_t);
343
344 #if NKSTAT > 0
345 void cad_kstat_attach(struct cad_softc *);
346 int cad_kstat_read(struct kstat *);
347 void cad_kstat_tick(void *);
348 #endif
349
350 #ifdef DDB
351 struct cad_softc *cad_sc[4];
352 #endif
353
354 const struct cfattach cad_ca = {
355 sizeof(struct cad_softc), cad_match, cad_attach
356 };
357
358 struct cfdriver cad_cd = {
359 NULL, "cad", DV_IFNET
360 };
361
362 const struct {
363 const char *name;
364 enum cad_phy_mode mode;
365 } cad_phy_modes[] = {
366 { "gmii", CAD_PHY_MODE_GMII },
367 { "rgmii", CAD_PHY_MODE_RGMII },
368 { "rgmii-id", CAD_PHY_MODE_RGMII_ID },
369 { "rgmii-rxid", CAD_PHY_MODE_RGMII_RXID },
370 { "rgmii-txid", CAD_PHY_MODE_RGMII_TXID },
371 { "sgmii", CAD_PHY_MODE_SGMII },
372 };
373
374 int
cad_match(struct device *parent, void *match, void *aux)
376 {
377 struct fdt_attach_args *faa = aux;
378
379 return (OF_is_compatible(faa->fa_node, "cdns,gem") ||
380 OF_is_compatible(faa->fa_node, "cdns,macb") ||
381 OF_is_compatible(faa->fa_node, "sifive,fu540-c000-gem") ||
382 OF_is_compatible(faa->fa_node, "sifive,fu740-c000-gem"));
383 }
384
385 void
cad_attach(struct device *parent, struct device *self, void *aux)
387 {
388 char phy_mode[16];
389 struct fdt_attach_args *faa = aux;
390 struct cad_softc *sc = (struct cad_softc *)self;
391 struct ifnet *ifp = &sc->sc_ac.ac_if;
392 uint32_t phy_reset_gpio[3];
393 uint32_t phy_reset_duration;
394 uint32_t hi, lo;
395 uint32_t rev, ver;
396 uint32_t val;
397 unsigned int i;
398 int node, phy;
399
400 if (faa->fa_nreg < 1) {
401 printf(": no registers\n");
402 return;
403 }
404
405 sc->sc_node = faa->fa_node;
406 sc->sc_dmat = faa->fa_dmat;
407 sc->sc_iot = faa->fa_iot;
408 if (bus_space_map(sc->sc_iot, faa->fa_reg[0].addr,
409 faa->fa_reg[0].size, 0, &sc->sc_ioh) != 0) {
410 printf(": can't map registers\n");
411 return;
412 }
413
414 if (OF_getprop(faa->fa_node, "local-mac-address", sc->sc_ac.ac_enaddr,
415 sizeof(sc->sc_ac.ac_enaddr)) != sizeof(sc->sc_ac.ac_enaddr)) {
416 for (i = 0; i < GEM_LADDRNUM; i++) {
417 lo = HREAD4(sc, GEM_LADDRL(i));
418 hi = HREAD4(sc, GEM_LADDRH(i));
419 if (lo != 0 || hi != 0) {
420 sc->sc_ac.ac_enaddr[0] = lo;
421 sc->sc_ac.ac_enaddr[1] = lo >> 8;
422 sc->sc_ac.ac_enaddr[2] = lo >> 16;
423 sc->sc_ac.ac_enaddr[3] = lo >> 24;
424 sc->sc_ac.ac_enaddr[4] = hi;
425 sc->sc_ac.ac_enaddr[5] = hi >> 8;
426 break;
427 }
428 }
429 if (i == GEM_LADDRNUM)
430 ether_fakeaddr(ifp);
431 }
432
433 if (OF_getpropintarray(faa->fa_node, "phy-reset-gpios", phy_reset_gpio,
434 sizeof(phy_reset_gpio)) == sizeof(phy_reset_gpio)) {
435 phy_reset_duration = OF_getpropint(faa->fa_node,
436 "phy-reset-duration", 1);
437 if (phy_reset_duration > 1000)
438 phy_reset_duration = 1;
439
440 gpio_controller_config_pin(phy_reset_gpio, GPIO_CONFIG_OUTPUT);
441 gpio_controller_set_pin(phy_reset_gpio, 1);
442 delay((phy_reset_duration + 1) * 1000);
443 gpio_controller_set_pin(phy_reset_gpio, 0);
444 delay(1000);
445 }
446
447 phy = OF_getpropint(faa->fa_node, "phy-handle", 0);
448 node = OF_getnodebyphandle(phy);
449 if (node != 0)
450 sc->sc_phy_loc = OF_getpropint(node, "reg", MII_PHY_ANY);
451 else
452 sc->sc_phy_loc = MII_PHY_ANY;
453
454 sc->sc_phy_mode = CAD_PHY_MODE_RGMII;
455 OF_getprop(faa->fa_node, "phy-mode", phy_mode, sizeof(phy_mode));
456 for (i = 0; i < nitems(cad_phy_modes); i++) {
457 if (strcmp(phy_mode, cad_phy_modes[i].name) == 0) {
458 sc->sc_phy_mode = cad_phy_modes[i].mode;
459 break;
460 }
461 }
462
463 rev = HREAD4(sc, GEM_MID);
464 ver = (rev & GEM_MID_VERSION_MASK) >> GEM_MID_VERSION_SHIFT;
465
466 sc->sc_descsize = sizeof(struct cad_desc32);
467 /* Queue 0 is always present. */
468 sc->sc_qmask = 0x1;
469 /*
470 * Registers CFG1 and CFG6-10 are not present
471 * on Zynq-7000 / GEM version 0x2.
472 */
473 if (ver >= 0x7) {
474 val = HREAD4(sc, GEM_CFG6);
475 if (val & GEM_CFG6_DMA64) {
476 sc->sc_descsize = sizeof(struct cad_desc64);
477 sc->sc_dma64 = 1;
478 }
479 sc->sc_qmask |= GEM_CFG6_PRIQ_MASK(val);
480
481 val = HREAD4(sc, GEM_CFG8);
482 sc->sc_ntype1scr = GEM_CFG8_NUM_TYPE1_SCR(val);
483 sc->sc_ntype2scr = GEM_CFG8_NUM_TYPE2_SCR(val);
484 }
485
486 if (OF_is_compatible(faa->fa_node, "cdns,zynq-gem"))
487 sc->sc_rxhang_erratum = 1;
488
489 rw_init(&sc->sc_cfg_lock, "cadcfg");
490 timeout_set(&sc->sc_tick, cad_tick, sc);
491 task_set(&sc->sc_statchg_task, cad_statchg_task, sc);
492
493 rw_enter_write(&sc->sc_cfg_lock);
494 cad_reset(sc);
495 rw_exit_write(&sc->sc_cfg_lock);
496
497 sc->sc_ih = fdt_intr_establish(faa->fa_node, IPL_NET | IPL_MPSAFE,
498 cad_intr, sc, sc->sc_dev.dv_xname);
499 if (sc->sc_ih == NULL) {
500 printf(": can't establish interrupt\n");
501 goto fail;
502 }
503
504 ifp->if_softc = sc;
505 strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);
506 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
507 ifp->if_xflags |= IFXF_MPSAFE;
508 ifp->if_ioctl = cad_ioctl;
509 ifp->if_qstart = cad_start;
510 ifp->if_watchdog = cad_watchdog;
511 ifp->if_hardmtu = ETHER_MAX_DIX_LEN - ETHER_HDR_LEN - ETHER_CRC_LEN;
512 ifp->if_capabilities = IFCAP_VLAN_MTU;
513
514 /*
515 * Enable transmit checksum offload only on reliable hardware.
516 * At least Zynq-7000 appears to generate bad UDP header checksum if
517 * the checksum field has not been initialized to zero and
518 * UDP payload size is less than three octets.
519 */
520 if (0) {
521 ifp->if_capabilities |= IFCAP_CSUM_IPv4 |
522 IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4 |
523 IFCAP_CSUM_TCPv6 | IFCAP_CSUM_UDPv6;
524 }
525
526 printf(": rev 0x%x, address %s\n", rev,
527 ether_sprintf(sc->sc_ac.ac_enaddr));
528
529 sc->sc_mii.mii_ifp = ifp;
530 sc->sc_mii.mii_readreg = cad_mii_readreg;
531 sc->sc_mii.mii_writereg = cad_mii_writereg;
532 sc->sc_mii.mii_statchg = cad_mii_statchg;
533 ifmedia_init(&sc->sc_media, 0, cad_media_change, cad_media_status);
534
535 mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, sc->sc_phy_loc,
536 MII_OFFSET_ANY, MIIF_NOISOLATE);
537
538 if (LIST_EMPTY(&sc->sc_mii.mii_phys)) {
539 printf("%s: no PHY found\n", sc->sc_dev.dv_xname);
540 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_MANUAL, 0, NULL);
541 ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_MANUAL);
542 } else {
543 ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);
544 }
545
546 if_attach(ifp);
547 ether_ifattach(ifp);
548
549 #if NKSTAT > 0
550 cad_kstat_attach(sc);
551 #endif
552
553 #ifdef DDB
554 if (sc->sc_dev.dv_unit < nitems(cad_sc))
555 cad_sc[sc->sc_dev.dv_unit] = sc;
556 #endif
557
558 return;
559
560 fail:
561 if (sc->sc_ioh != 0)
562 bus_space_unmap(sc->sc_iot, sc->sc_ioh, faa->fa_reg[0].size);
563 }
564
565 int
cad_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
567 {
568 struct cad_softc *sc = ifp->if_softc;
569 struct ifreq *ifr = (struct ifreq *)data;
570 int error = 0, netlock_held = 1;
571 int s;
572
573 switch (cmd) {
574 case SIOCGIFMEDIA:
575 case SIOCSIFMEDIA:
576 case SIOCGIFSFFPAGE:
577 netlock_held = 0;
578 break;
579 }
580
581 if (netlock_held)
582 NET_UNLOCK();
583 rw_enter_write(&sc->sc_cfg_lock);
584 if (netlock_held)
585 NET_LOCK();
586 s = splnet();
587
588 switch (cmd) {
589 case SIOCSIFADDR:
590 ifp->if_flags |= IFF_UP;
591 /* FALLTHROUGH */
592
593 case SIOCSIFFLAGS:
594 if (ISSET(ifp->if_flags, IFF_UP)) {
595 if (ISSET(ifp->if_flags, IFF_RUNNING))
596 error = ENETRESET;
597 else
598 error = cad_up(sc);
599 } else {
600 if (ISSET(ifp->if_flags, IFF_RUNNING))
601 cad_down(sc);
602 }
603 break;
604
605 case SIOCGIFMEDIA:
606 case SIOCSIFMEDIA:
607 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
608 break;
609
610 case SIOCGIFRXR:
611 error = if_rxr_ioctl((struct if_rxrinfo *)ifr->ifr_data,
612 NULL, MCLBYTES, &sc->sc_rx_ring);
613 break;
614
615 default:
616 error = ether_ioctl(ifp, &sc->sc_ac, cmd, data);
617 break;
618 }
619
620 if (error == ENETRESET) {
621 if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
622 (IFF_UP | IFF_RUNNING))
623 cad_iff(sc);
624 error = 0;
625 }
626
627 splx(s);
628 rw_exit_write(&sc->sc_cfg_lock);
629
630 return error;
631 }
632
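/*
 * Bring the controller into a known state: disable the receiver and
 * transmitter, mask all interrupts, clear the queue base registers and
 * screeners, program the MDC clock divider, and re-enable the MDIO
 * interface.
 */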
633 void
cad_reset(struct cad_softc *sc)
635 {
636 static const unsigned int mdcclk_divs[] = {
637 8, 16, 32, 48, 64, 96, 128, 224
638 };
639 unsigned int freq, i;
640 uint32_t div, netcfg;
641
642 rw_assert_wrlock(&sc->sc_cfg_lock);
643
644 HWRITE4(sc, GEM_NETCTL, 0);
645 HWRITE4(sc, GEM_IDR, ~0U);
646 HWRITE4(sc, GEM_RXSR, 0);
647 HWRITE4(sc, GEM_TXSR, 0);
648 if (sc->sc_dma64) {
649 HWRITE4(sc, GEM_RXQBASEHI, 0);
650 HWRITE4(sc, GEM_TXQBASEHI, 0);
651 }
652 HWRITE4(sc, GEM_RXQBASE, 0);
653 HWRITE4(sc, GEM_TXQBASE, 0);
654
655 for (i = 1; i < GEM_MAX_PRIQ; i++) {
656 if (sc->sc_qmask & (1U << i)) {
657 if (i < 8)
658 HWRITE4(sc, GEM_RXQ1BASE(i - 1), 0);
659 else
660 HWRITE4(sc, GEM_RXQ8BASE(i - 8), 0);
661 HWRITE4(sc, GEM_TXQ1BASE(i - 1), 0);
662 }
663 }
664
665 /* Disable all screeners so that Rx goes through queue 0. */
666 for (i = 0; i < sc->sc_ntype1scr; i++)
667 HWRITE4(sc, GEM_SCR_TYPE1(i), 0);
668 for (i = 0; i < sc->sc_ntype2scr; i++)
669 HWRITE4(sc, GEM_SCR_TYPE2(i), 0);
670
671 /* MDIO clock rate must not exceed 2.5 MHz. */
672 freq = clock_get_frequency(sc->sc_node, "pclk");
673 for (div = 0; div < nitems(mdcclk_divs) - 1; div++) {
674 if (freq / mdcclk_divs[div] <= 2500000)
675 break;
676 }
677 KASSERT(div < nitems(mdcclk_divs));
678
679 netcfg = HREAD4(sc, GEM_NETCFG);
680 netcfg &= ~GEM_NETCFG_MDCCLKDIV_MASK;
681 netcfg |= div << GEM_NETCFG_MDCCLKDIV_SHIFT;
682 HWRITE4(sc, GEM_NETCFG, netcfg);
683
684 /* Enable MDIO bus. */
685 sc->sc_netctl = GEM_NETCTL_MDEN;
686 HWRITE4(sc, GEM_NETCTL, sc->sc_netctl);
687 }
688
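/*
 * Bring the interface up: allocate and initialize the Tx and Rx
 * descriptor rings, program the address filters, configure the MAC and
 * the DMA engine, and finally enable the receiver, the transmitter and
 * interrupts.
 */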
689 int
cad_up(struct cad_softc *sc)
691 {
692 struct ifnet *ifp = &sc->sc_ac.ac_if;
693 struct cad_buf *rxb, *txb;
694 struct cad_desc32 *desc32;
695 struct cad_desc64 *desc64;
696 uint64_t addr;
697 int flags = BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW;
698 unsigned int i, nrxd, ntxd;
699 uint32_t val;
700
701 rw_assert_wrlock(&sc->sc_cfg_lock);
702
703 /* Release lock for memory allocation. */
704 NET_UNLOCK();
705
706 if (sc->sc_dma64)
707 flags |= BUS_DMA_64BIT;
708
709 ntxd = CAD_NTXDESC;
710 nrxd = CAD_NRXDESC;
711
712 /*
713 * Allocate a dummy descriptor for unused priority queues.
714 * This is necessary with GEM revisions that have no option
715 * to disable queues.
716 */
717 if (sc->sc_qmask & ~1U) {
718 ntxd++;
719 nrxd++;
720 }
721
722 /*
723 * Set up Tx descriptor ring.
724 */
725
726 sc->sc_txring = cad_dmamem_alloc(sc,
727 ntxd * sc->sc_descsize, sc->sc_descsize);
728 sc->sc_txdesc = sc->sc_txring->cdm_kva;
729
730 desc32 = (struct cad_desc32 *)sc->sc_txdesc;
731 desc64 = (struct cad_desc64 *)sc->sc_txdesc;
732
733 sc->sc_txbuf = malloc(sizeof(*sc->sc_txbuf) * CAD_NTXDESC,
734 M_DEVBUF, M_WAITOK);
735 for (i = 0; i < CAD_NTXDESC; i++) {
736 txb = &sc->sc_txbuf[i];
737 bus_dmamap_create(sc->sc_dmat, MCLBYTES, CAD_NTXSEGS,
738 MCLBYTES, 0, flags, &txb->bf_map);
739 txb->bf_m = NULL;
740
741 if (sc->sc_dma64) {
742 desc64[i].d_addrhi = 0;
743 desc64[i].d_addrlo = 0;
744 desc64[i].d_status = GEM_TXD_USED;
745 if (i == CAD_NTXDESC - 1)
746 desc64[i].d_status |= GEM_TXD_WRAP;
747 } else {
748 desc32[i].d_addr = 0;
749 desc32[i].d_status = GEM_TXD_USED;
750 if (i == CAD_NTXDESC - 1)
751 desc32[i].d_status |= GEM_TXD_WRAP;
752 }
753 }
754
755 /* The remaining descriptors are dummies. */
756 for (; i < ntxd; i++) {
757 if (sc->sc_dma64) {
758 desc64[i].d_addrhi = 0;
759 desc64[i].d_addrlo = 0;
760 desc64[i].d_status = GEM_TXD_USED | GEM_TXD_WRAP;
761 } else {
762 desc32[i].d_addr = 0;
763 desc32[i].d_status = GEM_TXD_USED | GEM_TXD_WRAP;
764 }
765 }
766
767 sc->sc_tx_prod = 0;
768 sc->sc_tx_cons = 0;
769
770 bus_dmamap_sync(sc->sc_dmat, sc->sc_txring->cdm_map,
771 0, sc->sc_txring->cdm_size,
772 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
773
774 addr = sc->sc_txring->cdm_map->dm_segs[0].ds_addr;
775 if (sc->sc_dma64)
776 HWRITE4(sc, GEM_TXQBASEHI, addr >> 32);
777 HWRITE4(sc, GEM_TXQBASE, addr);
778
779 /* Initialize unused queues. Disable them if possible. */
780 addr += CAD_NTXDESC * sc->sc_descsize;
781 for (i = 1; i < GEM_MAX_PRIQ; i++) {
782 if (sc->sc_qmask & (1U << i)) {
783 HWRITE4(sc, GEM_TXQ1BASE(i - 1),
784 addr | GEM_TXQ1BASE_DISABLE);
785 }
786 }
787
788 /*
789 * Set up Rx descriptor ring.
790 */
791
792 sc->sc_rxring = cad_dmamem_alloc(sc,
793 nrxd * sc->sc_descsize, sc->sc_descsize);
794 sc->sc_rxdesc = sc->sc_rxring->cdm_kva;
795
796 desc32 = (struct cad_desc32 *)sc->sc_rxdesc;
797 desc64 = (struct cad_desc64 *)sc->sc_rxdesc;
798
799 sc->sc_rxbuf = malloc(sizeof(struct cad_buf) * CAD_NRXDESC,
800 M_DEVBUF, M_WAITOK);
801 for (i = 0; i < CAD_NRXDESC; i++) {
802 rxb = &sc->sc_rxbuf[i];
803 bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
804 MCLBYTES, 0, flags, &rxb->bf_map);
805 rxb->bf_m = NULL;
806
807 /* Mark all descriptors as used so that driver owns them. */
808 if (sc->sc_dma64) {
809 desc64[i].d_addrhi = 0;
810 desc64[i].d_addrlo = GEM_RXD_ADDR_USED;
811 if (i == CAD_NRXDESC - 1)
812 desc64[i].d_addrlo |= GEM_RXD_ADDR_WRAP;
813 } else {
814 desc32[i].d_addr = GEM_RXD_ADDR_USED;
815 if (i == CAD_NRXDESC - 1)
816 desc32[i].d_addr |= GEM_RXD_ADDR_WRAP;
817 }
818 }
819
820 /* The remaining descriptors are dummies. */
821 for (; i < nrxd; i++) {
822 if (sc->sc_dma64) {
823 desc64[i].d_addrhi = 0;
824 desc64[i].d_addrlo =
825 GEM_RXD_ADDR_USED | GEM_RXD_ADDR_WRAP;
826 } else {
827 desc32[i].d_addr =
828 GEM_RXD_ADDR_USED | GEM_RXD_ADDR_WRAP;
829 }
830 }
831
832 if_rxr_init(&sc->sc_rx_ring, 2, CAD_NRXDESC);
833
834 sc->sc_rx_prod = 0;
835 sc->sc_rx_cons = 0;
836 cad_rxfill(sc);
837
838 bus_dmamap_sync(sc->sc_dmat, sc->sc_rxring->cdm_map,
839 0, sc->sc_rxring->cdm_size,
840 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
841
842 addr = sc->sc_rxring->cdm_map->dm_segs[0].ds_addr;
843 if (sc->sc_dma64)
844 HWRITE4(sc, GEM_RXQBASEHI, addr >> 32);
845 HWRITE4(sc, GEM_RXQBASE, addr);
846
847 /* Initialize unused queues. Disable them if possible. */
848 addr += sc->sc_descsize * CAD_NRXDESC;
849 for (i = 1; i < GEM_MAX_PRIQ; i++) {
850 if (sc->sc_qmask & (1U << i)) {
851 if (i < 8) {
852 HWRITE4(sc, GEM_RXQ1BASE(i - 1),
853 addr | GEM_RXQ1BASE_DISABLE);
854 } else {
855 HWRITE4(sc, GEM_RXQ8BASE(i - 8),
856 addr | GEM_RXQ8BASE_DISABLE);
857 }
858 }
859 }
860
861 NET_LOCK();
862
863 /*
864 * Set MAC address filters.
865 */
866
867 HWRITE4(sc, GEM_LADDRL(0), sc->sc_ac.ac_enaddr[0] |
868 ((uint32_t)sc->sc_ac.ac_enaddr[1] << 8) |
869 ((uint32_t)sc->sc_ac.ac_enaddr[2] << 16) |
870 ((uint32_t)sc->sc_ac.ac_enaddr[3] << 24));
871 HWRITE4(sc, GEM_LADDRH(0), sc->sc_ac.ac_enaddr[4] |
872 ((uint32_t)sc->sc_ac.ac_enaddr[5] << 8));
873
874 for (i = 1; i < GEM_LADDRNUM; i++) {
875 HWRITE4(sc, GEM_LADDRL(i), 0);
876 HWRITE4(sc, GEM_LADDRH(i), 0);
877 }
878
879 cad_iff(sc);
880
881 clock_set_frequency(sc->sc_node, GEM_CLK_TX, 2500000);
882 clock_enable(sc->sc_node, GEM_CLK_TX);
883 delay(1000);
884
885 val = HREAD4(sc, GEM_NETCFG);
886
887 val |= GEM_NETCFG_FCSREM | GEM_NETCFG_RXCSUMEN | GEM_NETCFG_1000 |
888 GEM_NETCFG_100 | GEM_NETCFG_FDEN | GEM_NETCFG_1536RXEN;
889 val &= ~GEM_NETCFG_RXOFFS_MASK;
890 val |= ETHER_ALIGN << GEM_NETCFG_RXOFFS_SHIFT;
891 val &= ~GEM_NETCFG_BCASTDI;
892
893 if (sc->sc_phy_mode == CAD_PHY_MODE_SGMII)
894 val |= GEM_NETCFG_SGMIIEN | GEM_NETCFG_PCSSEL;
895 else
896 val &= ~(GEM_NETCFG_SGMIIEN | GEM_NETCFG_PCSSEL);
897
898 HWRITE4(sc, GEM_NETCFG, val);
899
900 val = HREAD4(sc, GEM_DMACR);
901
902 if (sc->sc_dma64)
903 val |= GEM_DMACR_DMA64;
904 else
905 val &= ~GEM_DMACR_DMA64;
906 /* Use CPU's native byte order with descriptor words. */
907 #if BYTE_ORDER == BIG_ENDIAN
908 val |= GEM_DMACR_ES_DESCR;
909 #else
910 val &= ~GEM_DMACR_ES_DESCR;
911 #endif
912 val &= ~GEM_DMACR_ES_PDATA;
913 val |= GEM_DMACR_AHBDISC | GEM_DMACR_TXSIZE;
914 val &= ~GEM_DMACR_RXSIZE_MASK;
915 val |= GEM_DMACR_RXSIZE_8K;
916 val &= ~GEM_DMACR_RXBUF_MASK;
917 val |= (MCLBYTES / 64) << GEM_DMACR_RXBUF_SHIFT;
918 val &= ~GEM_DMACR_BLEN_MASK;
919 val |= GEM_DMACR_BLEN_16;
920
921 if (ifp->if_capabilities & IFCAP_CSUM_IPv4)
922 val |= GEM_DMACR_TXCSUMEN;
923
924 HWRITE4(sc, GEM_DMACR, val);
925
926 /* Clear statistics. */
927 HWRITE4(sc, GEM_NETCTL, sc->sc_netctl | GEM_NETCTL_STATCLR);
928
929 /* Enable Rx and Tx. */
930 sc->sc_netctl |= GEM_NETCTL_RXEN | GEM_NETCTL_TXEN;
931 HWRITE4(sc, GEM_NETCTL, sc->sc_netctl);
932
933 /* Enable interrupts. */
934 HWRITE4(sc, GEM_IER, GEM_IXR_HRESP | GEM_IXR_RXOVR | GEM_IXR_RXDONE |
935 GEM_IXR_TXDONE);
936
937 if (sc->sc_rxhang_erratum)
938 HWRITE4(sc, GEM_IER, GEM_IXR_RXUSED);
939
940 if (!LIST_EMPTY(&sc->sc_mii.mii_phys))
941 mii_mediachg(&sc->sc_mii);
942
943 ifp->if_flags |= IFF_RUNNING;
944 ifq_clr_oactive(&ifp->if_snd);
945
946 timeout_add_sec(&sc->sc_tick, 1);
947
948 return 0;
949 }
950
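/*
 * Stop the interface: disable the receiver, the transmitter and
 * interrupts, wait for pending activity to settle, and tear down the
 * descriptor rings.
 */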
951 void
cad_down(struct cad_softc *sc)
953 {
954 struct ifnet *ifp = &sc->sc_ac.ac_if;
955 struct cad_buf *rxb, *txb;
956 unsigned int i, timeout;
957
958 rw_assert_wrlock(&sc->sc_cfg_lock);
959
960 ifp->if_flags &= ~IFF_RUNNING;
961
962 ifq_clr_oactive(&ifp->if_snd);
963 ifp->if_timer = 0;
964
965 /* Avoid lock order issues with barriers. */
966 NET_UNLOCK();
967
968 timeout_del_barrier(&sc->sc_tick);
969
970 /* Disable data transfer. */
971 sc->sc_netctl &= ~(GEM_NETCTL_TXEN | GEM_NETCTL_RXEN);
972 HWRITE4(sc, GEM_NETCTL, sc->sc_netctl);
973
974 /* Disable all interrupts. */
975 HWRITE4(sc, GEM_IDR, ~0U);
976
977 /* Wait for transmitter to become idle. */
978 for (timeout = 1000; timeout > 0; timeout--) {
979 if ((HREAD4(sc, GEM_TXSR) & GEM_TXSR_TXGO) == 0)
980 break;
981 delay(10);
982 }
983 if (timeout == 0)
984 printf("%s: transmitter not idle\n", sc->sc_dev.dv_xname);
985
986 mii_down(&sc->sc_mii);
987
988 /* Wait for activity to cease. */
989 intr_barrier(sc->sc_ih);
990 ifq_barrier(&ifp->if_snd);
991 taskq_del_barrier(systq, &sc->sc_statchg_task);
992
993 /* Disable the packet clock as it is not needed any longer. */
994 clock_disable(sc->sc_node, GEM_CLK_TX);
995
996 cad_reset(sc);
997
998 /*
999 * Tear down the Tx descriptor ring.
1000 */
1001
1002 for (i = 0; i < CAD_NTXDESC; i++) {
1003 txb = &sc->sc_txbuf[i];
1004 if (txb->bf_m != NULL) {
1005 bus_dmamap_sync(sc->sc_dmat, txb->bf_map, 0,
1006 txb->bf_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1007 bus_dmamap_unload(sc->sc_dmat, txb->bf_map);
1008 m_freem(txb->bf_m);
1009 }
1010 bus_dmamap_destroy(sc->sc_dmat, txb->bf_map);
1011 }
1012 free(sc->sc_txbuf, M_DEVBUF, sizeof(*sc->sc_txbuf) * CAD_NTXDESC);
1013 sc->sc_txbuf = NULL;
1014
1015 cad_dmamem_free(sc, sc->sc_txring);
1016 sc->sc_txring = NULL;
1017 sc->sc_txdesc = NULL;
1018
1019 /*
1020 * Tear down the Rx descriptor ring.
1021 */
1022
1023 for (i = 0; i < CAD_NRXDESC; i++) {
1024 rxb = &sc->sc_rxbuf[i];
1025 if (rxb->bf_m != NULL) {
1026 bus_dmamap_sync(sc->sc_dmat, rxb->bf_map, 0,
1027 rxb->bf_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1028 bus_dmamap_unload(sc->sc_dmat, rxb->bf_map);
1029 m_freem(rxb->bf_m);
1030 }
1031 bus_dmamap_destroy(sc->sc_dmat, rxb->bf_map);
1032 }
	free(sc->sc_rxbuf, M_DEVBUF, sizeof(*sc->sc_rxbuf) * CAD_NRXDESC);
1034 sc->sc_rxbuf = NULL;
1035
1036 cad_dmamem_free(sc, sc->sc_rxring);
1037 sc->sc_rxring = NULL;
1038 sc->sc_rxdesc = NULL;
1039
1040 NET_LOCK();
1041 }
1042
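/*
 * Compute the 6-bit hash filter index of a MAC address by XORing the
 * address together in 6-bit groups. The result selects a bit in the
 * GEM_HASHL/GEM_HASHH register pair.
 */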
1043 uint8_t
cad_hash_mac(const uint8_t *eaddr)
1045 {
1046 uint64_t val = 0;
1047 int i;
1048 uint8_t hash = 0;
1049
1050 for (i = ETHER_ADDR_LEN - 1; i >= 0; i--)
1051 val = (val << 8) | eaddr[i];
1052
1053 for (i = 0; i < 8; i++) {
1054 hash ^= val;
1055 val >>= 6;
1056 }
1057
1058 return hash & 0x3f;
1059 }
1060
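/*
 * Program the Rx frame filter: promiscuous mode, all-multicast, or the
 * multicast hash filter, depending on the interface state.
 */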
1061 void
cad_iff(struct cad_softc *sc)
1063 {
1064 struct arpcom *ac = &sc->sc_ac;
1065 struct ifnet *ifp = &sc->sc_ac.ac_if;
1066 struct ether_multi *enm;
1067 struct ether_multistep step;
1068 uint64_t hash;
1069 uint32_t netcfg;
1070
1071 rw_assert_wrlock(&sc->sc_cfg_lock);
1072
1073 netcfg = HREAD4(sc, GEM_NETCFG);
1074 netcfg &= ~GEM_NETCFG_UCASTHASHEN;
1075
1076 ifp->if_flags &= ~IFF_ALLMULTI;
1077
1078 if (ifp->if_flags & IFF_PROMISC) {
1079 netcfg |= GEM_NETCFG_COPYALL;
1080 netcfg &= ~GEM_NETCFG_MCASTHASHEN;
1081 } else {
1082 netcfg &= ~GEM_NETCFG_COPYALL;
1083 netcfg |= GEM_NETCFG_MCASTHASHEN;
1084
1085 if (ac->ac_multirangecnt > 0)
1086 ifp->if_flags |= IFF_ALLMULTI;
1087
1088 if (ifp->if_flags & IFF_ALLMULTI) {
1089 hash = ~0ULL;
1090 } else {
1091 hash = 0;
1092 ETHER_FIRST_MULTI(step, ac, enm);
1093 while (enm != NULL) {
1094 hash |= 1ULL << cad_hash_mac(enm->enm_addrlo);
1095 ETHER_NEXT_MULTI(step, enm);
1096 }
1097 }
1098
1099 HWRITE4(sc, GEM_HASHL, hash);
1100 HWRITE4(sc, GEM_HASHH, hash >> 32);
1101 }
1102
1103 HWRITE4(sc, GEM_NETCFG, netcfg);
1104 }
1105
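/*
 * Transmit start routine. Dequeue packets and map them onto the Tx
 * descriptor ring while enough free descriptors remain, then kick the
 * transmitter.
 */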
1106 void
cad_start(struct ifqueue *ifq)
1108 {
1109 struct ifnet *ifp = ifq->ifq_if;
1110 struct cad_softc *sc = ifp->if_softc;
1111 struct mbuf *m;
1112 unsigned int free, head, used;
1113
1114 free = sc->sc_tx_cons;
1115 head = sc->sc_tx_prod;
1116 if (free <= head)
1117 free += CAD_NTXDESC;
1118 free -= head;
1119
1120 for (;;) {
1121 if (free <= CAD_NTXSEGS) {
1122 ifq_set_oactive(ifq);
1123 break;
1124 }
1125
1126 m = ifq_dequeue(ifq);
1127 if (m == NULL)
1128 break;
1129
1130 used = cad_encap(sc, m);
1131 if (used == 0) {
1132 m_freem(m);
1133 continue;
1134 }
1135
1136 #if NBPFILTER > 0
1137 if (ifp->if_bpf != NULL)
1138 bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_OUT);
1139 #endif
1140
1141 ifp->if_timer = 5;
1142
1143 KASSERT(free >= used);
1144 free -= used;
1145 }
1146
1147 HWRITE4(sc, GEM_NETCTL, sc->sc_netctl | GEM_NETCTL_STARTTX);
1148 }
1149
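/*
 * Transmit watchdog. If transmissions are still outstanding when the
 * timer expires, prod the transmitter again as a recovery measure.
 */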
1150 void
cad_watchdog(struct ifnet *ifp)
1152 {
1153 struct cad_softc *sc = ifp->if_softc;
1154
1155 ifp->if_timer = 0;
1156
1157 if ((ifp->if_flags & IFF_RUNNING) == 0)
1158 return;
1159
1160 if (sc->sc_tx_cons == sc->sc_tx_prod)
1161 return;
1162
1163 /* XXX */
1164 HWRITE4(sc, GEM_NETCTL, sc->sc_netctl | GEM_NETCTL_STARTTX);
1165 }
1166
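/*
 * Map an mbuf chain onto the Tx descriptor ring. Returns the number of
 * descriptors used, or 0 if the mbuf could not be loaded, in which case
 * the caller drops the packet.
 */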
1167 unsigned int
cad_encap(struct cad_softc *sc, struct mbuf *m)
1169 {
1170 bus_dmamap_t map;
1171 struct cad_buf *txb;
1172 struct cad_desc32 *desc32 = (struct cad_desc32 *)sc->sc_txdesc;
1173 struct cad_desc64 *desc64 = (struct cad_desc64 *)sc->sc_txdesc;
1174 unsigned int head, idx, nsegs;
1175 uint32_t status;
1176 int i;
1177
1178 head = sc->sc_tx_prod;
1179
1180 txb = &sc->sc_txbuf[head];
1181 map = txb->bf_map;
1182
1183 switch (bus_dmamap_load_mbuf(sc->sc_dmat, map, m, BUS_DMA_NOWAIT)) {
1184 case 0:
1185 break;
1186 case EFBIG:
1187 if (m_defrag(m, M_DONTWAIT) != 0)
1188 return 0;
1189 if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
1190 BUS_DMA_NOWAIT) != 0)
1191 return 0;
1192 break;
1193 default:
1194 return 0;
1195 }
1196
1197 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
1198 BUS_DMASYNC_PREWRITE);
1199
1200 nsegs = map->dm_nsegs;
1201 KASSERT(nsegs > 0);
1202
1203 txb->bf_m = m;
1204
1205 /*
1206 * Fill descriptors in reverse order so that all the descriptors
1207 * are ready when the first descriptor's GEM_TXD_USED bit is cleared.
1208 */
1209 for (i = nsegs - 1; i >= 0; i--) {
1210 idx = (head + i) % CAD_NTXDESC;
1211
1212 status = map->dm_segs[i].ds_len & GEM_TXD_LEN_MASK;
1213 if (i == nsegs - 1)
1214 status |= GEM_TXD_LAST;
1215 if (idx == CAD_NTXDESC - 1)
1216 status |= GEM_TXD_WRAP;
1217
1218 if (sc->sc_dma64) {
1219 uint64_t addr = map->dm_segs[i].ds_addr;
1220
1221 desc64[idx].d_addrlo = addr;
1222 desc64[idx].d_addrhi = addr >> 32;
1223 } else {
1224 desc32[idx].d_addr = map->dm_segs[i].ds_addr;
1225 }
1226
1227 /* Make d_addr visible before GEM_TXD_USED is cleared
1228 * in d_status. */
1229 bus_dmamap_sync(sc->sc_dmat, sc->sc_txring->cdm_map,
1230 idx * sc->sc_descsize, sc->sc_descsize,
1231 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1232
1233 if (sc->sc_dma64)
1234 desc64[idx].d_status = status;
1235 else
1236 desc32[idx].d_status = status;
1237
1238 bus_dmamap_sync(sc->sc_dmat, sc->sc_txring->cdm_map,
1239 idx * sc->sc_descsize, sc->sc_descsize,
1240 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1241 }
1242
1243 sc->sc_tx_prod = (head + nsegs) % CAD_NTXDESC;
1244
1245 return nsegs;
1246 }
1247
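/*
 * Interrupt handler. Acknowledges the interrupt status and dispatches
 * Rx and Tx completions as well as error conditions.
 */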
1248 int
cad_intr(void *arg)
1250 {
1251 struct cad_softc *sc = arg;
1252 struct ifnet *ifp = &sc->sc_ac.ac_if;
1253 uint32_t isr;
1254
1255 isr = HREAD4(sc, GEM_ISR);
1256 HWRITE4(sc, GEM_ISR, isr);
1257
1258 if (isr & GEM_IXR_RXDONE)
1259 cad_rxeof(sc);
1260 if (isr & GEM_IXR_TXDONE)
1261 cad_txeof(sc);
1262
1263 if (isr & GEM_IXR_RXOVR)
1264 ifp->if_ierrors++;
1265
1266 if (sc->sc_rxhang_erratum && (isr & GEM_IXR_RXUSED)) {
1267 /*
1268 * Try to flush a packet from the Rx SRAM to avoid triggering
1269 * the Rx hang.
1270 */
1271 HWRITE4(sc, GEM_NETCTL, sc->sc_netctl | GEM_NETCTL_DPRAM);
1272 cad_rxfill(sc);
1273 }
1274
1275 /* If there has been a DMA error, stop the interface to limit damage. */
1276 if (isr & GEM_IXR_HRESP) {
1277 sc->sc_netctl &= ~(GEM_NETCTL_TXEN | GEM_NETCTL_RXEN);
1278 HWRITE4(sc, GEM_NETCTL, sc->sc_netctl);
1279 HWRITE4(sc, GEM_IDR, ~0U);
1280
1281 printf("%s: hresp error, interface stopped\n",
1282 sc->sc_dev.dv_xname);
1283 }
1284
1285 return 1;
1286 }
1287
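/*
 * Harvest received frames from the Rx ring, pass them to the network
 * stack, and refill the ring.
 */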
1288 void
cad_rxeof(struct cad_softc *sc)
1290 {
1291 struct mbuf_list ml = MBUF_LIST_INITIALIZER();
1292 struct ifnet *ifp = &sc->sc_ac.ac_if;
1293 struct mbuf *m;
1294 struct cad_buf *rxb;
1295 struct cad_desc32 *desc32 = (struct cad_desc32 *)sc->sc_rxdesc;
1296 struct cad_desc64 *desc64 = (struct cad_desc64 *)sc->sc_rxdesc;
1297 size_t len;
1298 unsigned int idx;
1299 uint32_t addr, status;
1300
1301 idx = sc->sc_rx_cons;
1302
1303 while (if_rxr_inuse(&sc->sc_rx_ring) > 0) {
1304 bus_dmamap_sync(sc->sc_dmat, sc->sc_rxring->cdm_map,
1305 idx * sc->sc_descsize, sc->sc_descsize,
1306 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1307
1308 if (sc->sc_dma64)
1309 addr = desc64[idx].d_addrlo;
1310 else
1311 addr = desc32[idx].d_addr;
1312 if ((addr & GEM_RXD_ADDR_USED) == 0)
1313 break;
1314
1315 /* Prevent premature read of d_status. */
1316 bus_dmamap_sync(sc->sc_dmat, sc->sc_rxring->cdm_map,
1317 idx * sc->sc_descsize, sc->sc_descsize,
1318 BUS_DMASYNC_POSTREAD);
1319
1320 if (sc->sc_dma64)
1321 status = desc64[idx].d_status;
1322 else
1323 status = desc32[idx].d_status;
1324 len = status & GEM_RXD_LEN_MASK;
1325
1326 rxb = &sc->sc_rxbuf[idx];
1327
1328 bus_dmamap_sync(sc->sc_dmat, rxb->bf_map, ETHER_ALIGN, len,
1329 BUS_DMASYNC_POSTREAD);
1330 bus_dmamap_unload(sc->sc_dmat, rxb->bf_map);
1331
1332 m = rxb->bf_m;
1333 rxb->bf_m = NULL;
1334 KASSERT(m != NULL);
1335
1336 if_rxr_put(&sc->sc_rx_ring, 1);
1337 idx = (idx + 1) % CAD_NRXDESC;
1338
1339 if ((status & (GEM_RXD_SOF | GEM_RXD_EOF)) !=
1340 (GEM_RXD_SOF | GEM_RXD_EOF)) {
1341 m_freem(m);
1342 ifp->if_ierrors++;
1343 continue;
1344 }
1345
1346 m_adj(m, ETHER_ALIGN);
1347 m->m_len = m->m_pkthdr.len = len;
1348
1349 m->m_pkthdr.csum_flags = 0;
1350 switch (status & GEM_RXD_CSUM_MASK) {
1351 case GEM_RXD_CSUM_IP_OK:
1352 m->m_pkthdr.csum_flags = M_IPV4_CSUM_IN_OK;
1353 break;
1354 case GEM_RXD_CSUM_TCP_OK:
1355 case GEM_RXD_CSUM_UDP_OK:
1356 m->m_pkthdr.csum_flags = M_IPV4_CSUM_IN_OK |
1357 M_TCP_CSUM_IN_OK | M_UDP_CSUM_IN_OK;
1358 break;
1359 }
1360
1361 ml_enqueue(&ml, m);
1362
1363 sc->sc_rxdone = 1;
1364 }
1365
1366 sc->sc_rx_cons = idx;
1367
1368 cad_rxfill(sc);
1369
1370 if (ifiq_input(&ifp->if_rcv, &ml))
1371 if_rxr_livelocked(&sc->sc_rx_ring);
1372 }
1373
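/*
 * Refill the Rx ring with fresh mbuf clusters and hand the descriptors
 * back to the hardware by clearing GEM_RXD_ADDR_USED.
 */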
1374 void
cad_rxfill(struct cad_softc *sc)
1376 {
1377 struct cad_buf *rxb;
1378 struct cad_desc32 *desc32 = (struct cad_desc32 *)sc->sc_rxdesc;
1379 struct cad_desc64 *desc64 = (struct cad_desc64 *)sc->sc_rxdesc;
1380 uint64_t addr;
1381 unsigned int idx;
1382 u_int slots;
1383
1384 idx = sc->sc_rx_prod;
1385
1386 for (slots = if_rxr_get(&sc->sc_rx_ring, CAD_NRXDESC);
1387 slots > 0; slots--) {
1388 rxb = &sc->sc_rxbuf[idx];
1389 rxb->bf_m = cad_alloc_mbuf(sc, rxb->bf_map);
1390 if (rxb->bf_m == NULL)
1391 break;
1392
1393 addr = rxb->bf_map->dm_segs[0].ds_addr;
1394 KASSERT((addr & (GEM_RXD_ADDR_WRAP | GEM_RXD_ADDR_USED)) == 0);
1395 if (idx == CAD_NRXDESC - 1)
1396 addr |= GEM_RXD_ADDR_WRAP;
1397
1398 if (sc->sc_dma64) {
1399 desc64[idx].d_addrhi = addr >> 32;
1400 desc64[idx].d_status = 0;
1401 } else {
1402 desc32[idx].d_status = 0;
1403 }
1404
1405 /* Make d_addrhi and d_status visible before clearing
1406 * GEM_RXD_ADDR_USED in d_addr or d_addrlo. */
1407 bus_dmamap_sync(sc->sc_dmat, sc->sc_rxring->cdm_map,
1408 idx * sc->sc_descsize, sc->sc_descsize,
1409 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1410
1411 if (sc->sc_dma64)
1412 desc64[idx].d_addrlo = addr;
1413 else
1414 desc32[idx].d_addr = addr;
1415
1416 bus_dmamap_sync(sc->sc_dmat, sc->sc_rxring->cdm_map,
1417 idx * sc->sc_descsize, sc->sc_descsize,
1418 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1419
1420 idx = (idx + 1) % CAD_NRXDESC;
1421 }
1422 if_rxr_put(&sc->sc_rx_ring, slots);
1423
1424 sc->sc_rx_prod = idx;
1425 }
1426
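/*
 * Reclaim Tx descriptors that the hardware has finished with, free the
 * associated mbufs, and restart the send queue if it had stalled.
 */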
1427 void
cad_txeof(struct cad_softc *sc)
1429 {
1430 struct ifnet *ifp = &sc->sc_ac.ac_if;
1431 struct cad_buf *txb;
1432 struct cad_desc32 *desc32 = (struct cad_desc32 *)sc->sc_txdesc;
1433 struct cad_desc64 *desc64 = (struct cad_desc64 *)sc->sc_txdesc;
1434 unsigned int free = 0;
1435 unsigned int idx, nsegs;
1436 uint32_t status;
1437
1438 idx = sc->sc_tx_cons;
1439
1440 while (idx != sc->sc_tx_prod) {
1441 bus_dmamap_sync(sc->sc_dmat, sc->sc_txring->cdm_map,
1442 idx * sc->sc_descsize, sc->sc_descsize,
1443 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1444
1445 if (sc->sc_dma64)
1446 status = desc64[idx].d_status;
1447 else
1448 status = desc32[idx].d_status;
1449 if ((status & GEM_TXD_USED) == 0)
1450 break;
1451
1452 if (status & (GEM_TXD_RLIMIT | GEM_TXD_CORRUPT |
1453 GEM_TXD_LCOLL | GEM_TXD_CSUMERR_MASK))
1454 ifp->if_oerrors++;
1455
1456 txb = &sc->sc_txbuf[idx];
1457 nsegs = txb->bf_map->dm_nsegs;
1458 KASSERT(nsegs > 0);
1459
1460 bus_dmamap_sync(sc->sc_dmat, txb->bf_map, 0,
1461 txb->bf_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1462 bus_dmamap_unload(sc->sc_dmat, txb->bf_map);
1463
1464 m_freem(txb->bf_m);
1465 txb->bf_m = NULL;
1466
1467 for (;;) {
1468 idx = (idx + 1) % CAD_NTXDESC;
1469
1470 nsegs--;
1471 if (nsegs == 0)
1472 break;
1473
1474 /*
1475 * The controller marks only the initial segment used.
1476 * Mark the remaining segments used manually, so that
1477 * the controller will not accidentally use them later.
1478 *
1479 * This could be done lazily on the Tx ring producer
1480 * side by ensuring that the subsequent descriptor
1481 * after the actual segments is marked used.
1482 * However, this would make the ring trickier to debug.
1483 */
1484
1485 bus_dmamap_sync(sc->sc_dmat, sc->sc_txring->cdm_map,
1486 idx * sc->sc_descsize, sc->sc_descsize,
1487 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1488
1489 if (sc->sc_dma64)
1490 desc64[idx].d_status |= GEM_TXD_USED;
1491 else
1492 desc32[idx].d_status |= GEM_TXD_USED;
1493
1494 bus_dmamap_sync(sc->sc_dmat, sc->sc_txring->cdm_map,
1495 idx * sc->sc_descsize, sc->sc_descsize,
1496 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1497 }
1498
1499 free++;
1500 }
1501
1502 if (free == 0)
1503 return;
1504
1505 sc->sc_tx_cons = idx;
1506
1507 if (ifq_is_oactive(&ifp->if_snd))
1508 ifq_restart(&ifp->if_snd);
1509 }
1510
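/*
 * Once-a-second housekeeping: drive the MII state machine and, on
 * affected hardware, work around the Rx hang erratum by restarting the
 * receiver when no frames have arrived since the previous tick.
 */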
1511 void
cad_tick(void *arg)
1513 {
1514 struct cad_softc *sc = arg;
1515 struct ifnet *ifp = &sc->sc_ac.ac_if;
1516 int s;
1517
1518 if ((ifp->if_flags & IFF_RUNNING) == 0)
1519 return;
1520
1521 s = splnet();
1522
1523 mii_tick(&sc->sc_mii);
1524
1525 /*
1526 * If there has been no Rx for a moment, Rx DMA might be stuck.
1527 * Try to recover by restarting the receiver.
1528 */
1529 if (sc->sc_rxhang_erratum && !sc->sc_rxdone) {
1530 HWRITE4(sc, GEM_NETCTL, sc->sc_netctl & ~GEM_NETCTL_RXEN);
1531 (void)HREAD4(sc, GEM_NETCTL);
1532 HWRITE4(sc, GEM_NETCTL, sc->sc_netctl);
1533 }
1534 sc->sc_rxdone = 0;
1535
1536 splx(s);
1537
1538 timeout_add_sec(&sc->sc_tick, 1);
1539 }
1540
1541 int
cad_media_change(struct ifnet *ifp)
1543 {
1544 struct cad_softc *sc = ifp->if_softc;
1545
1546 if (!LIST_EMPTY(&sc->sc_mii.mii_phys))
1547 mii_mediachg(&sc->sc_mii);
1548
1549 return 0;
1550 }
1551
1552 void
cad_media_status(struct ifnet *ifp, struct ifmediareq *imr)
1554 {
1555 struct cad_softc *sc = ifp->if_softc;
1556
1557 if (!LIST_EMPTY(&sc->sc_mii.mii_phys)) {
1558 mii_pollstat(&sc->sc_mii);
1559 imr->ifm_active = sc->sc_mii.mii_media_active;
1560 imr->ifm_status = sc->sc_mii.mii_media_status;
1561 }
1562 }
1563
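/*
 * Wait for the PHY management interface to become idle.
 * Returns 0 on success, ETIMEDOUT otherwise.
 */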
1564 int
cad_mii_wait(struct cad_softc *sc)
1566 {
1567 int timeout;
1568
1569 for (timeout = 10000; timeout > 0; timeout--) {
1570 if (HREAD4(sc, GEM_NETSR) & GEM_NETSR_PHY_MGMT_IDLE)
1571 break;
1572 delay(10);
1573 }
1574 if (timeout == 0)
1575 return ETIMEDOUT;
1576 return 0;
1577 }
1578
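/*
 * Perform a Clause 22 MDIO read or write operation and wait for it
 * to complete.
 */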
1579 void
cad_mii_oper(struct cad_softc *sc, int phy_no, int reg, uint32_t oper)
1581 {
1582 oper |= (phy_no << GEM_PHYMNTNC_ADDR_SHIFT) & GEM_PHYMNTNC_ADDR_MASK;
1583 oper |= (reg << GEM_PHYMNTNC_REG_SHIFT) & GEM_PHYMNTNC_REG_MASK;
1584 oper |= GEM_PHYMNTNC_CLAUSE_22 | GEM_PHYMNTNC_MUST_10;
1585
1586 if (cad_mii_wait(sc) != 0) {
1587 printf("%s: MII bus idle timeout\n", sc->sc_dev.dv_xname);
1588 return;
1589 }
1590
1591 HWRITE4(sc, GEM_PHYMNTNC, oper);
1592
1593 if (cad_mii_wait(sc) != 0) {
1594 printf("%s: MII bus operation timeout\n", sc->sc_dev.dv_xname);
1595 return;
1596 }
1597 }
1598
1599 int
cad_mii_readreg(struct device *self, int phy_no, int reg)
1601 {
1602 struct cad_softc *sc = (struct cad_softc *)self;
1603 int val;
1604
1605 cad_mii_oper(sc, phy_no, reg, GEM_PHYMNTNC_OP_READ);
1606
1607 val = HREAD4(sc, GEM_PHYMNTNC) & GEM_PHYMNTNC_DATA_MASK;
1608
1609 /* The MAC does not handle 1000baseT in half duplex mode. */
1610 if (reg == MII_EXTSR)
1611 val &= ~EXTSR_1000THDX;
1612
1613 return val;
1614 }
1615
1616 void
cad_mii_writereg(struct device *self, int phy_no, int reg, int val)
1618 {
1619 struct cad_softc *sc = (struct cad_softc *)self;
1620
1621 cad_mii_oper(sc, phy_no, reg, GEM_PHYMNTNC_OP_WRITE |
1622 (val & GEM_PHYMNTNC_DATA_MASK));
1623 }
1624
1625 void
cad_mii_statchg(struct device *self)
1627 {
1628 struct cad_softc *sc = (struct cad_softc *)self;
1629 uint32_t netcfg;
1630
1631 netcfg = HREAD4(sc, GEM_NETCFG);
1632 if (sc->sc_mii.mii_media_active & IFM_FDX)
1633 netcfg |= GEM_NETCFG_FDEN;
1634 else
1635 netcfg &= ~GEM_NETCFG_FDEN;
1636
1637 netcfg &= ~(GEM_NETCFG_100 | GEM_NETCFG_1000);
1638 switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
1639 default:
1640 sc->sc_tx_freq = 2500000;
1641 break;
1642 case IFM_100_TX:
1643 netcfg |= GEM_NETCFG_100;
1644 sc->sc_tx_freq = 25000000;
1645 break;
1646 case IFM_1000_T:
1647 netcfg |= GEM_NETCFG_100 | GEM_NETCFG_1000;
1648 sc->sc_tx_freq = 125000000;
1649 break;
1650 }
1651
1652 HWRITE4(sc, GEM_NETCFG, netcfg);
1653
1654 /* Defer clock setting because it allocates memory with M_WAITOK. */
1655 task_add(systq, &sc->sc_statchg_task);
1656 }
1657
1658 void
cad_statchg_task(void *arg)
1660 {
1661 struct cad_softc *sc = arg;
1662
1663 clock_set_frequency(sc->sc_node, GEM_CLK_TX, sc->sc_tx_freq);
1664 }
1665
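/*
 * Allocate a coherent, zeroed chunk of DMA memory for a descriptor
 * ring and map it into kernel virtual memory.
 */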
1666 struct cad_dmamem *
cad_dmamem_alloc(struct cad_softc *sc, bus_size_t size, bus_size_t align)
1668 {
1669 struct cad_dmamem *cdm;
1670 bus_size_t boundary = 0;
1671 int flags = BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW;
1672 int nsegs;
1673
1674 cdm = malloc(sizeof(*cdm), M_DEVBUF, M_WAITOK | M_ZERO);
1675 cdm->cdm_size = size;
1676
1677 if (sc->sc_dma64) {
1678 /*
1679 * The segment contains an actual ring and possibly
1680 * a dummy ring for unused priority queues.
1681 * The segment must not cross a 32-bit boundary so that
1682 * the rings have the same base address bits 63:32.
1683 */
1684 boundary = 1ULL << 32;
1685 flags |= BUS_DMA_64BIT;
1686 }
1687
1688 if (bus_dmamap_create(sc->sc_dmat, size, 1, size, boundary,
1689 flags, &cdm->cdm_map) != 0)
1690 goto cdmfree;
1691 if (bus_dmamem_alloc(sc->sc_dmat, size, align, boundary,
1692 &cdm->cdm_seg, 1, &nsegs, BUS_DMA_WAITOK) != 0)
1693 goto destroy;
1694 if (bus_dmamem_map(sc->sc_dmat, &cdm->cdm_seg, nsegs, size,
1695 &cdm->cdm_kva, BUS_DMA_WAITOK | BUS_DMA_COHERENT) != 0)
1696 goto free;
1697 if (bus_dmamap_load(sc->sc_dmat, cdm->cdm_map, cdm->cdm_kva, size,
1698 NULL, BUS_DMA_WAITOK) != 0)
1699 goto unmap;
1700 memset(cdm->cdm_kva, 0, size);
1701 return cdm;
1702
1703 unmap:
1704 bus_dmamem_unmap(sc->sc_dmat, cdm->cdm_kva, size);
1705 free:
1706 bus_dmamem_free(sc->sc_dmat, &cdm->cdm_seg, 1);
1707 destroy:
1708 bus_dmamap_destroy(sc->sc_dmat, cdm->cdm_map);
1709 cdmfree:
1710 free(cdm, M_DEVBUF, sizeof(*cdm));
1711 return NULL;
1712 }
1713
1714 void
cad_dmamem_free(struct cad_softc *sc, struct cad_dmamem *cdm)
1716 {
1717 bus_dmamem_unmap(sc->sc_dmat, cdm->cdm_kva, cdm->cdm_size);
1718 bus_dmamem_free(sc->sc_dmat, &cdm->cdm_seg, 1);
1719 bus_dmamap_destroy(sc->sc_dmat, cdm->cdm_map);
1720 free(cdm, M_DEVBUF, sizeof(*cdm));
1721 }
1722
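/*
 * Allocate an mbuf cluster for the Rx ring and load it into the given
 * DMA map.
 */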
1723 struct mbuf *
cad_alloc_mbuf(struct cad_softc *sc, bus_dmamap_t map)
1725 {
1726 struct mbuf *m;
1727
1728 m = MCLGETL(NULL, M_DONTWAIT, MCLBYTES);
1729 if (m == NULL)
1730 return NULL;
1731 m->m_len = m->m_pkthdr.len = MCLBYTES;
1732
1733 if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m, BUS_DMA_NOWAIT) != 0) {
1734 m_freem(m);
1735 return NULL;
1736 }
1737
1738 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
1739 BUS_DMASYNC_PREREAD);
1740
1741 return m;
1742 }
1743
1744 #if NKSTAT > 0
1745 enum cad_stat {
1746 cad_stat_tx_toto,
1747 cad_stat_tx_totp,
1748 cad_stat_tx_bcast,
1749 cad_stat_tx_mcast,
1750 cad_stat_tx_pause,
1751 cad_stat_tx_h64,
1752 cad_stat_tx_h65,
1753 cad_stat_tx_h128,
1754 cad_stat_tx_h256,
1755 cad_stat_tx_h512,
1756 cad_stat_tx_h1024,
1757 cad_stat_tx_underrun,
1758 cad_stat_tx_scoll,
1759 cad_stat_tx_mcoll,
1760 cad_stat_tx_ecoll,
1761 cad_stat_tx_lcoll,
1762 cad_stat_tx_defer,
1763 cad_stat_tx_sense,
1764 cad_stat_rx_toto,
1765 cad_stat_rx_totp,
1766 cad_stat_rx_bcast,
1767 cad_stat_rx_mcast,
1768 cad_stat_rx_pause,
1769 cad_stat_rx_h64,
1770 cad_stat_rx_h65,
1771 cad_stat_rx_h128,
1772 cad_stat_rx_h256,
1773 cad_stat_rx_h512,
1774 cad_stat_rx_h1024,
1775 cad_stat_rx_undersz,
1776 cad_stat_rx_oversz,
1777 cad_stat_rx_jabber,
1778 cad_stat_rx_fcs,
1779 cad_stat_rx_symberr,
1780 cad_stat_rx_align,
1781 cad_stat_rx_reserr,
1782 cad_stat_rx_overrun,
1783 cad_stat_rx_ipcsum,
1784 cad_stat_rx_tcpcsum,
1785 cad_stat_rx_udpcsum,
1786 cad_stat_count
1787 };
1788
1789 struct cad_counter {
1790 const char *c_name;
1791 enum kstat_kv_unit c_unit;
1792 uint32_t c_reg;
1793 };
1794
1795 const struct cad_counter cad_counters[cad_stat_count] = {
1796 [cad_stat_tx_toto] =
1797 { "tx total", KSTAT_KV_U_BYTES, 0 },
1798 [cad_stat_tx_totp] =
1799 { "tx total", KSTAT_KV_U_PACKETS, GEM_TXCNT },
1800 [cad_stat_tx_bcast] =
1801 { "tx bcast", KSTAT_KV_U_PACKETS, GEM_TXBCCNT },
1802 [cad_stat_tx_mcast] =
1803 { "tx mcast", KSTAT_KV_U_PACKETS, GEM_TXMCCNT },
1804 [cad_stat_tx_pause] =
1805 { "tx pause", KSTAT_KV_U_PACKETS, GEM_TXPAUSECNT },
1806 [cad_stat_tx_h64] =
1807 { "tx 64B", KSTAT_KV_U_PACKETS, GEM_TX64CNT },
1808 [cad_stat_tx_h65] =
1809 { "tx 65-127B", KSTAT_KV_U_PACKETS, GEM_TX65CNT },
1810 [cad_stat_tx_h128] =
1811 { "tx 128-255B", KSTAT_KV_U_PACKETS, GEM_TX128CNT },
1812 [cad_stat_tx_h256] =
1813 { "tx 256-511B", KSTAT_KV_U_PACKETS, GEM_TX256CNT },
1814 [cad_stat_tx_h512] =
1815 { "tx 512-1023B", KSTAT_KV_U_PACKETS, GEM_TX512CNT },
1816 [cad_stat_tx_h1024] =
1817 { "tx 1024-1518B", KSTAT_KV_U_PACKETS, GEM_TX1024CNT },
1818 [cad_stat_tx_underrun] =
1819 { "tx underrun", KSTAT_KV_U_PACKETS, GEM_TXURUNCNT },
1820 [cad_stat_tx_scoll] =
1821 { "tx scoll", KSTAT_KV_U_PACKETS, GEM_SNGLCOLLCNT },
1822 [cad_stat_tx_mcoll] =
1823 { "tx mcoll", KSTAT_KV_U_PACKETS, GEM_MULTICOLLCNT },
1824 [cad_stat_tx_ecoll] =
1825 { "tx excess coll", KSTAT_KV_U_PACKETS, GEM_EXCESSCOLLCNT },
1826 [cad_stat_tx_lcoll] =
1827 { "tx late coll", KSTAT_KV_U_PACKETS, GEM_LATECOLLCNT },
1828 [cad_stat_tx_defer] =
1829 { "tx defer", KSTAT_KV_U_PACKETS, GEM_TXDEFERCNT },
1830 [cad_stat_tx_sense] =
1831 { "tx csense", KSTAT_KV_U_PACKETS, GEM_TXCSENSECNT },
1832 [cad_stat_rx_toto] =
1833 { "rx total", KSTAT_KV_U_BYTES, 0 },
1834 [cad_stat_rx_totp] =
1835 { "rx total", KSTAT_KV_U_PACKETS, GEM_RXCNT },
1836 [cad_stat_rx_bcast] =
1837 { "rx bcast", KSTAT_KV_U_PACKETS, GEM_RXBROADCNT },
1838 [cad_stat_rx_mcast] =
1839 { "rx mcast", KSTAT_KV_U_PACKETS, GEM_RXMULTICNT },
1840 [cad_stat_rx_pause] =
1841 { "rx pause", KSTAT_KV_U_PACKETS, GEM_RXPAUSECNT },
1842 [cad_stat_rx_h64] =
1843 { "rx 64B", KSTAT_KV_U_PACKETS, GEM_RX64CNT },
1844 [cad_stat_rx_h65] =
1845 { "rx 65-127B", KSTAT_KV_U_PACKETS, GEM_RX65CNT },
1846 [cad_stat_rx_h128] =
1847 { "rx 128-255B", KSTAT_KV_U_PACKETS, GEM_RX128CNT },
1848 [cad_stat_rx_h256] =
1849 { "rx 256-511B", KSTAT_KV_U_PACKETS, GEM_RX256CNT },
1850 [cad_stat_rx_h512] =
1851 { "rx 512-1023B", KSTAT_KV_U_PACKETS, GEM_RX512CNT },
1852 [cad_stat_rx_h1024] =
1853 { "rx 1024-1518B", KSTAT_KV_U_PACKETS, GEM_RX1024CNT },
1854 [cad_stat_rx_undersz] =
1855 { "rx undersz", KSTAT_KV_U_PACKETS, GEM_RXUNDRCNT },
1856 [cad_stat_rx_oversz] =
1857 { "rx oversz", KSTAT_KV_U_PACKETS, GEM_RXOVRCNT },
1858 [cad_stat_rx_jabber] =
1859 { "rx jabber", KSTAT_KV_U_PACKETS, GEM_RXJABCNT },
1860 [cad_stat_rx_fcs] =
1861 { "rx fcs", KSTAT_KV_U_PACKETS, GEM_RXFCSCNT },
1862 [cad_stat_rx_symberr] =
1863 { "rx symberr", KSTAT_KV_U_PACKETS, GEM_RXSYMBCNT },
1864 [cad_stat_rx_align] =
1865 { "rx align", KSTAT_KV_U_PACKETS, GEM_RXALIGNCNT },
1866 [cad_stat_rx_reserr] =
1867 { "rx reserr", KSTAT_KV_U_PACKETS, GEM_RXRESERRCNT },
1868 [cad_stat_rx_overrun] =
1869 { "rx overrun", KSTAT_KV_U_PACKETS, GEM_RXORCNT },
1870 [cad_stat_rx_ipcsum] =
1871 { "rx ip csum", KSTAT_KV_U_PACKETS, GEM_RXIPCCNT },
1872 [cad_stat_rx_tcpcsum] =
1873 { "rx tcp csum", KSTAT_KV_U_PACKETS, GEM_RXTCPCCNT },
1874 [cad_stat_rx_udpcsum] =
1875 { "rx udp csum", KSTAT_KV_U_PACKETS, GEM_RXUDPCCNT },
1876 };
1877
1878 void
cad_kstat_attach(struct cad_softc *sc)
1880 {
1881 const struct cad_counter *c;
1882 struct kstat *ks;
1883 struct kstat_kv *kvs;
1884 int i;
1885
1886 mtx_init(&sc->sc_kstat_mtx, IPL_SOFTCLOCK);
1887
1888 ks = kstat_create(sc->sc_dev.dv_xname, 0, "cad-stats", 0,
1889 KSTAT_T_KV, 0);
1890 if (ks == NULL)
1891 return;
1892
1893 kvs = mallocarray(nitems(cad_counters), sizeof(*kvs),
1894 M_DEVBUF, M_WAITOK | M_ZERO);
1895 for (i = 0; i < nitems(cad_counters); i++) {
1896 c = &cad_counters[i];
1897 kstat_kv_unit_init(&kvs[i], c->c_name, KSTAT_KV_T_COUNTER64,
1898 c->c_unit);
1899 }
1900
1901 kstat_set_mutex(ks, &sc->sc_kstat_mtx);
1902 ks->ks_softc = sc;
1903 ks->ks_data = kvs;
1904 ks->ks_datalen = nitems(cad_counters) * sizeof(*kvs);
1905 ks->ks_read = cad_kstat_read;
1906
1907 sc->sc_kstat = ks;
1908 kstat_install(ks);
1909 }
1910
1911 int
cad_kstat_read(struct kstat *ks)
1913 {
1914 const struct cad_counter *c;
1915 struct kstat_kv *kvs = ks->ks_data;
1916 struct cad_softc *sc = ks->ks_softc;
1917 uint64_t v64;
1918 int i;
1919
1920 v64 = HREAD4(sc, GEM_OCTTXL);
1921 v64 |= (uint64_t)HREAD4(sc, GEM_OCTTXH) << 32;
1922 kstat_kv_u64(&kvs[cad_stat_tx_toto]) += v64;
1923
1924 v64 = HREAD4(sc, GEM_OCTRXL);
1925 v64 |= (uint64_t)HREAD4(sc, GEM_OCTRXH) << 32;
1926 kstat_kv_u64(&kvs[cad_stat_rx_toto]) += v64;
1927
1928 for (i = 0; i < nitems(cad_counters); i++) {
1929 c = &cad_counters[i];
1930 if (c->c_reg == 0)
1931 continue;
1932 kstat_kv_u64(&kvs[i]) += HREAD4(sc, c->c_reg);
1933 }
1934
1935 getnanouptime(&ks->ks_updated);
1936
1937 return 0;
1938 }
1939
1940 void
cad_kstat_tick(void *arg)
1942 {
1943 struct cad_softc *sc = arg;
1944
1945 if (mtx_enter_try(&sc->sc_kstat_mtx)) {
1946 cad_kstat_read(sc->sc_kstat);
1947 mtx_leave(&sc->sc_kstat_mtx);
1948 }
1949 }
1950 #endif /* NKSTAT > 0 */
1951
1952 #ifdef DDB
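/*
 * Dump controller and descriptor ring state. Intended for manual use
 * from the kernel debugger.
 */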
1953 void
cad_dump(struct cad_softc *sc)
1955 {
1956 struct cad_buf *rxb, *txb;
1957 struct cad_desc32 *desc32;
1958 struct cad_desc64 *desc64;
1959 int i;
1960
1961 printf("isr 0x%x txsr 0x%x rxsr 0x%x\n", HREAD4(sc, GEM_ISR),
1962 HREAD4(sc, GEM_TXSR), HREAD4(sc, GEM_RXSR));
1963
1964 if (sc->sc_dma64) {
1965 printf("tx q 0x%08x%08x\n",
1966 HREAD4(sc, GEM_TXQBASEHI),
1967 HREAD4(sc, GEM_TXQBASE));
1968 } else {
1969 printf("tx q 0x%08x\n",
1970 HREAD4(sc, GEM_TXQBASE));
1971 }
1972 desc32 = (struct cad_desc32 *)sc->sc_txdesc;
1973 desc64 = (struct cad_desc64 *)sc->sc_txdesc;
1974 if (sc->sc_txbuf != NULL) {
1975 for (i = 0; i < CAD_NTXDESC; i++) {
1976 txb = &sc->sc_txbuf[i];
1977 if (sc->sc_dma64) {
1978 printf(" %3i %p 0x%08x%08x 0x%08x %s%s "
1979 "m %p\n", i,
1980 &desc64[i],
1981 desc64[i].d_addrhi, desc64[i].d_addrlo,
1982 desc64[i].d_status,
1983 sc->sc_tx_cons == i ? ">" : " ",
1984 sc->sc_tx_prod == i ? "<" : " ",
1985 txb->bf_m);
1986 } else {
1987 printf(" %3i %p 0x%08x 0x%08x %s%s m %p\n", i,
1988 &desc32[i],
1989 desc32[i].d_addr,
1990 desc32[i].d_status,
1991 sc->sc_tx_cons == i ? ">" : " ",
1992 sc->sc_tx_prod == i ? "<" : " ",
1993 txb->bf_m);
1994 }
1995 }
1996 }
1997 for (i = 1; i < GEM_MAX_PRIQ; i++) {
1998 if (sc->sc_qmask & (1U << i)) {
1999 printf("tx q%d 0x%08x\n", i,
2000 HREAD4(sc, GEM_TXQ1BASE(i - 1)));
2001 }
2002 }
2003
2004 if (sc->sc_dma64) {
2005 printf("rx q 0x%08x%08x\n",
2006 HREAD4(sc, GEM_RXQBASEHI),
2007 HREAD4(sc, GEM_RXQBASE));
2008 } else {
2009 printf("rx q 0x%08x\n",
2010 HREAD4(sc, GEM_RXQBASE));
2011 }
2012 desc32 = (struct cad_desc32 *)sc->sc_rxdesc;
2013 desc64 = (struct cad_desc64 *)sc->sc_rxdesc;
2014 if (sc->sc_rxbuf != NULL) {
2015 for (i = 0; i < CAD_NRXDESC; i++) {
2016 rxb = &sc->sc_rxbuf[i];
2017 if (sc->sc_dma64) {
2018 printf(" %3i %p 0x%08x%08x 0x%08x %s%s "
2019 "m %p\n", i,
2020 &desc64[i],
2021 desc64[i].d_addrhi, desc64[i].d_addrlo,
2022 desc64[i].d_status,
2023 sc->sc_rx_cons == i ? ">" : " ",
2024 sc->sc_rx_prod == i ? "<" : " ",
2025 rxb->bf_m);
2026 } else {
2027 printf(" %3i %p 0x%08x 0x%08x %s%s m %p\n", i,
2028 &desc32[i],
2029 desc32[i].d_addr,
2030 desc32[i].d_status,
2031 sc->sc_rx_cons == i ? ">" : " ",
2032 sc->sc_rx_prod == i ? "<" : " ",
2033 rxb->bf_m);
2034 }
2035 }
2036 }
2037 for (i = 1; i < GEM_MAX_PRIQ; i++) {
2038 if (sc->sc_qmask & (1U << i)) {
2039 printf("rx q%d 0x%08x\n", i,
2040 HREAD4(sc, (i < 8) ? GEM_RXQ1BASE(i - 1)
2041 : GEM_RXQ8BASE(i - 8)));
2042 }
2043 }
2044 }
2045 #endif
2046