1 /* $OpenBSD: if_cnmac.c,v 1.86 2024/05/20 23:13:33 jsg Exp $ */
2
3 /*
4 * Copyright (c) 2007 Internet Initiative Japan, Inc.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 */
28 #include "bpfilter.h"
29
30 /*
31 * XXXSEIL
32 * If no free send buffer is available, free all the sent buffer and bail out.
33 */
34 #define OCTEON_ETH_SEND_QUEUE_CHECK
35
36 #include <sys/param.h>
37 #include <sys/systm.h>
38 #include <sys/pool.h>
39 #include <sys/proc.h>
40 #include <sys/mbuf.h>
41 #include <sys/malloc.h>
42 #include <sys/kernel.h>
43 #include <sys/socket.h>
44 #include <sys/ioctl.h>
45 #include <sys/errno.h>
46 #include <sys/device.h>
47 #include <sys/queue.h>
48 #include <sys/conf.h>
49 #include <sys/stdint.h> /* uintptr_t */
50 #include <sys/syslog.h>
51 #include <sys/endian.h>
52 #include <sys/atomic.h>
53
54 #include <net/if.h>
55 #include <net/if_media.h>
56 #include <netinet/in.h>
57 #include <netinet/if_ether.h>
58
59 #if NBPFILTER > 0
60 #include <net/bpf.h>
61 #endif
62
63 #include <machine/bus.h>
64 #include <machine/intr.h>
65 #include <machine/octeonvar.h>
66 #include <machine/octeon_model.h>
67
68 #include <dev/mii/mii.h>
69 #include <dev/mii/miivar.h>
70
71 #include <octeon/dev/cn30xxciureg.h>
72 #include <octeon/dev/cn30xxnpireg.h>
73 #include <octeon/dev/cn30xxgmxreg.h>
74 #include <octeon/dev/cn30xxipdreg.h>
75 #include <octeon/dev/cn30xxpipreg.h>
76 #include <octeon/dev/cn30xxpowreg.h>
77 #include <octeon/dev/cn30xxfaureg.h>
78 #include <octeon/dev/cn30xxfpareg.h>
79 #include <octeon/dev/cn30xxbootbusreg.h>
80 #include <octeon/dev/cn30xxfpavar.h>
81 #include <octeon/dev/cn30xxgmxvar.h>
82 #include <octeon/dev/cn30xxfauvar.h>
83 #include <octeon/dev/cn30xxpowvar.h>
84 #include <octeon/dev/cn30xxipdvar.h>
85 #include <octeon/dev/cn30xxpipvar.h>
86 #include <octeon/dev/cn30xxpkovar.h>
87 #include <octeon/dev/cn30xxsmivar.h>
88 #include <octeon/dev/iobusvar.h>
89 #include <octeon/dev/if_cnmacvar.h>
90
91 #ifdef OCTEON_ETH_DEBUG
92 #define OCTEON_ETH_KASSERT(x) KASSERT(x)
93 #define OCTEON_ETH_KDASSERT(x) KDASSERT(x)
94 #else
95 #define OCTEON_ETH_KASSERT(x)
96 #define OCTEON_ETH_KDASSERT(x)
97 #endif
98
99 /*
100 * Set the PKO to think command buffers are an odd length. This makes it so we
101 * never have to divide a command across two buffers.
102 */
103 #define OCTEON_POOL_NWORDS_CMD \
104 (((uint32_t)OCTEON_POOL_SIZE_CMD / sizeof(uint64_t)) - 1)
105 #define FPA_COMMAND_BUFFER_POOL_NWORDS OCTEON_POOL_NWORDS_CMD /* XXX */
106
107 CTASSERT(MCLBYTES >= OCTEON_POOL_SIZE_PKT + CACHELINESIZE);
108
109 void cnmac_buf_init(struct cnmac_softc *);
110
111 int cnmac_match(struct device *, void *, void *);
112 void cnmac_attach(struct device *, struct device *, void *);
113 void cnmac_pip_init(struct cnmac_softc *);
114 void cnmac_ipd_init(struct cnmac_softc *);
115 void cnmac_pko_init(struct cnmac_softc *);
116
117 void cnmac_board_mac_addr(uint8_t *);
118
119 int cnmac_mii_readreg(struct device *, int, int);
120 void cnmac_mii_writereg(struct device *, int, int, int);
121 void cnmac_mii_statchg(struct device *);
122
123 int cnmac_mediainit(struct cnmac_softc *);
124 void cnmac_mediastatus(struct ifnet *, struct ifmediareq *);
125 int cnmac_mediachange(struct ifnet *);
126
127 void cnmac_send_queue_flush_prefetch(struct cnmac_softc *);
128 void cnmac_send_queue_flush_fetch(struct cnmac_softc *);
129 void cnmac_send_queue_flush(struct cnmac_softc *);
130 int cnmac_send_queue_is_full(struct cnmac_softc *);
131 void cnmac_send_queue_add(struct cnmac_softc *,
132 struct mbuf *, uint64_t *);
133 void cnmac_send_queue_del(struct cnmac_softc *,
134 struct mbuf **, uint64_t **);
135 int cnmac_buf_free_work(struct cnmac_softc *, uint64_t *);
136
137 int cnmac_ioctl(struct ifnet *, u_long, caddr_t);
138 void cnmac_watchdog(struct ifnet *);
139 int cnmac_init(struct ifnet *);
140 int cnmac_stop(struct ifnet *, int);
141 void cnmac_start(struct ifqueue *);
142
143 int cnmac_send_cmd(struct cnmac_softc *, uint64_t, uint64_t);
144 uint64_t cnmac_send_makecmd_w1(int, paddr_t);
145 uint64_t cnmac_send_makecmd_w0(uint64_t, uint64_t, size_t, int, int);
146 int cnmac_send_makecmd_gbuf(struct cnmac_softc *,
147 struct mbuf *, uint64_t *, int *);
148 int cnmac_send_makecmd(struct cnmac_softc *,
149 struct mbuf *, uint64_t *, uint64_t *, uint64_t *);
150 int cnmac_send_buf(struct cnmac_softc *,
151 struct mbuf *, uint64_t *);
152 int cnmac_send(struct cnmac_softc *, struct mbuf *);
153
154 int cnmac_reset(struct cnmac_softc *);
155 int cnmac_configure(struct cnmac_softc *);
156 int cnmac_configure_common(struct cnmac_softc *);
157
158 void cnmac_free_task(void *);
159 void cnmac_tick_free(void *arg);
160 void cnmac_tick_misc(void *);
161
162 int cnmac_recv_mbuf(struct cnmac_softc *,
163 uint64_t *, struct mbuf **, int *);
164 int cnmac_recv_check(struct cnmac_softc *, uint64_t);
165 int cnmac_recv(struct cnmac_softc *, uint64_t *, struct mbuf_list *);
166 int cnmac_intr(void *);
167
168 int cnmac_mbuf_alloc(int);
169
170 #if NKSTAT > 0
171 void cnmac_kstat_attach(struct cnmac_softc *);
172 int cnmac_kstat_read(struct kstat *);
173 void cnmac_kstat_tick(struct cnmac_softc *);
174 #endif
175
176 /* device parameters */
177 int cnmac_param_pko_cmd_w0_n2 = 1;
178
179 const struct cfattach cnmac_ca = {
180 sizeof(struct cnmac_softc), cnmac_match, cnmac_attach
181 };
182
183 struct cfdriver cnmac_cd = { NULL, "cnmac", DV_IFNET };
184
185 /* ---- buffer management */
186
187 const struct cnmac_pool_param {
188 int poolno;
189 size_t size;
190 size_t nelems;
191 } cnmac_pool_params[] = {
192 #define _ENTRY(x) { OCTEON_POOL_NO_##x, OCTEON_POOL_SIZE_##x, OCTEON_POOL_NELEMS_##x }
193 _ENTRY(WQE),
194 _ENTRY(CMD),
195 _ENTRY(SG)
196 #undef _ENTRY
197 };
198 struct cn30xxfpa_buf *cnmac_pools[8];
199 #define cnmac_fb_wqe cnmac_pools[OCTEON_POOL_NO_WQE]
200 #define cnmac_fb_cmd cnmac_pools[OCTEON_POOL_NO_CMD]
201 #define cnmac_fb_sg cnmac_pools[OCTEON_POOL_NO_SG]
202
203 uint64_t cnmac_mac_addr = 0;
204 uint32_t cnmac_mac_addr_offset = 0;
205
206 int cnmac_mbufs_to_alloc;
207 int cnmac_npowgroups = 0;
208
209 void
cnmac_buf_init(struct cnmac_softc * sc)210 cnmac_buf_init(struct cnmac_softc *sc)
211 {
212 static int once;
213 int i;
214 const struct cnmac_pool_param *pp;
215 struct cn30xxfpa_buf *fb;
216
217 if (once == 1)
218 return;
219 once = 1;
220
221 for (i = 0; i < (int)nitems(cnmac_pool_params); i++) {
222 pp = &cnmac_pool_params[i];
223 cn30xxfpa_buf_init(pp->poolno, pp->size, pp->nelems, &fb);
224 cnmac_pools[pp->poolno] = fb;
225 }
226 }
227
228 /* ---- autoconf */
229
230 int
cnmac_match(struct device * parent,void * match,void * aux)231 cnmac_match(struct device *parent, void *match, void *aux)
232 {
233 struct cfdata *cf = (struct cfdata *)match;
234 struct cn30xxgmx_attach_args *ga = aux;
235
236 if (strcmp(cf->cf_driver->cd_name, ga->ga_name) != 0) {
237 return 0;
238 }
239 return 1;
240 }
241
/*
 * Autoconf attach: wire up one GMX port as an Ethernet interface.
 * Allocates a POW group, seeds the shared packet-buffer pool, sets up
 * the PIP/IPD/PKO sub-blocks, MII media, and the ifnet, and finally
 * establishes the receive interrupt.
 */
void
cnmac_attach(struct device *parent, struct device *self, void *aux)
{
	struct cnmac_softc *sc = (void *)self;
	struct cn30xxgmx_attach_args *ga = aux;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;

	/* Each port gets a dedicated POW group for its receive work. */
	if (cnmac_npowgroups >= OCTEON_POW_GROUP_MAX) {
		printf(": out of POW groups\n");
		return;
	}

	/*
	 * Seed the shared packet pool; cnmac_mbuf_alloc() returns the
	 * number of buffers it could NOT allocate, which stays owed.
	 */
	atomic_add_int(&cnmac_mbufs_to_alloc,
	    cnmac_mbuf_alloc(CNMAC_MBUFS_PER_PORT));

	/* Take over the bus/port parameters handed down by the GMX parent. */
	sc->sc_regt = ga->ga_regt;
	sc->sc_dmat = ga->ga_dmat;
	sc->sc_port = ga->ga_portno;
	sc->sc_port_type = ga->ga_port_type;
	sc->sc_gmx = ga->ga_gmx;
	sc->sc_gmx_port = ga->ga_gmx_port;
	sc->sc_smi = ga->ga_smi;
	sc->sc_phy_addr = ga->ga_phy_addr;
	sc->sc_powgroup = cnmac_npowgroups++;

	sc->sc_init_flag = 0;

	/*
	 * XXX
	 * Setting PIP_IP_OFFSET[OFFSET] to 8 causes panic ... why???
	 */
	sc->sc_ip_offset = 0/* XXX */;

	cnmac_board_mac_addr(sc->sc_arpcom.ac_enaddr);
	printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));

	ml_init(&sc->sc_sendq);
	sc->sc_soft_req_thresh = 15/* XXX */;
	sc->sc_ext_callback_cnt = 0;

	task_set(&sc->sc_free_task, cnmac_free_task, sc);
	timeout_set(&sc->sc_tick_misc_ch, cnmac_tick_misc, sc);
	timeout_set(&sc->sc_tick_free_ch, cnmac_tick_free, sc);

	/* FAU register used to count completed PKO transmit commands. */
	cn30xxfau_op_init(&sc->sc_fau_done,
	    OCTEON_CVMSEG_ETHER_OFFSET(sc->sc_dev.dv_unit, csm_ether_fau_done),
	    OCT_FAU_REG_ADDR_END - (8 * (sc->sc_dev.dv_unit + 1))/* XXX */);
	cn30xxfau_op_set_8(&sc->sc_fau_done, 0);

	/* Bring up the input (PIP/IPD) and output (PKO) sub-blocks. */
	cnmac_pip_init(sc);
	cnmac_ipd_init(sc);
	cnmac_pko_init(sc);

	cnmac_configure_common(sc);

	sc->sc_gmx_port->sc_ipd = sc->sc_ipd;
	sc->sc_gmx_port->sc_port_mii = &sc->sc_mii;
	sc->sc_gmx_port->sc_port_ac = &sc->sc_arpcom;

	/* XXX */
	sc->sc_pow = &cn30xxpow_softc;

	cnmac_mediainit(sc);

	strncpy(ifp->if_xname, sc->sc_dev.dv_xname, sizeof(ifp->if_xname));
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_xflags = IFXF_MPSAFE;
	ifp->if_ioctl = cnmac_ioctl;
	ifp->if_qstart = cnmac_start;
	ifp->if_watchdog = cnmac_watchdog;
	ifp->if_hardmtu = CNMAC_MAX_MTU;
	ifq_init_maxlen(&ifp->if_snd, max(GATHER_QUEUE_SIZE, IFQ_MAXLEN));

	ifp->if_capabilities = IFCAP_VLAN_MTU | IFCAP_CSUM_TCPv4 |
	    IFCAP_CSUM_UDPv4 | IFCAP_CSUM_TCPv6 | IFCAP_CSUM_UDPv6;

	cn30xxgmx_set_filter(sc->sc_gmx_port);

	if_attach(ifp);
	ether_ifattach(ifp);

	cnmac_buf_init(sc);

#if NKSTAT > 0
	cnmac_kstat_attach(sc);
#endif

	/* Receive interrupt for this port's POW group. */
	sc->sc_ih = octeon_intr_establish(POW_WORKQ_IRQ(sc->sc_powgroup),
	    IPL_NET | IPL_MPSAFE, cnmac_intr, sc, sc->sc_dev.dv_xname);
	if (sc->sc_ih == NULL)
		panic("%s: could not set up interrupt", sc->sc_dev.dv_xname);
}
335
336 /* ---- submodules */
337
338 void
cnmac_pip_init(struct cnmac_softc * sc)339 cnmac_pip_init(struct cnmac_softc *sc)
340 {
341 struct cn30xxpip_attach_args pip_aa;
342
343 pip_aa.aa_port = sc->sc_port;
344 pip_aa.aa_regt = sc->sc_regt;
345 pip_aa.aa_tag_type = POW_TAG_TYPE_ORDERED/* XXX */;
346 pip_aa.aa_receive_group = sc->sc_powgroup;
347 pip_aa.aa_ip_offset = sc->sc_ip_offset;
348 cn30xxpip_init(&pip_aa, &sc->sc_pip);
349 cn30xxpip_port_config(sc->sc_pip);
350 }
351
352 void
cnmac_ipd_init(struct cnmac_softc * sc)353 cnmac_ipd_init(struct cnmac_softc *sc)
354 {
355 struct cn30xxipd_attach_args ipd_aa;
356
357 ipd_aa.aa_port = sc->sc_port;
358 ipd_aa.aa_regt = sc->sc_regt;
359 ipd_aa.aa_first_mbuff_skip = 0/* XXX */;
360 ipd_aa.aa_not_first_mbuff_skip = 0/* XXX */;
361 cn30xxipd_init(&ipd_aa, &sc->sc_ipd);
362 }
363
364 void
cnmac_pko_init(struct cnmac_softc * sc)365 cnmac_pko_init(struct cnmac_softc *sc)
366 {
367 struct cn30xxpko_attach_args pko_aa;
368
369 pko_aa.aa_port = sc->sc_port;
370 pko_aa.aa_regt = sc->sc_regt;
371 pko_aa.aa_cmdptr = &sc->sc_cmdptr;
372 pko_aa.aa_cmd_buf_pool = OCTEON_POOL_NO_CMD;
373 pko_aa.aa_cmd_buf_size = OCTEON_POOL_NWORDS_CMD;
374 cn30xxpko_init(&pko_aa, &sc->sc_pko);
375 }
376
377 /* ---- XXX */
378
379 void
cnmac_board_mac_addr(uint8_t * enaddr)380 cnmac_board_mac_addr(uint8_t *enaddr)
381 {
382 int id;
383
384 /* Initialize MAC addresses from the global address base. */
385 if (cnmac_mac_addr == 0) {
386 memcpy((uint8_t *)&cnmac_mac_addr + 2,
387 octeon_boot_info->mac_addr_base, 6);
388
389 /*
390 * Should be allowed to fail hard if couldn't read the
391 * mac_addr_base address...
392 */
393 if (cnmac_mac_addr == 0)
394 return;
395
396 /*
397 * Calculate the offset from the mac_addr_base that will be used
398 * for the next sc->sc_port.
399 */
400 id = octeon_get_chipid();
401
402 switch (octeon_model_family(id)) {
403 case OCTEON_MODEL_FAMILY_CN56XX:
404 cnmac_mac_addr_offset = 1;
405 break;
406 /*
407 case OCTEON_MODEL_FAMILY_CN52XX:
408 case OCTEON_MODEL_FAMILY_CN63XX:
409 cnmac_mac_addr_offset = 2;
410 break;
411 */
412 default:
413 cnmac_mac_addr_offset = 0;
414 break;
415 }
416
417 enaddr += cnmac_mac_addr_offset;
418 }
419
420 /* No more MAC addresses to assign. */
421 if (cnmac_mac_addr_offset >= octeon_boot_info->mac_addr_count)
422 return;
423
424 if (enaddr)
425 memcpy(enaddr, (uint8_t *)&cnmac_mac_addr + 2, 6);
426
427 cnmac_mac_addr++;
428 cnmac_mac_addr_offset++;
429 }
430
431 /* ---- media */
432
433 int
cnmac_mii_readreg(struct device * self,int phy_no,int reg)434 cnmac_mii_readreg(struct device *self, int phy_no, int reg)
435 {
436 struct cnmac_softc *sc = (struct cnmac_softc *)self;
437 return cn30xxsmi_read(sc->sc_smi, phy_no, reg);
438 }
439
440 void
cnmac_mii_writereg(struct device * self,int phy_no,int reg,int value)441 cnmac_mii_writereg(struct device *self, int phy_no, int reg, int value)
442 {
443 struct cnmac_softc *sc = (struct cnmac_softc *)self;
444 cn30xxsmi_write(sc->sc_smi, phy_no, reg, value);
445 }
446
/*
 * MII status-change callback: quiesce PKO and the GMX port, reprogram
 * the GMX port for the new link parameters, then re-enable both.
 */
void
cnmac_mii_statchg(struct device *self)
{
	struct cnmac_softc *sc = (struct cnmac_softc *)self;

	cn30xxpko_port_enable(sc->sc_pko, 0);
	cn30xxgmx_port_enable(sc->sc_gmx_port, 0);

	cnmac_reset(sc);

	cn30xxpko_port_enable(sc->sc_pko, 1);
	cn30xxgmx_port_enable(sc->sc_gmx_port, 1);
}
460
/*
 * Set up MII and ifmedia for this port.  If no PHY answers on the bus
 * a fixed "manual" media entry is installed so the interface remains
 * usable.  Always returns 0.
 */
int
cnmac_mediainit(struct cnmac_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct mii_softc *child;

	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = cnmac_mii_readreg;
	sc->sc_mii.mii_writereg = cnmac_mii_writereg;
	sc->sc_mii.mii_statchg = cnmac_mii_statchg;
	ifmedia_init(&sc->sc_mii.mii_media, 0, cnmac_mediachange,
	    cnmac_mediastatus);

	/* Probe only the PHY address supplied by the GMX parent. */
	mii_attach(&sc->sc_dev, &sc->sc_mii,
	    0xffffffff, sc->sc_phy_addr, MII_OFFSET_ANY, MIIF_DOPAUSE);

	child = LIST_FIRST(&sc->sc_mii.mii_phys);
	if (child == NULL) {
		/* No PHY attached. */
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL,
		    0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL);
	} else {
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
	}

	return 0;
}
489
490 void
cnmac_mediastatus(struct ifnet * ifp,struct ifmediareq * ifmr)491 cnmac_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
492 {
493 struct cnmac_softc *sc = ifp->if_softc;
494
495 mii_pollstat(&sc->sc_mii);
496 ifmr->ifm_status = sc->sc_mii.mii_media_status;
497 ifmr->ifm_active = sc->sc_mii.mii_media_active;
498 ifmr->ifm_active = (sc->sc_mii.mii_media_active & ~IFM_ETH_FMASK) |
499 sc->sc_gmx_port->sc_port_flowflags;
500 }
501
502 int
cnmac_mediachange(struct ifnet * ifp)503 cnmac_mediachange(struct ifnet *ifp)
504 {
505 struct cnmac_softc *sc = ifp->if_softc;
506
507 if ((ifp->if_flags & IFF_UP) == 0)
508 return 0;
509
510 return mii_mediachg(&sc->sc_mii);
511 }
512
513 /* ---- send buffer garbage collection */
514
/*
 * Start an asynchronous (IOBDMA) fetch of the transmit-done FAU
 * counter.  Must be paired with cnmac_send_queue_flush_fetch(), which
 * consumes the result.
 */
void
cnmac_send_queue_flush_prefetch(struct cnmac_softc *sc)
{
	OCTEON_ETH_KASSERT(sc->sc_prefetch == 0);
	cn30xxfau_op_inc_fetch_8(&sc->sc_fau_done, 0);
	sc->sc_prefetch = 1;
}
522
/*
 * Complete the IOBDMA fetch started by cnmac_send_queue_flush_prefetch()
 * and latch the (non-positive) transmit-done count into
 * sc_hard_done_cnt for cnmac_send_queue_flush().
 */
void
cnmac_send_queue_flush_fetch(struct cnmac_softc *sc)
{
#ifndef OCTEON_ETH_DEBUG
	/* Tolerate a missing prefetch on non-debug kernels. */
	if (!sc->sc_prefetch)
		return;
#endif
	OCTEON_ETH_KASSERT(sc->sc_prefetch == 1);
	sc->sc_hard_done_cnt = cn30xxfau_op_inc_read_8(&sc->sc_fau_done);
	OCTEON_ETH_KASSERT(sc->sc_hard_done_cnt <= 0);
	sc->sc_prefetch = 0;
}
535
/*
 * Reclaim resources for packets the PKO has finished transmitting:
 * free each mbuf, return its gather buffer to the FPA SG pool, and
 * credit the FAU done counter for the entries consumed.
 */
void
cnmac_send_queue_flush(struct cnmac_softc *sc)
{
	const int64_t sent_count = sc->sc_hard_done_cnt;
	int i;

	OCTEON_ETH_KASSERT(sent_count <= 0);

	/* sent_count <= 0; each unit below zero is one completed packet. */
	for (i = 0; i < 0 - sent_count; i++) {
		struct mbuf *m;
		uint64_t *gbuf;

		cnmac_send_queue_del(sc, &m, &gbuf);

		cn30xxfpa_buf_put_paddr(cnmac_fb_sg, XKPHYS_TO_PHYS(gbuf));

		m_freem(m);
	}

	cn30xxfau_op_add_8(&sc->sc_fau_done, i);
}
557
/*
 * Return 1 (after reaping completed transmissions) when the send queue
 * cannot take another packet, 0 otherwise.  Relies on a preceding
 * flush_prefetch/flush_fetch pair having refreshed sc_hard_done_cnt.
 */
int
cnmac_send_queue_is_full(struct cnmac_softc *sc)
{
#ifdef OCTEON_ETH_SEND_QUEUE_CHECK
	int64_t nofree_cnt;

	/* Entries still owned by hardware = queued minus completed. */
	nofree_cnt = ml_len(&sc->sc_sendq) + sc->sc_hard_done_cnt;

	if (__predict_false(nofree_cnt == GATHER_QUEUE_SIZE - 1)) {
		cnmac_send_queue_flush(sc);
		return 1;
	}

#endif
	return 0;
}
574
/*
 * Record an in-flight transmit mbuf, stashing its gather buffer in
 * the packet-header cookie so cnmac_send_queue_del() can release both.
 * Mbufs with external storage are counted separately.
 */
void
cnmac_send_queue_add(struct cnmac_softc *sc, struct mbuf *m,
    uint64_t *gbuf)
{
	OCTEON_ETH_KASSERT(m->m_flags & M_PKTHDR);

	m->m_pkthdr.ph_cookie = gbuf;
	ml_enqueue(&sc->sc_sendq, m);

	/* Track how many queued mbufs still reference external storage. */
	if (m->m_ext.ext_free_fn != 0)
		sc->sc_ext_callback_cnt++;
}
587
/*
 * Dequeue the oldest in-flight transmit mbuf, returning the mbuf and
 * the gather buffer previously stored in its packet-header cookie.
 * The queue must not be empty (asserted).
 */
void
cnmac_send_queue_del(struct cnmac_softc *sc, struct mbuf **rm,
    uint64_t **rgbuf)
{
	struct mbuf *m;
	m = ml_dequeue(&sc->sc_sendq);
	OCTEON_ETH_KASSERT(m != NULL);

	*rm = m;
	*rgbuf = m->m_pkthdr.ph_cookie;

	/* Keep the external-storage count in step with queue_add(). */
	if (m->m_ext.ext_free_fn != 0) {
		sc->sc_ext_callback_cnt--;
		OCTEON_ETH_KASSERT(sc->sc_ext_callback_cnt >= 0);
	}
}
604
/*
 * Return every packet data buffer referenced by a receive work-queue
 * entry to the FPA packet pool, then free the WQE itself.
 * Always returns 0.
 */
int
cnmac_buf_free_work(struct cnmac_softc *sc, uint64_t *work)
{
	paddr_t addr, pktbuf;
	uint64_t word3;
	unsigned int back, nbufs;

	nbufs = (work[2] & PIP_WQE_WORD2_IP_BUFS) >>
	    PIP_WQE_WORD2_IP_BUFS_SHIFT;
	word3 = work[3];
	while (nbufs-- > 0) {
		addr = word3 & PIP_WQE_WORD3_ADDR;
		back = (word3 & PIP_WQE_WORD3_BACK) >>
		    PIP_WQE_WORD3_BACK_SHIFT;
		/* Buffer start: round down to a cache line, back up "back" lines. */
		pktbuf = (addr & ~(CACHELINESIZE - 1)) - back * CACHELINESIZE;

		cn30xxfpa_store(pktbuf, OCTEON_POOL_NO_PKT,
		    OCTEON_POOL_SIZE_PKT / CACHELINESIZE);

		/* The next buffer's word3 sits just before this buffer's data. */
		if (nbufs > 0)
			memcpy(&word3, (void *)PHYS_TO_XKPHYS(addr -
			    sizeof(word3), CCA_CACHED), sizeof(word3));
	}

	cn30xxfpa_buf_put_paddr(cnmac_fb_wqe, XKPHYS_TO_PHYS(work));

	return 0;
}
633
634 /* ---- ifnet interfaces */
635
/*
 * ifnet ioctl handler.  Media requests sanitize the flow-control
 * flags before reaching ifmedia; address/flag changes (re)initialize
 * or stop the interface; everything else goes to ether_ioctl().
 */
int
cnmac_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct cnmac_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			cnmac_init(ifp);
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				cnmac_init(ifp);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				cnmac_stop(ifp, 0);
		}
		break;

	case SIOCSIFMEDIA:
		/* Flow control requires full-duplex mode. */
		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
		    (ifr->ifr_media & IFM_FDX) == 0) {
			ifr->ifr_media &= ~IFM_ETH_FMASK;
		}
		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
			/* IFM_FLOW implies pause in both directions. */
			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
				ifr->ifr_media |=
				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
			}
			sc->sc_gmx_port->sc_port_flowflags =
			    ifr->ifr_media & IFM_ETH_FMASK;
		}
		/* FALLTHROUGH */
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;

	default:
		error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data);
	}

	/* Multicast/flag updates only need the RX filter reloaded. */
	if (error == ENETRESET) {
		if (ISSET(ifp->if_flags, IFF_RUNNING))
			cn30xxgmx_set_filter(sc->sc_gmx_port);
		error = 0;
	}

	splx(s);
	return (error);
}
696
697 /* ---- send (output) */
698
/*
 * Build PKO command word 0.  fau0/fau1 name FAU registers the PKO
 * updates on completion (fau0 carries the transmit-done counter here;
 * see cnmac_send_makecmd()).  ipoffp1 enables TCP/UDP checksum
 * offload when non-zero.
 */
uint64_t
cnmac_send_makecmd_w0(uint64_t fau0, uint64_t fau1, size_t len, int segs,
    int ipoffp1)
{
	return cn30xxpko_cmd_word0(
	    OCT_FAU_OP_SIZE_64,		/* sz1 */
	    OCT_FAU_OP_SIZE_64,		/* sz0 */
	    1, fau1, 1, fau0,		/* s1, reg1, s0, reg0 */
	    0,				/* le */
	    cnmac_param_pko_cmd_w0_n2,	/* n2 */
	    1, 0,			/* q, r */
	    (segs == 1) ? 0 : 1,	/* g */
	    ipoffp1, 0, 1,		/* ipoffp1, ii, df */
	    segs, (int)len);		/* segs, totalbytes */
}
714
/* Build PKO command word 1 describing one buffer segment. */
uint64_t
cnmac_send_makecmd_w1(int size, paddr_t addr)
{
	return cn30xxpko_cmd_word1(
	    0, 0,			/* i, back */
	    OCTEON_POOL_NO_SG,		/* pool */
	    size, addr);		/* size, addr */
}
723
#define KVTOPHYS(addr)	cnmac_kvtophys((vaddr_t)(addr))

/*
 * Convert a kernel virtual address to a physical address.  Only valid
 * for XKPHYS direct-mapped addresses (asserted).
 */
static inline paddr_t
cnmac_kvtophys(vaddr_t kva)
{
	KASSERT(IS_XKPHYS(kva));
	return XKPHYS_TO_PHYS(kva);
}
732
/*
 * Fill the gather buffer with one PKO word-1 entry per non-empty mbuf
 * segment, returning the segment count via rsegs.  If the chain has
 * more segments than the gather buffer can hold, defragment it into a
 * single segment.  Returns non-zero if defragmentation fails.
 */
int
cnmac_send_makecmd_gbuf(struct cnmac_softc *sc, struct mbuf *m0,
    uint64_t *gbuf, int *rsegs)
{
	struct mbuf *m;
	int segs = 0;

	for (m = m0; m != NULL; m = m->m_next) {
		if (__predict_false(m->m_len == 0))
			continue;

		if (segs >= OCTEON_POOL_SIZE_SG / sizeof(uint64_t))
			goto defrag;
		gbuf[segs] = cnmac_send_makecmd_w1(m->m_len,
		    KVTOPHYS(m->m_data));
		segs++;
	}

	*rsegs = segs;

	return 0;

defrag:
	/* Too many segments: collapse the chain into one buffer. */
	if (m_defrag(m0, M_DONTWAIT) != 0)
		return 1;
	gbuf[0] = cnmac_send_makecmd_w1(m0->m_len, KVTOPHYS(m0->m_data));
	*rsegs = 1;
	return 0;
}
762
/*
 * Build the two PKO command words for an mbuf, filling the gather
 * buffer as a side effect.  Returns non-zero if the segment list
 * could not be built (already logged).
 */
int
cnmac_send_makecmd(struct cnmac_softc *sc, struct mbuf *m,
    uint64_t *gbuf, uint64_t *rpko_cmd_w0, uint64_t *rpko_cmd_w1)
{
	uint64_t pko_cmd_w0, pko_cmd_w1;
	int ipoffp1;
	int segs;
	int result = 0;

	if (cnmac_send_makecmd_gbuf(sc, m, gbuf, &segs)) {
		log(LOG_WARNING, "%s: large number of transmission"
		    " data segments", sc->sc_dev.dv_xname);
		result = 1;
		goto done;
	}

	/* Get the IP packet offset for TCP/UDP checksum offloading. */
	ipoffp1 = (m->m_pkthdr.csum_flags & (M_TCP_CSUM_OUT | M_UDP_CSUM_OUT))
	    ? (ETHER_HDR_LEN + 1) : 0;

	/*
	 * segs == 1	-> link mode (single continuous buffer)
	 *		   WORD1[size] is number of bytes pointed by segment
	 *
	 * segs > 1	-> gather mode (scatter-gather buffer)
	 *		   WORD1[size] is number of segments
	 */
	pko_cmd_w0 = cnmac_send_makecmd_w0(sc->sc_fau_done.fd_regno,
	    0, m->m_pkthdr.len, segs, ipoffp1);
	pko_cmd_w1 = cnmac_send_makecmd_w1(
	    (segs == 1) ? m->m_pkthdr.len : segs,
	    (segs == 1) ?
		KVTOPHYS(m->m_data) :
		XKPHYS_TO_PHYS(gbuf));

	*rpko_cmd_w0 = pko_cmd_w0;
	*rpko_cmd_w1 = pko_cmd_w1;

done:
	return result;
}
804
/*
 * Append the two command words to the current PKO command buffer,
 * chaining in a fresh buffer from the FPA CMD pool when the current
 * one is nearly full, then ring the doorbell for two words.
 * Returns non-zero if a new command buffer could not be allocated.
 */
int
cnmac_send_cmd(struct cnmac_softc *sc, uint64_t pko_cmd_w0,
    uint64_t pko_cmd_w1)
{
	uint64_t *cmdptr;
	int result = 0;

	cmdptr = (uint64_t *)PHYS_TO_XKPHYS(sc->sc_cmdptr.cmdptr, CCA_CACHED);
	cmdptr += sc->sc_cmdptr.cmdptr_idx;

	OCTEON_ETH_KASSERT(cmdptr != NULL);

	*cmdptr++ = pko_cmd_w0;
	*cmdptr++ = pko_cmd_w1;

	OCTEON_ETH_KASSERT(sc->sc_cmdptr.cmdptr_idx + 2 <= FPA_COMMAND_BUFFER_POOL_NWORDS - 1);

	/*
	 * The last word of each command buffer is reserved for the
	 * physical address of the next buffer in the chain.
	 */
	if (sc->sc_cmdptr.cmdptr_idx + 2 == FPA_COMMAND_BUFFER_POOL_NWORDS - 1) {
		paddr_t buf;

		buf = cn30xxfpa_buf_get_paddr(cnmac_fb_cmd);
		if (buf == 0) {
			log(LOG_WARNING,
			    "%s: cannot allocate command buffer from free pool allocator\n",
			    sc->sc_dev.dv_xname);
			result = 1;
			goto done;
		}
		*cmdptr++ = buf;
		sc->sc_cmdptr.cmdptr = (uint64_t)buf;
		sc->sc_cmdptr.cmdptr_idx = 0;
	} else {
		sc->sc_cmdptr.cmdptr_idx += 2;
	}

	cn30xxpko_op_doorbell_write(sc->sc_port, sc->sc_port, 2);

done:
	return result;
}
845
846 int
cnmac_send_buf(struct cnmac_softc * sc,struct mbuf * m,uint64_t * gbuf)847 cnmac_send_buf(struct cnmac_softc *sc, struct mbuf *m, uint64_t *gbuf)
848 {
849 int result = 0, error;
850 uint64_t pko_cmd_w0, pko_cmd_w1;
851
852 error = cnmac_send_makecmd(sc, m, gbuf, &pko_cmd_w0, &pko_cmd_w1);
853 if (error != 0) {
854 /* already logging */
855 result = error;
856 goto done;
857 }
858
859 error = cnmac_send_cmd(sc, pko_cmd_w0, pko_cmd_w1);
860 if (error != 0) {
861 /* already logging */
862 result = error;
863 }
864
865 done:
866 return result;
867 }
868
/*
 * Transmit one mbuf: take a gather buffer from the FPA SG pool, build
 * and submit the PKO command, then record the mbuf in the send queue
 * for later reclamation.  Returns non-zero on failure; the caller
 * still owns the mbuf in that case.
 */
int
cnmac_send(struct cnmac_softc *sc, struct mbuf *m)
{
	paddr_t gaddr = 0;
	uint64_t *gbuf = NULL;
	int result = 0, error;

	gaddr = cn30xxfpa_buf_get_paddr(cnmac_fb_sg);
	if (gaddr == 0) {
		log(LOG_WARNING,
		    "%s: cannot allocate gather buffer from free pool allocator\n",
		    sc->sc_dev.dv_xname);
		result = 1;
		goto done;
	}

	gbuf = (uint64_t *)(uintptr_t)PHYS_TO_XKPHYS(gaddr, CCA_CACHED);

	error = cnmac_send_buf(sc, m, gbuf);
	if (error != 0) {
		/* already logging */
		cn30xxfpa_buf_put_paddr(cnmac_fb_sg, gaddr);
		result = error;
		goto done;
	}

	cnmac_send_queue_add(sc, m, gbuf);

done:
	return result;
}
900
/*
 * ifq start routine: drain the send queue into the PKO.  The FAU
 * transmit-done counter is fetched asynchronously (IOBDMA) one
 * iteration ahead to hide the read latency.
 */
void
cnmac_start(struct ifqueue *ifq)
{
	struct ifnet *ifp = ifq->ifq_if;
	struct cnmac_softc *sc = ifp->if_softc;
	struct mbuf *m;

	/* Without link, transmitting would just wedge the queue. */
	if (__predict_false(!cn30xxgmx_link_status(sc->sc_gmx_port))) {
		ifq_purge(ifq);
		return;
	}

	/*
	 * performance tuning
	 * presend iobdma request
	 */
	cnmac_send_queue_flush_prefetch(sc);

	for (;;) {
		cnmac_send_queue_flush_fetch(sc); /* XXX */

		/*
		 * XXXSEIL
		 * If no free send buffer is available, free all the sent buffer
		 * and bail out.
		 */
		if (cnmac_send_queue_is_full(sc)) {
			ifq_set_oactive(ifq);
			timeout_add(&sc->sc_tick_free_ch, 1);
			return;
		}

		m = ifq_dequeue(ifq);
		if (m == NULL)
			return;

#if NBPFILTER > 0
		if (ifp->if_bpf != NULL)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif

		/* XXX */
		/* Reap completed packets before they pile up. */
		if (ml_len(&sc->sc_sendq) > sc->sc_soft_req_thresh)
			cnmac_send_queue_flush(sc);
		if (cnmac_send(sc, m)) {
			ifp->if_oerrors++;
			m_freem(m);
			log(LOG_WARNING,
			    "%s: failed to transmit packet\n",
			    sc->sc_dev.dv_xname);
		}
		/* XXX */

		/*
		 * send next iobdma request
		 */
		cnmac_send_queue_flush_prefetch(sc);
	}

	/* NOTE(review): unreachable — the loop above only exits via return. */
	cnmac_send_queue_flush_fetch(sc);
}
962
/*
 * Transmit watchdog: the hardware stopped making progress, so stop
 * the port, reconfigure it, and restart the send queue.
 */
void
cnmac_watchdog(struct ifnet *ifp)
{
	struct cnmac_softc *sc = ifp->if_softc;

	printf("%s: device timeout\n", sc->sc_dev.dv_xname);

	cnmac_stop(ifp, 0);

	cnmac_configure(sc);

	SET(ifp->if_flags, IFF_RUNNING);
	ifp->if_timer = 0;

	ifq_restart(&ifp->if_snd);
}
979
/*
 * ifnet init: bring the port up and mark the interface RUNNING.
 * The heavyweight configuration runs only on the first call
 * (sc_init_flag); later calls just re-enable the GMX port.
 * Always returns 0.
 */
int
cnmac_init(struct ifnet *ifp)
{
	struct cnmac_softc *sc = ifp->if_softc;

	/* XXX don't disable commonly used parts!!! XXX */
	if (sc->sc_init_flag == 0) {
		/* Cancel any pending I/O. */
		cnmac_stop(ifp, 0);

		/* Initialize the device */
		cnmac_configure(sc);

		cn30xxpko_enable(sc->sc_pko);
		cn30xxipd_enable(sc->sc_ipd);

		sc->sc_init_flag = 1;
	} else {
		cn30xxgmx_port_enable(sc->sc_gmx_port, 1);
	}
	cnmac_mediachange(ifp);

	cn30xxpip_stats_init(sc->sc_pip);
	cn30xxgmx_stats_init(sc->sc_gmx_port);
	cn30xxgmx_set_filter(sc->sc_gmx_port);

	timeout_add_sec(&sc->sc_tick_misc_ch, 1);
	timeout_add_sec(&sc->sc_tick_free_ch, 1);

	SET(ifp->if_flags, IFF_RUNNING);
	ifq_clr_oactive(&ifp->if_snd);

	return 0;
}
1014
/*
 * ifnet stop: disable the GMX port, stop the periodic timeouts, and
 * wait for the interrupt handler and send queue to drain before
 * clearing oactive.  The "disable" argument is unused.
 * Always returns 0.
 */
int
cnmac_stop(struct ifnet *ifp, int disable)
{
	struct cnmac_softc *sc = ifp->if_softc;

	CLR(ifp->if_flags, IFF_RUNNING);

	timeout_del(&sc->sc_tick_misc_ch);
	timeout_del(&sc->sc_tick_free_ch);

	mii_down(&sc->sc_mii);

	cn30xxgmx_port_enable(sc->sc_gmx_port, 0);

	/* Wait for any in-flight interrupt and transmit work to finish. */
	intr_barrier(sc->sc_ih);
	ifq_barrier(&ifp->if_snd);

	ifq_clr_oactive(&ifp->if_snd);
	ifp->if_timer = 0;

	return 0;
}
1037
1038 /* ---- misc */
1039
1040 #define PKO_INDEX_MASK ((1ULL << 12/* XXX */) - 1)
1041
/*
 * Reprogram the GMX port's speed, flow-control and timing settings
 * (e.g. after a link change).  Always returns 0.
 */
int
cnmac_reset(struct cnmac_softc *sc)
{
	cn30xxgmx_reset_speed(sc->sc_gmx_port);
	cn30xxgmx_reset_flowctl(sc->sc_gmx_port);
	cn30xxgmx_reset_timing(sc->sc_gmx_port);

	return 0;
}
1051
/*
 * (Re)configure the per-port hardware blocks with the GMX port
 * disabled, then re-enable it.  Always returns 0.
 */
int
cnmac_configure(struct cnmac_softc *sc)
{
	cn30xxgmx_port_enable(sc->sc_gmx_port, 0);

	cnmac_reset(sc);

	cn30xxpko_port_config(sc->sc_pko);
	cn30xxpko_port_enable(sc->sc_pko, 1);
	cn30xxpow_config(sc->sc_pow, sc->sc_powgroup);

	cn30xxgmx_port_enable(sc->sc_gmx_port, 1);

	return 0;
}
1067
/*
 * One-time configuration shared by all ports: IPD/PKO global setup
 * and the PIP padding for non-IP packets.  Subsequent calls return
 * immediately.  Always returns 0.
 */
int
cnmac_configure_common(struct cnmac_softc *sc)
{
	static int once;

	uint64_t reg;

	if (once == 1)
		return 0;
	once = 1;

	cn30xxipd_config(sc->sc_ipd);
	cn30xxpko_config(sc->sc_pko);

	/* Set padding for packets that Octeon does not recognize as IP. */
	reg = octeon_xkphys_read_8(PIP_GBL_CFG);
	reg &= ~PIP_GBL_CFG_NIP_SHF_MASK;
	reg |= ETHER_ALIGN << PIP_GBL_CFG_NIP_SHF_SHIFT;
	octeon_xkphys_write_8(PIP_GBL_CFG, reg);

	return 0;
}
1090
/*
 * Feed up to "n" packet buffers to the FPA packet pool.  Each buffer
 * is carved from an mbuf cluster, aligned to a cache line, and the
 * owning mbuf pointer is stored immediately before the buffer so
 * cnmac_recv_mbuf() can recover it.  Returns the number of buffers
 * that could NOT be allocated (0 on full success).
 */
int
cnmac_mbuf_alloc(int n)
{
	struct mbuf *m;
	paddr_t pktbuf;

	while (n > 0) {
		m = MCLGETL(NULL, M_NOWAIT,
		    OCTEON_POOL_SIZE_PKT + CACHELINESIZE);
		if (m == NULL || !ISSET(m->m_flags, M_EXT)) {
			m_freem(m);
			break;
		}

		/* Align to a cache line and remember the owning mbuf. */
		m->m_data = (void *)(((vaddr_t)m->m_data + CACHELINESIZE) &
		    ~(CACHELINESIZE - 1));
		((struct mbuf **)m->m_data)[-1] = m;

		pktbuf = KVTOPHYS(m->m_data);
		m->m_pkthdr.ph_cookie = (void *)pktbuf;
		cn30xxfpa_store(pktbuf, OCTEON_POOL_NO_PKT,
		    OCTEON_POOL_SIZE_PKT / CACHELINESIZE);

		n--;
	}
	return n;
}
1118
/*
 * Convert a receive work-queue entry into an mbuf chain.  The packet
 * data buffers were seeded by cnmac_mbuf_alloc(), which stored the
 * owning mbuf pointer directly before each buffer.  On return *rm is
 * the chain and *nmbuf the number of hardware buffers consumed.
 * Always returns 0 (corruption panics instead).
 */
int
cnmac_recv_mbuf(struct cnmac_softc *sc, uint64_t *work,
    struct mbuf **rm, int *nmbuf)
{
	struct mbuf *m, *m0, *mprev, **pm;
	paddr_t addr, pktbuf;
	uint64_t word1 = work[1];
	uint64_t word2 = work[2];
	uint64_t word3 = work[3];
	unsigned int back, i, nbufs;
	unsigned int left, total, size;

	/* The WQE itself can go back to its pool right away. */
	cn30xxfpa_buf_put_paddr(cnmac_fb_wqe, XKPHYS_TO_PHYS(work));

	nbufs = (word2 & PIP_WQE_WORD2_IP_BUFS) >> PIP_WQE_WORD2_IP_BUFS_SHIFT;
	if (nbufs == 0)
		panic("%s: dynamic short packet", __func__);

	m0 = mprev = NULL;
	total = left = (word1 & PIP_WQE_WORD1_LEN) >> 48;
	for (i = 0; i < nbufs; i++) {
		addr = word3 & PIP_WQE_WORD3_ADDR;
		back = (word3 & PIP_WQE_WORD3_BACK) >> PIP_WQE_WORD3_BACK_SHIFT;
		/* Recover the buffer start and the mbuf stored before it. */
		pktbuf = (addr & ~(CACHELINESIZE - 1)) - back * CACHELINESIZE;
		pm = (struct mbuf **)PHYS_TO_XKPHYS(pktbuf, CCA_CACHED) - 1;
		m = *pm;
		*pm = NULL;
		if ((paddr_t)m->m_pkthdr.ph_cookie != pktbuf)
			panic("%s: packet pool is corrupted, mbuf cookie %p != "
			    "pktbuf %p", __func__, m->m_pkthdr.ph_cookie,
			    (void *)pktbuf);

		/*
		 * Because of a hardware bug in some Octeon models the size
		 * field of word3 can be wrong (erratum PKI-100).
		 * However, the hardware uses all space in a buffer before
		 * moving to the next one so it is possible to derive
		 * the size of this data segment from the size
		 * of packet data buffers.
		 */
		size = OCTEON_POOL_SIZE_PKT - (addr - pktbuf);
		if (size > left)
			size = left;

		m->m_pkthdr.ph_cookie = NULL;
		m->m_data += addr - pktbuf;
		m->m_len = size;
		left -= size;

		if (m0 == NULL)
			m0 = m;
		else {
			m->m_flags &= ~M_PKTHDR;
			mprev->m_next = m;
		}
		mprev = m;

		/* The next buffer's word3 sits just before this buffer's data. */
		if (i + 1 < nbufs)
			memcpy(&word3, (void *)PHYS_TO_XKPHYS(addr -
			    sizeof(word3), CCA_CACHED), sizeof(word3));
	}

	m0->m_pkthdr.len = total;
	*rm = m0;
	*nmbuf = nbufs;

	return 0;
}
1187
1188 int
cnmac_recv_check(struct cnmac_softc * sc,uint64_t word2)1189 cnmac_recv_check(struct cnmac_softc *sc, uint64_t word2)
1190 {
1191 static struct timeval rxerr_log_interval = { 0, 250000 };
1192 uint64_t opecode;
1193
1194 if (__predict_true(!ISSET(word2, PIP_WQE_WORD2_NOIP_RE)))
1195 return 0;
1196
1197 opecode = word2 & PIP_WQE_WORD2_NOIP_OPECODE;
1198 if ((sc->sc_arpcom.ac_if.if_flags & IFF_DEBUG) &&
1199 ratecheck(&sc->sc_rxerr_log_last, &rxerr_log_interval))
1200 log(LOG_DEBUG, "%s: rx error (%lld)\n", sc->sc_dev.dv_xname,
1201 opecode);
1202
1203 /* XXX harmless error? */
1204 if (opecode == PIP_WQE_WORD2_RE_OPCODE_OVRRUN)
1205 return 0;
1206
1207 return 1;
1208 }
1209
/*
 * Process one received work queue entry: validate it, convert it into
 * an mbuf chain and enqueue it on ml.  Returns the number of packet
 * buffers consumed so the caller can replenish the pool; returns 0 on
 * drop, in which case the work entry and its buffers are freed here.
 */
int
cnmac_recv(struct cnmac_softc *sc, uint64_t *work, struct mbuf_list *ml)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct mbuf *m;
	uint64_t word2;
	int nmbuf = 0;

	word2 = work[2];

	if (!(ifp->if_flags & IFF_RUNNING))
		goto drop;

	if (__predict_false(cnmac_recv_check(sc, word2) != 0)) {
		ifp->if_ierrors++;
		goto drop;
	}

	/* On success, this releases the work queue entry. */
	if (__predict_false(cnmac_recv_mbuf(sc, work, &m, &nmbuf) != 0)) {
		ifp->if_ierrors++;
		goto drop;
	}

	/* Translate the hardware checksum status into mbuf csum flags. */
	m->m_pkthdr.csum_flags = 0;
	if (__predict_true(!ISSET(word2, PIP_WQE_WORD2_IP_NI))) {
		/* Check IP checksum status. */
		if (!ISSET(word2, PIP_WQE_WORD2_IP_V6) &&
		    !ISSET(word2, PIP_WQE_WORD2_IP_IE))
			m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;

		/* Check TCP/UDP checksum status. */
		if (ISSET(word2, PIP_WQE_WORD2_IP_TU) &&
		    !ISSET(word2, PIP_WQE_WORD2_IP_FR) &&
		    !ISSET(word2, PIP_WQE_WORD2_IP_LE))
			m->m_pkthdr.csum_flags |=
			    M_TCP_CSUM_IN_OK | M_UDP_CSUM_IN_OK;
	}

	ml_enqueue(ml, m);

	return nmbuf;

drop:
	cnmac_buf_free_work(sc, work);
	return 0;
}
1257
/*
 * Receive interrupt handler: drain all pending work queue entries for
 * this port's POW group, feed the resulting packets to the stack and
 * replenish the packet buffer pool.
 */
int
cnmac_intr(void *arg)
{
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct cnmac_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	uint64_t *work;
	uint64_t wqmask = 1ull << sc->sc_powgroup;
	uint32_t coreid = octeon_get_coreid();
	uint32_t port;
	int nmbuf = 0;

	/* Accept only our work queue group on this core while polling. */
	_POW_WR8(sc->sc_pow, POW_PP_GRP_MSK_OFFSET(coreid), wqmask);

	/* Prime the first asynchronous work request. */
	cn30xxpow_tag_sw_wait();
	cn30xxpow_work_request_async(OCTEON_CVMSEG_OFFSET(csm_pow_intr),
	    POW_NO_WAIT);

	for (;;) {
		work = (uint64_t *)cn30xxpow_work_response_async(
		    OCTEON_CVMSEG_OFFSET(csm_pow_intr));
		if (work == NULL)
			break;

		/* Pipeline: request the next entry before handling this one. */
		cn30xxpow_tag_sw_wait();
		cn30xxpow_work_request_async(
		    OCTEON_CVMSEG_OFFSET(csm_pow_intr), POW_NO_WAIT);

		/* A WQE for another port here indicates a setup error. */
		port = (work[1] & PIP_WQE_WORD1_IPRT) >> 42;
		if (port != sc->sc_port) {
			printf("%s: unexpected wqe port %u, should be %u\n",
			    sc->sc_dev.dv_xname, port, sc->sc_port);
			goto wqe_error;
		}

		nmbuf += cnmac_recv(sc, work, &ml);
	}

	/* Acknowledge the work queue interrupt for our group. */
	_POW_WR8(sc->sc_pow, POW_WQ_INT_OFFSET, wqmask);

	if_input(ifp, &ml);

	/*
	 * Replenish the buffers consumed above; anything that could not
	 * be allocated now is deferred to cnmac_tick_free().
	 */
	nmbuf = cnmac_mbuf_alloc(nmbuf);
	if (nmbuf != 0)
		atomic_add_int(&cnmac_mbufs_to_alloc, nmbuf);

	return 1;

wqe_error:
	printf("word0: 0x%016llx\n", work[0]);
	printf("word1: 0x%016llx\n", work[1]);
	printf("word2: 0x%016llx\n", work[2]);
	printf("word3: 0x%016llx\n", work[3]);
	panic("wqe error");
}
1313
1314 /* ---- tick */
1315
/*
 * Flush the send queue (reclaiming completed transmit buffers), kick
 * the transmit path if it was marked output-active, and reschedule the
 * free tick unless the start routine already did so.
 */
void
cnmac_free_task(void *arg)
{
	struct cnmac_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct ifqueue *ifq = &ifp->if_snd;
	int resched = 1;
	int timeout;

	/* Reclaim buffers of packets the hardware has finished sending. */
	if (ml_len(&sc->sc_sendq) > 0) {
		cnmac_send_queue_flush_prefetch(sc);
		cnmac_send_queue_flush_fetch(sc);
		cnmac_send_queue_flush(sc);
	}

	/* Restart output if it stalled waiting for free send buffers. */
	if (ifq_is_oactive(ifq)) {
		ifq_clr_oactive(ifq);
		cnmac_start(ifq);

		if (ifq_is_oactive(ifq)) {
			/* The start routine did rescheduling already. */
			resched = 0;
		}
	}

	if (resched) {
		/* Poll much faster while external callbacks are pending. */
		timeout = (sc->sc_ext_callback_cnt > 0) ? 1 : hz;
		timeout_add(&sc->sc_tick_free_ch, timeout);
	}
}
1346
1347 /*
1348 * cnmac_tick_free
1349 *
1350 * => garbage collect send gather buffer / mbuf
1351 * => called at softclock
1352 */
1353 void
cnmac_tick_free(void * arg)1354 cnmac_tick_free(void *arg)
1355 {
1356 struct cnmac_softc *sc = arg;
1357 struct ifnet *ifp = &sc->sc_arpcom.ac_if;
1358 int to_alloc;
1359
1360 ifq_serialize(&ifp->if_snd, &sc->sc_free_task);
1361
1362 if (cnmac_mbufs_to_alloc != 0) {
1363 to_alloc = atomic_swap_uint(&cnmac_mbufs_to_alloc, 0);
1364 to_alloc = cnmac_mbuf_alloc(to_alloc);
1365 if (to_alloc != 0)
1366 atomic_add_int(&cnmac_mbufs_to_alloc, to_alloc);
1367 }
1368 }
1369
1370 /*
1371 * cnmac_tick_misc
1372 *
1373 * => collect statistics
1374 * => check link status
1375 * => called at softclock
1376 */
void
cnmac_tick_misc(void *arg)
{
	struct cnmac_softc *sc = arg;
	int s;

	/* Poll the PHY for link state changes. */
	s = splnet();
	mii_tick(&sc->sc_mii);
	splx(s);

#if NKSTAT > 0
	cnmac_kstat_tick(sc);
#endif

	/* Rearm ourselves for the next second. */
	timeout_add_sec(&sc->sc_tick_misc_ch, 1);
}
1393
1394 #if NKSTAT > 0
1395 #define KVE(n, t) \
1396 KSTAT_KV_UNIT_INITIALIZER((n), KSTAT_KV_T_COUNTER64, (t))
1397
/*
 * Template for the per-device kstat data: counter names and units,
 * indexed by the cnmac_stat_* constants.  cnmac_kstat_attach() copies
 * this template and cnmac_kstat_read() fills in the values from the
 * PIP and GMX hardware counters.  Name duplicates (e.g. "rx total gmx")
 * are distinguished by their unit (bytes vs. packets).
 */
static const struct kstat_kv cnmac_kstat_tpl[cnmac_stat_count] = {
	[cnmac_stat_rx_toto_gmx]= KVE("rx total gmx",	KSTAT_KV_U_BYTES),
	[cnmac_stat_rx_totp_gmx]= KVE("rx total gmx",	KSTAT_KV_U_PACKETS),
	[cnmac_stat_rx_toto_pip]= KVE("rx total pip",	KSTAT_KV_U_BYTES),
	[cnmac_stat_rx_totp_pip]= KVE("rx total pip",	KSTAT_KV_U_PACKETS),
	[cnmac_stat_rx_h64]	= KVE("rx 64B",		KSTAT_KV_U_PACKETS),
	[cnmac_stat_rx_h127]	= KVE("rx 65-127B",	KSTAT_KV_U_PACKETS),
	[cnmac_stat_rx_h255]	= KVE("rx 128-255B",	KSTAT_KV_U_PACKETS),
	[cnmac_stat_rx_h511]	= KVE("rx 256-511B",	KSTAT_KV_U_PACKETS),
	[cnmac_stat_rx_h1023]	= KVE("rx 512-1023B",	KSTAT_KV_U_PACKETS),
	[cnmac_stat_rx_h1518]	= KVE("rx 1024-1518B",	KSTAT_KV_U_PACKETS),
	[cnmac_stat_rx_hmax]	= KVE("rx 1519-maxB",	KSTAT_KV_U_PACKETS),
	[cnmac_stat_rx_bcast]	= KVE("rx bcast",	KSTAT_KV_U_PACKETS),
	[cnmac_stat_rx_mcast]	= KVE("rx mcast",	KSTAT_KV_U_PACKETS),
	[cnmac_stat_rx_qdpo]	= KVE("rx qos drop",	KSTAT_KV_U_BYTES),
	[cnmac_stat_rx_qdpp]	= KVE("rx qos drop",	KSTAT_KV_U_PACKETS),
	[cnmac_stat_rx_fcs]	= KVE("rx fcs err",	KSTAT_KV_U_PACKETS),
	[cnmac_stat_rx_frag]	= KVE("rx fcs undersize",KSTAT_KV_U_PACKETS),
	[cnmac_stat_rx_undersz]	= KVE("rx undersize",	KSTAT_KV_U_PACKETS),
	[cnmac_stat_rx_jabber]	= KVE("rx jabber",	KSTAT_KV_U_PACKETS),
	[cnmac_stat_rx_oversz]	= KVE("rx oversize",	KSTAT_KV_U_PACKETS),
	[cnmac_stat_rx_raw]	= KVE("rx raw",		KSTAT_KV_U_PACKETS),
	[cnmac_stat_rx_bad]	= KVE("rx bad",		KSTAT_KV_U_PACKETS),
	[cnmac_stat_rx_drop]	= KVE("rx drop",	KSTAT_KV_U_PACKETS),
	[cnmac_stat_rx_ctl]	= KVE("rx control",	KSTAT_KV_U_PACKETS),
	[cnmac_stat_rx_dmac]	= KVE("rx dmac",	KSTAT_KV_U_PACKETS),
	[cnmac_stat_tx_toto]	= KVE("tx total",	KSTAT_KV_U_BYTES),
	[cnmac_stat_tx_totp]	= KVE("tx total",	KSTAT_KV_U_PACKETS),
	[cnmac_stat_tx_hmin]	= KVE("tx min-63B",	KSTAT_KV_U_PACKETS),
	[cnmac_stat_tx_h64]	= KVE("tx 64B",		KSTAT_KV_U_PACKETS),
	[cnmac_stat_tx_h127]	= KVE("tx 65-127B",	KSTAT_KV_U_PACKETS),
	[cnmac_stat_tx_h255]	= KVE("tx 128-255B",	KSTAT_KV_U_PACKETS),
	[cnmac_stat_tx_h511]	= KVE("tx 256-511B",	KSTAT_KV_U_PACKETS),
	[cnmac_stat_tx_h1023]	= KVE("tx 512-1023B",	KSTAT_KV_U_PACKETS),
	[cnmac_stat_tx_h1518]	= KVE("tx 1024-1518B",	KSTAT_KV_U_PACKETS),
	[cnmac_stat_tx_hmax]	= KVE("tx 1519-maxB",	KSTAT_KV_U_PACKETS),
	[cnmac_stat_tx_bcast]	= KVE("tx bcast",	KSTAT_KV_U_PACKETS),
	[cnmac_stat_tx_mcast]	= KVE("tx mcast",	KSTAT_KV_U_PACKETS),
	[cnmac_stat_tx_coll]	= KVE("tx coll",	KSTAT_KV_U_PACKETS),
	[cnmac_stat_tx_defer]	= KVE("tx defer",	KSTAT_KV_U_PACKETS),
	[cnmac_stat_tx_scol]	= KVE("tx scoll",	KSTAT_KV_U_PACKETS),
	[cnmac_stat_tx_mcol]	= KVE("tx mcoll",	KSTAT_KV_U_PACKETS),
	[cnmac_stat_tx_ctl]	= KVE("tx control",	KSTAT_KV_U_PACKETS),
	[cnmac_stat_tx_uflow]	= KVE("tx underflow",	KSTAT_KV_U_PACKETS),
};
1443
/*
 * Create and install the per-device kstat.  Failure to create it is
 * not fatal: statistics are simply unavailable (sc_kstat stays NULL,
 * which cnmac_kstat_tick() checks).
 */
void
cnmac_kstat_attach(struct cnmac_softc *sc)
{
	struct kstat *ks;
	struct kstat_kv *kvs;

	mtx_init(&sc->sc_kstat_mtx, IPL_SOFTCLOCK);

	ks = kstat_create(sc->sc_dev.dv_xname, 0, "cnmac-stats", 0,
	    KSTAT_T_KV, 0);
	if (ks == NULL)
		return;

	/* Start from the template of counter names and units. */
	kvs = malloc(sizeof(cnmac_kstat_tpl), M_DEVBUF, M_WAITOK | M_ZERO);
	memcpy(kvs, cnmac_kstat_tpl, sizeof(cnmac_kstat_tpl));

	kstat_set_mutex(ks, &sc->sc_kstat_mtx);
	ks->ks_softc = sc;
	ks->ks_data = kvs;
	ks->ks_datalen = sizeof(cnmac_kstat_tpl);
	ks->ks_read = cnmac_kstat_read;

	sc->sc_kstat = ks;
	kstat_install(ks);
}
1469
/*
 * kstat read callback: refresh the counters from the PIP and GMX
 * hardware blocks.  Callers are expected to hold sc_kstat_mtx (set up
 * via kstat_set_mutex(); cnmac_kstat_tick() enters it explicitly).
 */
int
cnmac_kstat_read(struct kstat *ks)
{
	struct cnmac_softc *sc = ks->ks_softc;
	struct kstat_kv *kvs = ks->ks_data;

	cn30xxpip_kstat_read(sc->sc_pip, kvs);
	cn30xxgmx_kstat_read(sc->sc_gmx_port, kvs);

	getnanouptime(&ks->ks_updated);

	return 0;
}
1483
1484 void
cnmac_kstat_tick(struct cnmac_softc * sc)1485 cnmac_kstat_tick(struct cnmac_softc *sc)
1486 {
1487 if (sc->sc_kstat == NULL)
1488 return;
1489 if (!mtx_enter_try(&sc->sc_kstat_mtx))
1490 return;
1491 cnmac_kstat_read(sc->sc_kstat);
1492 mtx_leave(&sc->sc_kstat_mtx);
1493 }
1494 #endif
1495