xref: /openbsd/sys/arch/octeon/dev/if_cnmac.c (revision 4cfece93)
1 /*	$OpenBSD: if_cnmac.c,v 1.77 2020/07/10 13:26:36 patrick Exp $	*/
2 
3 /*
4  * Copyright (c) 2007 Internet Initiative Japan, Inc.
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  */
28 #include "bpfilter.h"
29 
30 /*
31  * XXXSEIL
32  * If no free send buffer is available, free all the sent buffer and bail out.
33  */
34 #define OCTEON_ETH_SEND_QUEUE_CHECK
35 
36 #include <sys/param.h>
37 #include <sys/systm.h>
38 #include <sys/pool.h>
39 #include <sys/proc.h>
40 #include <sys/mbuf.h>
41 #include <sys/malloc.h>
42 #include <sys/kernel.h>
43 #include <sys/socket.h>
44 #include <sys/ioctl.h>
45 #include <sys/errno.h>
46 #include <sys/device.h>
47 #include <sys/queue.h>
48 #include <sys/conf.h>
49 #include <sys/stdint.h> /* uintptr_t */
50 #include <sys/syslog.h>
51 #include <sys/endian.h>
52 #include <sys/atomic.h>
53 
54 #include <net/if.h>
55 #include <net/if_media.h>
56 #include <netinet/in.h>
57 #include <netinet/if_ether.h>
58 
59 #if NBPFILTER > 0
60 #include <net/bpf.h>
61 #endif
62 
63 #include <machine/bus.h>
64 #include <machine/intr.h>
65 #include <machine/octeonvar.h>
66 #include <machine/octeon_model.h>
67 
68 #include <dev/mii/mii.h>
69 #include <dev/mii/miivar.h>
70 
71 #include <octeon/dev/cn30xxciureg.h>
72 #include <octeon/dev/cn30xxnpireg.h>
73 #include <octeon/dev/cn30xxgmxreg.h>
74 #include <octeon/dev/cn30xxipdreg.h>
75 #include <octeon/dev/cn30xxpipreg.h>
76 #include <octeon/dev/cn30xxpowreg.h>
77 #include <octeon/dev/cn30xxfaureg.h>
78 #include <octeon/dev/cn30xxfpareg.h>
79 #include <octeon/dev/cn30xxbootbusreg.h>
80 #include <octeon/dev/cn30xxfpavar.h>
81 #include <octeon/dev/cn30xxgmxvar.h>
82 #include <octeon/dev/cn30xxfauvar.h>
83 #include <octeon/dev/cn30xxpowvar.h>
84 #include <octeon/dev/cn30xxipdvar.h>
85 #include <octeon/dev/cn30xxpipvar.h>
86 #include <octeon/dev/cn30xxpkovar.h>
87 #include <octeon/dev/cn30xxsmivar.h>
88 #include <octeon/dev/iobusvar.h>
89 #include <octeon/dev/if_cnmacvar.h>
90 
91 #ifdef OCTEON_ETH_DEBUG
92 #define	OCTEON_ETH_KASSERT(x)	KASSERT(x)
93 #define	OCTEON_ETH_KDASSERT(x)	KDASSERT(x)
94 #else
95 #define	OCTEON_ETH_KASSERT(x)
96 #define	OCTEON_ETH_KDASSERT(x)
97 #endif
98 
99 /*
100  * Set the PKO to think command buffers are an odd length.  This makes it so we
101  * never have to divide a comamnd across two buffers.
102  */
103 #define OCTEON_POOL_NWORDS_CMD	\
104 	    (((uint32_t)OCTEON_POOL_SIZE_CMD / sizeof(uint64_t)) - 1)
105 #define FPA_COMMAND_BUFFER_POOL_NWORDS	OCTEON_POOL_NWORDS_CMD	/* XXX */
106 
107 CTASSERT(MCLBYTES >= OCTEON_POOL_SIZE_PKT + CACHELINESIZE);
108 
109 void	cnmac_buf_init(struct cnmac_softc *);
110 
111 int	cnmac_match(struct device *, void *, void *);
112 void	cnmac_attach(struct device *, struct device *, void *);
113 void	cnmac_pip_init(struct cnmac_softc *);
114 void	cnmac_ipd_init(struct cnmac_softc *);
115 void	cnmac_pko_init(struct cnmac_softc *);
116 void	cnmac_smi_init(struct cnmac_softc *);
117 
118 void	cnmac_board_mac_addr(uint8_t *);
119 
120 int	cnmac_mii_readreg(struct device *, int, int);
121 void	cnmac_mii_writereg(struct device *, int, int, int);
122 void	cnmac_mii_statchg(struct device *);
123 
124 int	cnmac_mediainit(struct cnmac_softc *);
125 void	cnmac_mediastatus(struct ifnet *, struct ifmediareq *);
126 int	cnmac_mediachange(struct ifnet *);
127 
128 void	cnmac_send_queue_flush_prefetch(struct cnmac_softc *);
129 void	cnmac_send_queue_flush_fetch(struct cnmac_softc *);
130 void	cnmac_send_queue_flush(struct cnmac_softc *);
131 int	cnmac_send_queue_is_full(struct cnmac_softc *);
132 void	cnmac_send_queue_add(struct cnmac_softc *,
133 	    struct mbuf *, uint64_t *);
134 void	cnmac_send_queue_del(struct cnmac_softc *,
135 	    struct mbuf **, uint64_t **);
136 int	cnmac_buf_free_work(struct cnmac_softc *, uint64_t *);
137 void	cnmac_buf_ext_free(caddr_t, u_int, void *);
138 
139 int	cnmac_ioctl(struct ifnet *, u_long, caddr_t);
140 void	cnmac_watchdog(struct ifnet *);
141 int	cnmac_init(struct ifnet *);
142 int	cnmac_stop(struct ifnet *, int);
143 void	cnmac_start(struct ifqueue *);
144 
145 int	cnmac_send_cmd(struct cnmac_softc *, uint64_t, uint64_t);
146 uint64_t cnmac_send_makecmd_w1(int, paddr_t);
147 uint64_t cnmac_send_makecmd_w0(uint64_t, uint64_t, size_t, int, int);
148 int	cnmac_send_makecmd_gbuf(struct cnmac_softc *,
149 	    struct mbuf *, uint64_t *, int *);
150 int	cnmac_send_makecmd(struct cnmac_softc *,
151 	    struct mbuf *, uint64_t *, uint64_t *, uint64_t *);
152 int	cnmac_send_buf(struct cnmac_softc *,
153 	    struct mbuf *, uint64_t *);
154 int	cnmac_send(struct cnmac_softc *, struct mbuf *);
155 
156 int	cnmac_reset(struct cnmac_softc *);
157 int	cnmac_configure(struct cnmac_softc *);
158 int	cnmac_configure_common(struct cnmac_softc *);
159 
160 void	cnmac_free_task(void *);
161 void	cnmac_tick_free(void *arg);
162 void	cnmac_tick_misc(void *);
163 
164 int	cnmac_recv_mbuf(struct cnmac_softc *,
165 	    uint64_t *, struct mbuf **, int *);
166 int	cnmac_recv_check(struct cnmac_softc *, uint64_t);
167 int	cnmac_recv(struct cnmac_softc *, uint64_t *, struct mbuf_list *);
168 int	cnmac_intr(void *);
169 
170 int	cnmac_mbuf_alloc(int);
171 
/* device parameters */
int	cnmac_param_pko_cmd_w0_n2 = 1;	/* PKO WORD0 "n2" hint, see cnmac_send_makecmd_w0() */

const struct cfattach cnmac_ca = {
	sizeof(struct cnmac_softc), cnmac_match, cnmac_attach
};

struct cfdriver cnmac_cd = { NULL, "cnmac", DV_IFNET };

/* ---- buffer management */

/*
 * Descriptors for the FPA pools shared by every cnmac port: work-queue
 * entries (WQE), PKO command buffers (CMD), and scatter/gather lists (SG).
 * The pools themselves are created once in cnmac_buf_init().
 */
const struct cnmac_pool_param {
	int			poolno;	/* FPA pool number */
	size_t			size;	/* element size in bytes */
	size_t			nelems;	/* number of elements */
} cnmac_pool_params[] = {
#define	_ENTRY(x)	{ OCTEON_POOL_NO_##x, OCTEON_POOL_SIZE_##x, OCTEON_POOL_NELEMS_##x }
	_ENTRY(WQE),
	_ENTRY(CMD),
	_ENTRY(SG)
#undef	_ENTRY
};
struct cn30xxfpa_buf	*cnmac_pools[8];
#define	cnmac_fb_wqe	cnmac_pools[OCTEON_POOL_NO_WQE]
#define	cnmac_fb_cmd	cnmac_pools[OCTEON_POOL_NO_CMD]
#define	cnmac_fb_sg	cnmac_pools[OCTEON_POOL_NO_SG]

/* Next MAC address to hand out (in the low 48 bits) and its offset. */
uint64_t cnmac_mac_addr = 0;
uint32_t cnmac_mac_addr_offset = 0;

/* Packet mbufs still owed to the FPA packet pool (shortfall, retried later). */
int	cnmac_mbufs_to_alloc;
int	cnmac_npowgroups = 0;
204 
205 void
206 cnmac_buf_init(struct cnmac_softc *sc)
207 {
208 	static int once;
209 	int i;
210 	const struct cnmac_pool_param *pp;
211 	struct cn30xxfpa_buf *fb;
212 
213 	if (once == 1)
214 		return;
215 	once = 1;
216 
217 	for (i = 0; i < (int)nitems(cnmac_pool_params); i++) {
218 		pp = &cnmac_pool_params[i];
219 		cn30xxfpa_buf_init(pp->poolno, pp->size, pp->nelems, &fb);
220 		cnmac_pools[pp->poolno] = fb;
221 	}
222 }
223 
224 /* ---- autoconf */
225 
226 int
227 cnmac_match(struct device *parent, void *match, void *aux)
228 {
229 	struct cfdata *cf = (struct cfdata *)match;
230 	struct cn30xxgmx_attach_args *ga = aux;
231 
232 	if (strcmp(cf->cf_driver->cd_name, ga->ga_name) != 0) {
233 		return 0;
234 	}
235 	return 1;
236 }
237 
/*
 * Attach one GMX port as a network interface: allocate a POW group,
 * pre-feed the FPA packet pool, initialize the PIP/IPD/PKO submodules
 * and the per-port FAU TX-done counter, hook up MII/ifmedia, then
 * register the ifnet and its POW work-queue interrupt handler.
 */
void
cnmac_attach(struct device *parent, struct device *self, void *aux)
{
	struct cnmac_softc *sc = (void *)self;
	struct cn30xxgmx_attach_args *ga = aux;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	uint8_t enaddr[ETHER_ADDR_LEN];

	if (cnmac_npowgroups >= OCTEON_POW_GROUP_MAX) {
		printf(": out of POW groups\n");
		return;
	}

	/* cnmac_mbuf_alloc() returns the shortfall; it is credited here. */
	atomic_add_int(&cnmac_mbufs_to_alloc,
	    cnmac_mbuf_alloc(CNMAC_MBUFS_PER_PORT));

	sc->sc_regt = ga->ga_regt;
	sc->sc_dmat = ga->ga_dmat;
	sc->sc_port = ga->ga_portno;
	sc->sc_port_type = ga->ga_port_type;
	sc->sc_gmx = ga->ga_gmx;
	sc->sc_gmx_port = ga->ga_gmx_port;
	sc->sc_smi = ga->ga_smi;
	sc->sc_phy_addr = ga->ga_phy_addr;
	sc->sc_powgroup = cnmac_npowgroups++;

	sc->sc_init_flag = 0;

	/*
	 * XXX
	 * Setting PIP_IP_OFFSET[OFFSET] to 8 causes panic ... why???
	 */
	sc->sc_ip_offset = 0/* XXX */;

	cnmac_board_mac_addr(enaddr);
	printf(", address %s\n", ether_sprintf(enaddr));

	ml_init(&sc->sc_sendq);
	sc->sc_soft_req_thresh = 15/* XXX */;
	sc->sc_ext_callback_cnt = 0;

	cn30xxgmx_stats_init(sc->sc_gmx_port);

	task_set(&sc->sc_free_task, cnmac_free_task, sc);
	timeout_set(&sc->sc_tick_misc_ch, cnmac_tick_misc, sc);
	timeout_set(&sc->sc_tick_free_ch, cnmac_tick_free, sc);

	/* Per-port FAU register counting hardware TX completions. */
	cn30xxfau_op_init(&sc->sc_fau_done,
	    OCTEON_CVMSEG_ETHER_OFFSET(sc->sc_dev.dv_unit, csm_ether_fau_done),
	    OCT_FAU_REG_ADDR_END - (8 * (sc->sc_dev.dv_unit + 1))/* XXX */);
	cn30xxfau_op_set_8(&sc->sc_fau_done, 0);

	cnmac_pip_init(sc);
	cnmac_ipd_init(sc);
	cnmac_pko_init(sc);

	cnmac_configure_common(sc);

	sc->sc_gmx_port->sc_ipd = sc->sc_ipd;
	sc->sc_gmx_port->sc_port_mii = &sc->sc_mii;
	sc->sc_gmx_port->sc_port_ac = &sc->sc_arpcom;

	/* XXX */
	sc->sc_pow = &cn30xxpow_softc;

	cnmac_mediainit(sc);

	strncpy(ifp->if_xname, sc->sc_dev.dv_xname, sizeof(ifp->if_xname));
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_xflags = IFXF_MPSAFE;
	ifp->if_ioctl = cnmac_ioctl;
	ifp->if_qstart = cnmac_start;
	ifp->if_watchdog = cnmac_watchdog;
	ifp->if_hardmtu = CNMAC_MAX_MTU;
	ifq_set_maxlen(&ifp->if_snd, max(GATHER_QUEUE_SIZE, IFQ_MAXLEN));

	ifp->if_capabilities = IFCAP_VLAN_MTU | IFCAP_CSUM_TCPv4 |
	    IFCAP_CSUM_UDPv4 | IFCAP_CSUM_TCPv6 | IFCAP_CSUM_UDPv6;

	cn30xxgmx_set_mac_addr(sc->sc_gmx_port, enaddr);
	cn30xxgmx_set_filter(sc->sc_gmx_port);

	if_attach(ifp);

	memcpy(sc->sc_arpcom.ac_enaddr, enaddr, ETHER_ADDR_LEN);
	ether_ifattach(ifp);

	cnmac_buf_init(sc);

	sc->sc_ih = octeon_intr_establish(POW_WORKQ_IRQ(sc->sc_powgroup),
	    IPL_NET | IPL_MPSAFE, cnmac_intr, sc, sc->sc_dev.dv_xname);
	if (sc->sc_ih == NULL)
		panic("%s: could not set up interrupt", sc->sc_dev.dv_xname);
}
333 
334 /* ---- submodules */
335 
/*
 * Configure the PIP (packet input processing) unit for this port:
 * ordered POW tags, the per-port receive group, and the IP offset
 * chosen at attach time.
 */
void
cnmac_pip_init(struct cnmac_softc *sc)
{
	struct cn30xxpip_attach_args pip_aa;

	pip_aa.aa_port = sc->sc_port;
	pip_aa.aa_regt = sc->sc_regt;
	pip_aa.aa_tag_type = POW_TAG_TYPE_ORDERED/* XXX */;
	pip_aa.aa_receive_group = sc->sc_powgroup;
	pip_aa.aa_ip_offset = sc->sc_ip_offset;
	cn30xxpip_init(&pip_aa, &sc->sc_pip);
	cn30xxpip_port_config(sc->sc_pip);
}
349 
/*
 * Set up the IPD (input packet data) unit; no extra skip space is
 * reserved in the first or subsequent packet buffers.
 */
void
cnmac_ipd_init(struct cnmac_softc *sc)
{
	struct cn30xxipd_attach_args ipd_aa;

	ipd_aa.aa_port = sc->sc_port;
	ipd_aa.aa_regt = sc->sc_regt;
	ipd_aa.aa_first_mbuff_skip = 0/* XXX */;
	ipd_aa.aa_not_first_mbuff_skip = 0/* XXX */;
	cn30xxipd_init(&ipd_aa, &sc->sc_ipd);
}
361 
/*
 * Set up the PKO (packet output) unit; commands are written into
 * buffers drawn from the shared CMD FPA pool, sized one word short so
 * a two-word command never straddles buffers (see OCTEON_POOL_NWORDS_CMD).
 */
void
cnmac_pko_init(struct cnmac_softc *sc)
{
	struct cn30xxpko_attach_args pko_aa;

	pko_aa.aa_port = sc->sc_port;
	pko_aa.aa_regt = sc->sc_regt;
	pko_aa.aa_cmdptr = &sc->sc_cmdptr;
	pko_aa.aa_cmd_buf_pool = OCTEON_POOL_NO_CMD;
	pko_aa.aa_cmd_buf_size = OCTEON_POOL_NWORDS_CMD;
	cn30xxpko_init(&pko_aa, &sc->sc_pko);
}
374 
375 /* ---- XXX */
376 
377 void
378 cnmac_board_mac_addr(uint8_t *enaddr)
379 {
380 	int id;
381 
382 	/* Initialize MAC addresses from the global address base. */
383 	if (cnmac_mac_addr == 0) {
384 		memcpy((uint8_t *)&cnmac_mac_addr + 2,
385 		    octeon_boot_info->mac_addr_base, 6);
386 
387 		/*
388 		 * Should be allowed to fail hard if couldn't read the
389 		 * mac_addr_base address...
390 		 */
391 		if (cnmac_mac_addr == 0)
392 			return;
393 
394 		/*
395 		 * Calculate the offset from the mac_addr_base that will be used
396 		 * for the next sc->sc_port.
397 		 */
398 		id = octeon_get_chipid();
399 
400 		switch (octeon_model_family(id)) {
401 		case OCTEON_MODEL_FAMILY_CN56XX:
402 			cnmac_mac_addr_offset = 1;
403 			break;
404 		/*
405 		case OCTEON_MODEL_FAMILY_CN52XX:
406 		case OCTEON_MODEL_FAMILY_CN63XX:
407 			cnmac_mac_addr_offset = 2;
408 			break;
409 		*/
410 		default:
411 			cnmac_mac_addr_offset = 0;
412 			break;
413 		}
414 
415 		enaddr += cnmac_mac_addr_offset;
416 	}
417 
418 	/* No more MAC addresses to assign. */
419 	if (cnmac_mac_addr_offset >= octeon_boot_info->mac_addr_count)
420 		return;
421 
422 	if (enaddr)
423 		memcpy(enaddr, (uint8_t *)&cnmac_mac_addr + 2, 6);
424 
425 	cnmac_mac_addr++;
426 	cnmac_mac_addr_offset++;
427 }
428 
429 /* ---- media */
430 
/* MII read hook: proxy PHY register reads through the shared SMI bus. */
int
cnmac_mii_readreg(struct device *self, int phy_no, int reg)
{
	struct cnmac_softc *sc = (struct cnmac_softc *)self;
	return cn30xxsmi_read(sc->sc_smi, phy_no, reg);
}
437 
/* MII write hook: proxy PHY register writes through the shared SMI bus. */
void
cnmac_mii_writereg(struct device *self, int phy_no, int reg, int value)
{
	struct cnmac_softc *sc = (struct cnmac_softc *)self;
	cn30xxsmi_write(sc->sc_smi, phy_no, reg, value);
}
444 
/*
 * MII status-change callback: quiesce PKO and GMX, reprogram speed,
 * flow control and timing for the new link parameters (cnmac_reset),
 * then re-enable both data paths.
 */
void
cnmac_mii_statchg(struct device *self)
{
	struct cnmac_softc *sc = (struct cnmac_softc *)self;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;

	cn30xxpko_port_enable(sc->sc_pko, 0);
	cn30xxgmx_port_enable(sc->sc_gmx_port, 0);

	cnmac_reset(sc);

	if (ISSET(ifp->if_flags, IFF_RUNNING))
		cn30xxgmx_set_filter(sc->sc_gmx_port);

	cn30xxpko_port_enable(sc->sc_pko, 1);
	cn30xxgmx_port_enable(sc->sc_gmx_port, 1);
}
462 
/*
 * Set up ifmedia/MII.  If no PHY is found on the bus, fall back to a
 * single manual media entry instead of autoselect.  Always returns 0.
 */
int
cnmac_mediainit(struct cnmac_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct mii_softc *child;

	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = cnmac_mii_readreg;
	sc->sc_mii.mii_writereg = cnmac_mii_writereg;
	sc->sc_mii.mii_statchg = cnmac_mii_statchg;
	ifmedia_init(&sc->sc_mii.mii_media, 0, cnmac_mediachange,
	    cnmac_mediastatus);

	mii_attach(&sc->sc_dev, &sc->sc_mii,
	    0xffffffff, sc->sc_phy_addr, MII_OFFSET_ANY, MIIF_DOPAUSE);

	child = LIST_FIRST(&sc->sc_mii.mii_phys);
	if (child == NULL) {
                /* No PHY attached. */
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL,
			    0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL);
	} else {
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
	}

	return 0;
}
491 
492 void
493 cnmac_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
494 {
495 	struct cnmac_softc *sc = ifp->if_softc;
496 
497 	mii_pollstat(&sc->sc_mii);
498 	ifmr->ifm_status = sc->sc_mii.mii_media_status;
499 	ifmr->ifm_active = sc->sc_mii.mii_media_active;
500 	ifmr->ifm_active = (sc->sc_mii.mii_media_active & ~IFM_ETH_FMASK) |
501 	    sc->sc_gmx_port->sc_port_flowflags;
502 }
503 
504 int
505 cnmac_mediachange(struct ifnet *ifp)
506 {
507 	struct cnmac_softc *sc = ifp->if_softc;
508 
509 	if ((ifp->if_flags & IFF_UP) == 0)
510 		return 0;
511 
512 	return mii_mediachg(&sc->sc_mii);
513 }
514 
515 /* ---- send buffer garbage collection */
516 
/*
 * Kick off an asynchronous (IOBDMA) fetch of the FAU TX-done counter;
 * the result is collected later by cnmac_send_queue_flush_fetch().
 */
void
cnmac_send_queue_flush_prefetch(struct cnmac_softc *sc)
{
	OCTEON_ETH_KASSERT(sc->sc_prefetch == 0);
	cn30xxfau_op_inc_fetch_8(&sc->sc_fau_done, 0);
	sc->sc_prefetch = 1;
}
524 
/*
 * Collect the FAU counter value requested by the matching
 * ..._flush_prefetch() call into sc_hard_done_cnt — a non-positive
 * count of hardware-completed transmissions awaiting reclaim.
 */
void
cnmac_send_queue_flush_fetch(struct cnmac_softc *sc)
{
#ifndef  OCTEON_ETH_DEBUG
	/* Tolerate a fetch without a pending prefetch outside debug builds. */
	if (!sc->sc_prefetch)
		return;
#endif
	OCTEON_ETH_KASSERT(sc->sc_prefetch == 1);
	sc->sc_hard_done_cnt = cn30xxfau_op_inc_read_8(&sc->sc_fau_done);
	OCTEON_ETH_KASSERT(sc->sc_hard_done_cnt <= 0);
	sc->sc_prefetch = 0;
}
537 
/*
 * Reclaim packets the hardware has finished sending: free the mbufs,
 * return their gather buffers to the SG FPA pool, and credit the FAU
 * counter for the entries consumed.
 */
void
cnmac_send_queue_flush(struct cnmac_softc *sc)
{
	const int64_t sent_count = sc->sc_hard_done_cnt;
	int i;

	OCTEON_ETH_KASSERT(sent_count <= 0);

	/* sent_count is non-positive; -sent_count packets completed. */
	for (i = 0; i < 0 - sent_count; i++) {
		struct mbuf *m;
		uint64_t *gbuf;

		cnmac_send_queue_del(sc, &m, &gbuf);

		cn30xxfpa_buf_put_paddr(cnmac_fb_sg, XKPHYS_TO_PHYS(gbuf));

		m_freem(m);
	}

	cn30xxfau_op_add_8(&sc->sc_fau_done, i);
}
559 
/*
 * Return 1 if the TX gather queue has no room for another packet.
 * sc_hard_done_cnt is non-positive, so the sum below is the number of
 * queued-but-unreclaimed packets; at the limit, reclaim completed
 * ones before reporting the queue full (see XXXSEIL note at top).
 */
int
cnmac_send_queue_is_full(struct cnmac_softc *sc)
{
#ifdef OCTEON_ETH_SEND_QUEUE_CHECK
	int64_t nofree_cnt;

	nofree_cnt = ml_len(&sc->sc_sendq) + sc->sc_hard_done_cnt;

	if (__predict_false(nofree_cnt == GATHER_QUEUE_SIZE - 1)) {
		cnmac_send_queue_flush(sc);
		return 1;
	}

#endif
	return 0;
}
576 
/*
 * Record an in-flight TX mbuf; its gather buffer is stashed in the
 * packet-header cookie so cnmac_send_queue_del() can return it later.
 */
void
cnmac_send_queue_add(struct cnmac_softc *sc, struct mbuf *m,
    uint64_t *gbuf)
{
	OCTEON_ETH_KASSERT(m->m_flags & M_PKTHDR);

	m->m_pkthdr.ph_cookie = gbuf;
	ml_enqueue(&sc->sc_sendq, m);

	/* Count external-storage mbufs whose free callback is outstanding. */
	if (m->m_ext.ext_free_fn != 0)
		sc->sc_ext_callback_cnt++;
}
589 
/*
 * Dequeue the oldest in-flight TX mbuf, returning it and the gather
 * buffer stashed in its packet-header cookie by cnmac_send_queue_add().
 */
void
cnmac_send_queue_del(struct cnmac_softc *sc, struct mbuf **rm,
    uint64_t **rgbuf)
{
	struct mbuf *m;
	m = ml_dequeue(&sc->sc_sendq);
	OCTEON_ETH_KASSERT(m != NULL);

	*rm = m;
	*rgbuf = m->m_pkthdr.ph_cookie;

	if (m->m_ext.ext_free_fn != 0) {
		sc->sc_ext_callback_cnt--;
		OCTEON_ETH_KASSERT(sc->sc_ext_callback_cnt >= 0);
	}
}
606 
/*
 * Return every packet data buffer referenced by a received work-queue
 * entry to the FPA packet pool, then return the WQE itself.  Chained
 * segments are walked via the WORD3-format link stored immediately
 * before each segment's data.  Always returns 0.
 */
int
cnmac_buf_free_work(struct cnmac_softc *sc, uint64_t *work)
{
	paddr_t addr, pktbuf;
	uint64_t word3;
	unsigned int back, nbufs;

	nbufs = (work[2] & PIP_WQE_WORD2_IP_BUFS) >>
	    PIP_WQE_WORD2_IP_BUFS_SHIFT;
	word3 = work[3];
	while (nbufs-- > 0) {
		addr = word3 & PIP_WQE_WORD3_ADDR;
		back = (word3 & PIP_WQE_WORD3_BACK) >>
		    PIP_WQE_WORD3_BACK_SHIFT;
		/* Buffer start is "back" cache lines before the data address. */
		pktbuf = (addr & ~(CACHELINESIZE - 1)) - back * CACHELINESIZE;

		cn30xxfpa_store(pktbuf, OCTEON_POOL_NO_PKT,
		    OCTEON_POOL_SIZE_PKT / CACHELINESIZE);

		/* The next segment's link word precedes this segment's data. */
		if (nbufs > 0)
			memcpy(&word3, (void *)PHYS_TO_XKPHYS(addr -
			    sizeof(word3), CCA_CACHED), sizeof(word3));
	}

	cn30xxfpa_buf_put_paddr(cnmac_fb_wqe, XKPHYS_TO_PHYS(work));

	return 0;
}
635 
636 /* ---- ifnet interfaces */
637 
/*
 * Handle interface ioctls at splnet.  Media requests are routed to
 * ifmedia/MII; flag and multicast changes that yield ENETRESET are
 * absorbed by reprogramming the GMX receive filter.
 */
int
cnmac_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct cnmac_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			cnmac_init(ifp);
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				cnmac_init(ifp);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				cnmac_stop(ifp, 0);
		}
		break;

	case SIOCSIFMEDIA:
		/* Flow control requires full-duplex mode. */
		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
		    (ifr->ifr_media & IFM_FDX) == 0) {
			ifr->ifr_media &= ~IFM_ETH_FMASK;
		}
		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
			/* IFM_FLOW implies pause in both directions. */
			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
				ifr->ifr_media |=
				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
			}
			sc->sc_gmx_port->sc_port_flowflags =
				ifr->ifr_media & IFM_ETH_FMASK;
		}
		/* FALLTHROUGH */
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;

	default:
		error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data);
	}

	if (error == ENETRESET) {
		/* A running interface only needs its RX filter refreshed. */
		if (ISSET(ifp->if_flags, IFF_RUNNING))
			cn30xxgmx_set_filter(sc->sc_gmx_port);
		error = 0;
	}

	splx(s);
	return (error);
}
698 
699 /* ---- send (output) */
700 
/*
 * Build PKO command WORD0.  ipoffp1 is the IP header offset plus one
 * (0 disables TCP/UDP checksum offload); segs selects linked mode (1)
 * versus gather mode (>1) via the "g" bit.
 */
uint64_t
cnmac_send_makecmd_w0(uint64_t fau0, uint64_t fau1, size_t len, int segs,
    int ipoffp1)
{
	return cn30xxpko_cmd_word0(
		OCT_FAU_OP_SIZE_64,		/* sz1 */
		OCT_FAU_OP_SIZE_64,		/* sz0 */
		1, fau1, 1, fau0,		/* s1, reg1, s0, reg0 */
		0,				/* le */
		cnmac_param_pko_cmd_w0_n2,	/* n2 */
		1, 0,				/* q, r */
		(segs == 1) ? 0 : 1,		/* g */
		ipoffp1, 0, 1,			/* ipoffp1, ii, df */
		segs, (int)len);		/* segs, totalbytes */
}
716 
/*
 * Build PKO command WORD1 (or one gather-list entry): physical
 * address plus size, with buffers owned by the SG FPA pool.
 */
uint64_t
cnmac_send_makecmd_w1(int size, paddr_t addr)
{
	return cn30xxpko_cmd_word1(
		0, 0,				/* i, back */
		OCTEON_POOL_NO_SG,		/* pool */
		size, addr);			/* size, addr */
}
725 
/* Convert a direct-mapped (XKPHYS) kernel virtual address to physical. */
#define KVTOPHYS(addr)	cnmac_kvtophys((vaddr_t)(addr))

static inline paddr_t
cnmac_kvtophys(vaddr_t kva)
{
	KASSERT(IS_XKPHYS(kva));
	return XKPHYS_TO_PHYS(kva);
}
734 
/*
 * Fill the gather buffer with one WORD1 entry per non-empty mbuf in
 * the chain.  If the chain has more segments than one SG buffer can
 * describe, defragment it into a single contiguous mbuf instead.
 * Returns 0 on success with *rsegs set, 1 if defragmentation fails.
 */
int
cnmac_send_makecmd_gbuf(struct cnmac_softc *sc, struct mbuf *m0,
    uint64_t *gbuf, int *rsegs)
{
	struct mbuf *m;
	int segs = 0;

	for (m = m0; m != NULL; m = m->m_next) {
		if (__predict_false(m->m_len == 0))
			continue;

		/* Too many segments for one SG buffer: fall back to defrag. */
		if (segs >= OCTEON_POOL_SIZE_SG / sizeof(uint64_t))
			goto defrag;
		gbuf[segs] = cnmac_send_makecmd_w1(m->m_len,
		    KVTOPHYS(m->m_data));
		segs++;
	}

	*rsegs = segs;

	return 0;

defrag:
	if (m_defrag(m0, M_DONTWAIT) != 0)
		return 1;
	gbuf[0] = cnmac_send_makecmd_w1(m0->m_len, KVTOPHYS(m0->m_data));
	*rsegs = 1;
	return 0;
}
764 
/*
 * Construct the two PKO command words describing mbuf chain m.
 * Returns 0 on success with *rpko_cmd_w0/w1 filled in, nonzero if the
 * chain could not fit the gather buffer and defragmentation failed.
 */
int
cnmac_send_makecmd(struct cnmac_softc *sc, struct mbuf *m,
    uint64_t *gbuf, uint64_t *rpko_cmd_w0, uint64_t *rpko_cmd_w1)
{
	uint64_t pko_cmd_w0, pko_cmd_w1;
	int ipoffp1;
	int segs;
	int result = 0;

	if (cnmac_send_makecmd_gbuf(sc, m, gbuf, &segs)) {
		log(LOG_WARNING, "%s: large number of transmission"
		    " data segments", sc->sc_dev.dv_xname);
		result = 1;
		goto done;
	}

	/* Get the IP packet offset for TCP/UDP checksum offloading. */
	ipoffp1 = (m->m_pkthdr.csum_flags & (M_TCP_CSUM_OUT | M_UDP_CSUM_OUT))
	    ? (ETHER_HDR_LEN + 1) : 0;

	/*
	 * segs == 1	-> link mode (single continuous buffer)
	 *		   WORD1[size] is number of bytes pointed by segment
	 *
	 * segs > 1	-> gather mode (scatter-gather buffer)
	 *		   WORD1[size] is number of segments
	 */
	pko_cmd_w0 = cnmac_send_makecmd_w0(sc->sc_fau_done.fd_regno,
	    0, m->m_pkthdr.len, segs, ipoffp1);
	pko_cmd_w1 = cnmac_send_makecmd_w1(
	    (segs == 1) ? m->m_pkthdr.len : segs,
	    (segs == 1) ?
		KVTOPHYS(m->m_data) :
		XKPHYS_TO_PHYS(gbuf));

	*rpko_cmd_w0 = pko_cmd_w0;
	*rpko_cmd_w1 = pko_cmd_w1;

done:
	return result;
}
806 
/*
 * Append a two-word PKO command at the current command-buffer index
 * and ring the doorbell.  When the buffer is (almost) full, chain a
 * fresh buffer from the CMD FPA pool via a link word.  Returns 0 on
 * success, 1 if no command buffer could be allocated.
 */
int
cnmac_send_cmd(struct cnmac_softc *sc, uint64_t pko_cmd_w0,
    uint64_t pko_cmd_w1)
{
	uint64_t *cmdptr;
	int result = 0;

	cmdptr = (uint64_t *)PHYS_TO_XKPHYS(sc->sc_cmdptr.cmdptr, CCA_CACHED);
	cmdptr += sc->sc_cmdptr.cmdptr_idx;

	OCTEON_ETH_KASSERT(cmdptr != NULL);

	*cmdptr++ = pko_cmd_w0;
	*cmdptr++ = pko_cmd_w1;

	OCTEON_ETH_KASSERT(sc->sc_cmdptr.cmdptr_idx + 2 <= FPA_COMMAND_BUFFER_POOL_NWORDS - 1);

	/* Last usable slot reached: chain a new command buffer. */
	if (sc->sc_cmdptr.cmdptr_idx + 2 == FPA_COMMAND_BUFFER_POOL_NWORDS - 1) {
		paddr_t buf;

		buf = cn30xxfpa_buf_get_paddr(cnmac_fb_cmd);
		if (buf == 0) {
			log(LOG_WARNING,
			    "%s: cannot allocate command buffer from free pool allocator\n",
			    sc->sc_dev.dv_xname);
			result = 1;
			goto done;
		}
		/* Link word pointing at the next buffer. */
		*cmdptr++ = buf;
		sc->sc_cmdptr.cmdptr = (uint64_t)buf;
		sc->sc_cmdptr.cmdptr_idx = 0;
	} else {
		sc->sc_cmdptr.cmdptr_idx += 2;
	}

	cn30xxpko_op_doorbell_write(sc->sc_port, sc->sc_port, 2);

done:
	return result;
}
847 
848 int
849 cnmac_send_buf(struct cnmac_softc *sc, struct mbuf *m, uint64_t *gbuf)
850 {
851 	int result = 0, error;
852 	uint64_t pko_cmd_w0, pko_cmd_w1;
853 
854 	error = cnmac_send_makecmd(sc, m, gbuf, &pko_cmd_w0, &pko_cmd_w1);
855 	if (error != 0) {
856 		/* already logging */
857 		result = error;
858 		goto done;
859 	}
860 
861 	error = cnmac_send_cmd(sc, pko_cmd_w0, pko_cmd_w1);
862 	if (error != 0) {
863 		/* already logging */
864 		result = error;
865 	}
866 
867 done:
868 	return result;
869 }
870 
/*
 * Transmit one mbuf chain: allocate a gather buffer from the SG pool,
 * build and post the PKO command, and on success keep the mbuf on the
 * send queue until the hardware reports completion.  Returns nonzero
 * on failure (caller frees the mbuf).
 */
int
cnmac_send(struct cnmac_softc *sc, struct mbuf *m)
{
	paddr_t gaddr = 0;
	uint64_t *gbuf = NULL;
	int result = 0, error;

	gaddr = cn30xxfpa_buf_get_paddr(cnmac_fb_sg);
	if (gaddr == 0) {
		log(LOG_WARNING,
		    "%s: cannot allocate gather buffer from free pool allocator\n",
		    sc->sc_dev.dv_xname);
		result = 1;
		goto done;
	}

	gbuf = (uint64_t *)(uintptr_t)PHYS_TO_XKPHYS(gaddr, CCA_CACHED);

	error = cnmac_send_buf(sc, m, gbuf);
	if (error != 0) {
		/* already logging */
		cn30xxfpa_buf_put_paddr(cnmac_fb_sg, gaddr);
		result = error;
		goto done;
	}

	cnmac_send_queue_add(sc, m, gbuf);

done:
	return result;
}
902 
/*
 * ifq start routine (MP-safe qstart).  Drains the send queue while
 * overlapping the FAU TX-done counter read (IOBDMA prefetch) with
 * packet command construction.  Purges everything when there is no
 * link.
 */
void
cnmac_start(struct ifqueue *ifq)
{
	struct ifnet *ifp = ifq->ifq_if;
	struct cnmac_softc *sc = ifp->if_softc;
	struct mbuf *m;

	if (__predict_false(!cn30xxgmx_link_status(sc->sc_gmx_port))) {
		ifq_purge(ifq);
		return;
	}

	/*
	 * performance tuning
	 * presend iobdma request
	 */
	cnmac_send_queue_flush_prefetch(sc);

	for (;;) {
		cnmac_send_queue_flush_fetch(sc); /* XXX */

		/*
		 * XXXSEIL
		 * If no free send buffer is available, free all the sent buffer
		 * and bail out.
		 */
		if (cnmac_send_queue_is_full(sc)) {
			ifq_set_oactive(ifq);
			timeout_add(&sc->sc_tick_free_ch, 1);
			return;
		}

		m = ifq_dequeue(ifq);
		if (m == NULL)
			return;

#if NBPFILTER > 0
		if (ifp->if_bpf != NULL)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif

		/* XXX */
		/* Reclaim completed transmits once the backlog builds up. */
		if (ml_len(&sc->sc_sendq) > sc->sc_soft_req_thresh)
			cnmac_send_queue_flush(sc);
		if (cnmac_send(sc, m)) {
			ifp->if_oerrors++;
			m_freem(m);
			log(LOG_WARNING,
		  	  "%s: failed to transmit packet\n",
		    	  sc->sc_dev.dv_xname);
		}
		/* XXX */

		/*
		 * send next iobdma request
		 */
		cnmac_send_queue_flush_prefetch(sc);
	}

	/* NOTE(review): unreachable — both loop exits return above. */
	cnmac_send_queue_flush_fetch(sc);
}
964 
/*
 * TX watchdog: the hardware stopped making progress; stop, fully
 * reconfigure the datapath, and restart the send queue.
 */
void
cnmac_watchdog(struct ifnet *ifp)
{
	struct cnmac_softc *sc = ifp->if_softc;

	printf("%s: device timeout\n", sc->sc_dev.dv_xname);

	cnmac_stop(ifp, 0);

	cnmac_configure(sc);

	SET(ifp->if_flags, IFF_RUNNING);
	ifp->if_timer = 0;

	ifq_restart(&ifp->if_snd);
}
981 
/*
 * Bring the interface up.  Heavyweight configuration runs only the
 * first time (sc_init_flag); later calls merely re-enable the GMX
 * port, refresh MAC/filter, and restart the tick timeouts.  Always
 * returns 0.
 */
int
cnmac_init(struct ifnet *ifp)
{
	struct cnmac_softc *sc = ifp->if_softc;

	/* XXX don't disable commonly used parts!!! XXX */
	if (sc->sc_init_flag == 0) {
		/* Cancel any pending I/O. */
		cnmac_stop(ifp, 0);

		/* Initialize the device */
		cnmac_configure(sc);

		cn30xxpko_enable(sc->sc_pko);
		cn30xxipd_enable(sc->sc_ipd);

		sc->sc_init_flag = 1;
	} else {
		cn30xxgmx_port_enable(sc->sc_gmx_port, 1);
	}
	cnmac_mediachange(ifp);

	cn30xxgmx_set_mac_addr(sc->sc_gmx_port, sc->sc_arpcom.ac_enaddr);
	cn30xxgmx_set_filter(sc->sc_gmx_port);

	timeout_add_sec(&sc->sc_tick_misc_ch, 1);
	timeout_add_sec(&sc->sc_tick_free_ch, 1);

	SET(ifp->if_flags, IFF_RUNNING);
	ifq_clr_oactive(&ifp->if_snd);

	return 0;
}
1015 
/*
 * Take the interface down: cancel the tick timeouts and MII autopoll,
 * disable the GMX port, then wait out any interrupt handler or ifq
 * start still in flight before clearing queue state.  The "disable"
 * argument is currently unused.  Always returns 0.
 */
int
cnmac_stop(struct ifnet *ifp, int disable)
{
	struct cnmac_softc *sc = ifp->if_softc;

	CLR(ifp->if_flags, IFF_RUNNING);

	timeout_del(&sc->sc_tick_misc_ch);
	timeout_del(&sc->sc_tick_free_ch);

	mii_down(&sc->sc_mii);

	cn30xxgmx_port_enable(sc->sc_gmx_port, 0);

	/* Drain concurrent interrupt handlers and queue starts. */
	intr_barrier(sc->sc_ih);
	ifq_barrier(&ifp->if_snd);

	ifq_clr_oactive(&ifp->if_snd);
	ifp->if_timer = 0;

	return 0;
}
1038 
1039 /* ---- misc */
1040 
1041 #define PKO_INDEX_MASK	((1ULL << 12/* XXX */) - 1)
1042 
/*
 * Reprogram GMX speed, flow control, and timing from the current link
 * state; used on MII status changes and full reconfiguration.  Always
 * returns 0.
 */
int
cnmac_reset(struct cnmac_softc *sc)
{
	cn30xxgmx_reset_speed(sc->sc_gmx_port);
	cn30xxgmx_reset_flowctl(sc->sc_gmx_port);
	cn30xxgmx_reset_timing(sc->sc_gmx_port);

	return 0;
}
1052 
/*
 * Per-port datapath configuration: with GMX disabled, reset link
 * parameters, set up PKO and POW for this port, switch the GMX
 * statistics registers to read-to-clear mode, then re-enable the
 * port.  Always returns 0.
 */
int
cnmac_configure(struct cnmac_softc *sc)
{
	cn30xxgmx_port_enable(sc->sc_gmx_port, 0);

	cnmac_reset(sc);

	cn30xxpko_port_config(sc->sc_pko);
	cn30xxpko_port_enable(sc->sc_pko, 1);
	cn30xxpow_config(sc->sc_pow, sc->sc_powgroup);

	cn30xxgmx_tx_stats_rd_clr(sc->sc_gmx_port, 1);
	cn30xxgmx_rx_stats_rd_clr(sc->sc_gmx_port, 1);

	cn30xxgmx_port_enable(sc->sc_gmx_port, 1);

	return 0;
}
1071 
/*
 * Chip-wide configuration shared by all ports; executed only once
 * (guarded by "once") no matter how many cnmac instances attach.
 * Always returns 0.
 */
int
cnmac_configure_common(struct cnmac_softc *sc)
{
	static int once;

	uint64_t reg;

	if (once == 1)
		return 0;
	once = 1;

	cn30xxipd_config(sc->sc_ipd);
	cn30xxpko_config(sc->sc_pko);

	/* Set padding for packets that Octeon does not recognize as IP. */
	reg = octeon_xkphys_read_8(PIP_GBL_CFG);
	reg &= ~PIP_GBL_CFG_NIP_SHF_MASK;
	reg |= ETHER_ALIGN << PIP_GBL_CFG_NIP_SHF_SHIFT;
	octeon_xkphys_write_8(PIP_GBL_CFG, reg);

	return 0;
}
1094 
/*
 * Feed up to n mbuf clusters into the FPA packet pool.  Each buffer
 * is cache-line aligned and a back-pointer to the owning mbuf is
 * stored in the word just before the data, so RX can recover the mbuf
 * from a hardware buffer address.  Returns the number of mbufs that
 * could NOT be allocated (the caller re-credits that shortfall).
 */
int
cnmac_mbuf_alloc(int n)
{
	struct mbuf *m;
	paddr_t pktbuf;

	while (n > 0) {
		m = MCLGETI(NULL, M_NOWAIT, NULL,
		    OCTEON_POOL_SIZE_PKT + CACHELINESIZE);
		if (m == NULL || !ISSET(m->m_flags, M_EXT)) {
			m_freem(m);
			break;
		}

		/* Cache-line align the data and stash the mbuf pointer. */
		m->m_data = (void *)(((vaddr_t)m->m_data + CACHELINESIZE) &
		    ~(CACHELINESIZE - 1));
		((struct mbuf **)m->m_data)[-1] = m;

		pktbuf = KVTOPHYS(m->m_data);
		m->m_pkthdr.ph_cookie = (void *)pktbuf;
		cn30xxfpa_store(pktbuf, OCTEON_POOL_NO_PKT,
		    OCTEON_POOL_SIZE_PKT / CACHELINESIZE);

		n--;
	}
	return n;
}
1122 
/*
 * Convert a received work-queue entry into an mbuf chain.  The mbuf
 * backing each hardware buffer is recovered from the back-pointer
 * stored just before the buffer data by cnmac_mbuf_alloc().  Returns
 * the chain in *rm and the number of consumed packet buffers in
 * *nmbuf; the WQE itself is returned to its pool immediately.
 */
int
cnmac_recv_mbuf(struct cnmac_softc *sc, uint64_t *work,
    struct mbuf **rm, int *nmbuf)
{
	struct mbuf *m, *m0, *mprev, **pm;
	paddr_t addr, pktbuf;
	uint64_t word1 = work[1];
	uint64_t word2 = work[2];
	uint64_t word3 = work[3];
	unsigned int back, i, nbufs;
	unsigned int left, total, size;

	cn30xxfpa_buf_put_paddr(cnmac_fb_wqe, XKPHYS_TO_PHYS(work));

	nbufs = (word2 & PIP_WQE_WORD2_IP_BUFS) >> PIP_WQE_WORD2_IP_BUFS_SHIFT;
	if (nbufs == 0)
		panic("%s: dynamic short packet", __func__);

	m0 = mprev = NULL;
	total = left = (word1 & PIP_WQE_WORD1_LEN) >> 48;
	for (i = 0; i < nbufs; i++) {
		addr = word3 & PIP_WQE_WORD3_ADDR;
		back = (word3 & PIP_WQE_WORD3_BACK) >> PIP_WQE_WORD3_BACK_SHIFT;
		/* Buffer start is "back" cache lines before the data address. */
		pktbuf = (addr & ~(CACHELINESIZE - 1)) - back * CACHELINESIZE;
		/* Recover the mbuf stashed just before the buffer data. */
		pm = (struct mbuf **)PHYS_TO_XKPHYS(pktbuf, CCA_CACHED) - 1;
		m = *pm;
		*pm = NULL;
		if ((paddr_t)m->m_pkthdr.ph_cookie != pktbuf)
			panic("%s: packet pool is corrupted, mbuf cookie %p != "
			    "pktbuf %p", __func__, m->m_pkthdr.ph_cookie,
			    (void *)pktbuf);

		/*
		 * Because of a hardware bug in some Octeon models the size
		 * field of word3 can be wrong (erratum PKI-100).
		 * However, the hardware uses all space in a buffer before
		 * moving to the next one so it is possible to derive
		 * the size of this data segment from the size
		 * of packet data buffers.
		 */
		size = OCTEON_POOL_SIZE_PKT - (addr - pktbuf);
		if (size > left)
			size = left;

		m->m_pkthdr.ph_cookie = NULL;
		m->m_data += addr - pktbuf;
		m->m_len = size;
		left -= size;

		if (m0 == NULL)
			m0 = m;
		else {
			m->m_flags &= ~M_PKTHDR;
			mprev->m_next = m;
		}
		mprev = m;

		/* The next segment's link word precedes this segment's data. */
		if (i + 1 < nbufs)
			memcpy(&word3, (void *)PHYS_TO_XKPHYS(addr -
			    sizeof(word3), CCA_CACHED), sizeof(word3));
	}

	m0->m_pkthdr.len = total;
	*rm = m0;
	*nmbuf = nbufs;

	return 0;
}
1191 
1192 int
1193 cnmac_recv_check(struct cnmac_softc *sc, uint64_t word2)
1194 {
1195 	static struct timeval rxerr_log_interval = { 0, 250000 };
1196 	uint64_t opecode;
1197 
1198 	if (__predict_true(!ISSET(word2, PIP_WQE_WORD2_NOIP_RE)))
1199 		return 0;
1200 
1201 	opecode = word2 & PIP_WQE_WORD2_NOIP_OPECODE;
1202 	if ((sc->sc_arpcom.ac_if.if_flags & IFF_DEBUG) &&
1203 	    ratecheck(&sc->sc_rxerr_log_last, &rxerr_log_interval))
1204 		log(LOG_DEBUG, "%s: rx error (%lld)\n", sc->sc_dev.dv_xname,
1205 		    opecode);
1206 
1207 	/* XXX harmless error? */
1208 	if (opecode == PIP_WQE_WORD2_RE_OPCODE_OVRRUN)
1209 		return 0;
1210 
1211 	return 1;
1212 }
1213 
1214 int
1215 cnmac_recv(struct cnmac_softc *sc, uint64_t *work, struct mbuf_list *ml)
1216 {
1217 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
1218 	struct mbuf *m;
1219 	uint64_t word2;
1220 	int nmbuf = 0;
1221 
1222 	word2 = work[2];
1223 
1224 	if (!(ifp->if_flags & IFF_RUNNING))
1225 		goto drop;
1226 
1227 	if (__predict_false(cnmac_recv_check(sc, word2) != 0)) {
1228 		ifp->if_ierrors++;
1229 		goto drop;
1230 	}
1231 
1232 	/* On success, this releases the work queue entry. */
1233 	if (__predict_false(cnmac_recv_mbuf(sc, work, &m, &nmbuf) != 0)) {
1234 		ifp->if_ierrors++;
1235 		goto drop;
1236 	}
1237 
1238 	cn30xxipd_offload(word2, &m->m_pkthdr.csum_flags);
1239 
1240 	ml_enqueue(ml, m);
1241 
1242 	return nmbuf;
1243 
1244 drop:
1245 	cnmac_buf_free_work(sc, work);
1246 	return 0;
1247 }
1248 
/*
 * Interrupt handler: drain this interface's POW work queue group and
 * hand the received packets to the network stack.
 */
int
cnmac_intr(void *arg)
{
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct cnmac_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	uint64_t *work;
	uint64_t wqmask = 1ull << sc->sc_powgroup;
	uint32_t coreid = octeon_get_coreid();
	uint32_t port;
	int nmbuf = 0;

	/* Let this core accept work from this interface's group. */
	_POW_WR8(sc->sc_pow, POW_PP_GRP_MSK_OFFSET(coreid), wqmask);

	/* Prime the first asynchronous work request. */
	cn30xxpow_tag_sw_wait();
	cn30xxpow_work_request_async(OCTEON_CVMSEG_OFFSET(csm_pow_intr),
	    POW_NO_WAIT);

	for (;;) {
		work = (uint64_t *)cn30xxpow_work_response_async(
		    OCTEON_CVMSEG_OFFSET(csm_pow_intr));
		if (work == NULL)
			break;

		/* Overlap the next request with processing of this WQE. */
		cn30xxpow_tag_sw_wait();
		cn30xxpow_work_request_async(
		    OCTEON_CVMSEG_OFFSET(csm_pow_intr), POW_NO_WAIT);

		port = (work[1] & PIP_WQE_WORD1_IPRT) >> 42;
		if (port != sc->sc_port) {
			printf("%s: unexpected wqe port %u, should be %u\n",
			    sc->sc_dev.dv_xname, port, sc->sc_port);
			goto wqe_error;
		}

		/* Count consumed packet buffers for later replenishment. */
		nmbuf += cnmac_recv(sc, work, &ml);
	}

	/* Acknowledge the work queue interrupt for this group. */
	_POW_WR8(sc->sc_pow, POW_WQ_INT_OFFSET, wqmask);

	if_input(ifp, &ml);

	/*
	 * Refill the packet pool; remember any deficit so that
	 * cnmac_tick_free() can retry the allocation later.
	 */
	nmbuf = cnmac_mbuf_alloc(nmbuf);
	if (nmbuf != 0)
		atomic_add_int(&cnmac_mbufs_to_alloc, nmbuf);

	return 1;

wqe_error:
	/* Dump the offending WQE before giving up. */
	printf("word0: 0x%016llx\n", work[0]);
	printf("word1: 0x%016llx\n", work[1]);
	printf("word2: 0x%016llx\n", work[2]);
	printf("word3: 0x%016llx\n", work[3]);
	panic("wqe error");
}
1304 
1305 /* ---- tick */
1306 
1307 void
1308 cnmac_free_task(void *arg)
1309 {
1310 	struct cnmac_softc *sc = arg;
1311 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
1312 	struct ifqueue *ifq = &ifp->if_snd;
1313 	int resched = 1;
1314 	int timeout;
1315 
1316 	if (ml_len(&sc->sc_sendq) > 0) {
1317 		cnmac_send_queue_flush_prefetch(sc);
1318 		cnmac_send_queue_flush_fetch(sc);
1319 		cnmac_send_queue_flush(sc);
1320 	}
1321 
1322 	if (ifq_is_oactive(ifq)) {
1323 		ifq_clr_oactive(ifq);
1324 		cnmac_start(ifq);
1325 
1326 		if (ifq_is_oactive(ifq)) {
1327 			/* The start routine did rescheduling already. */
1328 			resched = 0;
1329 		}
1330 	}
1331 
1332 	if (resched) {
1333 		timeout = (sc->sc_ext_callback_cnt > 0) ? 1 : hz;
1334 		timeout_add(&sc->sc_tick_free_ch, timeout);
1335 	}
1336 }
1337 
1338 /*
1339  * cnmac_tick_free
1340  *
1341  * => garbage collect send gather buffer / mbuf
1342  * => called at softclock
1343  */
1344 void
1345 cnmac_tick_free(void *arg)
1346 {
1347 	struct cnmac_softc *sc = arg;
1348 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
1349 	int to_alloc;
1350 
1351 	ifq_serialize(&ifp->if_snd, &sc->sc_free_task);
1352 
1353 	if (cnmac_mbufs_to_alloc != 0) {
1354 		to_alloc = atomic_swap_uint(&cnmac_mbufs_to_alloc, 0);
1355 		to_alloc = cnmac_mbuf_alloc(to_alloc);
1356 		if (to_alloc != 0)
1357 			atomic_add_int(&cnmac_mbufs_to_alloc, to_alloc);
1358 	}
1359 }
1360 
1361 /*
1362  * cnmac_tick_misc
1363  *
1364  * => collect statistics
1365  * => check link status
1366  * => called at softclock
1367  */
1368 void
1369 cnmac_tick_misc(void *arg)
1370 {
1371 	struct cnmac_softc *sc = arg;
1372 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
1373 	int s;
1374 
1375 	s = splnet();
1376 
1377 	cn30xxgmx_stats(sc->sc_gmx_port);
1378 	cn30xxpip_stats(sc->sc_pip, ifp, sc->sc_port);
1379 	mii_tick(&sc->sc_mii);
1380 
1381 	splx(s);
1382 
1383 	timeout_add_sec(&sc->sc_tick_misc_ch, 1);
1384 }
1385