/*	$OpenBSD: if_cnmac.c,v 1.59 2016/11/02 01:29:43 visa Exp $	*/

/*
 * Copyright (c) 2007 Internet Initiative Japan, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include "bpfilter.h"

/*
 * XXXSEIL
 * If no free send buffer is available, free all the sent buffers and
 * bail out.
 */
#define OCTEON_ETH_SEND_QUEUE_CHECK

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/pool.h>
#include <sys/proc.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/conf.h>
#include <sys/stdint.h> /* uintptr_t */
#include <sys/syslog.h>
#include <sys/endian.h>
#include <sys/atomic.h>
#ifdef MBUF_TIMESTAMP
#include <sys/time.h>
#endif

#include <net/if.h>
#include <net/if_media.h>
#include <netinet/in.h>
#include <netinet/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <machine/bus.h>
#include <machine/intr.h>
#include <machine/octeonvar.h>
#include <machine/octeon_model.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <octeon/dev/cn30xxciureg.h>
#include <octeon/dev/cn30xxnpireg.h>
#include <octeon/dev/cn30xxgmxreg.h>
#include <octeon/dev/cn30xxipdreg.h>
#include <octeon/dev/cn30xxpipreg.h>
#include <octeon/dev/cn30xxpowreg.h>
#include <octeon/dev/cn30xxfaureg.h>
#include <octeon/dev/cn30xxfpareg.h>
#include <octeon/dev/cn30xxbootbusreg.h>
#include <octeon/dev/cn30xxfpavar.h>
#include <octeon/dev/cn30xxgmxvar.h>
#include <octeon/dev/cn30xxfauvar.h>
#include <octeon/dev/cn30xxpowvar.h>
#include <octeon/dev/cn30xxipdvar.h>
#include <octeon/dev/cn30xxpipvar.h>
#include <octeon/dev/cn30xxpkovar.h>
#include <octeon/dev/cn30xxsmivar.h>
#include <octeon/dev/iobusvar.h>
#include <octeon/dev/if_cnmacvar.h>

#ifdef OCTEON_ETH_DEBUG
#define	OCTEON_ETH_KASSERT(x)	KASSERT(x)
#define	OCTEON_ETH_KDASSERT(x)	KDASSERT(x)
#else
#define	OCTEON_ETH_KASSERT(x)
#define	OCTEON_ETH_KDASSERT(x)
#endif

/*
 * Set the PKO to think command buffers are an odd length.  This makes it so we
 * never have to divide a command across two buffers.
 */
#define OCTEON_POOL_NWORDS_CMD	\
	    (((uint32_t)OCTEON_POOL_SIZE_CMD / sizeof(uint64_t)) - 1)
#define FPA_COMMAND_BUFFER_POOL_NWORDS	OCTEON_POOL_NWORDS_CMD	/* XXX */
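
/*
 * Worked example of the arithmetic above (the buffer size is
 * illustrative; the real OCTEON_POOL_SIZE_CMD lives in if_cnmacvar.h):
 * a 1024-byte buffer holds 1024 / sizeof(uint64_t) = 128 words, so the
 * PKO is told it holds 127.  Every transmit command is two words, so
 * with an odd word count a command can never be split across buffers,
 * and octeon_eth_send_cmd() always has the final word of the buffer
 * free for the physical address that chains in the next buffer.
 */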

void	octeon_eth_buf_init(struct octeon_eth_softc *);

int	octeon_eth_match(struct device *, void *, void *);
void	octeon_eth_attach(struct device *, struct device *, void *);
void	octeon_eth_pip_init(struct octeon_eth_softc *);
void	octeon_eth_ipd_init(struct octeon_eth_softc *);
void	octeon_eth_pko_init(struct octeon_eth_softc *);
void	octeon_eth_smi_init(struct octeon_eth_softc *);

void	octeon_eth_board_mac_addr(uint8_t *);

int	octeon_eth_mii_readreg(struct device *, int, int);
void	octeon_eth_mii_writereg(struct device *, int, int, int);
void	octeon_eth_mii_statchg(struct device *);

int	octeon_eth_mediainit(struct octeon_eth_softc *);
void	octeon_eth_mediastatus(struct ifnet *, struct ifmediareq *);
int	octeon_eth_mediachange(struct ifnet *);

void	octeon_eth_send_queue_flush_prefetch(struct octeon_eth_softc *);
void	octeon_eth_send_queue_flush_fetch(struct octeon_eth_softc *);
void	octeon_eth_send_queue_flush(struct octeon_eth_softc *);
int	octeon_eth_send_queue_is_full(struct octeon_eth_softc *);
void	octeon_eth_send_queue_add(struct octeon_eth_softc *,
	    struct mbuf *, uint64_t *);
void	octeon_eth_send_queue_del(struct octeon_eth_softc *,
	    struct mbuf **, uint64_t **);
int	octeon_eth_buf_free_work(struct octeon_eth_softc *, uint64_t *);
void	octeon_eth_buf_ext_free(caddr_t, u_int, void *);

int	octeon_eth_ioctl(struct ifnet *, u_long, caddr_t);
void	octeon_eth_watchdog(struct ifnet *);
int	octeon_eth_init(struct ifnet *);
int	octeon_eth_stop(struct ifnet *, int);
void	octeon_eth_start(struct ifnet *);

int	octeon_eth_send_cmd(struct octeon_eth_softc *, uint64_t, uint64_t);
uint64_t octeon_eth_send_makecmd_w1(int, paddr_t);
uint64_t octeon_eth_send_makecmd_w0(uint64_t, uint64_t, size_t, int, int);
int	octeon_eth_send_makecmd_gbuf(struct octeon_eth_softc *,
	    struct mbuf *, uint64_t *, int *);
int	octeon_eth_send_makecmd(struct octeon_eth_softc *,
	    struct mbuf *, uint64_t *, uint64_t *, uint64_t *);
int	octeon_eth_send_buf(struct octeon_eth_softc *,
	    struct mbuf *, uint64_t *);
int	octeon_eth_send(struct octeon_eth_softc *, struct mbuf *);

int	octeon_eth_reset(struct octeon_eth_softc *);
int	octeon_eth_configure(struct octeon_eth_softc *);
int	octeon_eth_configure_common(struct octeon_eth_softc *);

void	octeon_eth_free_task(void *);
void	octeon_eth_tick_free(void *);
void	octeon_eth_tick_misc(void *);

int	octeon_eth_recv_mbuf(struct octeon_eth_softc *,
	    uint64_t *, struct mbuf **, int *);
int	octeon_eth_recv_check(struct octeon_eth_softc *, uint64_t);
int	octeon_eth_recv(struct octeon_eth_softc *, uint64_t *);
void	octeon_eth_recv_intr(void *, uint64_t *);

int	octeon_eth_mbuf_alloc(int);

/* device driver context */
struct	octeon_eth_softc *octeon_eth_gsc[GMX_PORT_NUNITS];
void	*octeon_eth_pow_recv_ih;

/* device parameters */
int	octeon_eth_param_pko_cmd_w0_n2 = 1;

const struct cfattach cnmac_ca =
    { sizeof(struct octeon_eth_softc), octeon_eth_match, octeon_eth_attach };

struct cfdriver cnmac_cd = { NULL, "cnmac", DV_IFNET };

/* ---- buffer management */

const struct octeon_eth_pool_param {
	int			poolno;
	size_t			size;
	size_t			nelems;
} octeon_eth_pool_params[] = {
#define	_ENTRY(x)	{ OCTEON_POOL_NO_##x, OCTEON_POOL_SIZE_##x, OCTEON_POOL_NELEMS_##x }
	_ENTRY(WQE),
	_ENTRY(CMD),
	_ENTRY(SG)
#undef	_ENTRY
};
struct cn30xxfpa_buf	*octeon_eth_pools[8/* XXX */];
#define	octeon_eth_fb_wqe	octeon_eth_pools[OCTEON_POOL_NO_WQE]
#define	octeon_eth_fb_cmd	octeon_eth_pools[OCTEON_POOL_NO_CMD]
#define	octeon_eth_fb_sg	octeon_eth_pools[OCTEON_POOL_NO_SG]

uint64_t octeon_eth_mac_addr = 0;
uint32_t octeon_eth_mac_addr_offset = 0;

int	octeon_eth_mbufs_to_alloc;

void
octeon_eth_buf_init(struct octeon_eth_softc *sc)
{
	static int once;
	int i;
	const struct octeon_eth_pool_param *pp;
	struct cn30xxfpa_buf *fb;

	if (once == 1)
		return;
	once = 1;

	for (i = 0; i < (int)nitems(octeon_eth_pool_params); i++) {
		pp = &octeon_eth_pool_params[i];
		cn30xxfpa_buf_init(pp->poolno, pp->size, pp->nelems, &fb);
		octeon_eth_pools[pp->poolno] = fb;
	}
}

/* ---- autoconf */

int
octeon_eth_match(struct device *parent, void *match, void *aux)
{
	struct cfdata *cf = (struct cfdata *)match;
	struct cn30xxgmx_attach_args *ga = aux;

	if (strcmp(cf->cf_driver->cd_name, ga->ga_name) != 0) {
		return 0;
	}
	return 1;
}

void
octeon_eth_attach(struct device *parent, struct device *self, void *aux)
{
	struct octeon_eth_softc *sc = (void *)self;
	struct cn30xxgmx_attach_args *ga = aux;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	uint8_t enaddr[ETHER_ADDR_LEN];

	KASSERT(MCLBYTES >= OCTEON_POOL_SIZE_PKT + CACHE_LINE_SIZE);

	atomic_add_int(&octeon_eth_mbufs_to_alloc,
	    octeon_eth_mbuf_alloc(OCTEON_ETH_MBUFS_PER_PORT));

	sc->sc_regt = ga->ga_regt;
	sc->sc_dmat = ga->ga_dmat;
	sc->sc_port = ga->ga_portno;
	sc->sc_port_type = ga->ga_port_type;
	sc->sc_gmx = ga->ga_gmx;
	sc->sc_gmx_port = ga->ga_gmx_port;
	sc->sc_phy_addr = ga->ga_phy_addr;

	sc->sc_init_flag = 0;

	/*
	 * XXX
	 * Setting PIP_IP_OFFSET[OFFSET] to 8 causes panic ... why???
	 */
	sc->sc_ip_offset = 0/* XXX */;

	octeon_eth_board_mac_addr(enaddr);
	printf(", address %s\n", ether_sprintf(enaddr));

	octeon_eth_gsc[sc->sc_port] = sc;

	ml_init(&sc->sc_sendq);
	sc->sc_soft_req_thresh = 15/* XXX */;
	sc->sc_ext_callback_cnt = 0;

	cn30xxgmx_stats_init(sc->sc_gmx_port);

	task_set(&sc->sc_free_task, octeon_eth_free_task, sc);
	timeout_set(&sc->sc_tick_misc_ch, octeon_eth_tick_misc, sc);
	timeout_set(&sc->sc_tick_free_ch, octeon_eth_tick_free, sc);

	cn30xxfau_op_init(&sc->sc_fau_done,
	    OCTEON_CVMSEG_ETHER_OFFSET(sc->sc_dev.dv_unit, csm_ether_fau_done),
	    OCT_FAU_REG_ADDR_END - (8 * (sc->sc_dev.dv_unit + 1))/* XXX */);
	cn30xxfau_op_set_8(&sc->sc_fau_done, 0);

	octeon_eth_pip_init(sc);
	octeon_eth_ipd_init(sc);
	octeon_eth_pko_init(sc);
	octeon_eth_smi_init(sc);

	sc->sc_gmx_port->sc_ipd = sc->sc_ipd;
	sc->sc_gmx_port->sc_port_mii = &sc->sc_mii;
	sc->sc_gmx_port->sc_port_ac = &sc->sc_arpcom;

	/* XXX */
	sc->sc_pow = &cn30xxpow_softc;

	octeon_eth_mediainit(sc);

	strncpy(ifp->if_xname, sc->sc_dev.dv_xname, sizeof(ifp->if_xname));
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_xflags = IFXF_MPSAFE;
	ifp->if_ioctl = octeon_eth_ioctl;
	ifp->if_start = octeon_eth_start;
	ifp->if_watchdog = octeon_eth_watchdog;
	ifp->if_hardmtu = OCTEON_ETH_MAX_MTU;
	IFQ_SET_MAXLEN(&ifp->if_snd, max(GATHER_QUEUE_SIZE, IFQ_MAXLEN));

	ifp->if_capabilities = IFCAP_VLAN_MTU | IFCAP_CSUM_TCPv4 |
	    IFCAP_CSUM_UDPv4 | IFCAP_CSUM_TCPv6 | IFCAP_CSUM_UDPv6;

	cn30xxgmx_set_mac_addr(sc->sc_gmx_port, enaddr);
	cn30xxgmx_set_filter(sc->sc_gmx_port);

	if_attach(ifp);

	memcpy(sc->sc_arpcom.ac_enaddr, enaddr, ETHER_ADDR_LEN);
	ether_ifattach(ifp);

#if 1
	octeon_eth_buf_init(sc);
#endif

	if (octeon_eth_pow_recv_ih == NULL)
		octeon_eth_pow_recv_ih = cn30xxpow_intr_establish(
		    OCTEON_POW_GROUP_PIP, IPL_NET | IPL_MPSAFE,
		    octeon_eth_recv_intr, NULL, NULL, sc->sc_dev.dv_xname);
}

/* ---- submodules */

/* XXX */
void
octeon_eth_pip_init(struct octeon_eth_softc *sc)
{
	struct cn30xxpip_attach_args pip_aa;

	pip_aa.aa_port = sc->sc_port;
	pip_aa.aa_regt = sc->sc_regt;
	pip_aa.aa_tag_type = POW_TAG_TYPE_ORDERED/* XXX */;
	pip_aa.aa_receive_group = OCTEON_POW_GROUP_PIP;
	pip_aa.aa_ip_offset = sc->sc_ip_offset;
	cn30xxpip_init(&pip_aa, &sc->sc_pip);
}

/* XXX */
void
octeon_eth_ipd_init(struct octeon_eth_softc *sc)
{
	struct cn30xxipd_attach_args ipd_aa;

	ipd_aa.aa_port = sc->sc_port;
	ipd_aa.aa_regt = sc->sc_regt;
	ipd_aa.aa_first_mbuff_skip = 0/* XXX */;
	ipd_aa.aa_not_first_mbuff_skip = 0/* XXX */;
	cn30xxipd_init(&ipd_aa, &sc->sc_ipd);
}

/* XXX */
void
octeon_eth_pko_init(struct octeon_eth_softc *sc)
{
	struct cn30xxpko_attach_args pko_aa;

	pko_aa.aa_port = sc->sc_port;
	pko_aa.aa_regt = sc->sc_regt;
	pko_aa.aa_cmdptr = &sc->sc_cmdptr;
	pko_aa.aa_cmd_buf_pool = OCTEON_POOL_NO_CMD;
	pko_aa.aa_cmd_buf_size = OCTEON_POOL_NWORDS_CMD;
	cn30xxpko_init(&pko_aa, &sc->sc_pko);
}

void
octeon_eth_smi_init(struct octeon_eth_softc *sc)
{
	struct cn30xxsmi_attach_args smi_aa;

	smi_aa.aa_port = sc->sc_port;
	smi_aa.aa_regt = sc->sc_regt;
	cn30xxsmi_init(&smi_aa, &sc->sc_smi);
	cn30xxsmi_set_clock(sc->sc_smi, 0x1464ULL); /* XXX */
}

/* ---- XXX */

void
octeon_eth_board_mac_addr(uint8_t *enaddr)
{
	int id;

	/* Initialize MAC addresses from the global address base. */
	if (octeon_eth_mac_addr == 0) {
		memcpy((uint8_t *)&octeon_eth_mac_addr + 2,
		    octeon_boot_info->mac_addr_base, 6);

		/*
		 * Should be allowed to fail hard if the mac_addr_base
		 * address couldn't be read...
		 */
		if (octeon_eth_mac_addr == 0)
			return;

		/*
		 * Calculate the offset from the mac_addr_base that will be used
		 * for the next sc->sc_port.
		 */
		id = octeon_get_chipid();

		switch (octeon_model_family(id)) {
		case OCTEON_MODEL_FAMILY_CN56XX:
			octeon_eth_mac_addr_offset = 1;
			break;
		/*
		case OCTEON_MODEL_FAMILY_CN52XX:
		case OCTEON_MODEL_FAMILY_CN63XX:
			octeon_eth_mac_addr_offset = 2;
			break;
		*/
		default:
			octeon_eth_mac_addr_offset = 0;
			break;
		}

		enaddr += octeon_eth_mac_addr_offset;
	}

	/* No more MAC addresses to assign. */
	if (octeon_eth_mac_addr_offset >= octeon_boot_info->mac_addr_count)
		return;

	if (enaddr)
		memcpy(enaddr, (uint8_t *)&octeon_eth_mac_addr + 2, 6);

	octeon_eth_mac_addr++;
	octeon_eth_mac_addr_offset++;
}
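
/*
 * Example of the assignment scheme above (addresses illustrative): if
 * mac_addr_base is 00:0f:b7:10:20:30, that value is copied into the
 * low 48 bits of the 64-bit counter octeon_eth_mac_addr.  Each call
 * hands out the current value and then increments the counter, so the
 * first port gets 00:0f:b7:10:20:30, the next 00:0f:b7:10:20:31, and
 * so on, until mac_addr_count addresses have been consumed.
 */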

/* ---- media */

int
octeon_eth_mii_readreg(struct device *self, int phy_no, int reg)
{
	struct octeon_eth_softc *sc = (struct octeon_eth_softc *)self;
	return cn30xxsmi_read(sc->sc_smi, phy_no, reg);
}

void
octeon_eth_mii_writereg(struct device *self, int phy_no, int reg, int value)
{
	struct octeon_eth_softc *sc = (struct octeon_eth_softc *)self;
	cn30xxsmi_write(sc->sc_smi, phy_no, reg, value);
}

void
octeon_eth_mii_statchg(struct device *self)
{
	struct octeon_eth_softc *sc = (struct octeon_eth_softc *)self;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;

	cn30xxpko_port_enable(sc->sc_pko, 0);
	cn30xxgmx_port_enable(sc->sc_gmx_port, 0);

	octeon_eth_reset(sc);

	if (ISSET(ifp->if_flags, IFF_RUNNING))
		cn30xxgmx_set_filter(sc->sc_gmx_port);

	cn30xxpko_port_enable(sc->sc_pko, 1);
	cn30xxgmx_port_enable(sc->sc_gmx_port, 1);
}

int
octeon_eth_mediainit(struct octeon_eth_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct mii_softc *child;

	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = octeon_eth_mii_readreg;
	sc->sc_mii.mii_writereg = octeon_eth_mii_writereg;
	sc->sc_mii.mii_statchg = octeon_eth_mii_statchg;
	ifmedia_init(&sc->sc_mii.mii_media, 0, octeon_eth_mediachange,
	    octeon_eth_mediastatus);

	mii_attach(&sc->sc_dev, &sc->sc_mii,
	    0xffffffff, sc->sc_phy_addr, MII_OFFSET_ANY, MIIF_DOPAUSE);

	child = LIST_FIRST(&sc->sc_mii.mii_phys);
	if (child == NULL) {
		/* No PHY attached. */
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL,
		    0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL);
	} else {
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
	}

	return 0;
}

void
octeon_eth_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct octeon_eth_softc *sc = ifp->if_softc;

	mii_pollstat(&sc->sc_mii);
	ifmr->ifm_status = sc->sc_mii.mii_media_status;
	ifmr->ifm_active = (sc->sc_mii.mii_media_active & ~IFM_ETH_FMASK) |
	    sc->sc_gmx_port->sc_port_flowflags;
}

int
octeon_eth_mediachange(struct ifnet *ifp)
{
	struct octeon_eth_softc *sc = ifp->if_softc;

	if ((ifp->if_flags & IFF_UP) == 0)
		return 0;

	return mii_mediachg(&sc->sc_mii);
}

/* ---- send buffer garbage collection */

void
octeon_eth_send_queue_flush_prefetch(struct octeon_eth_softc *sc)
{
	OCTEON_ETH_KASSERT(sc->sc_prefetch == 0);
	cn30xxfau_op_inc_fetch_8(&sc->sc_fau_done, 0);
	sc->sc_prefetch = 1;
}

void
octeon_eth_send_queue_flush_fetch(struct octeon_eth_softc *sc)
{
#ifndef OCTEON_ETH_DEBUG
	if (!sc->sc_prefetch)
		return;
#endif
	OCTEON_ETH_KASSERT(sc->sc_prefetch == 1);
	sc->sc_hard_done_cnt = cn30xxfau_op_inc_read_8(&sc->sc_fau_done);
	OCTEON_ETH_KASSERT(sc->sc_hard_done_cnt <= 0);
	sc->sc_prefetch = 0;
}

void
octeon_eth_send_queue_flush(struct octeon_eth_softc *sc)
{
	const int64_t sent_count = sc->sc_hard_done_cnt;
	int i;

	OCTEON_ETH_KASSERT(sent_count <= 0);

	for (i = 0; i < 0 - sent_count; i++) {
		struct mbuf *m;
		uint64_t *gbuf;

		octeon_eth_send_queue_del(sc, &m, &gbuf);

		cn30xxfpa_buf_put_paddr(octeon_eth_fb_sg, XKPHYS_TO_PHYS(gbuf));

		m_freem(m);
	}

	cn30xxfau_op_add_8(&sc->sc_fau_done, i);
}
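
/*
 * How the three functions above cooperate: sc_fau_done is a
 * Fetch-and-Add Unit register that is decremented as transmit
 * commands complete, so it always reads at or below zero.
 * flush_prefetch() issues an asynchronous IOBDMA read of the
 * register, flush_fetch() collects the result into sc_hard_done_cnt,
 * and flush() dequeues that many packets, returning each gather
 * buffer to the FPA and freeing the mbuf, then adds the count back so
 * the register returns to zero once everything completed has been
 * reclaimed.
 */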

int
octeon_eth_send_queue_is_full(struct octeon_eth_softc *sc)
{
#ifdef OCTEON_ETH_SEND_QUEUE_CHECK
	int64_t nofree_cnt;

	nofree_cnt = ml_len(&sc->sc_sendq) + sc->sc_hard_done_cnt;

	if (__predict_false(nofree_cnt == GATHER_QUEUE_SIZE - 1)) {
		octeon_eth_send_queue_flush(sc);
		return 1;
	}
#endif
	return 0;
}
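
/*
 * The arithmetic above: ml_len(&sc->sc_sendq) is the number of packets
 * still held for reclamation and sc_hard_done_cnt (<= 0) subtracts the
 * ones the hardware has already finished, so nofree_cnt is the number
 * of packets the PKO is still working on.  One entry short of
 * GATHER_QUEUE_SIZE counts as full; the pending transmits are flushed
 * and the caller backs off.
 */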

void
octeon_eth_send_queue_add(struct octeon_eth_softc *sc, struct mbuf *m,
    uint64_t *gbuf)
{
	OCTEON_ETH_KASSERT(m->m_flags & M_PKTHDR);

	m->m_pkthdr.ph_cookie = gbuf;
	ml_enqueue(&sc->sc_sendq, m);

	if (m->m_ext.ext_free_fn != 0)
		sc->sc_ext_callback_cnt++;
}

void
octeon_eth_send_queue_del(struct octeon_eth_softc *sc, struct mbuf **rm,
    uint64_t **rgbuf)
{
	struct mbuf *m;

	m = ml_dequeue(&sc->sc_sendq);
	OCTEON_ETH_KASSERT(m != NULL);

	*rm = m;
	*rgbuf = m->m_pkthdr.ph_cookie;

	if (m->m_ext.ext_free_fn != 0) {
		sc->sc_ext_callback_cnt--;
		OCTEON_ETH_KASSERT(sc->sc_ext_callback_cnt >= 0);
	}
}

int
octeon_eth_buf_free_work(struct octeon_eth_softc *sc, uint64_t *work)
{
	paddr_t addr, pktbuf;
	uint64_t word3;
	unsigned int back, nbufs;

	nbufs = (work[2] & PIP_WQE_WORD2_IP_BUFS) >>
	    PIP_WQE_WORD2_IP_BUFS_SHIFT;
	word3 = work[3];
	while (nbufs-- > 0) {
		addr = word3 & PIP_WQE_WORD3_ADDR;
		back = (word3 & PIP_WQE_WORD3_BACK) >>
		    PIP_WQE_WORD3_BACK_SHIFT;
		pktbuf = (addr & ~(CACHE_LINE_SIZE - 1)) -
		    back * CACHE_LINE_SIZE;

		cn30xxfpa_store(pktbuf, OCTEON_POOL_NO_PKT,
		    OCTEON_POOL_SIZE_PKT / CACHE_LINE_SIZE);

		if (nbufs > 0)
			memcpy(&word3, (void *)PHYS_TO_XKPHYS(addr -
			    sizeof(word3), CCA_CACHED), sizeof(word3));
	}

	cn30xxfpa_buf_put_paddr(octeon_eth_fb_wqe, XKPHYS_TO_PHYS(work));

	return 0;
}
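
/*
 * Example of the WORD3 decoding above (numbers illustrative, assuming
 * 128-byte cache lines): for an address field of 0x41000a2 with a
 * "back" field of 1, rounding down to a cache line gives 0x4100080 and
 * stepping back one line gives 0x4100000, the start of the packet
 * buffer that is handed back to the FPA packet pool.
 */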

/* ---- ifnet interfaces */

int
octeon_eth_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct octeon_eth_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			octeon_eth_init(ifp);
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				octeon_eth_init(ifp);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				octeon_eth_stop(ifp, 0);
		}
		break;

	case SIOCSIFMEDIA:
		/* Flow control requires full-duplex mode. */
		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
		    (ifr->ifr_media & IFM_FDX) == 0) {
			ifr->ifr_media &= ~IFM_ETH_FMASK;
		}
		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
				ifr->ifr_media |=
				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
			}
			sc->sc_gmx_port->sc_port_flowflags =
			    ifr->ifr_media & IFM_ETH_FMASK;
		}
		/* FALLTHROUGH */
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;

	default:
		error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data);
	}

	if (error == ENETRESET) {
		if (ISSET(ifp->if_flags, IFF_RUNNING))
			cn30xxgmx_set_filter(sc->sc_gmx_port);
		error = 0;
	}

	if_start(ifp);

	splx(s);
	return (error);
}

/* ---- send (output) */

uint64_t
octeon_eth_send_makecmd_w0(uint64_t fau0, uint64_t fau1, size_t len, int segs,
    int ipoffp1)
{
	return cn30xxpko_cmd_word0(
		OCT_FAU_OP_SIZE_64,		/* sz1 */
		OCT_FAU_OP_SIZE_64,		/* sz0 */
		1, fau1, 1, fau0,		/* s1, reg1, s0, reg0 */
		0,				/* le */
		octeon_eth_param_pko_cmd_w0_n2,	/* n2 */
		1, 0,				/* q, r */
		(segs == 1) ? 0 : 1,		/* g */
		ipoffp1, 0, 1,			/* ipoffp1, ii, df */
		segs, (int)len);		/* segs, totalbytes */
}

uint64_t
octeon_eth_send_makecmd_w1(int size, paddr_t addr)
{
	return cn30xxpko_cmd_word1(
		0, 0,				/* i, back */
		OCTEON_POOL_NO_SG,		/* pool */
		size, addr);			/* size, addr */
}

#define KVTOPHYS(addr)	if_cnmac_kvtophys((vaddr_t)(addr))
paddr_t if_cnmac_kvtophys(vaddr_t);

paddr_t
if_cnmac_kvtophys(vaddr_t kva)
{
	if (IS_XKPHYS(kva))
		return XKPHYS_TO_PHYS(kva);
	else if (kva >= CKSEG0_BASE && kva < CKSEG0_BASE + CKSEG_SIZE)
		return CKSEG0_TO_PHYS(kva);
	else if (kva >= CKSEG1_BASE && kva < CKSEG1_BASE + CKSEG_SIZE)
		return CKSEG1_TO_PHYS(kva);

	panic("%s: non-direct mapped address %p", __func__, (void *)kva);
}
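
/*
 * The translation above covers only direct-mapped kernel addresses,
 * which is all this driver ever passes to the hardware: the XKPHYS
 * window (segment base 0x8000000000000000) and the two 512MB
 * CKSEG0/CKSEG1 segments at the top of the address space map physical
 * memory 1:1, so the physical address falls out by masking the
 * segment bits.  Any other address reaching here is a driver bug,
 * hence the panic instead of a fallback page-table walk.
 */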

int
octeon_eth_send_makecmd_gbuf(struct octeon_eth_softc *sc, struct mbuf *m0,
    uint64_t *gbuf, int *rsegs)
{
	struct mbuf *m;
	int segs = 0;

	for (m = m0; m != NULL; m = m->m_next) {
		if (__predict_false(m->m_len == 0))
			continue;

		if (segs >= OCTEON_POOL_SIZE_SG / sizeof(uint64_t))
			goto defrag;
		gbuf[segs] = octeon_eth_send_makecmd_w1(m->m_len,
		    KVTOPHYS(m->m_data));
		segs++;
	}

	*rsegs = segs;

	return 0;

defrag:
	if (m_defrag(m0, M_DONTWAIT) != 0)
		return 1;
	gbuf[0] = octeon_eth_send_makecmd_w1(m0->m_len, KVTOPHYS(m0->m_data));
	*rsegs = 1;
	return 0;
}

int
octeon_eth_send_makecmd(struct octeon_eth_softc *sc, struct mbuf *m,
    uint64_t *gbuf, uint64_t *rpko_cmd_w0, uint64_t *rpko_cmd_w1)
{
	uint64_t pko_cmd_w0, pko_cmd_w1;
	int ipoffp1;
	int segs;
	int result = 0;

	if (octeon_eth_send_makecmd_gbuf(sc, m, gbuf, &segs)) {
		log(LOG_WARNING, "%s: large number of transmission"
		    " data segments\n", sc->sc_dev.dv_xname);
		result = 1;
		goto done;
	}

	/* Get the IP packet offset for TCP/UDP checksum offloading. */
	ipoffp1 = (m->m_pkthdr.csum_flags & (M_TCP_CSUM_OUT | M_UDP_CSUM_OUT))
	    ? (ETHER_HDR_LEN + 1) : 0;

	/*
	 * segs == 1	-> link mode (single continuous buffer)
	 *		   WORD1[size] is the number of bytes in the segment
	 *
	 * segs > 1	-> gather mode (scatter-gather buffer)
	 *		   WORD1[size] is the number of segments
	 */
	pko_cmd_w0 = octeon_eth_send_makecmd_w0(sc->sc_fau_done.fd_regno,
	    0, m->m_pkthdr.len, segs, ipoffp1);
	pko_cmd_w1 = octeon_eth_send_makecmd_w1(
	    (segs == 1) ? m->m_pkthdr.len : segs,
	    (segs == 1) ?
		KVTOPHYS(m->m_data) :
		XKPHYS_TO_PHYS(gbuf));

	*rpko_cmd_w0 = pko_cmd_w0;
	*rpko_cmd_w1 = pko_cmd_w1;

done:
	return result;
}
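
/*
 * A sketch of the resulting two-word command for a single-segment
 * 64-byte packet (field meanings follow the comments in
 * octeon_eth_send_makecmd_w0/_w1 above):
 *
 *	word0: g=0 (link mode), segs=1, totalbytes=64, reg0 pointing at
 *	       sc_fau_done so completion is signalled through the FAU
 *	word1: size=64, addr = physical address of the packet data
 *
 * With segs > 1 the same word1 instead carries the segment count and
 * the physical address of the gather list built in gbuf[].
 */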

int
octeon_eth_send_cmd(struct octeon_eth_softc *sc, uint64_t pko_cmd_w0,
    uint64_t pko_cmd_w1)
{
	uint64_t *cmdptr;
	int result = 0;

	cmdptr = (uint64_t *)PHYS_TO_XKPHYS(sc->sc_cmdptr.cmdptr, CCA_CACHED);
	cmdptr += sc->sc_cmdptr.cmdptr_idx;

	OCTEON_ETH_KASSERT(cmdptr != NULL);

	*cmdptr++ = pko_cmd_w0;
	*cmdptr++ = pko_cmd_w1;

	OCTEON_ETH_KASSERT(sc->sc_cmdptr.cmdptr_idx + 2 <=
	    FPA_COMMAND_BUFFER_POOL_NWORDS - 1);

	if (sc->sc_cmdptr.cmdptr_idx + 2 ==
	    FPA_COMMAND_BUFFER_POOL_NWORDS - 1) {
		paddr_t buf;

		buf = cn30xxfpa_buf_get_paddr(octeon_eth_fb_cmd);
		if (buf == 0) {
			log(LOG_WARNING,
			    "%s: cannot allocate command buffer from free pool allocator\n",
			    sc->sc_dev.dv_xname);
			result = 1;
			goto done;
		}
		*cmdptr++ = buf;
		sc->sc_cmdptr.cmdptr = (uint64_t)buf;
		sc->sc_cmdptr.cmdptr_idx = 0;
	} else {
		sc->sc_cmdptr.cmdptr_idx += 2;
	}

	cn30xxpko_op_doorbell_write(sc->sc_port, sc->sc_port, 2);

done:
	return result;
}
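
/*
 * Command buffers form a linked list rather than a ring: when the two
 * words just written leave exactly one free word in the current
 * buffer, a fresh buffer is pulled from the FPA command pool and its
 * physical address is written into that last word for the PKO to
 * follow.  The doorbell write afterwards tells the PKO that two more
 * command words are ready on this port's queue.
 */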

int
octeon_eth_send_buf(struct octeon_eth_softc *sc, struct mbuf *m,
    uint64_t *gbuf)
{
	int result = 0, error;
	uint64_t pko_cmd_w0, pko_cmd_w1;

	error = octeon_eth_send_makecmd(sc, m, gbuf, &pko_cmd_w0, &pko_cmd_w1);
	if (error != 0) {
		/* already logged */
		result = error;
		goto done;
	}

	error = octeon_eth_send_cmd(sc, pko_cmd_w0, pko_cmd_w1);
	if (error != 0) {
		/* already logged */
		result = error;
	}

done:
	return result;
}

int
octeon_eth_send(struct octeon_eth_softc *sc, struct mbuf *m)
{
	paddr_t gaddr = 0;
	uint64_t *gbuf = NULL;
	int result = 0, error;

	gaddr = cn30xxfpa_buf_get_paddr(octeon_eth_fb_sg);
	if (gaddr == 0) {
		log(LOG_WARNING,
		    "%s: cannot allocate gather buffer from free pool allocator\n",
		    sc->sc_dev.dv_xname);
		result = 1;
		goto done;
	}

	gbuf = (uint64_t *)(uintptr_t)PHYS_TO_XKPHYS(gaddr, CCA_CACHED);

	error = octeon_eth_send_buf(sc, m, gbuf);
	if (error != 0) {
		/* already logged */
		cn30xxfpa_buf_put_paddr(octeon_eth_fb_sg, gaddr);
		result = error;
		goto done;
	}

	octeon_eth_send_queue_add(sc, m, gbuf);

done:
	return result;
}

void
octeon_eth_start(struct ifnet *ifp)
{
	struct octeon_eth_softc *sc = ifp->if_softc;
	struct mbuf *m;

	if (__predict_false(!cn30xxgmx_link_status(sc->sc_gmx_port))) {
		ifq_purge(&ifp->if_snd);
		return;
	}

	/*
	 * Performance tuning: pre-issue the IOBDMA request for the
	 * send-done counter so the result is ready inside the loop.
	 */
	octeon_eth_send_queue_flush_prefetch(sc);

	for (;;) {
		octeon_eth_send_queue_flush_fetch(sc); /* XXX */

		/*
		 * XXXSEIL
		 * If no free send buffer is available, free all the sent
		 * buffers and bail out.
		 */
		if (octeon_eth_send_queue_is_full(sc)) {
			ifq_set_oactive(&ifp->if_snd);
			timeout_add(&sc->sc_tick_free_ch, 1);
			return;
		}

		m = ifq_dequeue(&ifp->if_snd);
		if (m == NULL)
			return;

#if NBPFILTER > 0
		if (ifp->if_bpf != NULL)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif

		/* XXX */
		if (ml_len(&sc->sc_sendq) > sc->sc_soft_req_thresh)
			octeon_eth_send_queue_flush(sc);
		if (octeon_eth_send(sc, m)) {
			ifp->if_oerrors++;
			m_freem(m);
			log(LOG_WARNING,
			    "%s: failed to transmit packet\n",
			    sc->sc_dev.dv_xname);
		}
		/* XXX */

		/*
		 * Pre-issue the next IOBDMA request for the send-done
		 * counter.
		 */
		octeon_eth_send_queue_flush_prefetch(sc);
	}

	octeon_eth_send_queue_flush_fetch(sc);
}

void
octeon_eth_watchdog(struct ifnet *ifp)
{
	struct octeon_eth_softc *sc = ifp->if_softc;

	printf("%s: device timeout\n", sc->sc_dev.dv_xname);

	octeon_eth_stop(ifp, 0);

	octeon_eth_configure(sc);

	SET(ifp->if_flags, IFF_RUNNING);
	ifp->if_timer = 0;

	ifq_restart(&ifp->if_snd);
}

int
octeon_eth_init(struct ifnet *ifp)
{
	struct octeon_eth_softc *sc = ifp->if_softc;

	/* XXX don't disable commonly used parts!!! XXX */
	if (sc->sc_init_flag == 0) {
		/* Cancel any pending I/O. */
		octeon_eth_stop(ifp, 0);

		/* Initialize the device. */
		octeon_eth_configure(sc);

		cn30xxpko_enable(sc->sc_pko);
		cn30xxipd_enable(sc->sc_ipd);

		sc->sc_init_flag = 1;
	} else {
		cn30xxgmx_port_enable(sc->sc_gmx_port, 1);
	}
	octeon_eth_mediachange(ifp);

	cn30xxgmx_set_mac_addr(sc->sc_gmx_port, sc->sc_arpcom.ac_enaddr);
	cn30xxgmx_set_filter(sc->sc_gmx_port);

	timeout_add_sec(&sc->sc_tick_misc_ch, 1);
	timeout_add_sec(&sc->sc_tick_free_ch, 1);

	SET(ifp->if_flags, IFF_RUNNING);
	ifq_clr_oactive(&ifp->if_snd);

	return 0;
}

int
octeon_eth_stop(struct ifnet *ifp, int disable)
{
	struct octeon_eth_softc *sc = ifp->if_softc;

	CLR(ifp->if_flags, IFF_RUNNING);

	timeout_del(&sc->sc_tick_misc_ch);
	timeout_del(&sc->sc_tick_free_ch);

	mii_down(&sc->sc_mii);

	cn30xxgmx_port_enable(sc->sc_gmx_port, 0);

	intr_barrier(octeon_eth_pow_recv_ih);
	ifq_barrier(&ifp->if_snd);

	ifq_clr_oactive(&ifp->if_snd);
	ifp->if_timer = 0;

	return 0;
}

/* ---- misc */

#define PKO_INDEX_MASK	((1ULL << 12/* XXX */) - 1)

int
octeon_eth_reset(struct octeon_eth_softc *sc)
{
	cn30xxgmx_reset_speed(sc->sc_gmx_port);
	cn30xxgmx_reset_flowctl(sc->sc_gmx_port);
	cn30xxgmx_reset_timing(sc->sc_gmx_port);
	cn30xxgmx_reset_board(sc->sc_gmx_port);

	return 0;
}

int
octeon_eth_configure(struct octeon_eth_softc *sc)
{
	cn30xxgmx_port_enable(sc->sc_gmx_port, 0);

	octeon_eth_reset(sc);

	octeon_eth_configure_common(sc);

	cn30xxpko_port_config(sc->sc_pko);
	cn30xxpko_port_enable(sc->sc_pko, 1);
	cn30xxpip_port_config(sc->sc_pip);

	cn30xxgmx_tx_stats_rd_clr(sc->sc_gmx_port, 1);
	cn30xxgmx_rx_stats_rd_clr(sc->sc_gmx_port, 1);

	cn30xxgmx_port_enable(sc->sc_gmx_port, 1);

	return 0;
}

int
octeon_eth_configure_common(struct octeon_eth_softc *sc)
{
	static int once;
	uint64_t reg;

	if (once == 1)
		return 0;
	once = 1;

#if 0
	octeon_eth_buf_init(sc);
#endif

	cn30xxipd_config(sc->sc_ipd);
	cn30xxpko_config(sc->sc_pko);

	cn30xxpow_config(sc->sc_pow, OCTEON_POW_GROUP_PIP);

	/* Set padding for packets that Octeon does not recognize as IP. */
	reg = octeon_xkphys_read_8(PIP_GBL_CFG);
	reg &= ~PIP_GBL_CFG_NIP_SHF_MASK;
	reg |= ETHER_ALIGN << PIP_GBL_CFG_NIP_SHF_SHIFT;
	octeon_xkphys_write_8(PIP_GBL_CFG, reg);

	return 0;
}
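
/*
 * Why ETHER_ALIGN: the PIP_GBL_CFG[NIP_SHF] field shifts frames the
 * hardware did not classify as IP by the given number of bytes.
 * ETHER_ALIGN is 2, so the 14-byte Ethernet header ends on a 4-byte
 * boundary and any IP header that the hardware failed to recognize is
 * still word-aligned when the stack parses it in software.
 */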

int
octeon_eth_mbuf_alloc(int n)
{
	struct mbuf *m;
	paddr_t pktbuf;

	while (n > 0) {
		m = MCLGETI(NULL, M_NOWAIT, NULL,
		    OCTEON_POOL_SIZE_PKT + CACHE_LINE_SIZE);
		if (m == NULL || !ISSET(m->m_flags, M_EXT)) {
			m_freem(m);
			break;
		}

		m->m_data = (void *)(((vaddr_t)m->m_data + CACHE_LINE_SIZE) &
		    ~(CACHE_LINE_SIZE - 1));
		((struct mbuf **)m->m_data)[-1] = m;

		pktbuf = KVTOPHYS(m->m_data);
		m->m_pkthdr.ph_cookie = (void *)pktbuf;
		cn30xxfpa_store(pktbuf, OCTEON_POOL_NO_PKT,
		    OCTEON_POOL_SIZE_PKT / CACHE_LINE_SIZE);

		n--;
	}
	return n;
}
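
/*
 * Buffer layout produced above: MCLGETI returns a cluster one cache
 * line larger than OCTEON_POOL_SIZE_PKT, m_data is rounded up to the
 * next cache-line boundary, and the owning mbuf is recorded in the
 * pointer slot just below it:
 *
 *	| ... | struct mbuf * | packet data (OCTEON_POOL_SIZE_PKT) |
 *	                      ^ m_data, cache-line aligned
 *
 * octeon_eth_recv_mbuf() later reads that back pointer to recover the
 * mbuf from the bare physical address the hardware hands back.
 */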

int
octeon_eth_recv_mbuf(struct octeon_eth_softc *sc, uint64_t *work,
    struct mbuf **rm, int *nmbuf)
{
	struct mbuf *m, *m0, *mprev, **pm;
	paddr_t addr, pktbuf;
	uint64_t word1 = work[1];
	uint64_t word2 = work[2];
	uint64_t word3 = work[3];
	unsigned int back, i, nbufs;
	unsigned int left, total, size;

	cn30xxfpa_buf_put_paddr(octeon_eth_fb_wqe, XKPHYS_TO_PHYS(work));

	nbufs = (word2 & PIP_WQE_WORD2_IP_BUFS) >> PIP_WQE_WORD2_IP_BUFS_SHIFT;
	if (nbufs == 0)
		panic("%s: dynamic short packet", __func__);

	m0 = mprev = NULL;
	total = left = (word1 & PIP_WQE_WORD1_LEN) >> 48;
	for (i = 0; i < nbufs; i++) {
		addr = word3 & PIP_WQE_WORD3_ADDR;
		back = (word3 & PIP_WQE_WORD3_BACK) >> PIP_WQE_WORD3_BACK_SHIFT;
		pktbuf = (addr & ~(CACHE_LINE_SIZE - 1)) -
		    back * CACHE_LINE_SIZE;
		pm = (struct mbuf **)PHYS_TO_XKPHYS(pktbuf, CCA_CACHED) - 1;
		m = *pm;
		*pm = NULL;
		if ((paddr_t)m->m_pkthdr.ph_cookie != pktbuf)
			panic("%s: packet pool is corrupted, mbuf cookie %p != "
			    "pktbuf %p", __func__, m->m_pkthdr.ph_cookie,
			    (void *)pktbuf);

		/*
		 * Because of a hardware bug in some Octeon models the size
		 * field of word3 can be wrong. However, the hardware uses
		 * all space in a buffer before moving to the next one so
		 * it is possible to derive the size of this data segment
		 * from the size of packet data buffers.
		 */
		size = OCTEON_POOL_SIZE_PKT - (addr - pktbuf);
		if (size > left)
			size = left;

		m->m_pkthdr.ph_cookie = NULL;
		m->m_data += addr - pktbuf;
		m->m_len = size;
		left -= size;

		if (m0 == NULL)
			m0 = m;
		else {
			m->m_flags &= ~M_PKTHDR;
			mprev->m_next = m;
		}
		mprev = m;

		if (i + 1 < nbufs)
			memcpy(&word3, (void *)PHYS_TO_XKPHYS(addr -
			    sizeof(word3), CCA_CACHED), sizeof(word3));
	}

	m0->m_pkthdr.len = total;
	*rm = m0;
	*nmbuf = nbufs;

	return 0;
}

int
octeon_eth_recv_check(struct octeon_eth_softc *sc, uint64_t word2)
{
	static struct timeval rxerr_log_interval = { 0, 250000 };
	uint64_t opcode;

	if (__predict_true(!ISSET(word2, PIP_WQE_WORD2_NOIP_RE)))
		return 0;

	opcode = word2 & PIP_WQE_WORD2_NOIP_OPECODE;
	if ((sc->sc_arpcom.ac_if.if_flags & IFF_DEBUG) &&
	    ratecheck(&sc->sc_rxerr_log_last, &rxerr_log_interval))
		log(LOG_DEBUG, "%s: rx error (%lld)\n", sc->sc_dev.dv_xname,
		    opcode);

	/* XXX harmless error? */
	if (opcode == PIP_WQE_WORD2_RE_OPCODE_OVRRUN)
		return 0;

	return 1;
}

int
octeon_eth_recv(struct octeon_eth_softc *sc, uint64_t *work)
{
	struct ifnet *ifp;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct mbuf *m;
	uint64_t word2;
	int nmbuf;

	OCTEON_ETH_KASSERT(sc != NULL);
	OCTEON_ETH_KASSERT(work != NULL);

	word2 = work[2];
	ifp = &sc->sc_arpcom.ac_if;

	OCTEON_ETH_KASSERT(ifp != NULL);

	if (!(ifp->if_flags & IFF_RUNNING))
		goto drop;

	if (__predict_false(octeon_eth_recv_check(sc, word2) != 0)) {
		ifp->if_ierrors++;
		goto drop;
	}

	if (__predict_false(octeon_eth_recv_mbuf(sc, work, &m, &nmbuf) != 0)) {
		ifp->if_ierrors++;
		goto drop;
	}

	/* work[0] .. work[3] may not be valid any more */

	OCTEON_ETH_KASSERT(m != NULL);

	cn30xxipd_offload(word2, &m->m_pkthdr.csum_flags);

	ml_enqueue(&ml, m);
	if_input(ifp, &ml);

	nmbuf = octeon_eth_mbuf_alloc(nmbuf);
	if (nmbuf != 0)
		atomic_add_int(&octeon_eth_mbufs_to_alloc, nmbuf);

	return 0;

drop:
	octeon_eth_buf_free_work(sc, work);
	return 1;
}

void
octeon_eth_recv_intr(void *data, uint64_t *work)
{
	struct octeon_eth_softc *sc;
	int port;

	OCTEON_ETH_KASSERT(work != NULL);

	port = (work[1] & PIP_WQE_WORD1_IPRT) >> 42;

	OCTEON_ETH_KASSERT(port < GMX_PORT_NUNITS);

	sc = octeon_eth_gsc[port];

	OCTEON_ETH_KASSERT(sc != NULL);
	OCTEON_ETH_KASSERT(port == sc->sc_port);

	/* XXX process all work queue entries anyway */

	(void)octeon_eth_recv(sc, work);
}
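
/*
 * Dispatch note: all ports share the single POW interrupt group
 * OCTEON_POW_GROUP_PIP, so the handler recovers the input port from
 * WORD1[IPRT] of the work-queue entry and looks up the softc that
 * attach registered in octeon_eth_gsc[].  On success the entry and its
 * buffers become an mbuf chain for if_input(); on any failure
 * octeon_eth_buf_free_work() returns everything straight to the FPA
 * pools.
 */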

/* ---- tick */

void
octeon_eth_free_task(void *arg)
{
	struct octeon_eth_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	int resched = 1;
	int timeout;

	if (ml_len(&sc->sc_sendq) > 0) {
		octeon_eth_send_queue_flush_prefetch(sc);
		octeon_eth_send_queue_flush_fetch(sc);
		octeon_eth_send_queue_flush(sc);
	}

	if (ifq_is_oactive(&ifp->if_snd)) {
		ifq_clr_oactive(&ifp->if_snd);
		octeon_eth_start(ifp);

		if (ifq_is_oactive(&ifp->if_snd))
			/* The start routine did rescheduling already. */
			resched = 0;
	}

	if (resched) {
		timeout = (sc->sc_ext_callback_cnt > 0) ? 1 : hz;
		timeout_add(&sc->sc_tick_free_ch, timeout);
	}
}

/*
 * octeon_eth_tick_free
 *
 * => garbage collect send gather buffer / mbuf
 * => called at softclock
 */
void
octeon_eth_tick_free(void *arg)
{
	struct octeon_eth_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	int to_alloc;

	ifq_serialize(&ifp->if_snd, &sc->sc_free_task);

	if (octeon_eth_mbufs_to_alloc != 0) {
		to_alloc = atomic_swap_uint(&octeon_eth_mbufs_to_alloc, 0);
		to_alloc = octeon_eth_mbuf_alloc(to_alloc);
		if (to_alloc != 0)
			atomic_add_int(&octeon_eth_mbufs_to_alloc, to_alloc);
	}
}
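
/*
 * The replenishment pattern above: octeon_eth_mbuf_alloc() returns the
 * shortfall, i.e. how many buffers it could not allocate, and callers
 * park that number in the global octeon_eth_mbufs_to_alloc.
 * atomic_swap_uint() claims the whole backlog in one step, the
 * allocation is retried, and whatever still fails is added back for
 * the next tick.
 */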

/*
 * octeon_eth_tick_misc
 *
 * => collect statistics
 * => check link status
 * => called at softclock
 */
void
octeon_eth_tick_misc(void *arg)
{
	struct octeon_eth_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	int s;

	s = splnet();

	cn30xxgmx_stats(sc->sc_gmx_port);
	cn30xxpip_stats(sc->sc_pip, ifp, sc->sc_port);
	mii_tick(&sc->sc_mii);

	splx(s);

	timeout_add_sec(&sc->sc_tick_misc_ch, 1);
}
1397