xref: /openbsd/sys/dev/pv/if_xnf.c (revision fc61954a)
1 /*	$OpenBSD: if_xnf.c,v 1.41 2016/10/06 17:02:22 mikeb Exp $	*/
2 
3 /*
4  * Copyright (c) 2015, 2016 Mike Belopuhov
5  *
6  * Permission to use, copy, modify, and distribute this software for any
7  * purpose with or without fee is hereby granted, provided that the above
8  * copyright notice and this permission notice appear in all copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include "bpfilter.h"
20 #include "vlan.h"
21 #include "xen.h"
22 
23 #include <sys/param.h>
24 #include <sys/systm.h>
25 #include <sys/atomic.h>
26 #include <sys/device.h>
27 #include <sys/kernel.h>
28 #include <sys/malloc.h>
29 #include <sys/mbuf.h>
30 #include <sys/pool.h>
31 #include <sys/queue.h>
32 #include <sys/socket.h>
33 #include <sys/sockio.h>
34 #include <sys/task.h>
35 #include <sys/timeout.h>
36 
37 #include <machine/bus.h>
38 
39 #include <dev/pv/xenreg.h>
40 #include <dev/pv/xenvar.h>
41 
42 #include <net/if.h>
43 #include <net/if_media.h>
44 
45 #include <netinet/in.h>
46 #include <netinet/if_ether.h>
47 
48 #ifdef INET6
49 #include <netinet/ip6.h>
50 #endif
51 
52 #if NBPFILTER > 0
53 #include <net/bpf.h>
54 #endif
55 
56 
57 /*
58  * Rx ring
59  */
60 
61 struct xnf_rx_req {
62 	uint16_t		 rxq_id;
63 	uint16_t		 rxq_pad;
64 	uint32_t		 rxq_ref;
65 } __packed;
66 
67 struct xnf_rx_rsp {
68 	uint16_t		 rxp_id;
69 	uint16_t		 rxp_offset;
70 	uint16_t		 rxp_flags;
71 #define  XNF_RXF_CSUM_VALID	  0x0001
72 #define  XNF_RXF_CSUM_BLANK	  0x0002
73 #define  XNF_RXF_CHUNK		  0x0004
74 #define  XNF_RXF_MGMT		  0x0008
75 	int16_t			 rxp_status;
76 } __packed;
77 
78 union xnf_rx_desc {
79 	struct xnf_rx_req	 rxd_req;
80 	struct xnf_rx_rsp	 rxd_rsp;
81 } __packed;
82 
83 #define XNF_RX_DESC		256
84 #define XNF_MCLEN		PAGE_SIZE
85 #define XNF_RX_MIN		32
86 
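/*
 * Descriptive note (added): the Rx ring shares a single grant-mapped page
 * with the backend: a 64-byte header (producer/consumer indices, their
 * event counterparts and padding, which corresponds to Xen's standard
 * shared-ring header) followed by 256 eight-byte request/response slots.
 * rxr_prod is the request producer advanced by the frontend, rxr_cons is
 * the response producer advanced by the backend; both are free-running
 * and are mapped to a slot with a mask, e.g. i = cons & (XNF_RX_DESC - 1).
 * The *_event fields implement the usual Xen notification suppression.
 */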
87 struct xnf_rx_ring {
88 	volatile uint32_t	 rxr_prod;
89 	volatile uint32_t	 rxr_prod_event;
90 	volatile uint32_t	 rxr_cons;
91 	volatile uint32_t	 rxr_cons_event;
92 	uint32_t		 rxr_reserved[12];
93 	union xnf_rx_desc	 rxr_desc[XNF_RX_DESC];
94 } __packed;
95 
96 
97 /*
98  * Tx ring
99  */
100 
101 struct xnf_tx_req {
102 	uint32_t		 txq_ref;
103 	uint16_t		 txq_offset;
104 	uint16_t		 txq_flags;
105 #define  XNF_TXF_CSUM_BLANK	  0x0001
106 #define  XNF_TXF_CSUM_VALID	  0x0002
107 #define  XNF_TXF_CHUNK		  0x0004
108 #define  XNF_TXF_ETXRA		  0x0008
109 	uint16_t		 txq_id;
110 	uint16_t		 txq_size;
111 } __packed;
112 
113 struct xnf_tx_rsp {
114 	uint16_t		 txp_id;
115 	int16_t			 txp_status;
116 } __packed;
117 
118 union xnf_tx_desc {
119 	struct xnf_tx_req	 txd_req;
120 	struct xnf_tx_rsp	 txd_rsp;
121 } __packed;
122 
123 #define XNF_TX_DESC		256
124 #define XNF_TX_FRAG		18
125 
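/*
 * Descriptive note (added): the Tx ring uses the same single-page layout
 * as the Rx ring.  XNF_TX_FRAG is presumably the traditional limit of 18
 * slots per packet that Xen network backends are required to accept.
 */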
126 struct xnf_tx_ring {
127 	volatile uint32_t	 txr_prod;
128 	volatile uint32_t	 txr_prod_event;
129 	volatile uint32_t	 txr_cons;
130 	volatile uint32_t	 txr_cons_event;
131 	uint32_t		 txr_reserved[12];
132 	union xnf_tx_desc	 txr_desc[XNF_TX_DESC];
133 } __packed;
134 
135 
136 /* Management frame, "extra info" in Xen parlance */
137 struct xnf_mgmt {
138 	uint8_t			 mg_type;
139 #define  XNF_MGMT_MCAST_ADD	2
140 #define  XNF_MGMT_MCAST_DEL	3
141 	uint8_t			 mg_flags;
142 	union {
143 		uint8_t		 mgu_mcaddr[ETHER_ADDR_LEN];
144 		uint16_t	 mgu_pad[3];
145 	} u;
146 #define mg_mcaddr		 u.mgu_mcaddr
147 } __packed;
148 
149 
150 struct xnf_softc {
151 	struct device		 sc_dev;
152 	struct xen_attach_args	 sc_xa;
153 	struct xen_softc	*sc_xen;
154 	bus_dma_tag_t		 sc_dmat;
155 	int			 sc_domid;
156 
157 	struct arpcom		 sc_ac;
158 	struct ifmedia		 sc_media;
159 
160 	xen_intr_handle_t	 sc_xih;
161 
162 	int			 sc_caps;
163 #define  XNF_CAP_SG		  0x0001
164 #define  XNF_CAP_CSUM4		  0x0002
165 #define  XNF_CAP_CSUM6		  0x0004
166 #define  XNF_CAP_MCAST		  0x0008
167 #define  XNF_CAP_SPLIT		  0x0010
168 #define  XNF_CAP_MULTIQ		  0x0020
169 
170 	/* Rx ring */
171 	struct xnf_rx_ring	*sc_rx_ring;
172 	int			 sc_rx_cons;
173 	bus_dmamap_t		 sc_rx_rmap;		  /* map for the ring */
174 	bus_dma_segment_t	 sc_rx_seg;
175 	uint32_t		 sc_rx_ref;		  /* grant table ref */
176 	struct mbuf		*sc_rx_buf[XNF_RX_DESC];
177 	bus_dmamap_t		 sc_rx_dmap[XNF_RX_DESC]; /* maps for packets */
178 	struct mbuf		*sc_rx_cbuf[2];	  	  /* chain handling */
179 
180 	/* Tx ring */
181 	struct xnf_tx_ring	*sc_tx_ring;
182 	int			 sc_tx_cons;
183 	bus_dmamap_t		 sc_tx_rmap;		  /* map for the ring */
184 	bus_dma_segment_t	 sc_tx_seg;
185 	uint32_t		 sc_tx_ref;		  /* grant table ref */
186 	int			 sc_tx_frags;
187 	struct mbuf		*sc_tx_buf[XNF_TX_DESC];
188 	bus_dmamap_t		 sc_tx_dmap[XNF_TX_DESC]; /* maps for packets */
189 };
190 
191 int	xnf_match(struct device *, void *, void *);
192 void	xnf_attach(struct device *, struct device *, void *);
193 int	xnf_lladdr(struct xnf_softc *);
194 int	xnf_ioctl(struct ifnet *, u_long, caddr_t);
195 int	xnf_media_change(struct ifnet *);
196 void	xnf_media_status(struct ifnet *, struct ifmediareq *);
197 int	xnf_iff(struct xnf_softc *);
198 void	xnf_init(struct xnf_softc *);
199 void	xnf_stop(struct xnf_softc *);
200 void	xnf_start(struct ifnet *);
201 int	xnf_encap(struct xnf_softc *, struct mbuf *, uint32_t *);
202 void	xnf_intr(void *);
203 void	xnf_watchdog(struct ifnet *);
204 void	xnf_txeof(struct xnf_softc *);
205 void	xnf_rxeof(struct xnf_softc *);
206 int	xnf_rx_ring_fill(struct xnf_softc *);
207 int	xnf_rx_ring_create(struct xnf_softc *);
208 void	xnf_rx_ring_drain(struct xnf_softc *);
209 void	xnf_rx_ring_destroy(struct xnf_softc *);
210 int	xnf_tx_ring_create(struct xnf_softc *);
211 void	xnf_tx_ring_drain(struct xnf_softc *);
212 void	xnf_tx_ring_destroy(struct xnf_softc *);
213 int	xnf_capabilities(struct xnf_softc *);
214 int	xnf_init_backend(struct xnf_softc *);
215 
216 struct cfdriver xnf_cd = {
217 	NULL, "xnf", DV_IFNET
218 };
219 
220 const struct cfattach xnf_ca = {
221 	sizeof(struct xnf_softc), xnf_match, xnf_attach
222 };
223 
224 int
225 xnf_match(struct device *parent, void *match, void *aux)
226 {
227 	struct xen_attach_args *xa = aux;
228 
229 	if (strcmp("vif", xa->xa_name))
230 		return (0);
231 
232 	return (1);
233 }
234 
235 void
236 xnf_attach(struct device *parent, struct device *self, void *aux)
237 {
238 	struct xen_attach_args *xa = aux;
239 	struct xnf_softc *sc = (struct xnf_softc *)self;
240 	struct ifnet *ifp = &sc->sc_ac.ac_if;
241 
242 	sc->sc_xa = *xa;
243 	sc->sc_xen = xa->xa_parent;
244 	sc->sc_dmat = xa->xa_dmat;
245 	sc->sc_domid = xa->xa_domid;
246 
247 	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);
248 
249 	if (xnf_lladdr(sc)) {
250 		printf(": failed to obtain MAC address\n");
251 		return;
252 	}
253 
254 	if (xen_intr_establish(0, &sc->sc_xih, sc->sc_domid, xnf_intr, sc,
255 	    ifp->if_xname)) {
256 		printf(": failed to establish an interrupt\n");
257 		return;
258 	}
259 	xen_intr_mask(sc->sc_xih);
260 
261 	printf(": backend %d, event channel %u, address %s\n", sc->sc_domid,
262 	    sc->sc_xih, ether_sprintf(sc->sc_ac.ac_enaddr));
263 
264 	if (xnf_capabilities(sc)) {
265 		xen_intr_disestablish(sc->sc_xih);
266 		return;
267 	}
268 
269 	if (sc->sc_caps & XNF_CAP_SG)
270 		ifp->if_hardmtu = 9000;
271 
272 	if (xnf_rx_ring_create(sc)) {
273 		xen_intr_disestablish(sc->sc_xih);
274 		return;
275 	}
276 	if (xnf_tx_ring_create(sc)) {
277 		xen_intr_disestablish(sc->sc_xih);
278 		xnf_rx_ring_destroy(sc);
279 		return;
280 	}
281 	if (xnf_init_backend(sc)) {
282 		xen_intr_disestablish(sc->sc_xih);
283 		xnf_rx_ring_destroy(sc);
284 		xnf_tx_ring_destroy(sc);
285 		return;
286 	}
287 
288 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
289 	ifp->if_xflags = IFXF_MPSAFE;
290 	ifp->if_ioctl = xnf_ioctl;
291 	ifp->if_start = xnf_start;
292 	ifp->if_watchdog = xnf_watchdog;
293 	ifp->if_softc = sc;
294 
295 	ifp->if_capabilities = IFCAP_VLAN_MTU;
296 	if (sc->sc_caps & XNF_CAP_CSUM4)
297 		ifp->if_capabilities |= IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;
298 	if (sc->sc_caps & XNF_CAP_CSUM6)
299 		ifp->if_capabilities |= IFCAP_CSUM_TCPv6 | IFCAP_CSUM_UDPv6;
300 
301 	IFQ_SET_MAXLEN(&ifp->if_snd, XNF_TX_DESC - 1);
302 
303 	ifmedia_init(&sc->sc_media, IFM_IMASK, xnf_media_change,
304 	    xnf_media_status);
305 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_MANUAL, 0, NULL);
306 	ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_MANUAL);
307 
308 	if_attach(ifp);
309 	ether_ifattach(ifp);
310 
311 	/* Kick out the emulated em(4) and re(4) devices */
312 	sc->sc_xen->sc_flags |= XSF_UNPLUG_NIC;
313 }
314 
315 static int
316 nibble(int ch)
317 {
318 	if (ch >= '0' && ch <= '9')
319 		return (ch - '0');
320 	if (ch >= 'A' && ch <= 'F')
321 		return (10 + ch - 'A');
322 	if (ch >= 'a' && ch <= 'f')
323 		return (10 + ch - 'a');
324 	return (-1);
325 }
326 
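/*
 * Descriptive note (added): read the interface address from the backend's
 * XenStore "mac" property, which carries the usual colon-separated
 * "xx:xx:xx:xx:xx:xx" form.
 */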
327 int
328 xnf_lladdr(struct xnf_softc *sc)
329 {
330 	char enaddr[ETHER_ADDR_LEN];
331 	char mac[32];
332 	int i, j, lo, hi;
333 
334 	if (xs_getprop(sc->sc_xa.xa_parent, sc->sc_xa.xa_backend, "mac",
335 	    mac, sizeof(mac)))
336 		return (-1);
337 
338 	for (i = 0, j = 0; j < ETHER_ADDR_LEN; i += 3) {
339 		if ((hi = nibble(mac[i])) == -1 ||
340 		    (lo = nibble(mac[i+1])) == -1)
341 			return (-1);
342 		enaddr[j++] = hi << 4 | lo;
343 	}
344 
345 	memcpy(sc->sc_ac.ac_enaddr, enaddr, ETHER_ADDR_LEN);
346 	return (0);
347 }
348 
349 int
350 xnf_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
351 {
352 	struct xnf_softc *sc = ifp->if_softc;
353 	struct ifreq *ifr = (struct ifreq *)data;
354 	int s, error = 0;
355 
356 	s = splnet();
357 
358 	switch (command) {
359 	case SIOCSIFADDR:
360 		ifp->if_flags |= IFF_UP;
361 		if (!(ifp->if_flags & IFF_RUNNING))
362 			xnf_init(sc);
363 		break;
364 	case SIOCSIFFLAGS:
365 		if (ifp->if_flags & IFF_UP) {
366 			if (ifp->if_flags & IFF_RUNNING)
367 				error = ENETRESET;
368 			else
369 				xnf_init(sc);
370 		} else {
371 			if (ifp->if_flags & IFF_RUNNING)
372 				xnf_stop(sc);
373 		}
374 		break;
375 	case SIOCGIFMEDIA:
376 	case SIOCSIFMEDIA:
377 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, command);
378 		break;
379 	default:
380 		error = ether_ioctl(ifp, &sc->sc_ac, command, data);
381 		break;
382 	}
383 
384 	if (error == ENETRESET) {
385 		if (ifp->if_flags & IFF_RUNNING)
386 			xnf_iff(sc);
387 		error = 0;
388 	}
389 
390 	splx(s);
391 
392 	return (error);
393 }
394 
395 int
396 xnf_media_change(struct ifnet *ifp)
397 {
398 	return (0);
399 }
400 
401 void
402 xnf_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
403 {
404 	ifmr->ifm_status = IFM_ACTIVE | IFM_AVALID;
405 	ifmr->ifm_active = IFM_ETHER | IFM_MANUAL;
406 }
407 
408 int
409 xnf_iff(struct xnf_softc *sc)
410 {
411 	return (0);
412 }
413 
414 void
415 xnf_init(struct xnf_softc *sc)
416 {
417 	struct ifnet *ifp = &sc->sc_ac.ac_if;
418 
419 	xnf_stop(sc);
420 
421 	xnf_iff(sc);
422 
423 	if (xen_intr_unmask(sc->sc_xih)) {
424 		printf("%s: failed to enable interrupts\n", ifp->if_xname);
425 		xnf_stop(sc);
426 		return;
427 	}
428 
429 	ifp->if_flags |= IFF_RUNNING;
430 	ifq_clr_oactive(&ifp->if_snd);
431 }
432 
433 void
434 xnf_stop(struct xnf_softc *sc)
435 {
436 	struct ifnet *ifp = &sc->sc_ac.ac_if;
437 
438 	ifp->if_flags &= ~IFF_RUNNING;
439 
440 	xen_intr_mask(sc->sc_xih);
441 
442 	ifp->if_timer = 0;
443 
444 	ifq_barrier(&ifp->if_snd);
445 	intr_barrier(&sc->sc_xih);
446 
447 	ifq_clr_oactive(&ifp->if_snd);
448 
449 	if (sc->sc_tx_ring)
450 		xnf_tx_ring_drain(sc);
451 	if (sc->sc_rx_ring)
452 		xnf_rx_ring_drain(sc);
453 }
454 
455 void
456 xnf_start(struct ifnet *ifp)
457 {
458 	struct xnf_softc *sc = ifp->if_softc;
459 	struct xnf_tx_ring *txr = sc->sc_tx_ring;
460 	struct mbuf *m;
461 	int pkts = 0;
462 	uint32_t prod, oprod;
463 
464 	if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd))
465 		return;
466 
467 	bus_dmamap_sync(sc->sc_dmat, sc->sc_tx_rmap, 0, 0,
468 	    BUS_DMASYNC_POSTREAD);
469 
470 	prod = oprod = txr->txr_prod;
471 
472 	for (;;) {
473 		if ((XNF_TX_DESC - (prod - sc->sc_tx_cons)) <
474 		    sc->sc_tx_frags) {
475 			/* transient: not enough free slots for a full chain */
476 			ifq_set_oactive(&ifp->if_snd);
477 			break;
478 		}
479 		m = ifq_dequeue(&ifp->if_snd);
480 		if (m == NULL)
481 			break;
482 
483 		if (xnf_encap(sc, m, &prod)) {
484 			/* the chain is too large */
485 			ifp->if_oerrors++;
486 			m_freem(m);
487 			continue;
488 		}
489 		ifp->if_opackets++;
490 
491 #if NBPFILTER > 0
492 		if (ifp->if_bpf)
493 			bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_OUT);
494 #endif
495 		pkts++;
496 	}
497 	if (pkts > 0) {
498 		txr->txr_prod = prod;
499 		if (txr->txr_cons_event < txr->txr_cons)
500 			txr->txr_cons_event = txr->txr_cons +
501 			    ((txr->txr_prod - txr->txr_cons) >> 1) + 1;
502 		bus_dmamap_sync(sc->sc_dmat, sc->sc_tx_rmap, 0, 0,
503 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
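		/*
		 * Descriptive note (added): standard Xen notification
		 * avoidance -- signal the event channel only if the backend
		 * asked to be woken for a request index in the range we have
		 * just produced, i.e. oprod < txr_prod_event <= prod in
		 * free-running terms.
		 */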
504 		if (prod - txr->txr_prod_event < prod - oprod)
505 			xen_intr_signal(sc->sc_xih);
506 		ifp->if_timer = 5;
507 	}
508 }
509 
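/*
 * Descriptive note (added): count how many Tx descriptors an mbuf chain
 * will occupy.  Every mbuf needs at least one slot, plus an extra slot
 * for each page boundary its data crosses, since a grant reference can
 * only cover a single page.
 */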
510 static inline int
511 xnf_fragcount(struct mbuf *m_head)
512 {
513 	struct mbuf *m;
514 	vaddr_t va, va0;
515 	int n = 0;
516 
517 	for (m = m_head; m != NULL; m = m->m_next) {
518 		if (m->m_len == 0)
519 			continue;
520 		     /* start of the buffer */
521 		for (va0 = va = mtod(m, vaddr_t);
522 		     /* does the buffer end on this page? */
523 		     va + (PAGE_SIZE - (va & PAGE_MASK)) < va0 + m->m_len;
524 		     /* move on to the next page */
525 		     va += PAGE_SIZE - (va & PAGE_MASK))
526 			n++;
527 		n++;
528 	}
529 	return (n);
530 }
531 
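/*
 * Descriptive note (added): map an mbuf chain onto consecutive Tx
 * descriptors.  The first request carries the total packet length and the
 * checksum-offload flags; every request of a multi-slot packet has
 * XNF_TXF_CHUNK set except the last one.  A chain that would need more
 * slots than the backend accepts is defragmented first; if a DMA load
 * fails, the descriptors built so far are unrolled and ENOBUFS is
 * returned.
 */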
532 int
533 xnf_encap(struct xnf_softc *sc, struct mbuf *m_head, uint32_t *prod)
534 {
535 	struct ifnet *ifp = &sc->sc_ac.ac_if;
536 	struct xnf_tx_ring *txr = sc->sc_tx_ring;
537 	union xnf_tx_desc *txd;
538 	struct mbuf *m;
539 	bus_dmamap_t dmap;
540 	uint32_t oprod = *prod;
541 	int i, id, flags, n;
542 
543 	if ((xnf_fragcount(m_head) > sc->sc_tx_frags) &&
544 	    m_defrag(m_head, M_DONTWAIT))
545 		goto errout;
546 
547 	for (m = m_head; m != NULL && m->m_len > 0; m = m->m_next) {
548 		i = *prod & (XNF_TX_DESC - 1);
549 		dmap = sc->sc_tx_dmap[i];
550 		txd = &txr->txr_desc[i];
551 		if (sc->sc_tx_buf[i])
552 			panic("%s: cons %u(%u) prod %u next %u seg %d/%d\n",
553 			    ifp->if_xname, txr->txr_cons, sc->sc_tx_cons,
554 			    txr->txr_prod, *prod, *prod - oprod,
555 			    xnf_fragcount(m_head));
556 
557 		flags = (sc->sc_domid << 16) | BUS_DMA_WRITE | BUS_DMA_WAITOK;
558 		if (bus_dmamap_load(sc->sc_dmat, dmap, m->m_data, m->m_len,
559 		    NULL, flags)) {
560 			DPRINTF("%s: failed to load %d bytes @%lu\n",
561 			    sc->sc_dev.dv_xname, m->m_len,
562 			    mtod(m, vaddr_t) & PAGE_MASK);
563 			goto unroll;
564 		}
565 
566 		if (m == m_head) {
567 			if (m->m_pkthdr.csum_flags &
568 			    (M_TCP_CSUM_OUT | M_UDP_CSUM_OUT))
569 				txd->txd_req.txq_flags = XNF_TXF_CSUM_BLANK |
570 				    XNF_TXF_CSUM_VALID;
571 			txd->txd_req.txq_size = m->m_pkthdr.len;
572 		}
573 		for (n = 0; n < dmap->dm_nsegs; n++) {
574 			i = *prod & (XNF_TX_DESC - 1);
575 			txd = &txr->txr_desc[i];
576 			if (sc->sc_tx_buf[i])
577 				panic("%s: cons %u(%u) prod %u next %u "
578 				    "seg %d/%d\n", ifp->if_xname,
579 				    txr->txr_cons, sc->sc_tx_cons,
580 				    txr->txr_prod, *prod, *prod - oprod,
581 				    xnf_fragcount(m_head));
582 
583 			/* Don't overwrite the length of the very first one */
584 			if (!(m == m_head && n == 0))
585 				txd->txd_req.txq_size = dmap->dm_segs[n].ds_len;
586 			/* The chunk flag will be removed from the last one */
587 			txd->txd_req.txq_flags |= XNF_TXF_CHUNK;
588 			txd->txd_req.txq_ref = dmap->dm_segs[n].ds_addr;
589 			if (n == 0)
590 				txd->txd_req.txq_offset =
591 				    mtod(m, vaddr_t) & PAGE_MASK;
592 			(*prod)++;
593 		}
594 	}
595 	/* Clear the chunk flag from the last segment */
596 	txd->txd_req.txq_flags &= ~XNF_TXF_CHUNK;
597 	sc->sc_tx_buf[i] = m_head;
598 	bus_dmamap_sync(sc->sc_dmat, sc->sc_tx_rmap, 0, 0,
599 	    BUS_DMASYNC_PREWRITE);
600 
601 	return (0);
602 
603  unroll:
604 	for (; *prod != oprod; (*prod)--) {
605 		i = (*prod - 1) & (XNF_TX_DESC - 1);
606 		dmap = sc->sc_tx_dmap[i];
607 		txd = &txr->txr_desc[i];
608 
609 		id = txd->txd_rsp.txp_id;
610 		memset(txd, 0, sizeof(*txd));
611 		txd->txd_req.txq_id = id;
612 
613 		bus_dmamap_sync(sc->sc_dmat, dmap, 0, 0,
614 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
615 		bus_dmamap_unload(sc->sc_dmat, dmap);
616 
617 		if (sc->sc_tx_buf[i])
618 			sc->sc_tx_buf[i] = NULL;
619 	}
620 
621  errout:
622 	return (ENOBUFS);
623 }
624 
625 void
626 xnf_intr(void *arg)
627 {
628 	struct xnf_softc *sc = arg;
629 	struct ifnet *ifp = &sc->sc_ac.ac_if;
630 
631 	if (ifp->if_flags & IFF_RUNNING) {
632 		xnf_txeof(sc);
633 		xnf_rxeof(sc);
634 	}
635 }
636 
637 void
638 xnf_watchdog(struct ifnet *ifp)
639 {
640 	struct xnf_softc *sc = ifp->if_softc;
641 	struct xnf_tx_ring *txr = sc->sc_tx_ring;
642 
643 	printf("%s: tx stuck: prod %u cons %u,%u evt %u,%u\n",
644 	    ifp->if_xname, txr->txr_prod, txr->txr_cons, sc->sc_tx_cons,
645 	    txr->txr_prod_event, txr->txr_cons_event);
646 }
647 
648 void
649 xnf_txeof(struct xnf_softc *sc)
650 {
651 	struct ifnet *ifp = &sc->sc_ac.ac_if;
652 	struct xnf_tx_ring *txr = sc->sc_tx_ring;
653 	union xnf_tx_desc *txd;
654 	bus_dmamap_t dmap;
655 	uint32_t cons;
656 	int i, id;
657 
658 	bus_dmamap_sync(sc->sc_dmat, sc->sc_tx_rmap, 0, 0,
659 	    BUS_DMASYNC_POSTWRITE);
660 
661 	for (cons = sc->sc_tx_cons; cons != txr->txr_cons; cons++) {
662 		i = cons & (XNF_TX_DESC - 1);
663 		txd = &txr->txr_desc[i];
664 		dmap = sc->sc_tx_dmap[i];
665 
666 		id = txd->txd_rsp.txp_id;
667 		memset(txd, 0, sizeof(*txd));
668 		txd->txd_req.txq_id = id;
669 
670 		bus_dmamap_sync(sc->sc_dmat, dmap, 0, 0,
671 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
672 		bus_dmamap_unload(sc->sc_dmat, dmap);
673 
674 		if (sc->sc_tx_buf[i] != NULL) {
675 			m_freem(sc->sc_tx_buf[i]);
676 			sc->sc_tx_buf[i] = NULL;
677 		}
678 	}
679 
680 	sc->sc_tx_cons = cons;
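	/*
	 * Descriptive note (added): ask the backend to raise the next Tx
	 * completion interrupt only after roughly half of the still
	 * outstanding requests have completed, as a form of interrupt
	 * mitigation.
	 */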
681 	txr->txr_cons_event = sc->sc_tx_cons +
682 	    ((txr->txr_prod - sc->sc_tx_cons) >> 1) + 1;
683 	bus_dmamap_sync(sc->sc_dmat, sc->sc_tx_rmap, 0, 0,
684 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
685 
686 	if (txr->txr_cons == txr->txr_prod)
687 		ifp->if_timer = 0;
688 	if (ifq_is_oactive(&ifp->if_snd))
689 		ifq_restart(&ifp->if_snd);
690 }
691 
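/*
 * Descriptive note (added): process Rx responses -- detach the filled
 * clusters, reassemble packets that the backend split across several
 * slots (XNF_RXF_CHUNK) using sc_rx_cbuf[0]/[1] as the head and tail of
 * the pending chain between calls, and hand completed packets to the
 * stack via if_input().
 */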
692 void
693 xnf_rxeof(struct xnf_softc *sc)
694 {
695 	struct ifnet *ifp = &sc->sc_ac.ac_if;
696 	struct xnf_rx_ring *rxr = sc->sc_rx_ring;
697 	union xnf_rx_desc *rxd;
698 	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
699 	struct mbuf *fmp = sc->sc_rx_cbuf[0];
700 	struct mbuf *lmp = sc->sc_rx_cbuf[1];
701 	struct mbuf *m;
702 	bus_dmamap_t dmap;
703 	uint32_t cons;
704 	int i, id, flags, len, offset;
705 
706 	bus_dmamap_sync(sc->sc_dmat, sc->sc_rx_rmap, 0, 0,
707 	    BUS_DMASYNC_POSTREAD);
708 
709 	for (cons = sc->sc_rx_cons; cons != rxr->rxr_cons; cons++) {
710 		i = cons & (XNF_RX_DESC - 1);
711 		rxd = &rxr->rxr_desc[i];
712 		dmap = sc->sc_rx_dmap[i];
713 
714 		len = rxd->rxd_rsp.rxp_status;
715 		flags = rxd->rxd_rsp.rxp_flags;
716 		offset = rxd->rxd_rsp.rxp_offset;
717 		id = rxd->rxd_rsp.rxp_id;
718 		memset(rxd, 0, sizeof(*rxd));
719 		rxd->rxd_req.rxq_id = id;
720 
721 		bus_dmamap_sync(sc->sc_dmat, dmap, 0, 0,
722 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
723 		bus_dmamap_unload(sc->sc_dmat, dmap);
724 
725 		m = sc->sc_rx_buf[i];
726 		KASSERT(m != NULL);
727 		sc->sc_rx_buf[i] = NULL;
728 
729 		if (flags & XNF_RXF_MGMT) {
730 			printf("%s: management data present\n",
731 			    ifp->if_xname);
732 			m_freem(m);
733 			continue;
734 		}
735 
736 		if (flags & XNF_RXF_CSUM_VALID)
737 			m->m_pkthdr.csum_flags = M_TCP_CSUM_IN_OK |
738 			    M_UDP_CSUM_IN_OK;
739 
740 		if (len < 0 || (len + offset > PAGE_SIZE)) {
741 			ifp->if_ierrors++;
742 			m_freem(m);
743 			continue;
744 		}
745 
746 		m->m_len = len;
747 		m->m_data += offset;
748 
749 		if (fmp == NULL) {
750 			m->m_pkthdr.len = len;
751 			fmp = m;
752 		} else {
753 			m->m_flags &= ~M_PKTHDR;
754 			lmp->m_next = m;
755 			fmp->m_pkthdr.len += m->m_len;
756 		}
757 		lmp = m;
758 
759 		if (flags & XNF_RXF_CHUNK) {
760 			sc->sc_rx_cbuf[0] = fmp;
761 			sc->sc_rx_cbuf[1] = lmp;
762 			continue;
763 		}
764 
765 		m = fmp;
766 
767 		ml_enqueue(&ml, m);
768 		sc->sc_rx_cbuf[0] = sc->sc_rx_cbuf[1] = fmp = lmp = NULL;
769 	}
770 
771 	sc->sc_rx_cons = cons;
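	/*
	 * Descriptive note (added): request an interrupt for the very next
	 * response the backend posts.
	 */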
772 	rxr->rxr_cons_event = sc->sc_rx_cons + 1;
773 	bus_dmamap_sync(sc->sc_dmat, sc->sc_rx_rmap, 0, 0,
774 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
775 
776 	if (!ml_empty(&ml))
777 		if_input(ifp, &ml);
778 
779 	if (xnf_rx_ring_fill(sc) || (sc->sc_rx_cons != rxr->rxr_cons))
780 		xen_intr_schedule(sc->sc_xih);
781 }
782 
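/*
 * Descriptive note (added): post page-sized clusters into every empty Rx
 * slot.  The backend domain id is encoded into the upper 16 bits of the
 * bus_dma flags, presumably so that the Xen bus_dma glue can issue the
 * grant references to the right domain.  Returns non-zero when the ring
 * is still short of XNF_RX_MIN buffers so that the caller can reschedule
 * the interrupt handler and retry.
 */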
783 int
784 xnf_rx_ring_fill(struct xnf_softc *sc)
785 {
786 	struct ifnet *ifp = &sc->sc_ac.ac_if;
787 	struct xnf_rx_ring *rxr = sc->sc_rx_ring;
788 	bus_dmamap_t dmap;
789 	struct mbuf *m;
790 	uint32_t cons, prod, oprod;
791 	int i, flags, resched = 0;
792 
793 	cons = rxr->rxr_cons;
794 	prod = oprod = rxr->rxr_prod;
795 
796 	while (prod - cons < XNF_RX_DESC) {
797 		i = prod & (XNF_RX_DESC - 1);
798 		if (sc->sc_rx_buf[i])
799 			break;
800 		m = MCLGETI(NULL, M_DONTWAIT, NULL, XNF_MCLEN);
801 		if (m == NULL)
802 			break;
803 		m->m_len = m->m_pkthdr.len = XNF_MCLEN;
804 		dmap = sc->sc_rx_dmap[i];
805 		flags = (sc->sc_domid << 16) | BUS_DMA_READ | BUS_DMA_NOWAIT;
806 		if (bus_dmamap_load_mbuf(sc->sc_dmat, dmap, m, flags)) {
807 			m_freem(m);
808 			break;
809 		}
810 		sc->sc_rx_buf[i] = m;
811 		rxr->rxr_desc[i].rxd_req.rxq_ref = dmap->dm_segs[0].ds_addr;
812 		bus_dmamap_sync(sc->sc_dmat, dmap, 0, 0, BUS_DMASYNC_PREWRITE);
813 		prod++;
814 	}
815 
816 	rxr->rxr_prod = prod;
817 	bus_dmamap_sync(sc->sc_dmat, sc->sc_rx_rmap, 0, 0,
818 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
819 
820 	if ((prod - cons < XNF_RX_MIN) && (ifp->if_flags & IFF_RUNNING))
821 		resched = 1;
822 	if (prod - rxr->rxr_prod_event < prod - oprod)
823 		xen_intr_signal(sc->sc_xih);
824 
825 	return (resched);
826 }
827 
828 int
829 xnf_rx_ring_create(struct xnf_softc *sc)
830 {
831 	int i, flags, rsegs;
832 
833 	/* Allocate a page of memory for the ring */
834 	if (bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
835 	    &sc->sc_rx_seg, 1, &rsegs, BUS_DMA_ZERO | BUS_DMA_WAITOK)) {
836 		printf("%s: failed to allocate memory for the rx ring\n",
837 		    sc->sc_dev.dv_xname);
838 		return (-1);
839 	}
840 	/* Map in the allocated memory into the ring structure */
841 	if (bus_dmamem_map(sc->sc_dmat, &sc->sc_rx_seg, 1, PAGE_SIZE,
842 	    (caddr_t *)(&sc->sc_rx_ring), BUS_DMA_WAITOK)) {
843 		printf("%s: failed to map memory for the rx ring\n",
844 		    sc->sc_dev.dv_xname);
845 		goto errout;
846 	}
847 	/* Create a map to load the ring memory into */
848 	if (bus_dmamap_create(sc->sc_dmat, PAGE_SIZE, 1, PAGE_SIZE, 0,
849 	    BUS_DMA_WAITOK, &sc->sc_rx_rmap)) {
850 		printf("%s: failed to create a memory map for the rx ring\n",
851 		    sc->sc_dev.dv_xname);
852 		goto errout;
853 	}
854 	/* Load the ring into the ring map to extract the PA */
855 	flags = (sc->sc_domid << 16) | BUS_DMA_WAITOK;
856 	if (bus_dmamap_load(sc->sc_dmat, sc->sc_rx_rmap, sc->sc_rx_ring,
857 	    PAGE_SIZE, NULL, flags)) {
858 		printf("%s: failed to load the rx ring map\n",
859 		    sc->sc_dev.dv_xname);
860 		goto errout;
861 	}
862 	sc->sc_rx_ref = sc->sc_rx_rmap->dm_segs[0].ds_addr;
863 
864 	sc->sc_rx_ring->rxr_prod_event = sc->sc_rx_ring->rxr_cons_event = 1;
865 
866 	for (i = 0; i < XNF_RX_DESC; i++) {
867 		if (bus_dmamap_create(sc->sc_dmat, XNF_MCLEN, 1, XNF_MCLEN,
868 		    PAGE_SIZE, BUS_DMA_WAITOK, &sc->sc_rx_dmap[i])) {
869 			printf("%s: failed to create a memory map for the"
870 			    " rx slot %d\n", sc->sc_dev.dv_xname, i);
871 			goto errout;
872 		}
873 		sc->sc_rx_ring->rxr_desc[i].rxd_req.rxq_id = i;
874 	}
875 
876 	xnf_rx_ring_fill(sc);
877 
878 	return (0);
879 
880  errout:
881 	xnf_rx_ring_destroy(sc);
882 	return (-1);
883 }
884 
885 void
886 xnf_rx_ring_drain(struct xnf_softc *sc)
887 {
888 	struct xnf_rx_ring *rxr = sc->sc_rx_ring;
889 
890 	if (sc->sc_rx_cons != rxr->rxr_cons)
891 		xnf_rxeof(sc);
892 }
893 
894 void
895 xnf_rx_ring_destroy(struct xnf_softc *sc)
896 {
897 	int i, slots = 0;
898 
899 	for (i = 0; i < XNF_RX_DESC; i++) {
900 		if (sc->sc_rx_buf[i] == NULL)
901 			continue;
902 		bus_dmamap_sync(sc->sc_dmat, sc->sc_rx_dmap[i], 0, 0,
903 		    BUS_DMASYNC_POSTREAD);
904 		bus_dmamap_unload(sc->sc_dmat, sc->sc_rx_dmap[i]);
905 		m_freem(sc->sc_rx_buf[i]);
906 		sc->sc_rx_buf[i] = NULL;
907 		slots++;
908 	}
909 
910 	for (i = 0; i < XNF_RX_DESC; i++) {
911 		if (sc->sc_rx_dmap[i] == NULL)
912 			continue;
913 		bus_dmamap_destroy(sc->sc_dmat, sc->sc_rx_dmap[i]);
914 		sc->sc_rx_dmap[i] = NULL;
915 	}
916 	if (sc->sc_rx_rmap) {
917 		bus_dmamap_sync(sc->sc_dmat, sc->sc_rx_rmap, 0, 0,
918 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
919 		bus_dmamap_unload(sc->sc_dmat, sc->sc_rx_rmap);
920 		bus_dmamap_destroy(sc->sc_dmat, sc->sc_rx_rmap);
921 	}
922 	if (sc->sc_rx_ring) {
923 		bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_rx_ring,
924 		    PAGE_SIZE);
925 		bus_dmamem_free(sc->sc_dmat, &sc->sc_rx_seg, 1);
926 	}
927 	sc->sc_rx_ring = NULL;
928 	sc->sc_rx_rmap = NULL;
929 	sc->sc_rx_cons = 0;
930 }
931 
932 int
933 xnf_tx_ring_create(struct xnf_softc *sc)
934 {
935 	struct ifnet *ifp = &sc->sc_ac.ac_if;
936 	int i, flags, nsegs, rsegs;
937 	bus_size_t segsz;
938 
939 	sc->sc_tx_frags = sc->sc_caps & XNF_CAP_SG ? XNF_TX_FRAG : 1;
940 
941 	/* Allocate a page of memory for the ring */
942 	if (bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
943 	    &sc->sc_tx_seg, 1, &rsegs, BUS_DMA_ZERO | BUS_DMA_WAITOK)) {
944 		printf("%s: failed to allocate memory for the tx ring\n",
945 		    sc->sc_dev.dv_xname);
946 		return (-1);
947 	}
948 	/* Map in the allocated memory into the ring structure */
949 	if (bus_dmamem_map(sc->sc_dmat, &sc->sc_tx_seg, 1, PAGE_SIZE,
950 	    (caddr_t *)&sc->sc_tx_ring, BUS_DMA_WAITOK)) {
951 		printf("%s: failed to map memory for the tx ring\n",
952 		    sc->sc_dev.dv_xname);
953 		goto errout;
954 	}
955 	/* Create a map to load the ring memory into */
956 	if (bus_dmamap_create(sc->sc_dmat, PAGE_SIZE, 1, PAGE_SIZE, 0,
957 	    BUS_DMA_WAITOK, &sc->sc_tx_rmap)) {
958 		printf("%s: failed to create a memory map for the tx ring\n",
959 		    sc->sc_dev.dv_xname);
960 		goto errout;
961 	}
962 	/* Load the ring into the ring map to extract the PA */
963 	flags = (sc->sc_domid << 16) | BUS_DMA_WAITOK;
964 	if (bus_dmamap_load(sc->sc_dmat, sc->sc_tx_rmap, sc->sc_tx_ring,
965 	    PAGE_SIZE, NULL, flags)) {
966 		printf("%s: failed to load the tx ring map\n",
967 		    sc->sc_dev.dv_xname);
968 		goto errout;
969 	}
970 	sc->sc_tx_ref = sc->sc_tx_rmap->dm_segs[0].ds_addr;
971 
972 	sc->sc_tx_ring->txr_prod_event = sc->sc_tx_ring->txr_cons_event = 1;
973 
974 	if (sc->sc_caps & XNF_CAP_SG) {
975 		nsegs = roundup(ifp->if_hardmtu, XNF_MCLEN) / XNF_MCLEN + 1;
976 		segsz = nsegs * XNF_MCLEN;
977 	} else {
978 		nsegs = 1;
979 		segsz = XNF_MCLEN;
980 	}
981 	for (i = 0; i < XNF_TX_DESC; i++) {
982 		if (bus_dmamap_create(sc->sc_dmat, segsz, nsegs, XNF_MCLEN,
983 		    PAGE_SIZE, BUS_DMA_WAITOK, &sc->sc_tx_dmap[i])) {
984 			printf("%s: failed to create a memory map for the"
985 			    " tx slot %d\n", sc->sc_dev.dv_xname, i);
986 			goto errout;
987 		}
988 		sc->sc_tx_ring->txr_desc[i].txd_req.txq_id = i;
989 	}
990 
991 	return (0);
992 
993  errout:
994 	xnf_tx_ring_destroy(sc);
995 	return (-1);
996 }
997 
998 void
999 xnf_tx_ring_drain(struct xnf_softc *sc)
1000 {
1001 	struct xnf_tx_ring *txr = sc->sc_tx_ring;
1002 
1003 	if (sc->sc_tx_cons != txr->txr_cons)
1004 		xnf_txeof(sc);
1005 }
1006 
1007 void
1008 xnf_tx_ring_destroy(struct xnf_softc *sc)
1009 {
1010 	int i;
1011 
1012 	for (i = 0; i < XNF_TX_DESC; i++) {
1013 		if (sc->sc_tx_dmap[i] == NULL)
1014 			continue;
1015 		bus_dmamap_sync(sc->sc_dmat, sc->sc_tx_dmap[i], 0, 0,
1016 		    BUS_DMASYNC_POSTWRITE);
1017 		bus_dmamap_unload(sc->sc_dmat, sc->sc_tx_dmap[i]);
1018 		bus_dmamap_destroy(sc->sc_dmat, sc->sc_tx_dmap[i]);
1019 		sc->sc_tx_dmap[i] = NULL;
1020 		if (sc->sc_tx_buf[i] == NULL)
1021 			continue;
1022 		m_free(sc->sc_tx_buf[i]);
1023 		sc->sc_tx_buf[i] = NULL;
1024 	}
1025 	if (sc->sc_tx_rmap) {
1026 		bus_dmamap_sync(sc->sc_dmat, sc->sc_tx_rmap, 0, 0,
1027 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1028 		bus_dmamap_unload(sc->sc_dmat, sc->sc_tx_rmap);
1029 		bus_dmamap_destroy(sc->sc_dmat, sc->sc_tx_rmap);
1030 	}
1031 	if (sc->sc_tx_ring) {
1032 		bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_tx_ring,
1033 		    PAGE_SIZE);
1034 		bus_dmamem_free(sc->sc_dmat, &sc->sc_tx_seg, 1);
1035 	}
1036 	sc->sc_tx_ring = NULL;
1037 	sc->sc_tx_rmap = NULL;
1038 }
1039 
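/*
 * Descriptive note (added): negotiate features with the backend by
 * reading its advertised XenStore properties.  A missing property
 * (ENOENT) simply means the feature is not offered; any other XenStore
 * error aborts the attachment.
 */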
1040 int
1041 xnf_capabilities(struct xnf_softc *sc)
1042 {
1043 	const char *prop;
1044 	char val[32];
1045 	int error;
1046 
1047 	/* Query scatter-gather capability */
1048 	prop = "feature-sg";
1049 	if ((error = xs_getprop(sc->sc_xa.xa_parent, sc->sc_xa.xa_backend,
1050 	    prop, val, sizeof(val))) == 0) {
1051 		if (val[0] == '1')
1052 			sc->sc_caps |= XNF_CAP_SG;
1053 	} else if (error != ENOENT)
1054 		goto errout;
1055 
1056 	/* Query IPv4 checksum offloading capability, enabled by default */
1057 	sc->sc_caps |= XNF_CAP_CSUM4;
1058 	prop = "feature-no-csum-offload";
1059 	if ((error = xs_getprop(sc->sc_xa.xa_parent, sc->sc_xa.xa_backend,
1060 	    prop, val, sizeof(val))) == 0) {
1061 		if (val[0] == '1')
1062 			sc->sc_caps &= ~XNF_CAP_CSUM4;
1063 	} else if (error != ENOENT)
1064 		goto errout;
1065 
1066 	/* Query IPv6 checksum offloading capability */
1067 	prop = "feature-ipv6-csum-offload";
1068 	if ((error = xs_getprop(sc->sc_xa.xa_parent, sc->sc_xa.xa_backend,
1069 	    prop, val, sizeof(val))) == 0) {
1070 		if (val[0] == '1')
1071 			sc->sc_caps |= XNF_CAP_CSUM6;
1072 	} else if (error != ENOENT)
1073 		goto errout;
1074 
1075 	/* Query multicast traffic control capability */
1076 	prop = "feature-multicast-control";
1077 	if ((error = xs_getprop(sc->sc_xa.xa_parent, sc->sc_xa.xa_backend,
1078 	    prop, val, sizeof(val))) == 0) {
1079 		if (val[0] == '1')
1080 			sc->sc_caps |= XNF_CAP_MCAST;
1081 	} else if (error != ENOENT)
1082 		goto errout;
1083 
1084 	/* Query split Rx/Tx event channel capability */
1085 	prop = "feature-split-event-channels";
1086 	if ((error = xs_getprop(sc->sc_xa.xa_parent, sc->sc_xa.xa_backend,
1087 	    prop, val, sizeof(val))) == 0) {
1088 		if (val[0] == '1')
1089 			sc->sc_caps |= XNF_CAP_SPLIT;
1090 	} else if (error != ENOENT)
1091 		goto errout;
1092 
1093 	/* Query multiqueue capability */
1094 	prop = "multi-queue-max-queues";
1095 	if ((error = xs_getprop(sc->sc_xa.xa_parent, sc->sc_xa.xa_backend,
1096 	    prop, val, sizeof(val))) == 0)
1097 		sc->sc_caps |= XNF_CAP_MULTIQ;
1098 	else if (error != ENOENT)
1099 		goto errout;
1100 
1101 	DPRINTF("%s: capabilities %b\n", sc->sc_dev.dv_xname, sc->sc_caps,
1102 	    "\20\006MULTIQ\005SPLIT\004MCAST\003CSUM6\002CSUM4\001SG");
1103 	return (0);
1104 
1105  errout:
1106 	printf("%s: failed to read \"%s\" property\n", sc->sc_dev.dv_xname,
1107 	    prop);
1108 	return (-1);
1109 }
1110 
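/*
 * Descriptive note (added): publish the frontend configuration in
 * XenStore -- the grant references for both rings, the event channel
 * port and the negotiated features -- then switch the device to state 4
 * (XenbusStateConnected in Xen terms) so that the backend completes the
 * connection.
 */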
1111 int
1112 xnf_init_backend(struct xnf_softc *sc)
1113 {
1114 	const char *prop;
1115 	char val[32];
1116 
1117 	/* Plumb the Rx ring */
1118 	prop = "rx-ring-ref";
1119 	snprintf(val, sizeof(val), "%u", sc->sc_rx_ref);
1120 	if (xs_setprop(sc->sc_xa.xa_parent, sc->sc_xa.xa_node, prop, val,
1121 	    strlen(val)))
1122 		goto errout;
1123 	/* Enable "copy" mode */
1124 	prop = "request-rx-copy";
1125 	snprintf(val, sizeof(val), "%u", 1);
1126 	if (xs_setprop(sc->sc_xa.xa_parent, sc->sc_xa.xa_node, prop, val,
1127 	    strlen(val)))
1128 		goto errout;
1129 	/* Enable notify mode */
1130 	prop = "feature-rx-notify";
1131 	snprintf(val, sizeof(val), "%u", 1);
1132 	if (xs_setprop(sc->sc_xa.xa_parent, sc->sc_xa.xa_node, prop, val,
1133 	    strlen(val)))
1134 		goto errout;
1135 
1136 	/* Plumb the Tx ring */
1137 	prop = "tx-ring-ref";
1138 	snprintf(val, sizeof(val), "%u", sc->sc_tx_ref);
1139 	if (xs_setprop(sc->sc_xa.xa_parent, sc->sc_xa.xa_node, prop, val,
1140 	    strlen(val)))
1141 		goto errout;
1142 	/* Enable scatter-gather mode */
1143 	if (sc->sc_tx_frags > 1) {
1144 		prop = "feature-sg";
1145 		snprintf(val, sizeof(val), "%u", 1);
1146 		if (xs_setprop(sc->sc_xa.xa_parent, sc->sc_xa.xa_node, prop,
1147 		    val, strlen(val)))
1148 			goto errout;
1149 	}
1150 
1151 	/* Enable IPv6 checksum offloading */
1152 	if (sc->sc_caps & XNF_CAP_CSUM6) {
1153 		prop = "feature-ipv6-csum-offload";
1154 		snprintf(val, sizeof(val), "%u", 1);
1155 		if (xs_setprop(sc->sc_xa.xa_parent, sc->sc_xa.xa_node, prop,
1156 		    val, strlen(val)))
1157 			goto errout;
1158 	}
1159 
1160 	/* Plumb the event channel port */
1161 	prop = "event-channel";
1162 	snprintf(val, sizeof(val), "%u", sc->sc_xih);
1163 	if (xs_setprop(sc->sc_xa.xa_parent, sc->sc_xa.xa_node, prop, val,
1164 	    strlen(val)))
1165 		goto errout;
1166 
1167 	/* Connect the device */
1168 	prop = "state";
1169 	snprintf(val, sizeof(val), "%u", 4);
1170 	if (xs_setprop(sc->sc_xa.xa_parent, sc->sc_xa.xa_node, prop, val,
1171 	    strlen(val)))
1172 		goto errout;
1173 
1174 	return (0);
1175 
1176  errout:
1177 	printf("%s: failed to set \"%s\" property to \"%s\"\n",
1178 	    sc->sc_dev.dv_xname, prop, val);
1179 	return (-1);
1180 }
1181