/*	$OpenBSD: if_xnf.c,v 1.57 2017/06/12 12:35:07 mikeb Exp $	*/

/*
 * Copyright (c) 2015, 2016 Mike Belopuhov
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "bpfilter.h"
#include "vlan.h"
#include "xen.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/atomic.h>
#include <sys/device.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/pool.h>
#include <sys/queue.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/task.h>
#include <sys/timeout.h>

#include <machine/bus.h>

#include <dev/pv/xenreg.h>
#include <dev/pv/xenvar.h>

#include <net/if.h>
#include <net/if_media.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>

#ifdef INET6
#include <netinet/ip6.h>
#endif

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

/* #define XNF_DEBUG */

#ifdef XNF_DEBUG
#define DPRINTF(x...)		printf(x)
#else
#define DPRINTF(x...)
#endif

/*
 * Rx ring
 */

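/*
 * The request/response descriptors and ring structures below follow the
 * Xen netif shared-ring layout: producer and consumer indices, the event
 * counters used to request notifications, 48 bytes of padding, then a
 * descriptor array.  Each ring occupies one page that is shared with the
 * backend through a grant reference.
 */
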
struct xnf_rx_req {
	uint16_t		 rxq_id;
	uint16_t		 rxq_pad;
	uint32_t		 rxq_ref;
} __packed;

struct xnf_rx_rsp {
	uint16_t		 rxp_id;
	uint16_t		 rxp_offset;
	uint16_t		 rxp_flags;
#define  XNF_RXF_CSUM_VALID	  0x0001
#define  XNF_RXF_CSUM_BLANK	  0x0002
#define  XNF_RXF_CHUNK		  0x0004
#define  XNF_RXF_MGMT		  0x0008
	int16_t			 rxp_status;
} __packed;

union xnf_rx_desc {
	struct xnf_rx_req	 rxd_req;
	struct xnf_rx_rsp	 rxd_rsp;
} __packed;

#define XNF_RX_DESC		256
#define XNF_MCLEN		PAGE_SIZE
#define XNF_RX_MIN		32

struct xnf_rx_ring {
	volatile uint32_t	 rxr_prod;
	volatile uint32_t	 rxr_prod_event;
	volatile uint32_t	 rxr_cons;
	volatile uint32_t	 rxr_cons_event;
	uint32_t		 rxr_reserved[12];
	union xnf_rx_desc	 rxr_desc[XNF_RX_DESC];
} __packed;


/*
 * Tx ring
 */

struct xnf_tx_req {
	uint32_t		 txq_ref;
	uint16_t		 txq_offset;
	uint16_t		 txq_flags;
#define  XNF_TXF_CSUM_BLANK	  0x0001
#define  XNF_TXF_CSUM_VALID	  0x0002
#define  XNF_TXF_CHUNK		  0x0004
#define  XNF_TXF_ETXRA		  0x0008
	uint16_t		 txq_id;
	uint16_t		 txq_size;
} __packed;

struct xnf_tx_rsp {
	uint16_t		 txp_id;
	int16_t			 txp_status;
} __packed;

union xnf_tx_desc {
	struct xnf_tx_req	 txd_req;
	struct xnf_tx_rsp	 txd_rsp;
} __packed;

#define XNF_TX_DESC		256
#define XNF_TX_FRAG		18

struct xnf_tx_ring {
	volatile uint32_t	 txr_prod;
	volatile uint32_t	 txr_prod_event;
	volatile uint32_t	 txr_cons;
	volatile uint32_t	 txr_cons_event;
	uint32_t		 txr_reserved[12];
	union xnf_tx_desc	 txr_desc[XNF_TX_DESC];
} __packed;


/* Management frame, "extra info" in Xen parlance */
struct xnf_mgmt {
	uint8_t			 mg_type;
#define  XNF_MGMT_MCAST_ADD	2
#define  XNF_MGMT_MCAST_DEL	3
	uint8_t			 mg_flags;
	union {
		uint8_t		 mgu_mcaddr[ETHER_ADDR_LEN];
		uint16_t	 mgu_pad[3];
	} u;
#define mg_mcaddr		 u.mgu_mcaddr
} __packed;


struct xnf_softc {
	struct device		 sc_dev;
	struct device		*sc_parent;
	char			 sc_node[XEN_MAX_NODE_LEN];
	char			 sc_backend[XEN_MAX_BACKEND_LEN];
	bus_dma_tag_t		 sc_dmat;
	int			 sc_domid;

	struct arpcom		 sc_ac;
	struct ifmedia		 sc_media;

	xen_intr_handle_t	 sc_xih;

	int			 sc_caps;
#define  XNF_CAP_SG		  0x0001
#define  XNF_CAP_CSUM4		  0x0002
#define  XNF_CAP_CSUM6		  0x0004
#define  XNF_CAP_MCAST		  0x0008
#define  XNF_CAP_SPLIT		  0x0010
#define  XNF_CAP_MULTIQ		  0x0020

	/* Rx ring */
	struct xnf_rx_ring	*sc_rx_ring;
	uint32_t		 sc_rx_cons;
	bus_dmamap_t		 sc_rx_rmap;		  /* map for the ring */
	bus_dma_segment_t	 sc_rx_seg;
	uint32_t		 sc_rx_ref;		  /* grant table ref */
	struct mbuf		*sc_rx_buf[XNF_RX_DESC];
	bus_dmamap_t		 sc_rx_dmap[XNF_RX_DESC]; /* maps for packets */
	struct mbuf		*sc_rx_cbuf[2];	  	  /* chain handling */

	/* Tx ring */
	struct xnf_tx_ring	*sc_tx_ring;
	uint32_t		 sc_tx_cons;
	bus_dmamap_t		 sc_tx_rmap;		  /* map for the ring */
	bus_dma_segment_t	 sc_tx_seg;
	uint32_t		 sc_tx_ref;		  /* grant table ref */
	int			 sc_tx_frags;
	struct mbuf		*sc_tx_buf[XNF_TX_DESC];
	bus_dmamap_t		 sc_tx_dmap[XNF_TX_DESC]; /* maps for packets */
};

int	xnf_match(struct device *, void *, void *);
void	xnf_attach(struct device *, struct device *, void *);
int	xnf_detach(struct device *, int);
int	xnf_lladdr(struct xnf_softc *);
int	xnf_ioctl(struct ifnet *, u_long, caddr_t);
int	xnf_media_change(struct ifnet *);
void	xnf_media_status(struct ifnet *, struct ifmediareq *);
int	xnf_iff(struct xnf_softc *);
void	xnf_init(struct xnf_softc *);
void	xnf_stop(struct xnf_softc *);
void	xnf_start(struct ifqueue *);
int	xnf_encap(struct xnf_softc *, struct mbuf *, uint32_t *);
void	xnf_intr(void *);
void	xnf_watchdog(struct ifnet *);
void	xnf_txeof(struct xnf_softc *);
void	xnf_rxeof(struct xnf_softc *);
int	xnf_rx_ring_fill(struct xnf_softc *);
int	xnf_rx_ring_create(struct xnf_softc *);
void	xnf_rx_ring_drain(struct xnf_softc *);
void	xnf_rx_ring_destroy(struct xnf_softc *);
int	xnf_tx_ring_create(struct xnf_softc *);
void	xnf_tx_ring_drain(struct xnf_softc *);
void	xnf_tx_ring_destroy(struct xnf_softc *);
int	xnf_capabilities(struct xnf_softc *sc);
int	xnf_init_backend(struct xnf_softc *);

struct cfdriver xnf_cd = {
	NULL, "xnf", DV_IFNET
};

const struct cfattach xnf_ca = {
	sizeof(struct xnf_softc), xnf_match, xnf_attach, xnf_detach
};

int
xnf_match(struct device *parent, void *match, void *aux)
{
	struct xen_attach_args *xa = aux;

	if (strcmp("vif", xa->xa_name))
		return (0);

	return (1);
}

void
xnf_attach(struct device *parent, struct device *self, void *aux)
{
	struct xen_attach_args *xa = aux;
	struct xnf_softc *sc = (struct xnf_softc *)self;
	struct ifnet *ifp = &sc->sc_ac.ac_if;

	sc->sc_parent = parent;
	sc->sc_dmat = xa->xa_dmat;
	sc->sc_domid = xa->xa_domid;

	memcpy(sc->sc_node, xa->xa_node, XEN_MAX_NODE_LEN);
	memcpy(sc->sc_backend, xa->xa_backend, XEN_MAX_BACKEND_LEN);

	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);

	if (xnf_lladdr(sc)) {
		printf(": failed to obtain MAC address\n");
		return;
	}

	if (xen_intr_establish(0, &sc->sc_xih, sc->sc_domid, xnf_intr, sc,
	    ifp->if_xname)) {
		printf(": failed to establish an interrupt\n");
		return;
	}
	xen_intr_mask(sc->sc_xih);

	printf(" backend %d channel %u: address %s\n", sc->sc_domid,
	    sc->sc_xih, ether_sprintf(sc->sc_ac.ac_enaddr));

	if (xnf_capabilities(sc)) {
		xen_intr_disestablish(sc->sc_xih);
		return;
	}

	if (sc->sc_caps & XNF_CAP_SG)
		ifp->if_hardmtu = 9000;

	if (xnf_rx_ring_create(sc)) {
		xen_intr_disestablish(sc->sc_xih);
		return;
	}
	if (xnf_tx_ring_create(sc)) {
		xen_intr_disestablish(sc->sc_xih);
		xnf_rx_ring_destroy(sc);
		return;
	}
	if (xnf_init_backend(sc)) {
		xen_intr_disestablish(sc->sc_xih);
		xnf_rx_ring_destroy(sc);
		xnf_tx_ring_destroy(sc);
		return;
	}

	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_xflags = IFXF_MPSAFE;
	ifp->if_ioctl = xnf_ioctl;
	ifp->if_qstart = xnf_start;
	ifp->if_watchdog = xnf_watchdog;
	ifp->if_softc = sc;

	ifp->if_capabilities = IFCAP_VLAN_MTU;
	if (sc->sc_caps & XNF_CAP_CSUM4)
		ifp->if_capabilities |= IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;
	if (sc->sc_caps & XNF_CAP_CSUM6)
		ifp->if_capabilities |= IFCAP_CSUM_TCPv6 | IFCAP_CSUM_UDPv6;

	IFQ_SET_MAXLEN(&ifp->if_snd, XNF_TX_DESC - 1);

	ifmedia_init(&sc->sc_media, IFM_IMASK, xnf_media_change,
	    xnf_media_status);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_MANUAL, 0, NULL);
	ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_MANUAL);

	if_attach(ifp);
	ether_ifattach(ifp);

	/* Kick out emulated em's and re's */
	xen_unplug_emulated(parent, XEN_UNPLUG_NIC);
}

int
xnf_detach(struct device *self, int flags)
{
	struct xnf_softc *sc = (struct xnf_softc *)self;
	struct ifnet *ifp = &sc->sc_ac.ac_if;

	xnf_stop(sc);

	ether_ifdetach(ifp);
	if_detach(ifp);

	xen_intr_disestablish(sc->sc_xih);

	if (sc->sc_tx_ring)
		xnf_tx_ring_destroy(sc);
	if (sc->sc_rx_ring)
		xnf_rx_ring_destroy(sc);

	return (0);
}

static int
nibble(int ch)
{
	if (ch >= '0' && ch <= '9')
		return (ch - '0');
	if (ch >= 'A' && ch <= 'F')
		return (10 + ch - 'A');
	if (ch >= 'a' && ch <= 'f')
		return (10 + ch - 'a');
	return (-1);
}

int
xnf_lladdr(struct xnf_softc *sc)
{
	char enaddr[ETHER_ADDR_LEN];
	char mac[32];
	int i, j, lo, hi;

	if (xs_getprop(sc->sc_parent, sc->sc_backend, "mac", mac, sizeof(mac)))
		return (-1);

	for (i = 0, j = 0; j < ETHER_ADDR_LEN; i += 3, j++) {
		if ((hi = nibble(mac[i])) == -1 ||
		    (lo = nibble(mac[i+1])) == -1)
			return (-1);
		enaddr[j] = hi << 4 | lo;
	}

	memcpy(sc->sc_ac.ac_enaddr, enaddr, ETHER_ADDR_LEN);
	return (0);
}

int
xnf_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct xnf_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	switch (command) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			xnf_init(sc);
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				xnf_init(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				xnf_stop(sc);
		}
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, command);
		break;
	default:
		error = ether_ioctl(ifp, &sc->sc_ac, command, data);
		break;
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			xnf_iff(sc);
		error = 0;
	}

	splx(s);

	return (error);
}

int
xnf_media_change(struct ifnet *ifp)
{
	return (0);
}

void
xnf_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	ifmr->ifm_status = IFM_ACTIVE | IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER | IFM_MANUAL;
}

int
xnf_iff(struct xnf_softc *sc)
{
	return (0);
}

void
xnf_init(struct xnf_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;

	xnf_stop(sc);

	xnf_iff(sc);

	xnf_rx_ring_fill(sc);

	if (xen_intr_unmask(sc->sc_xih)) {
		printf("%s: failed to enable interrupts\n", ifp->if_xname);
		xnf_stop(sc);
		return;
	}

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);
}

void
xnf_stop(struct xnf_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;

	ifp->if_flags &= ~IFF_RUNNING;

	xen_intr_mask(sc->sc_xih);

	ifp->if_timer = 0;

	ifq_barrier(&ifp->if_snd);
	xen_intr_barrier(sc->sc_xih);

	ifq_clr_oactive(&ifp->if_snd);

	if (sc->sc_tx_ring)
		xnf_tx_ring_drain(sc);
	if (sc->sc_rx_ring)
		xnf_rx_ring_drain(sc);
}

void
xnf_start(struct ifqueue *ifq)
{
	struct ifnet *ifp = ifq->ifq_if;
	struct xnf_softc *sc = ifp->if_softc;
	struct xnf_tx_ring *txr = sc->sc_tx_ring;
	struct mbuf *m;
	int pkts = 0;
	uint32_t prod, oprod;

	bus_dmamap_sync(sc->sc_dmat, sc->sc_tx_rmap, 0, 0,
	    BUS_DMASYNC_POSTREAD);

	prod = oprod = txr->txr_prod;

	for (;;) {
		if ((XNF_TX_DESC - (prod - sc->sc_tx_cons)) <
		    sc->sc_tx_frags) {
			/* transient */
			ifq_set_oactive(ifq);
			break;
		}

		m = ifq_dequeue(ifq);
		if (m == NULL)
			break;

		if (xnf_encap(sc, m, &prod)) {
			/* the chain is too large */
			ifp->if_oerrors++;
			m_freem(m);
			continue;
		}

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif
		pkts++;
	}
	if (pkts > 0) {
		txr->txr_prod = prod;
		if (txr->txr_cons_event <= txr->txr_cons)
			txr->txr_cons_event = txr->txr_cons +
			    ((txr->txr_prod - txr->txr_cons) >> 1) + 1;
		bus_dmamap_sync(sc->sc_dmat, sc->sc_tx_rmap, 0, 0,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
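		/*
		 * Shared-ring notify check: the backend asks to be signalled
		 * once requests appear past txr_prod_event, so only kick the
		 * event channel if that mark falls within the range of
		 * requests we just published.
		 */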
		if (prod - txr->txr_prod_event < prod - oprod)
			xen_intr_signal(sc->sc_xih);
		ifp->if_timer = 5;
	}
}

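/*
 * Count how many page-sized chunks, and thus Tx descriptors, an mbuf
 * chain will need, accounting for buffers that straddle page boundaries.
 */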
static inline int
xnf_fragcount(struct mbuf *m_head)
{
	struct mbuf *m;
	vaddr_t va, va0;
	int n = 0;

	for (m = m_head; m != NULL; m = m->m_next) {
		if (m->m_len == 0)
			continue;
		     /* start of the buffer */
		for (va0 = va = mtod(m, vaddr_t);
		     /* does the buffer end on this page? */
		     va + (PAGE_SIZE - (va & PAGE_MASK)) < va0 + m->m_len;
		     /* move on to the next page */
		     va += PAGE_SIZE - (va & PAGE_MASK))
			n++;
		n++;
	}
	return (n);
}

int
xnf_encap(struct xnf_softc *sc, struct mbuf *m_head, uint32_t *prod)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct xnf_tx_ring *txr = sc->sc_tx_ring;
	union xnf_tx_desc *txd;
	struct mbuf *m;
	bus_dmamap_t dmap;
	uint32_t oprod = *prod;
	int i, id, flags, n;

	if ((xnf_fragcount(m_head) > sc->sc_tx_frags) &&
	    m_defrag(m_head, M_DONTWAIT))
		goto errout;

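	/*
	 * The upper 16 bits of the DMA flags carry the backend domain id,
	 * which the Xen bus_dma code uses to set up grant table entries
	 * for the loaded pages.
	 */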
	flags = (sc->sc_domid << 16) | BUS_DMA_WRITE | BUS_DMA_NOWAIT;

	for (m = m_head; m != NULL && m->m_len > 0; m = m->m_next) {
		i = *prod & (XNF_TX_DESC - 1);
		dmap = sc->sc_tx_dmap[i];
		txd = &txr->txr_desc[i];
		if (sc->sc_tx_buf[i])
			panic("%s: cons %u(%u) prod %u next %u seg %d/%d\n",
			    ifp->if_xname, txr->txr_cons, sc->sc_tx_cons,
			    txr->txr_prod, *prod, *prod - oprod,
			    xnf_fragcount(m_head));

		if (bus_dmamap_load(sc->sc_dmat, dmap, m->m_data, m->m_len,
		    NULL, flags)) {
			DPRINTF("%s: failed to load %u bytes @%lu\n",
			    sc->sc_dev.dv_xname, m->m_len,
			    mtod(m, vaddr_t) & PAGE_MASK);
			goto unroll;
		}

		if (m == m_head) {
			if (m->m_pkthdr.csum_flags &
			    (M_TCP_CSUM_OUT | M_UDP_CSUM_OUT))
				txd->txd_req.txq_flags = XNF_TXF_CSUM_BLANK |
				    XNF_TXF_CSUM_VALID;
			txd->txd_req.txq_size = m->m_pkthdr.len;
		}
		for (n = 0; n < dmap->dm_nsegs; n++) {
			i = *prod & (XNF_TX_DESC - 1);
			txd = &txr->txr_desc[i];
			if (sc->sc_tx_buf[i])
				panic("%s: cons %u(%u) prod %u next %u "
				    "seg %d/%d\n", ifp->if_xname,
				    txr->txr_cons, sc->sc_tx_cons,
				    txr->txr_prod, *prod, *prod - oprod,
				    xnf_fragcount(m_head));

			/* Don't overwrite the length of the very first one */
			if (!(m == m_head && n == 0))
				txd->txd_req.txq_size = dmap->dm_segs[n].ds_len;
			/* The chunk flag will be removed from the last one */
			txd->txd_req.txq_flags |= XNF_TXF_CHUNK;
			txd->txd_req.txq_ref = dmap->dm_segs[n].ds_addr;
			if (n == 0)
				txd->txd_req.txq_offset =
				    mtod(m, vaddr_t) & PAGE_MASK;
			(*prod)++;
		}
	}
	/* Clear the chunk flag from the last segment */
	txd->txd_req.txq_flags &= ~XNF_TXF_CHUNK;
	sc->sc_tx_buf[i] = m_head;
	bus_dmamap_sync(sc->sc_dmat, sc->sc_tx_rmap, 0, 0,
	    BUS_DMASYNC_PREWRITE);

	return (0);

 unroll:
	for (; *prod != oprod; (*prod)--) {
		i = (*prod - 1) & (XNF_TX_DESC - 1);
		dmap = sc->sc_tx_dmap[i];
		txd = &txr->txr_desc[i];

		id = txd->txd_rsp.txp_id;
		memset(txd, 0, sizeof(*txd));
		txd->txd_req.txq_id = id;

		bus_dmamap_sync(sc->sc_dmat, dmap, 0, 0,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, dmap);

		if (sc->sc_tx_buf[i])
			sc->sc_tx_buf[i] = NULL;
	}

 errout:
	return (ENOBUFS);
}

void
xnf_intr(void *arg)
{
	struct xnf_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ac.ac_if;

	if (ifp->if_flags & IFF_RUNNING) {
		xnf_txeof(sc);
		xnf_rxeof(sc);
	}
}

void
xnf_watchdog(struct ifnet *ifp)
{
	struct xnf_softc *sc = ifp->if_softc;
	struct xnf_tx_ring *txr = sc->sc_tx_ring;

	printf("%s: tx stuck: prod %u cons %u,%u evt %u,%u\n",
	    ifp->if_xname, txr->txr_prod, txr->txr_cons, sc->sc_tx_cons,
	    txr->txr_prod_event, txr->txr_cons_event);
}

void
xnf_txeof(struct xnf_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct xnf_tx_ring *txr = sc->sc_tx_ring;
	union xnf_tx_desc *txd;
	bus_dmamap_t dmap;
	uint32_t cons;
	int i, id;

	bus_dmamap_sync(sc->sc_dmat, sc->sc_tx_rmap, 0, 0,
	    BUS_DMASYNC_POSTWRITE);

	for (cons = sc->sc_tx_cons; cons != txr->txr_cons; cons++) {
		i = cons & (XNF_TX_DESC - 1);
		txd = &txr->txr_desc[i];
		dmap = sc->sc_tx_dmap[i];

		id = txd->txd_rsp.txp_id;
		memset(txd, 0, sizeof(*txd));
		txd->txd_req.txq_id = id;

		bus_dmamap_sync(sc->sc_dmat, dmap, 0, 0,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, dmap);

		if (sc->sc_tx_buf[i] != NULL) {
			m_freem(sc->sc_tx_buf[i]);
			sc->sc_tx_buf[i] = NULL;
		}
	}

	sc->sc_tx_cons = cons;
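	/*
	 * Request the next Tx completion event roughly halfway between the
	 * current consumer and producer to batch Tx interrupts.
	 */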
	txr->txr_cons_event = sc->sc_tx_cons +
	    ((txr->txr_prod - sc->sc_tx_cons) >> 1) + 1;
	bus_dmamap_sync(sc->sc_dmat, sc->sc_tx_rmap, 0, 0,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	if (txr->txr_cons == txr->txr_prod)
		ifp->if_timer = 0;
	if (ifq_is_oactive(&ifp->if_snd))
		ifq_restart(&ifp->if_snd);
}

void
xnf_rxeof(struct xnf_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct xnf_rx_ring *rxr = sc->sc_rx_ring;
	union xnf_rx_desc *rxd;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct mbuf *fmp = sc->sc_rx_cbuf[0];
	struct mbuf *lmp = sc->sc_rx_cbuf[1];
	struct mbuf *m;
	bus_dmamap_t dmap;
	uint32_t cons;
	int i, id, flags, len, offset;

	bus_dmamap_sync(sc->sc_dmat, sc->sc_rx_rmap, 0, 0,
	    BUS_DMASYNC_POSTREAD);

	for (cons = sc->sc_rx_cons; cons != rxr->rxr_cons; cons++) {
		i = cons & (XNF_RX_DESC - 1);
		rxd = &rxr->rxr_desc[i];
		dmap = sc->sc_rx_dmap[i];

		len = rxd->rxd_rsp.rxp_status;
		flags = rxd->rxd_rsp.rxp_flags;
		offset = rxd->rxd_rsp.rxp_offset;
		id = rxd->rxd_rsp.rxp_id;
		memset(rxd, 0, sizeof(*rxd));
		rxd->rxd_req.rxq_id = id;

		bus_dmamap_sync(sc->sc_dmat, dmap, 0, 0,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, dmap);

		m = sc->sc_rx_buf[i];
		KASSERT(m != NULL);
		sc->sc_rx_buf[i] = NULL;

		if (flags & XNF_RXF_MGMT) {
			printf("%s: management data present\n",
			    ifp->if_xname);
			m_freem(m);
			continue;
		}

		if (flags & XNF_RXF_CSUM_VALID)
			m->m_pkthdr.csum_flags = M_TCP_CSUM_IN_OK |
			    M_UDP_CSUM_IN_OK;

		if (len < 0 || (len + offset > PAGE_SIZE)) {
			ifp->if_ierrors++;
			m_freem(m);
			continue;
		}

		m->m_len = len;
		m->m_data += offset;

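		/*
		 * Accumulate fragments of a chained packet: fmp points at
		 * the head mbuf and lmp at the tail.  A response flagged as
		 * a chunk is not yet complete, so the partial chain is
		 * parked in sc_rx_cbuf until the final fragment arrives.
		 */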
		if (fmp == NULL) {
			m->m_pkthdr.len = len;
			fmp = m;
		} else {
			m->m_flags &= ~M_PKTHDR;
			lmp->m_next = m;
			fmp->m_pkthdr.len += m->m_len;
		}
		lmp = m;

		if (flags & XNF_RXF_CHUNK) {
			sc->sc_rx_cbuf[0] = fmp;
			sc->sc_rx_cbuf[1] = lmp;
			continue;
		}

		m = fmp;

		ml_enqueue(&ml, m);
		sc->sc_rx_cbuf[0] = sc->sc_rx_cbuf[1] = fmp = lmp = NULL;
	}

	sc->sc_rx_cons = cons;
	rxr->rxr_cons_event = sc->sc_rx_cons + 1;
	bus_dmamap_sync(sc->sc_dmat, sc->sc_rx_rmap, 0, 0,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	if_input(ifp, &ml);

	if (xnf_rx_ring_fill(sc) || (sc->sc_rx_cons != rxr->rxr_cons))
		xen_intr_schedule(sc->sc_xih);
}

int
xnf_rx_ring_fill(struct xnf_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct xnf_rx_ring *rxr = sc->sc_rx_ring;
	bus_dmamap_t dmap;
	struct mbuf *m;
	uint32_t cons, prod, oprod;
	int i, flags, resched = 0;

	cons = rxr->rxr_cons;
	prod = oprod = rxr->rxr_prod;

	while (prod - cons < XNF_RX_DESC) {
		i = prod & (XNF_RX_DESC - 1);
		if (sc->sc_rx_buf[i])
			break;
		m = MCLGETI(NULL, M_DONTWAIT, NULL, XNF_MCLEN);
		if (m == NULL)
			break;
		m->m_len = m->m_pkthdr.len = XNF_MCLEN;
		dmap = sc->sc_rx_dmap[i];
		flags = (sc->sc_domid << 16) | BUS_DMA_READ | BUS_DMA_NOWAIT;
		if (bus_dmamap_load_mbuf(sc->sc_dmat, dmap, m, flags)) {
			m_freem(m);
			break;
		}
		sc->sc_rx_buf[i] = m;
		rxr->rxr_desc[i].rxd_req.rxq_ref = dmap->dm_segs[0].ds_addr;
		bus_dmamap_sync(sc->sc_dmat, dmap, 0, 0, BUS_DMASYNC_PREWRITE);
		prod++;
	}

	rxr->rxr_prod = prod;
	bus_dmamap_sync(sc->sc_dmat, sc->sc_rx_rmap, 0, 0,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	if ((prod - cons < XNF_RX_MIN) && (ifp->if_flags & IFF_RUNNING))
		resched = 1;
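	/* Same shared-ring notify check as on the Tx side */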
	if (prod - rxr->rxr_prod_event < prod - oprod)
		xen_intr_signal(sc->sc_xih);

	return (resched);
}

int
xnf_rx_ring_create(struct xnf_softc *sc)
{
	int i, flags, rsegs;

	/* Allocate a page of memory for the ring */
	if (bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
	    &sc->sc_rx_seg, 1, &rsegs, BUS_DMA_ZERO | BUS_DMA_NOWAIT)) {
		printf("%s: failed to allocate memory for the rx ring\n",
		    sc->sc_dev.dv_xname);
		return (-1);
	}
	/* Map in the allocated memory into the ring structure */
	if (bus_dmamem_map(sc->sc_dmat, &sc->sc_rx_seg, 1, PAGE_SIZE,
	    (caddr_t *)(&sc->sc_rx_ring), BUS_DMA_NOWAIT)) {
		printf("%s: failed to map memory for the rx ring\n",
		    sc->sc_dev.dv_xname);
		goto errout;
	}
	/* Create a map to load the ring memory into */
	if (bus_dmamap_create(sc->sc_dmat, PAGE_SIZE, 1, PAGE_SIZE, 0,
	    BUS_DMA_NOWAIT, &sc->sc_rx_rmap)) {
		printf("%s: failed to create a memory map for the rx ring\n",
		    sc->sc_dev.dv_xname);
		goto errout;
	}
	/* Load the ring into the ring map to extract the PA */
	flags = (sc->sc_domid << 16) | BUS_DMA_NOWAIT;
	if (bus_dmamap_load(sc->sc_dmat, sc->sc_rx_rmap, sc->sc_rx_ring,
	    PAGE_SIZE, NULL, flags)) {
		printf("%s: failed to load the rx ring map\n",
		    sc->sc_dev.dv_xname);
		goto errout;
	}
	sc->sc_rx_ref = sc->sc_rx_rmap->dm_segs[0].ds_addr;

	sc->sc_rx_ring->rxr_prod_event = sc->sc_rx_ring->rxr_cons_event = 1;

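	/*
	 * One DMA map per descriptor; the page-sized boundary keeps every
	 * receive buffer within a single page that can be granted to the
	 * backend.
	 */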
	for (i = 0; i < XNF_RX_DESC; i++) {
		if (bus_dmamap_create(sc->sc_dmat, XNF_MCLEN, 1, XNF_MCLEN,
		    PAGE_SIZE, BUS_DMA_NOWAIT, &sc->sc_rx_dmap[i])) {
			printf("%s: failed to create a memory map for the"
			    " rx slot %d\n", sc->sc_dev.dv_xname, i);
			goto errout;
		}
		sc->sc_rx_ring->rxr_desc[i].rxd_req.rxq_id = i;
	}

	return (0);

 errout:
	xnf_rx_ring_destroy(sc);
	return (-1);
}

void
xnf_rx_ring_drain(struct xnf_softc *sc)
{
	struct xnf_rx_ring *rxr = sc->sc_rx_ring;

	if (sc->sc_rx_cons != rxr->rxr_cons)
		xnf_rxeof(sc);
}

void
xnf_rx_ring_destroy(struct xnf_softc *sc)
{
	int i;

	for (i = 0; i < XNF_RX_DESC; i++) {
		if (sc->sc_rx_buf[i] == NULL)
			continue;
		bus_dmamap_sync(sc->sc_dmat, sc->sc_rx_dmap[i], 0, 0,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, sc->sc_rx_dmap[i]);
		m_freem(sc->sc_rx_buf[i]);
		sc->sc_rx_buf[i] = NULL;
	}

	for (i = 0; i < XNF_RX_DESC; i++) {
		if (sc->sc_rx_dmap[i] == NULL)
			continue;
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_rx_dmap[i]);
		sc->sc_rx_dmap[i] = NULL;
	}
	if (sc->sc_rx_rmap) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_rx_rmap, 0, 0,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, sc->sc_rx_rmap);
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_rx_rmap);
	}
	if (sc->sc_rx_ring) {
		bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_rx_ring,
		    PAGE_SIZE);
		bus_dmamem_free(sc->sc_dmat, &sc->sc_rx_seg, 1);
	}
	sc->sc_rx_ring = NULL;
	sc->sc_rx_rmap = NULL;
	sc->sc_rx_cons = 0;
}

int
xnf_tx_ring_create(struct xnf_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	int i, flags, nsegs, rsegs;
	bus_size_t segsz;

	sc->sc_tx_frags = sc->sc_caps & XNF_CAP_SG ? XNF_TX_FRAG : 1;

	/* Allocate a page of memory for the ring */
	if (bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
	    &sc->sc_tx_seg, 1, &rsegs, BUS_DMA_ZERO | BUS_DMA_NOWAIT)) {
		printf("%s: failed to allocate memory for the tx ring\n",
		    sc->sc_dev.dv_xname);
		return (-1);
	}
	/* Map in the allocated memory into the ring structure */
	if (bus_dmamem_map(sc->sc_dmat, &sc->sc_tx_seg, 1, PAGE_SIZE,
	    (caddr_t *)&sc->sc_tx_ring, BUS_DMA_NOWAIT)) {
		printf("%s: failed to map memory for the tx ring\n",
		    sc->sc_dev.dv_xname);
		goto errout;
	}
	/* Create a map to load the ring memory into */
	if (bus_dmamap_create(sc->sc_dmat, PAGE_SIZE, 1, PAGE_SIZE, 0,
	    BUS_DMA_NOWAIT, &sc->sc_tx_rmap)) {
		printf("%s: failed to create a memory map for the tx ring\n",
		    sc->sc_dev.dv_xname);
		goto errout;
	}
	/* Load the ring into the ring map to extract the PA */
	flags = (sc->sc_domid << 16) | BUS_DMA_NOWAIT;
	if (bus_dmamap_load(sc->sc_dmat, sc->sc_tx_rmap, sc->sc_tx_ring,
	    PAGE_SIZE, NULL, flags)) {
		printf("%s: failed to load the tx ring map\n",
		    sc->sc_dev.dv_xname);
		goto errout;
	}
	sc->sc_tx_ref = sc->sc_tx_rmap->dm_segs[0].ds_addr;

	sc->sc_tx_ring->txr_prod_event = sc->sc_tx_ring->txr_cons_event = 1;

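	/*
	 * Size the per-packet DMA maps: with scatter-gather the backend
	 * accepts a chain of page-sized segments covering a jumbo frame
	 * (plus one extra segment for page misalignment), otherwise each
	 * packet has to fit in a single page.
	 */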
	if (sc->sc_caps & XNF_CAP_SG) {
		nsegs = roundup(ifp->if_hardmtu, XNF_MCLEN) / XNF_MCLEN + 1;
		segsz = nsegs * XNF_MCLEN;
	} else {
		nsegs = 1;
		segsz = XNF_MCLEN;
	}
	for (i = 0; i < XNF_TX_DESC; i++) {
		if (bus_dmamap_create(sc->sc_dmat, segsz, nsegs, XNF_MCLEN,
		    PAGE_SIZE, BUS_DMA_NOWAIT, &sc->sc_tx_dmap[i])) {
			printf("%s: failed to create a memory map for the"
			    " tx slot %d\n", sc->sc_dev.dv_xname, i);
			goto errout;
		}
		sc->sc_tx_ring->txr_desc[i].txd_req.txq_id = i;
	}

	return (0);

 errout:
	xnf_tx_ring_destroy(sc);
	return (-1);
}

void
xnf_tx_ring_drain(struct xnf_softc *sc)
{
	struct xnf_tx_ring *txr = sc->sc_tx_ring;

	if (sc->sc_tx_cons != txr->txr_cons)
		xnf_txeof(sc);
}

void
xnf_tx_ring_destroy(struct xnf_softc *sc)
{
	int i;

	for (i = 0; i < XNF_TX_DESC; i++) {
		if (sc->sc_tx_dmap[i] == NULL)
			continue;
		bus_dmamap_sync(sc->sc_dmat, sc->sc_tx_dmap[i], 0, 0,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, sc->sc_tx_dmap[i]);
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_tx_dmap[i]);
		sc->sc_tx_dmap[i] = NULL;
		if (sc->sc_tx_buf[i] == NULL)
			continue;
		m_free(sc->sc_tx_buf[i]);
		sc->sc_tx_buf[i] = NULL;
	}
	if (sc->sc_tx_rmap) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_tx_rmap, 0, 0,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, sc->sc_tx_rmap);
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_tx_rmap);
	}
	if (sc->sc_tx_ring) {
		bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_tx_ring,
		    PAGE_SIZE);
		bus_dmamem_free(sc->sc_dmat, &sc->sc_tx_seg, 1);
	}
	sc->sc_tx_ring = NULL;
	sc->sc_tx_rmap = NULL;
}

int
xnf_capabilities(struct xnf_softc *sc)
{
	unsigned long long res;
	const char *prop;
	char val[32];
	int error;

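	/*
	 * Feature negotiation: the backend advertises what it supports as
	 * XenStore properties; ENOENT simply means a feature is not
	 * offered.
	 */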
	/* Query scatter-gather capability */
	prop = "feature-sg";
	if ((error = xs_getnum(sc->sc_parent, sc->sc_backend, prop, &res)) != 0
	    && error != ENOENT)
		goto errout;
	if (error == 0 && res == 1)
		sc->sc_caps |= XNF_CAP_SG;

	/* Query IPv4 checksum offloading capability, enabled by default */
	sc->sc_caps |= XNF_CAP_CSUM4;
	prop = "feature-no-csum-offload";
	if ((error = xs_getnum(sc->sc_parent, sc->sc_backend, prop, &res)) != 0
	    && error != ENOENT)
		goto errout;
	if (error == 0 && res == 1)
		sc->sc_caps &= ~XNF_CAP_CSUM4;

	/* Query IPv6 checksum offloading capability */
	prop = "feature-ipv6-csum-offload";
	if ((error = xs_getnum(sc->sc_parent, sc->sc_backend, prop, &res)) != 0
	    && error != ENOENT)
		goto errout;
	if (error == 0 && res == 1)
		sc->sc_caps |= XNF_CAP_CSUM6;

	/* Query multicast traffic control capability */
	prop = "feature-multicast-control";
	if ((error = xs_getnum(sc->sc_parent, sc->sc_backend, prop, &res)) != 0
	    && error != ENOENT)
		goto errout;
	if (error == 0 && res == 1)
		sc->sc_caps |= XNF_CAP_MCAST;

	/* Query split Rx/Tx event channel capability */
	prop = "feature-split-event-channels";
	if ((error = xs_getnum(sc->sc_parent, sc->sc_backend, prop, &res)) != 0
	    && error != ENOENT)
		goto errout;
	if (error == 0 && res == 1)
		sc->sc_caps |= XNF_CAP_SPLIT;

	/* Query multiqueue capability */
	prop = "multi-queue-max-queues";
	if ((error = xs_getprop(sc->sc_parent, sc->sc_backend, prop, val,
	    sizeof(val))) != 0 && error != ENOENT)
		goto errout;
	if (error == 0)
		sc->sc_caps |= XNF_CAP_MULTIQ;

	DPRINTF("%s: capabilities %b\n", sc->sc_dev.dv_xname, sc->sc_caps,
	    "\20\006MULTIQ\005SPLIT\004MCAST\003CSUM6\002CSUM4\001SG");
	return (0);

 errout:
	printf("%s: failed to read \"%s\" property\n", sc->sc_dev.dv_xname,
	    prop);
	return (-1);
}

int
xnf_init_backend(struct xnf_softc *sc)
{
	const char *prop;

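	/*
	 * Publish the ring grant references, negotiated options and the
	 * event channel in the frontend XenStore node, then switch the
	 * device state to "connected" so that the backend picks them up.
	 */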
	/* Plumb the Rx ring */
	prop = "rx-ring-ref";
	if (xs_setnum(sc->sc_parent, sc->sc_node, prop, sc->sc_rx_ref))
		goto errout;
	/* Enable "copy" mode */
	prop = "request-rx-copy";
	if (xs_setnum(sc->sc_parent, sc->sc_node, prop, 1))
		goto errout;
	/* Enable notify mode */
	prop = "feature-rx-notify";
	if (xs_setnum(sc->sc_parent, sc->sc_node, prop, 1))
		goto errout;

	/* Plumb the Tx ring */
	prop = "tx-ring-ref";
	if (xs_setnum(sc->sc_parent, sc->sc_node, prop, sc->sc_tx_ref))
		goto errout;
	/* Enable scatter-gather mode */
	if (sc->sc_tx_frags > 1) {
		prop = "feature-sg";
		if (xs_setnum(sc->sc_parent, sc->sc_node, prop, 1))
			goto errout;
	}

	/* Enable IPv6 checksum offloading */
	if (sc->sc_caps & XNF_CAP_CSUM6) {
		prop = "feature-ipv6-csum-offload";
		if (xs_setnum(sc->sc_parent, sc->sc_node, prop, 1))
			goto errout;
	}

	/* Plumb the event channel port */
	prop = "event-channel";
	if (xs_setnum(sc->sc_parent, sc->sc_node, prop, sc->sc_xih))
		goto errout;

	/* Connect the device */
	prop = "state";
	if (xs_setprop(sc->sc_parent, sc->sc_node, prop, XEN_STATE_CONNECTED,
	    strlen(XEN_STATE_CONNECTED)))
		goto errout;

	return (0);

 errout:
	printf("%s: failed to set \"%s\" property\n", sc->sc_dev.dv_xname,
	    prop);
	return (-1);
}