1 /* $OpenBSD: if_xnf.c,v 1.70 2024/05/24 10:05:55 jsg Exp $ */
2
3 /*
4 * Copyright (c) 2015, 2016 Mike Belopuhov
5 *
6 * Permission to use, copy, modify, and distribute this software for any
7 * purpose with or without fee is hereby granted, provided that the above
8 * copyright notice and this permission notice appear in all copies.
9 *
10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 */
18
19 #include "bpfilter.h"
20
21 #include <sys/param.h>
22 #include <sys/systm.h>
23 #include <sys/atomic.h>
24 #include <sys/device.h>
25 #include <sys/mbuf.h>
26 #include <sys/queue.h>
27 #include <sys/sockio.h>
28 #include <sys/task.h>
29
30 #include <machine/bus.h>
31
32 #include <dev/pv/xenreg.h>
33 #include <dev/pv/xenvar.h>
34
35 #include <net/if.h>
36 #include <net/if_media.h>
37
38 #include <netinet/in.h>
39 #include <netinet/if_ether.h>
40
41 #if NBPFILTER > 0
42 #include <net/bpf.h>
43 #endif
44
45 /* #define XNF_DEBUG */
46
47 #ifdef XNF_DEBUG
48 #define DPRINTF(x...) printf(x)
49 #else
50 #define DPRINTF(x...)
51 #endif
52
53 /*
54 * Rx ring
55 */
56
57 struct xnf_rx_req {
58 uint16_t rxq_id;
59 uint16_t rxq_pad;
60 uint32_t rxq_ref;
61 } __packed;
62
63 struct xnf_rx_rsp {
64 uint16_t rxp_id;
65 uint16_t rxp_offset;
66 uint16_t rxp_flags;
67 #define XNF_RXF_CSUM_VALID 0x0001
68 #define XNF_RXF_CSUM_BLANK 0x0002
69 #define XNF_RXF_CHUNK 0x0004
70 #define XNF_RXF_MGMT 0x0008
71 int16_t rxp_status;
72 } __packed;
73
74 union xnf_rx_desc {
75 struct xnf_rx_req rxd_req;
76 struct xnf_rx_rsp rxd_rsp;
77 } __packed;
78
79 #define XNF_RX_DESC 256
80 #define XNF_MCLEN PAGE_SIZE
81 #define XNF_RX_MIN 32
82
83 struct xnf_rx_ring {
84 volatile uint32_t rxr_prod;
85 volatile uint32_t rxr_prod_event;
86 volatile uint32_t rxr_cons;
87 volatile uint32_t rxr_cons_event;
88 uint32_t rxr_reserved[12];
89 union xnf_rx_desc rxr_desc[XNF_RX_DESC];
90 } __packed;
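/*
 * The ring page is shared with the backend.  rxr_prod is advanced by the
 * frontend as it posts requests; rxr_cons is advanced by the backend as
 * it turns those slots into responses, and the frontend consumes up to it
 * (tracking its own progress in sc_rx_cons).  The rxr_*_event fields
 * implement event-index interrupt suppression: a side records the index
 * at which it wants to be notified and the peer raises an event only once
 * its counter passes that value.  The reserved words pad the header to
 * 64 bytes, matching Xen's shared-ring layout.
 */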
91
92
93 /*
94 * Tx ring
95 */
96
97 struct xnf_tx_req {
98 uint32_t txq_ref;
99 uint16_t txq_offset;
100 uint16_t txq_flags;
101 #define XNF_TXF_CSUM_BLANK 0x0001
102 #define XNF_TXF_CSUM_VALID 0x0002
103 #define XNF_TXF_CHUNK 0x0004
104 #define XNF_TXF_ETXRA 0x0008
105 uint16_t txq_id;
106 uint16_t txq_size;
107 } __packed;
108
109 struct xnf_tx_rsp {
110 uint16_t txp_id;
111 int16_t txp_status;
112 } __packed;
113
114 union xnf_tx_desc {
115 struct xnf_tx_req txd_req;
116 struct xnf_tx_rsp txd_rsp;
117 } __packed;
118
119 #define XNF_TX_DESC 256
120 #define XNF_TX_FRAG 18
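/*
 * XNF_TX_FRAG caps the number of descriptors a single packet may occupy
 * when scatter-gather has been negotiated; without "feature-sg" a packet
 * must fit into one descriptor (see xnf_tx_ring_create()).  The value 18
 * matches the traditional Xen netif per-packet slot limit.
 */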
121
122 struct xnf_tx_ring {
123 volatile uint32_t txr_prod;
124 volatile uint32_t txr_prod_event;
125 volatile uint32_t txr_cons;
126 volatile uint32_t txr_cons_event;
127 uint32_t txr_reserved[12];
128 union xnf_tx_desc txr_desc[XNF_TX_DESC];
129 } __packed;
130
131 struct xnf_tx_buf {
132 uint32_t txb_ndesc;
133 bus_dmamap_t txb_dmap;
134 struct mbuf *txb_mbuf;
135 };
136
137 /* Management frame, "extra info" in Xen parlance */
138 struct xnf_mgmt {
139 uint8_t mg_type;
140 #define XNF_MGMT_MCAST_ADD 2
141 #define XNF_MGMT_MCAST_DEL 3
142 uint8_t mg_flags;
143 union {
144 uint8_t mgu_mcaddr[ETHER_ADDR_LEN];
145 uint16_t mgu_pad[3];
146 } u;
147 #define mg_mcaddr u.mgu_mcaddr
148 } __packed;
149
150
151 struct xnf_softc {
152 struct device sc_dev;
153 struct device *sc_parent;
154 char sc_node[XEN_MAX_NODE_LEN];
155 char sc_backend[XEN_MAX_BACKEND_LEN];
156 bus_dma_tag_t sc_dmat;
157 int sc_domid;
158
159 struct arpcom sc_ac;
160 struct ifmedia sc_media;
161
162 xen_intr_handle_t sc_xih;
163
164 int sc_caps;
165 #define XNF_CAP_SG 0x0001
166 #define XNF_CAP_CSUM4 0x0002
167 #define XNF_CAP_CSUM6 0x0004
168 #define XNF_CAP_MCAST 0x0008
169 #define XNF_CAP_SPLIT 0x0010
170 #define XNF_CAP_MULTIQ 0x0020
171
172 /* Rx ring */
173 struct xnf_rx_ring *sc_rx_ring;
174 bus_dmamap_t sc_rx_rmap; /* map for the ring */
175 bus_dma_segment_t sc_rx_seg;
176 uint32_t sc_rx_ref; /* grant table ref */
177 uint32_t sc_rx_cons;
178 struct mbuf *sc_rx_buf[XNF_RX_DESC];
179 bus_dmamap_t sc_rx_dmap[XNF_RX_DESC]; /* maps for packets */
180 struct mbuf *sc_rx_cbuf[2]; /* chain handling */
181
182 /* Tx ring */
183 struct xnf_tx_ring *sc_tx_ring;
184 bus_dmamap_t sc_tx_rmap; /* map for the ring */
185 bus_dma_segment_t sc_tx_seg;
186 uint32_t sc_tx_ref; /* grant table ref */
187 uint32_t sc_tx_cons;
188 int sc_tx_frags;
189 uint32_t sc_tx_next; /* next buffer */
190 volatile unsigned int sc_tx_avail;
191 struct xnf_tx_buf sc_tx_buf[XNF_TX_DESC];
192 };
193
194 int xnf_match(struct device *, void *, void *);
195 void xnf_attach(struct device *, struct device *, void *);
196 int xnf_detach(struct device *, int);
197 int xnf_lladdr(struct xnf_softc *);
198 int xnf_ioctl(struct ifnet *, u_long, caddr_t);
199 int xnf_media_change(struct ifnet *);
200 void xnf_media_status(struct ifnet *, struct ifmediareq *);
201 int xnf_iff(struct xnf_softc *);
202 void xnf_init(struct xnf_softc *);
203 void xnf_stop(struct xnf_softc *);
204 void xnf_start(struct ifqueue *);
205 int xnf_encap(struct xnf_softc *, struct mbuf *, uint32_t *);
206 void xnf_intr(void *);
207 void xnf_watchdog(struct ifnet *);
208 void xnf_txeof(struct xnf_softc *);
209 void xnf_rxeof(struct xnf_softc *);
210 int xnf_rx_ring_fill(struct xnf_softc *);
211 int xnf_rx_ring_create(struct xnf_softc *);
212 void xnf_rx_ring_drain(struct xnf_softc *);
213 void xnf_rx_ring_destroy(struct xnf_softc *);
214 int xnf_tx_ring_create(struct xnf_softc *);
215 void xnf_tx_ring_drain(struct xnf_softc *);
216 void xnf_tx_ring_destroy(struct xnf_softc *);
217 int xnf_capabilities(struct xnf_softc *sc);
218 int xnf_init_backend(struct xnf_softc *);
219
220 struct cfdriver xnf_cd = {
221 NULL, "xnf", DV_IFNET
222 };
223
224 const struct cfattach xnf_ca = {
225 sizeof(struct xnf_softc), xnf_match, xnf_attach, xnf_detach
226 };
227
228 int
229 xnf_match(struct device *parent, void *match, void *aux)
230 {
231 struct xen_attach_args *xa = aux;
232
233 if (strcmp("vif", xa->xa_name))
234 return (0);
235
236 return (1);
237 }
238
239 void
240 xnf_attach(struct device *parent, struct device *self, void *aux)
241 {
242 struct xen_attach_args *xa = aux;
243 struct xnf_softc *sc = (struct xnf_softc *)self;
244 struct ifnet *ifp = &sc->sc_ac.ac_if;
245
246 sc->sc_parent = parent;
247 sc->sc_dmat = xa->xa_dmat;
248 sc->sc_domid = xa->xa_domid;
249
250 memcpy(sc->sc_node, xa->xa_node, XEN_MAX_NODE_LEN);
251 memcpy(sc->sc_backend, xa->xa_backend, XEN_MAX_BACKEND_LEN);
252
253 strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);
254
255 if (xnf_lladdr(sc)) {
256 printf(": failed to obtain MAC address\n");
257 return;
258 }
259
260 if (xen_intr_establish(0, &sc->sc_xih, sc->sc_domid, xnf_intr, sc,
261 ifp->if_xname)) {
262 printf(": failed to establish an interrupt\n");
263 return;
264 }
265 xen_intr_mask(sc->sc_xih);
266
267 printf(" backend %d channel %u: address %s\n", sc->sc_domid,
268 sc->sc_xih, ether_sprintf(sc->sc_ac.ac_enaddr));
269
270 if (xnf_capabilities(sc)) {
271 xen_intr_disestablish(sc->sc_xih);
272 return;
273 }
274
275 if (sc->sc_caps & XNF_CAP_SG)
276 ifp->if_hardmtu = 9000;
277
278 if (xnf_rx_ring_create(sc)) {
279 xen_intr_disestablish(sc->sc_xih);
280 return;
281 }
282 if (xnf_tx_ring_create(sc)) {
283 xen_intr_disestablish(sc->sc_xih);
284 xnf_rx_ring_destroy(sc);
285 return;
286 }
287 if (xnf_init_backend(sc)) {
288 xen_intr_disestablish(sc->sc_xih);
289 xnf_rx_ring_destroy(sc);
290 xnf_tx_ring_destroy(sc);
291 return;
292 }
293
294 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
295 ifp->if_xflags = IFXF_MPSAFE;
296 ifp->if_ioctl = xnf_ioctl;
297 ifp->if_qstart = xnf_start;
298 ifp->if_watchdog = xnf_watchdog;
299 ifp->if_softc = sc;
300
301 ifp->if_capabilities = IFCAP_VLAN_MTU;
302 if (sc->sc_caps & XNF_CAP_CSUM4)
303 ifp->if_capabilities |= IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;
304 if (sc->sc_caps & XNF_CAP_CSUM6)
305 ifp->if_capabilities |= IFCAP_CSUM_TCPv6 | IFCAP_CSUM_UDPv6;
306
307 ifq_init_maxlen(&ifp->if_snd, XNF_TX_DESC - 1);
308
309 ifmedia_init(&sc->sc_media, IFM_IMASK, xnf_media_change,
310 xnf_media_status);
311 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_MANUAL, 0, NULL);
312 ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_MANUAL);
313
314 if_attach(ifp);
315 ether_ifattach(ifp);
316
317 /* Kick out emulated em's and re's */
318 xen_unplug_emulated(parent, XEN_UNPLUG_NIC);
319 }
320
321 int
322 xnf_detach(struct device *self, int flags)
323 {
324 struct xnf_softc *sc = (struct xnf_softc *)self;
325 struct ifnet *ifp = &sc->sc_ac.ac_if;
326
327 xnf_stop(sc);
328
329 ether_ifdetach(ifp);
330 if_detach(ifp);
331
332 xen_intr_disestablish(sc->sc_xih);
333
334 if (sc->sc_tx_ring)
335 xnf_tx_ring_destroy(sc);
336 if (sc->sc_rx_ring)
337 xnf_rx_ring_destroy(sc);
338
339 return (0);
340 }
341
342 static int
343 nibble(int ch)
344 {
345 if (ch >= '0' && ch <= '9')
346 return (ch - '0');
347 if (ch >= 'A' && ch <= 'F')
348 return (10 + ch - 'A');
349 if (ch >= 'a' && ch <= 'f')
350 return (10 + ch - 'a');
351 return (-1);
352 }
353
354 int
355 xnf_lladdr(struct xnf_softc *sc)
356 {
357 char enaddr[ETHER_ADDR_LEN];
358 char mac[32];
359 int i, j, lo, hi;
360
361 if (xs_getprop(sc->sc_parent, sc->sc_backend, "mac", mac, sizeof(mac)))
362 return (-1);
363
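	/*
	 * The "mac" property is a colon-separated hex string such as
	 * "00:16:3e:xx:xx:xx"; advance three characters per octet to skip
	 * the separators.
	 */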
364 for (i = 0, j = 0; j < ETHER_ADDR_LEN; i += 3, j++) {
365 if ((hi = nibble(mac[i])) == -1 ||
366 (lo = nibble(mac[i+1])) == -1)
367 return (-1);
368 enaddr[j] = hi << 4 | lo;
369 }
370
371 memcpy(sc->sc_ac.ac_enaddr, enaddr, ETHER_ADDR_LEN);
372 return (0);
373 }
374
375 int
376 xnf_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
377 {
378 struct xnf_softc *sc = ifp->if_softc;
379 struct ifreq *ifr = (struct ifreq *)data;
380 int s, error = 0;
381
382 s = splnet();
383
384 switch (command) {
385 case SIOCSIFADDR:
386 ifp->if_flags |= IFF_UP;
387 if (!(ifp->if_flags & IFF_RUNNING))
388 xnf_init(sc);
389 break;
390 case SIOCSIFFLAGS:
391 if (ifp->if_flags & IFF_UP) {
392 if (ifp->if_flags & IFF_RUNNING)
393 error = ENETRESET;
394 else
395 xnf_init(sc);
396 } else {
397 if (ifp->if_flags & IFF_RUNNING)
398 xnf_stop(sc);
399 }
400 break;
401 case SIOCGIFMEDIA:
402 case SIOCSIFMEDIA:
403 error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, command);
404 break;
405 default:
406 error = ether_ioctl(ifp, &sc->sc_ac, command, data);
407 break;
408 }
409
410 if (error == ENETRESET) {
411 if (ifp->if_flags & IFF_RUNNING)
412 xnf_iff(sc);
413 error = 0;
414 }
415
416 splx(s);
417
418 return (error);
419 }
420
421 int
422 xnf_media_change(struct ifnet *ifp)
423 {
424 return (0);
425 }
426
427 void
428 xnf_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
429 {
430 ifmr->ifm_status = IFM_ACTIVE | IFM_AVALID;
431 ifmr->ifm_active = IFM_ETHER | IFM_MANUAL;
432 }
433
434 int
435 xnf_iff(struct xnf_softc *sc)
436 {
437 return (0);
438 }
439
440 void
441 xnf_init(struct xnf_softc *sc)
442 {
443 struct ifnet *ifp = &sc->sc_ac.ac_if;
444
445 xnf_stop(sc);
446
447 xnf_iff(sc);
448
449 xnf_rx_ring_fill(sc);
450
451 if (xen_intr_unmask(sc->sc_xih)) {
452 printf("%s: failed to enable interrupts\n", ifp->if_xname);
453 xnf_stop(sc);
454 return;
455 }
456
457 ifp->if_flags |= IFF_RUNNING;
458 ifq_clr_oactive(&ifp->if_snd);
459 }
460
461 void
462 xnf_stop(struct xnf_softc *sc)
463 {
464 struct ifnet *ifp = &sc->sc_ac.ac_if;
465
466 ifp->if_flags &= ~IFF_RUNNING;
467
468 xen_intr_mask(sc->sc_xih);
469
470 ifp->if_timer = 0;
471
472 ifq_barrier(&ifp->if_snd);
473 xen_intr_barrier(sc->sc_xih);
474
475 ifq_clr_oactive(&ifp->if_snd);
476
477 if (sc->sc_tx_ring)
478 xnf_tx_ring_drain(sc);
479 if (sc->sc_rx_ring)
480 xnf_rx_ring_drain(sc);
481 }
482
483 void
484 xnf_start(struct ifqueue *ifq)
485 {
486 struct ifnet *ifp = ifq->ifq_if;
487 struct xnf_softc *sc = ifp->if_softc;
488 struct xnf_tx_ring *txr = sc->sc_tx_ring;
489 struct mbuf *m;
490 int pkts = 0;
491 uint32_t prod, oprod;
492
493 bus_dmamap_sync(sc->sc_dmat, sc->sc_tx_rmap, 0, 0,
494 BUS_DMASYNC_POSTREAD);
495
496 prod = oprod = txr->txr_prod;
497
498 for (;;) {
499 if (((XNF_TX_DESC - (prod - sc->sc_tx_cons)) <
500 sc->sc_tx_frags) || !sc->sc_tx_avail) {
501 /* transient */
502 ifq_set_oactive(ifq);
503 break;
504 }
505
506 m = ifq_dequeue(ifq);
507 if (m == NULL)
508 break;
509
510 if (xnf_encap(sc, m, &prod)) {
511 /* the chain is too large */
512 ifp->if_oerrors++;
513 m_freem(m);
514 continue;
515 }
516
517 #if NBPFILTER > 0
518 if (ifp->if_bpf)
519 bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_OUT);
520 #endif
521 pkts++;
522 }
523 if (pkts > 0) {
524 txr->txr_prod = prod;
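		/*
		 * Re-arm the Tx completion event: ask the backend to
		 * interrupt us once roughly half of the now-outstanding
		 * requests have completed.
		 */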
525 if (txr->txr_cons_event <= txr->txr_cons)
526 txr->txr_cons_event = txr->txr_cons +
527 ((txr->txr_prod - txr->txr_cons) >> 1) + 1;
528 bus_dmamap_sync(sc->sc_dmat, sc->sc_tx_rmap, 0, 0,
529 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
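		/*
		 * Notify the backend only if the event index it is waiting
		 * on falls within the range of requests just produced, i.e.
		 * it has not already seen newer work.
		 */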
530 if (prod - txr->txr_prod_event < prod - oprod)
531 xen_intr_signal(sc->sc_xih);
532 ifp->if_timer = 5;
533 }
534 }
535
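/*
 * Count how many Tx descriptors an mbuf chain will need: a descriptor
 * covers at most one page of a buffer, so a buffer that crosses page
 * boundaries costs one extra descriptor per crossing.
 */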
536 static inline int
537 xnf_fragcount(struct mbuf *m_head)
538 {
539 struct mbuf *m;
540 vaddr_t va, va0;
541 int n = 0;
542
543 for (m = m_head; m != NULL; m = m->m_next) {
544 if (m->m_len == 0)
545 continue;
546 /* start of the buffer */
547 for (va0 = va = mtod(m, vaddr_t);
548 /* does the buffer end on this page? */
549 va + (PAGE_SIZE - (va & PAGE_MASK)) < va0 + m->m_len;
550 /* move on to the next page */
551 va += PAGE_SIZE - (va & PAGE_MASK))
552 n++;
553 n++;
554 }
555 return (n);
556 }
557
558 int
559 xnf_encap(struct xnf_softc *sc, struct mbuf *m_head, uint32_t *prod)
560 {
561 struct xnf_tx_ring *txr = sc->sc_tx_ring;
562 struct xnf_tx_buf *txb = NULL;
563 union xnf_tx_desc *txd = NULL;
564 struct mbuf *m, **next;
565 uint32_t oprod = *prod;
566 uint16_t id;
567 int i, flags, n, used = 0;
568
569 if ((xnf_fragcount(m_head) > sc->sc_tx_frags) &&
570 m_defrag(m_head, M_DONTWAIT))
571 return (ENOBUFS);
572
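	/*
	 * The peer domain id rides in the upper 16 bits of the bus_dma
	 * flags so the Xen bus_dma backend can grant the mapped pages to
	 * that domain; the ds_addr values produced below are grant
	 * references, not physical addresses.
	 */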
573 flags = (sc->sc_domid << 16) | BUS_DMA_WRITE | BUS_DMA_NOWAIT;
574
575 next = &m_head->m_next;
576 for (m = m_head; m != NULL; m = *next) {
577 /* Unlink and free zero length nodes. */
578 if (m->m_len == 0) {
579 *next = m->m_next;
580 m_free(m);
581 continue;
582 }
583 next = &m->m_next;
584
585 i = *prod & (XNF_TX_DESC - 1);
586 txd = &txr->txr_desc[i];
587
588 /*
589 * Find an unused TX buffer. We're guaranteed to find one
590 * because xnf_encap cannot be called with sc_tx_avail == 0.
591 */
592 do {
593 id = sc->sc_tx_next++ & (XNF_TX_DESC - 1);
594 txb = &sc->sc_tx_buf[id];
595 } while (txb->txb_mbuf);
596
597 if (bus_dmamap_load(sc->sc_dmat, txb->txb_dmap, m->m_data,
598 m->m_len, NULL, flags)) {
599 DPRINTF("%s: failed to load %u bytes @%lu\n",
600 sc->sc_dev.dv_xname, m->m_len,
601 mtod(m, vaddr_t) & PAGE_MASK);
602 goto unroll;
603 }
604
605 for (n = 0; n < txb->txb_dmap->dm_nsegs; n++) {
606 i = *prod & (XNF_TX_DESC - 1);
607 txd = &txr->txr_desc[i];
608
609 if (m == m_head && n == 0) {
610 if (m->m_pkthdr.csum_flags &
611 (M_TCP_CSUM_OUT | M_UDP_CSUM_OUT))
612 txd->txd_req.txq_flags =
613 XNF_TXF_CSUM_BLANK |
614 XNF_TXF_CSUM_VALID;
615 txd->txd_req.txq_size = m->m_pkthdr.len;
616 } else {
617 txd->txd_req.txq_size =
618 txb->txb_dmap->dm_segs[n].ds_len;
619 }
620 txd->txd_req.txq_ref =
621 txb->txb_dmap->dm_segs[n].ds_addr;
622 if (n == 0)
623 txd->txd_req.txq_offset =
624 mtod(m, vaddr_t) & PAGE_MASK;
625 /* The chunk flag will be removed from the last one */
626 txd->txd_req.txq_flags |= XNF_TXF_CHUNK;
627 txd->txd_req.txq_id = id;
628
629 txb->txb_ndesc++;
630 (*prod)++;
631 }
632
633 txb->txb_mbuf = m;
634 used++;
635 }
636
637 /* Clear the chunk flag from the last segment */
638 txd->txd_req.txq_flags &= ~XNF_TXF_CHUNK;
639 bus_dmamap_sync(sc->sc_dmat, sc->sc_tx_rmap, 0, 0,
640 BUS_DMASYNC_PREWRITE);
641
642 KASSERT(sc->sc_tx_avail > used);
643 atomic_sub_int(&sc->sc_tx_avail, used);
644
645 return (0);
646
647 unroll:
648 DPRINTF("%s: unrolling from %u to %u\n", sc->sc_dev.dv_xname,
649 *prod, oprod);
650 for (; *prod != oprod; (*prod)--) {
651 i = (*prod - 1) & (XNF_TX_DESC - 1);
652 txd = &txr->txr_desc[i];
653 id = txd->txd_req.txq_id;
654 txb = &sc->sc_tx_buf[id];
655
656 memset(txd, 0, sizeof(*txd));
657
658 if (txb->txb_mbuf) {
659 bus_dmamap_sync(sc->sc_dmat, txb->txb_dmap, 0, 0,
660 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
661 bus_dmamap_unload(sc->sc_dmat, txb->txb_dmap);
662
663 txb->txb_mbuf = NULL;
664 txb->txb_ndesc = 0;
665 }
666 }
667 return (ENOBUFS);
668 }
669
670 void
671 xnf_intr(void *arg)
672 {
673 struct xnf_softc *sc = arg;
674 struct ifnet *ifp = &sc->sc_ac.ac_if;
675
676 if (ifp->if_flags & IFF_RUNNING) {
677 xnf_txeof(sc);
678 xnf_rxeof(sc);
679 }
680 }
681
682 void
683 xnf_watchdog(struct ifnet *ifp)
684 {
685 struct xnf_softc *sc = ifp->if_softc;
686 struct xnf_tx_ring *txr = sc->sc_tx_ring;
687
688 printf("%s: tx stuck: prod %u cons %u,%u evt %u,%u\n",
689 ifp->if_xname, txr->txr_prod, txr->txr_cons, sc->sc_tx_cons,
690 txr->txr_prod_event, txr->txr_cons_event);
691 }
692
693 void
694 xnf_txeof(struct xnf_softc *sc)
695 {
696 struct ifnet *ifp = &sc->sc_ac.ac_if;
697 struct xnf_tx_ring *txr = sc->sc_tx_ring;
698 struct xnf_tx_buf *txb;
699 union xnf_tx_desc *txd;
700 uint done = 0;
701 uint32_t cons;
702 uint16_t id;
703 int i;
704
705 bus_dmamap_sync(sc->sc_dmat, sc->sc_tx_rmap, 0, 0,
706 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
707
708 for (cons = sc->sc_tx_cons; cons != txr->txr_cons; cons++) {
709 i = cons & (XNF_TX_DESC - 1);
710 txd = &txr->txr_desc[i];
711 id = txd->txd_rsp.txp_id;
712 KASSERT(id < XNF_TX_DESC);
713 txb = &sc->sc_tx_buf[id];
714
715 KASSERT(txb->txb_ndesc > 0);
716 if (--txb->txb_ndesc == 0) {
717 bus_dmamap_sync(sc->sc_dmat, txb->txb_dmap, 0, 0,
718 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
719 bus_dmamap_unload(sc->sc_dmat, txb->txb_dmap);
720
721 m_free(txb->txb_mbuf);
722 txb->txb_mbuf = NULL;
723 done++;
724 }
725
726 memset(txd, 0, sizeof(*txd));
727 }
728
729 sc->sc_tx_cons = cons;
730 txr->txr_cons_event = sc->sc_tx_cons +
731 ((txr->txr_prod - sc->sc_tx_cons) >> 1) + 1;
732 bus_dmamap_sync(sc->sc_dmat, sc->sc_tx_rmap, 0, 0,
733 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
734
735 atomic_add_int(&sc->sc_tx_avail, done);
736
737 if (sc->sc_tx_cons == txr->txr_prod)
738 ifp->if_timer = 0;
739 if (ifq_is_oactive(&ifp->if_snd))
740 ifq_restart(&ifp->if_snd);
741 }
742
743 void
744 xnf_rxeof(struct xnf_softc *sc)
745 {
746 struct ifnet *ifp = &sc->sc_ac.ac_if;
747 struct xnf_rx_ring *rxr = sc->sc_rx_ring;
748 union xnf_rx_desc *rxd;
749 struct mbuf_list ml = MBUF_LIST_INITIALIZER();
750 struct mbuf *fmp = sc->sc_rx_cbuf[0];
751 struct mbuf *lmp = sc->sc_rx_cbuf[1];
752 struct mbuf *m;
753 bus_dmamap_t dmap;
754 uint32_t cons;
755 uint16_t id;
756 int i, flags, len, offset;
757
758 bus_dmamap_sync(sc->sc_dmat, sc->sc_rx_rmap, 0, 0,
759 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
760
761 for (cons = sc->sc_rx_cons; cons != rxr->rxr_cons; cons++) {
762 i = cons & (XNF_RX_DESC - 1);
763 rxd = &rxr->rxr_desc[i];
764
765 id = rxd->rxd_rsp.rxp_id;
766 len = rxd->rxd_rsp.rxp_status;
767 flags = rxd->rxd_rsp.rxp_flags;
768 offset = rxd->rxd_rsp.rxp_offset;
769
770 KASSERT(id < XNF_RX_DESC);
771
772 dmap = sc->sc_rx_dmap[id];
773 bus_dmamap_sync(sc->sc_dmat, dmap, 0, 0,
774 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
775 bus_dmamap_unload(sc->sc_dmat, dmap);
776
777 m = sc->sc_rx_buf[id];
778 KASSERT(m != NULL);
779 sc->sc_rx_buf[id] = NULL;
780
781 if (flags & XNF_RXF_MGMT) {
782 printf("%s: management data present\n",
783 ifp->if_xname);
784 m_freem(m);
785 continue;
786 }
787
788 if (flags & XNF_RXF_CSUM_VALID)
789 m->m_pkthdr.csum_flags = M_TCP_CSUM_IN_OK |
790 M_UDP_CSUM_IN_OK;
791
792 if (len < 0 || (len + offset > PAGE_SIZE)) {
793 ifp->if_ierrors++;
794 m_freem(m);
795 continue;
796 }
797
798 m->m_len = len;
799 m->m_data += offset;
800
801 if (fmp == NULL) {
802 m->m_pkthdr.len = len;
803 fmp = m;
804 } else {
805 m->m_flags &= ~M_PKTHDR;
806 lmp->m_next = m;
807 fmp->m_pkthdr.len += m->m_len;
808 }
809 lmp = m;
810
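		/*
		 * A chunked packet continues in subsequent descriptors;
		 * remember the head and tail of the partially assembled
		 * chain so a later response (possibly in a later interrupt)
		 * can extend it.
		 */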
811 if (flags & XNF_RXF_CHUNK) {
812 sc->sc_rx_cbuf[0] = fmp;
813 sc->sc_rx_cbuf[1] = lmp;
814 continue;
815 }
816
817 m = fmp;
818
819 ml_enqueue(&ml, m);
820 sc->sc_rx_cbuf[0] = sc->sc_rx_cbuf[1] = fmp = lmp = NULL;
821
822 memset(rxd, 0, sizeof(*rxd));
823 rxd->rxd_req.rxq_id = id;
824 }
825
826 sc->sc_rx_cons = cons;
827 rxr->rxr_cons_event = sc->sc_rx_cons + 1;
828 bus_dmamap_sync(sc->sc_dmat, sc->sc_rx_rmap, 0, 0,
829 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
830
831 if_input(ifp, &ml);
832
833 if (xnf_rx_ring_fill(sc) || (sc->sc_rx_cons != rxr->rxr_cons))
834 xen_intr_schedule(sc->sc_xih);
835 }
836
837 int
838 xnf_rx_ring_fill(struct xnf_softc *sc)
839 {
840 struct ifnet *ifp = &sc->sc_ac.ac_if;
841 struct xnf_rx_ring *rxr = sc->sc_rx_ring;
842 union xnf_rx_desc *rxd;
843 bus_dmamap_t dmap;
844 struct mbuf *m;
845 uint32_t cons, prod, oprod;
846 uint16_t id;
847 int i, flags, resched = 0;
848
849 cons = rxr->rxr_cons;
850 prod = oprod = rxr->rxr_prod;
851
852 while (prod - cons < XNF_RX_DESC) {
853 i = prod & (XNF_RX_DESC - 1);
854 rxd = &rxr->rxr_desc[i];
855
856 id = rxd->rxd_rsp.rxp_id;
857 KASSERT(id < XNF_RX_DESC);
858 if (sc->sc_rx_buf[id])
859 break;
860 m = MCLGETL(NULL, M_DONTWAIT, XNF_MCLEN);
861 if (m == NULL)
862 break;
863 m->m_len = m->m_pkthdr.len = XNF_MCLEN;
864 dmap = sc->sc_rx_dmap[id];
865 flags = (sc->sc_domid << 16) | BUS_DMA_READ | BUS_DMA_NOWAIT;
866 if (bus_dmamap_load_mbuf(sc->sc_dmat, dmap, m, flags)) {
867 m_freem(m);
868 break;
869 }
870 sc->sc_rx_buf[id] = m;
871 rxd->rxd_req.rxq_ref = dmap->dm_segs[0].ds_addr;
872 bus_dmamap_sync(sc->sc_dmat, dmap, 0, 0, BUS_DMASYNC_PREWRITE);
873 prod++;
874 }
875
876 rxr->rxr_prod = prod;
877 bus_dmamap_sync(sc->sc_dmat, sc->sc_rx_rmap, 0, 0,
878 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
879
880 if ((prod - cons < XNF_RX_MIN) && (ifp->if_flags & IFF_RUNNING))
881 resched = 1;
882 if (prod - rxr->rxr_prod_event < prod - oprod)
883 xen_intr_signal(sc->sc_xih);
884
885 return (resched);
886 }
887
888 int
889 xnf_rx_ring_create(struct xnf_softc *sc)
890 {
891 int i, flags, rsegs;
892
893 /* Allocate a page of memory for the ring */
894 if (bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
895 &sc->sc_rx_seg, 1, &rsegs, BUS_DMA_ZERO | BUS_DMA_NOWAIT)) {
896 printf("%s: failed to allocate memory for the rx ring\n",
897 sc->sc_dev.dv_xname);
898 return (-1);
899 }
900 /* Map in the allocated memory into the ring structure */
901 if (bus_dmamem_map(sc->sc_dmat, &sc->sc_rx_seg, 1, PAGE_SIZE,
902 (caddr_t *)(&sc->sc_rx_ring), BUS_DMA_NOWAIT)) {
903 printf("%s: failed to map memory for the rx ring\n",
904 sc->sc_dev.dv_xname);
905 goto errout;
906 }
907 /* Create a map to load the ring memory into */
908 if (bus_dmamap_create(sc->sc_dmat, PAGE_SIZE, 1, PAGE_SIZE, 0,
909 BUS_DMA_NOWAIT, &sc->sc_rx_rmap)) {
910 printf("%s: failed to create a memory map for the rx ring\n",
911 sc->sc_dev.dv_xname);
912 goto errout;
913 }
914 /* Load the ring into the ring map to extract the PA */
915 flags = (sc->sc_domid << 16) | BUS_DMA_NOWAIT;
916 if (bus_dmamap_load(sc->sc_dmat, sc->sc_rx_rmap, sc->sc_rx_ring,
917 PAGE_SIZE, NULL, flags)) {
918 printf("%s: failed to load the rx ring map\n",
919 sc->sc_dev.dv_xname);
920 goto errout;
921 }
922 sc->sc_rx_ref = sc->sc_rx_rmap->dm_segs[0].ds_addr;
923
924 sc->sc_rx_ring->rxr_prod_event = sc->sc_rx_ring->rxr_cons_event = 1;
925
926 for (i = 0; i < XNF_RX_DESC; i++) {
927 if (bus_dmamap_create(sc->sc_dmat, XNF_MCLEN, 1, XNF_MCLEN,
928 PAGE_SIZE, BUS_DMA_NOWAIT, &sc->sc_rx_dmap[i])) {
929 printf("%s: failed to create a memory map for the"
930 " rx slot %d\n", sc->sc_dev.dv_xname, i);
931 goto errout;
932 }
933 sc->sc_rx_ring->rxr_desc[i].rxd_req.rxq_id = i;
934 }
935
936 return (0);
937
938 errout:
939 xnf_rx_ring_destroy(sc);
940 return (-1);
941 }
942
943 void
944 xnf_rx_ring_drain(struct xnf_softc *sc)
945 {
946 struct xnf_rx_ring *rxr = sc->sc_rx_ring;
947
948 if (sc->sc_rx_cons != rxr->rxr_cons)
949 xnf_rxeof(sc);
950 }
951
952 void
953 xnf_rx_ring_destroy(struct xnf_softc *sc)
954 {
955 int i;
956
957 for (i = 0; i < XNF_RX_DESC; i++) {
958 if (sc->sc_rx_buf[i] == NULL)
959 continue;
960 bus_dmamap_sync(sc->sc_dmat, sc->sc_rx_dmap[i], 0, 0,
961 BUS_DMASYNC_POSTREAD);
962 bus_dmamap_unload(sc->sc_dmat, sc->sc_rx_dmap[i]);
963 m_freem(sc->sc_rx_buf[i]);
964 sc->sc_rx_buf[i] = NULL;
965 }
966
967 for (i = 0; i < XNF_RX_DESC; i++) {
968 if (sc->sc_rx_dmap[i] == NULL)
969 continue;
970 bus_dmamap_destroy(sc->sc_dmat, sc->sc_rx_dmap[i]);
971 sc->sc_rx_dmap[i] = NULL;
972 }
973 if (sc->sc_rx_rmap) {
974 bus_dmamap_sync(sc->sc_dmat, sc->sc_rx_rmap, 0, 0,
975 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
976 bus_dmamap_unload(sc->sc_dmat, sc->sc_rx_rmap);
977 bus_dmamap_destroy(sc->sc_dmat, sc->sc_rx_rmap);
978 }
979 if (sc->sc_rx_ring) {
980 bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_rx_ring,
981 PAGE_SIZE);
982 bus_dmamem_free(sc->sc_dmat, &sc->sc_rx_seg, 1);
983 }
984 sc->sc_rx_ring = NULL;
985 sc->sc_rx_rmap = NULL;
986 sc->sc_rx_cons = 0;
987 }
988
989 int
990 xnf_tx_ring_create(struct xnf_softc *sc)
991 {
992 struct ifnet *ifp = &sc->sc_ac.ac_if;
993 int i, flags, nsegs, rsegs;
994 bus_size_t segsz;
995
996 sc->sc_tx_frags = sc->sc_caps & XNF_CAP_SG ? XNF_TX_FRAG : 1;
997
998 /* Allocate a page of memory for the ring */
999 if (bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
1000 &sc->sc_tx_seg, 1, &rsegs, BUS_DMA_ZERO | BUS_DMA_NOWAIT)) {
1001 printf("%s: failed to allocate memory for the tx ring\n",
1002 sc->sc_dev.dv_xname);
1003 return (-1);
1004 }
1005 /* Map in the allocated memory into the ring structure */
1006 if (bus_dmamem_map(sc->sc_dmat, &sc->sc_tx_seg, 1, PAGE_SIZE,
1007 (caddr_t *)&sc->sc_tx_ring, BUS_DMA_NOWAIT)) {
1008 printf("%s: failed to map memory for the tx ring\n",
1009 sc->sc_dev.dv_xname);
1010 goto errout;
1011 }
1012 /* Create a map to load the ring memory into */
1013 if (bus_dmamap_create(sc->sc_dmat, PAGE_SIZE, 1, PAGE_SIZE, 0,
1014 BUS_DMA_NOWAIT, &sc->sc_tx_rmap)) {
1015 printf("%s: failed to create a memory map for the tx ring\n",
1016 sc->sc_dev.dv_xname);
1017 goto errout;
1018 }
1019 /* Load the ring into the ring map to extract the PA */
1020 flags = (sc->sc_domid << 16) | BUS_DMA_NOWAIT;
1021 if (bus_dmamap_load(sc->sc_dmat, sc->sc_tx_rmap, sc->sc_tx_ring,
1022 PAGE_SIZE, NULL, flags)) {
1023 printf("%s: failed to load the tx ring map\n",
1024 sc->sc_dev.dv_xname);
1025 goto errout;
1026 }
1027 sc->sc_tx_ref = sc->sc_tx_rmap->dm_segs[0].ds_addr;
1028
1029 sc->sc_tx_ring->txr_prod_event = sc->sc_tx_ring->txr_cons_event = 1;
1030
1031 if (sc->sc_caps & XNF_CAP_SG) {
1032 nsegs = roundup(ifp->if_hardmtu, XNF_MCLEN) / XNF_MCLEN + 1;
1033 segsz = nsegs * XNF_MCLEN;
1034 } else {
1035 nsegs = 1;
1036 segsz = XNF_MCLEN;
1037 }
1038 for (i = 0; i < XNF_TX_DESC; i++) {
1039 if (bus_dmamap_create(sc->sc_dmat, segsz, nsegs, XNF_MCLEN,
1040 PAGE_SIZE, BUS_DMA_NOWAIT, &sc->sc_tx_buf[i].txb_dmap)) {
1041 printf("%s: failed to create a memory map for the"
1042 " tx slot %d\n", sc->sc_dev.dv_xname, i);
1043 goto errout;
1044 }
1045 }
1046
1047 sc->sc_tx_avail = XNF_TX_DESC;
1048 sc->sc_tx_next = 0;
1049
1050 return (0);
1051
1052 errout:
1053 xnf_tx_ring_destroy(sc);
1054 return (-1);
1055 }
1056
1057 void
1058 xnf_tx_ring_drain(struct xnf_softc *sc)
1059 {
1060 struct xnf_tx_ring *txr = sc->sc_tx_ring;
1061
1062 if (sc->sc_tx_cons != txr->txr_cons)
1063 xnf_txeof(sc);
1064 }
1065
1066 void
1067 xnf_tx_ring_destroy(struct xnf_softc *sc)
1068 {
1069 int i;
1070
1071 for (i = 0; i < XNF_TX_DESC; i++) {
1072 if (sc->sc_tx_buf[i].txb_dmap == NULL)
1073 continue;
1074 bus_dmamap_sync(sc->sc_dmat, sc->sc_tx_buf[i].txb_dmap, 0, 0,
1075 BUS_DMASYNC_POSTWRITE);
1076 bus_dmamap_unload(sc->sc_dmat, sc->sc_tx_buf[i].txb_dmap);
1077 bus_dmamap_destroy(sc->sc_dmat, sc->sc_tx_buf[i].txb_dmap);
1078 sc->sc_tx_buf[i].txb_dmap = NULL;
1079 if (sc->sc_tx_buf[i].txb_mbuf == NULL)
1080 continue;
1081 m_free(sc->sc_tx_buf[i].txb_mbuf);
1082 sc->sc_tx_buf[i].txb_mbuf = NULL;
1083 sc->sc_tx_buf[i].txb_ndesc = 0;
1084 }
1085 if (sc->sc_tx_rmap) {
1086 bus_dmamap_sync(sc->sc_dmat, sc->sc_tx_rmap, 0, 0,
1087 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1088 bus_dmamap_unload(sc->sc_dmat, sc->sc_tx_rmap);
1089 bus_dmamap_destroy(sc->sc_dmat, sc->sc_tx_rmap);
1090 }
1091 if (sc->sc_tx_ring) {
1092 bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_tx_ring,
1093 PAGE_SIZE);
1094 bus_dmamem_free(sc->sc_dmat, &sc->sc_tx_seg, 1);
1095 }
1096 sc->sc_tx_ring = NULL;
1097 sc->sc_tx_rmap = NULL;
1098 sc->sc_tx_avail = XNF_TX_DESC;
1099 sc->sc_tx_next = 0;
1100 }
1101
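/*
 * Probe the backend's feature announcements in XenStore.  A property
 * that is simply absent (ENOENT) means the feature is not offered and
 * is not an error; only a failed read aborts the attach.
 */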
1102 int
1103 xnf_capabilities(struct xnf_softc *sc)
1104 {
1105 unsigned long long res;
1106 const char *prop;
1107 int error;
1108
1109 /* Query scatter-gather capability */
1110 prop = "feature-sg";
1111 if ((error = xs_getnum(sc->sc_parent, sc->sc_backend, prop, &res)) != 0
1112 && error != ENOENT)
1113 goto errout;
1114 if (error == 0 && res == 1)
1115 sc->sc_caps |= XNF_CAP_SG;
1116
1117 #if 0
1118 /* Query IPv4 checksum offloading capability, enabled by default */
1119 sc->sc_caps |= XNF_CAP_CSUM4;
1120 prop = "feature-no-csum-offload";
1121 if ((error = xs_getnum(sc->sc_parent, sc->sc_backend, prop, &res)) != 0
1122 && error != ENOENT)
1123 goto errout;
1124 if (error == 0 && res == 1)
1125 sc->sc_caps &= ~XNF_CAP_CSUM4;
1126
1127 /* Query IPv6 checksum offloading capability */
1128 prop = "feature-ipv6-csum-offload";
1129 if ((error = xs_getnum(sc->sc_parent, sc->sc_backend, prop, &res)) != 0
1130 && error != ENOENT)
1131 goto errout;
1132 if (error == 0 && res == 1)
1133 sc->sc_caps |= XNF_CAP_CSUM6;
1134 #endif
1135
1136 /* Query multicast traffic control capability */
1137 prop = "feature-multicast-control";
1138 if ((error = xs_getnum(sc->sc_parent, sc->sc_backend, prop, &res)) != 0
1139 && error != ENOENT)
1140 goto errout;
1141 if (error == 0 && res == 1)
1142 sc->sc_caps |= XNF_CAP_MCAST;
1143
1144 /* Query split Rx/Tx event channel capability */
1145 prop = "feature-split-event-channels";
1146 if ((error = xs_getnum(sc->sc_parent, sc->sc_backend, prop, &res)) != 0
1147 && error != ENOENT)
1148 goto errout;
1149 if (error == 0 && res == 1)
1150 sc->sc_caps |= XNF_CAP_SPLIT;
1151
1152 /* Query multiqueue capability */
1153 prop = "multi-queue-max-queues";
1154 if ((error = xs_getnum(sc->sc_parent, sc->sc_backend, prop, &res)) != 0
1155 && error != ENOENT)
1156 goto errout;
1157 if (error == 0)
1158 sc->sc_caps |= XNF_CAP_MULTIQ;
1159
1160 DPRINTF("%s: capabilities %b\n", sc->sc_dev.dv_xname, sc->sc_caps,
1161 "\20\006MULTIQ\005SPLIT\004MCAST\003CSUM6\002CSUM4\001SG");
1162 return (0);
1163
1164 errout:
1165 printf("%s: failed to read \"%s\" property\n", sc->sc_dev.dv_xname,
1166 prop);
1167 return (-1);
1168 }
1169
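/*
 * Publish the frontend half of the negotiation in XenStore: the ring
 * grant references, the features we want enabled and the event channel
 * port, then switch the device state to Connected so the backend starts
 * processing.
 */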
1170 int
1171 xnf_init_backend(struct xnf_softc *sc)
1172 {
1173 const char *prop;
1174
1175 /* Plumb the Rx ring */
1176 prop = "rx-ring-ref";
1177 if (xs_setnum(sc->sc_parent, sc->sc_node, prop, sc->sc_rx_ref))
1178 goto errout;
1179 /* Enable "copy" mode */
1180 prop = "request-rx-copy";
1181 if (xs_setnum(sc->sc_parent, sc->sc_node, prop, 1))
1182 goto errout;
1183 /* Enable notify mode */
1184 prop = "feature-rx-notify";
1185 if (xs_setnum(sc->sc_parent, sc->sc_node, prop, 1))
1186 goto errout;
1187
1188 /* Plumb the Tx ring */
1189 prop = "tx-ring-ref";
1190 if (xs_setnum(sc->sc_parent, sc->sc_node, prop, sc->sc_tx_ref))
1191 goto errout;
1192 /* Enable scatter-gather mode */
1193 if (sc->sc_tx_frags > 1) {
1194 prop = "feature-sg";
1195 if (xs_setnum(sc->sc_parent, sc->sc_node, prop, 1))
1196 goto errout;
1197 }
1198
1199 /* Disable IPv4 checksum offloading */
1200 if (!(sc->sc_caps & XNF_CAP_CSUM4)) {
1201 prop = "feature-no-csum-offload";
1202 if (xs_setnum(sc->sc_parent, sc->sc_node, prop, 1))
1203 goto errout;
1204 }
1205
1206 /* Enable IPv6 checksum offloading */
1207 if (sc->sc_caps & XNF_CAP_CSUM6) {
1208 prop = "feature-ipv6-csum-offload";
1209 if (xs_setnum(sc->sc_parent, sc->sc_node, prop, 1))
1210 goto errout;
1211 }
1212
1213 /* Plumb the event channel port */
1214 prop = "event-channel";
1215 if (xs_setnum(sc->sc_parent, sc->sc_node, prop, sc->sc_xih))
1216 goto errout;
1217
1218 /* Connect the device */
1219 prop = "state";
1220 if (xs_setprop(sc->sc_parent, sc->sc_node, prop, XEN_STATE_CONNECTED,
1221 strlen(XEN_STATE_CONNECTED)))
1222 goto errout;
1223
1224 return (0);
1225
1226 errout:
1227 printf("%s: failed to set \"%s\" property\n", sc->sc_dev.dv_xname, prop);
1228 return (-1);
1229 }
1230