/*-
 * Copyright (c) 2004-2006 Kip Macy
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/queue.h>
#include <sys/lock.h>
#include <sys/sx.h>

#include <net/if.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#include <net/bpf.h>

#include <net/if_types.h>
#include <net/if.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/if_ether.h>
#if __FreeBSD_version >= 700000
#include <netinet/tcp.h>
#include <netinet/tcp_lro.h>
#endif

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/clock.h>	/* for DELAY */
#include <machine/bus.h>
#include <machine/resource.h>
#include <machine/frame.h>
#include <machine/vmparam.h>

#include <sys/bus.h>
#include <sys/rman.h>

#include <machine/intr_machdep.h>

#include <machine/xen/xen-os.h>
#include <machine/xen/xenfunc.h>
#include <xen/hypervisor.h>
#include <xen/xen_intr.h>
#include <xen/evtchn.h>
#include <xen/gnttab.h>
#include <xen/interface/memory.h>
#include <xen/interface/io/netif.h>
#include <xen/xenbus/xenbusvar.h>

#include <dev/xen/netfront/mbufq.h>

#include "xenbus_if.h"

#define XN_CSUM_FEATURES	(CSUM_TCP | CSUM_UDP | CSUM_TSO)

#define NET_TX_RING_SIZE __RING_SIZE((netif_tx_sring_t *)0, PAGE_SIZE)
#define NET_RX_RING_SIZE __RING_SIZE((netif_rx_sring_t *)0, PAGE_SIZE)
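/*
 * __RING_SIZE() computes how many request/response unions fit in a
 * single shared page, rounded down to a power of two.  With the usual
 * 4 KB page size both rings work out to 256 entries each (illustrative;
 * the actual value depends on PAGE_SIZE and the netif ring layout).
 */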
#if __FreeBSD_version >= 700000
/*
 * Should the driver do LRO on the RX end?  This can be toggled on the
 * fly, but the interface must be reset (down/up) for it to take effect.
 */
static int xn_enable_lro = 1;
TUNABLE_INT("hw.xn.enable_lro", &xn_enable_lro);
#else

#define IFCAP_TSO4 0
#define CSUM_TSO 0

#endif

#ifdef CONFIG_XEN
static int MODPARM_rx_copy = 0;
module_param_named(rx_copy, MODPARM_rx_copy, bool, 0);
MODULE_PARM_DESC(rx_copy, "Copy packets from network card (rather than flip)");
static int MODPARM_rx_flip = 0;
module_param_named(rx_flip, MODPARM_rx_flip, bool, 0);
MODULE_PARM_DESC(rx_flip, "Flip packets from network card (rather than copy)");
#else
static const int MODPARM_rx_copy = 1;
static const int MODPARM_rx_flip = 0;
#endif

/**
 * \brief The maximum allowed data fragments in a single transmit
 *        request.
 *
 * This limit is imposed by the backend driver.  We assume here that
 * we are dealing with a Linux driver domain and have set our limit
 * to mirror the Linux MAX_SKB_FRAGS constant.
 */
#define MAX_TX_REQ_FRAGS (65536 / PAGE_SIZE + 2)

#define RX_COPY_THRESHOLD 256

#define net_ratelimit() 0

struct netfront_info;
struct netfront_rx_info;

static void xn_txeof(struct netfront_info *);
static void xn_rxeof(struct netfront_info *);
static void network_alloc_rx_buffers(struct netfront_info *);

static void xn_tick_locked(struct netfront_info *);
static void xn_tick(void *);

static void xn_intr(void *);
static inline int xn_count_frags(struct mbuf *m);
static int xn_assemble_tx_request(struct netfront_info *sc,
    struct mbuf *m_head);
static void xn_start_locked(struct ifnet *);
static void xn_start(struct ifnet *);
static int xn_ioctl(struct ifnet *, u_long, caddr_t);
static void xn_ifinit_locked(struct netfront_info *);
static void xn_ifinit(void *);
static void xn_stop(struct netfront_info *);
#ifdef notyet
static void xn_watchdog(struct ifnet *);
#endif

static void show_device(struct netfront_info *sc);
#ifdef notyet
static void netfront_closing(device_t dev);
#endif
static void netif_free(struct netfront_info *info);
static int netfront_detach(device_t dev);

static int talk_to_backend(device_t dev, struct netfront_info *info);
static int create_netdev(device_t dev);
static void netif_disconnect_backend(struct netfront_info *info);
static int setup_device(device_t dev, struct netfront_info *info);
static void end_access(int ref, void *page);

static int xn_ifmedia_upd(struct ifnet *ifp);
static void xn_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr);

/* Xenolinux helper functions */
int network_connect(struct netfront_info *);

static void xn_free_rx_ring(struct netfront_info *);

static void xn_free_tx_ring(struct netfront_info *);

static int xennet_get_responses(struct netfront_info *np,
    struct netfront_rx_info *rinfo, RING_IDX rp, RING_IDX *cons,
    struct mbuf **list, int *pages_flipped_p);

#define virt_to_mfn(x) (vtomach(x) >> PAGE_SHIFT)

#define INVALID_P2M_ENTRY (~0UL)

/*
 * Mbuf pointers.  We need these to keep track of the virtual addresses
 * of our mbuf chains since we can only convert from virtual to physical,
 * not the other way around.  The size must track the free index arrays.
 */
struct xn_chain_data {
	struct mbuf	*xn_tx_chain[NET_TX_RING_SIZE+1];
	int		 xn_tx_chain_cnt;
	struct mbuf	*xn_rx_chain[NET_RX_RING_SIZE+1];
};

#define NUM_ELEMENTS(x) (sizeof(x)/sizeof(*x))

struct net_device_stats
{
	u_long	rx_packets;		/* total packets received	*/
	u_long	tx_packets;		/* total packets transmitted	*/
	u_long	rx_bytes;		/* total bytes received		*/
	u_long	tx_bytes;		/* total bytes transmitted	*/
	u_long	rx_errors;		/* bad packets received		*/
	u_long	tx_errors;		/* packet transmit problems	*/
	u_long	rx_dropped;		/* no space in linux buffers	*/
	u_long	tx_dropped;		/* no space available in linux	*/
	u_long	multicast;		/* multicast packets received	*/
	u_long	collisions;

	/* detailed rx_errors: */
	u_long	rx_length_errors;
	u_long	rx_over_errors;		/* receiver ring buff overflow	*/
	u_long	rx_crc_errors;		/* recved pkt with crc error	*/
	u_long	rx_frame_errors;	/* recv'd frame alignment error	*/
	u_long	rx_fifo_errors;		/* recv'r fifo overrun		*/
	u_long	rx_missed_errors;	/* receiver missed packet	*/

	/* detailed tx_errors */
	u_long	tx_aborted_errors;
	u_long	tx_carrier_errors;
	u_long	tx_fifo_errors;
	u_long	tx_heartbeat_errors;
	u_long	tx_window_errors;

	/* for cslip etc */
	u_long	rx_compressed;
	u_long	tx_compressed;
};

struct netfront_info {

	struct ifnet *xn_ifp;
#if __FreeBSD_version >= 700000
	struct lro_ctrl xn_lro;
#endif

	struct net_device_stats stats;
	u_int tx_full;

	netif_tx_front_ring_t tx;
	netif_rx_front_ring_t rx;

	struct mtx tx_lock;
	struct mtx rx_lock;
	struct mtx sc_lock;

	u_int handle;
	u_int irq;
	u_int copying_receiver;
	u_int carrier;

	/* Receive-ring batched refills. */
#define RX_MIN_TARGET 32
#define RX_MAX_TARGET NET_RX_RING_SIZE
	int rx_min_target;
	int rx_max_target;
	int rx_target;

	grant_ref_t gref_tx_head;
	grant_ref_t grant_tx_ref[NET_TX_RING_SIZE + 1];
	grant_ref_t gref_rx_head;
	grant_ref_t grant_rx_ref[NET_RX_RING_SIZE + 1];

	device_t		xbdev;
	int			tx_ring_ref;
	int			rx_ring_ref;
	uint8_t			mac[ETHER_ADDR_LEN];
	struct xn_chain_data	xn_cdata;	/* mbufs */
	struct mbuf_head	xn_rx_batch;	/* head of the batch queue */

	int			xn_if_flags;
	struct callout		xn_stat_ch;

	u_long			rx_pfn_array[NET_RX_RING_SIZE];
	multicall_entry_t	rx_mcl[NET_RX_RING_SIZE+1];
	mmu_update_t		rx_mmu[NET_RX_RING_SIZE];
	struct ifmedia		sc_media;
};

#define rx_mbufs xn_cdata.xn_rx_chain
#define tx_mbufs xn_cdata.xn_tx_chain

#define XN_LOCK_INIT(_sc, _name) \
	mtx_init(&(_sc)->tx_lock, #_name"_tx", "network transmit lock", MTX_DEF); \
	mtx_init(&(_sc)->rx_lock, #_name"_rx", "network receive lock", MTX_DEF); \
	mtx_init(&(_sc)->sc_lock, #_name"_sc", "netfront softc lock", MTX_DEF)

#define XN_RX_LOCK(_sc)		mtx_lock(&(_sc)->rx_lock)
#define XN_RX_UNLOCK(_sc)	mtx_unlock(&(_sc)->rx_lock)

#define XN_TX_LOCK(_sc)		mtx_lock(&(_sc)->tx_lock)
#define XN_TX_UNLOCK(_sc)	mtx_unlock(&(_sc)->tx_lock)

#define XN_LOCK(_sc)		mtx_lock(&(_sc)->sc_lock);
#define XN_UNLOCK(_sc)		mtx_unlock(&(_sc)->sc_lock);

#define XN_LOCK_ASSERT(_sc)	mtx_assert(&(_sc)->sc_lock, MA_OWNED);
#define XN_RX_LOCK_ASSERT(_sc)	mtx_assert(&(_sc)->rx_lock, MA_OWNED);
#define XN_TX_LOCK_ASSERT(_sc)	mtx_assert(&(_sc)->tx_lock, MA_OWNED);
#define XN_LOCK_DESTROY(_sc)	mtx_destroy(&(_sc)->rx_lock); \
				mtx_destroy(&(_sc)->tx_lock); \
				mtx_destroy(&(_sc)->sc_lock);

struct netfront_rx_info {
	struct netif_rx_response rx;
	struct netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
};

#define netfront_carrier_on(netif)	((netif)->carrier = 1)
#define netfront_carrier_off(netif)	((netif)->carrier = 0)
#define netfront_carrier_ok(netif)	((netif)->carrier)

/* Access macros for acquiring/freeing slots in xn_free_{tx,rx}_idxs[]. */

/*
 * Access macros for acquiring/freeing slots in tx_skbs[].
 */

static inline void
add_id_to_freelist(struct mbuf **list, uintptr_t id)
{
	KASSERT(id != 0,
	    ("%s: the head item (0) must always be free.", __func__));
	list[id] = list[0];
	list[0]  = (struct mbuf *)id;
}

static inline unsigned short
get_id_from_freelist(struct mbuf **list)
{
	uintptr_t id;

	id = (uintptr_t)list[0];
	KASSERT(id != 0,
	    ("%s: the head item (0) must always remain free.", __func__));
	list[0] = list[id];
	return (id);
}

static inline int
xennet_rxidx(RING_IDX idx)
{
	return idx & (NET_RX_RING_SIZE - 1);
}

static inline struct mbuf *
xennet_get_rx_mbuf(struct netfront_info *np, RING_IDX ri)
{
	int i = xennet_rxidx(ri);
	struct mbuf *m;

	m = np->rx_mbufs[i];
	np->rx_mbufs[i] = NULL;
	return (m);
}

static inline grant_ref_t
xennet_get_rx_ref(struct netfront_info *np, RING_IDX ri)
{
	int i = xennet_rxidx(ri);
	grant_ref_t ref = np->grant_rx_ref[i];
	KASSERT(ref != GRANT_REF_INVALID, ("Invalid grant reference!\n"));
	np->grant_rx_ref[i] = GRANT_REF_INVALID;
	return ref;
}
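/*
 * A minimal usage sketch of the freelist helpers above (illustrative
 * only; create_netdev() performs the real initialization).  The mbuf
 * array doubles as a linked list of free indices: entry 0 is the list
 * head, and each free slot stores the (cast) index of the next free
 * slot.
 *
 *	struct mbuf *list[4] = { (struct mbuf *)1, (struct mbuf *)2,
 *	    (struct mbuf *)3, NULL };		   free chain: 1 -> 2 -> 3
 *	unsigned short id = get_id_from_freelist(list);	   id == 1
 *	list[id] = some_mbuf;	   slot now holds a real mbuf pointer
 *	...
 *	add_id_to_freelist(list, id);	   slot rejoins the free chain
 *
 * This works because no valid kernel address is a small integer, so
 * entries <= NET_TX_RING_SIZE can be distinguished from real mbufs
 * (see netif_release_tx_bufs() and xn_txeof()).
 */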
#define IPRINTK(fmt, args...) \
	printf("[XEN] " fmt, ##args)
#ifdef INVARIANTS
#define WPRINTK(fmt, args...) \
	printf("[XEN] " fmt, ##args)
#else
#define WPRINTK(fmt, args...)
#endif
#ifdef DEBUG
#define DPRINTK(fmt, args...) \
	printf("[XEN] %s: " fmt, __func__, ##args)
#else
#define DPRINTK(fmt, args...)
#endif

/**
 * Read the 'mac' node at the given device's node in the store, and parse
 * that as colon-separated octets, placing the result in the given mac
 * array.  mac must be a preallocated array of length ETHER_ADDR_LEN
 * (the equivalent of Linux's ETH_ALEN from linux/if_ether.h).
 * Return 0 on success, or errno on error.
 */
static int
xen_net_read_mac(device_t dev, uint8_t mac[])
{
	int error, i;
	char *s, *e, *macstr;

	error = xs_read(XST_NIL, xenbus_get_node(dev), "mac", NULL,
	    (void **) &macstr);
	if (error)
		return (error);

	s = macstr;
	for (i = 0; i < ETHER_ADDR_LEN; i++) {
		mac[i] = strtoul(s, &e, 16);
		if (s == e || (e[0] != ':' && e[0] != 0)) {
			free(macstr, M_XENBUS);
			return (ENOENT);
		}
		s = &e[1];
	}
	free(macstr, M_XENBUS);
	return (0);
}

/**
 * Entry point to this code when a new device is created.  Allocate the
 * basic structures and the ring buffers for communication with the
 * backend, and inform the backend of the appropriate details for those.
 * Switch to Connected state.
 */
static int
netfront_probe(device_t dev)
{

	if (!strcmp(xenbus_get_type(dev), "vif")) {
		device_set_desc(dev, "Virtual Network Interface");
		return (0);
	}

	return (ENXIO);
}

static int
netfront_attach(device_t dev)
{
	int err;

	err = create_netdev(dev);
	if (err) {
		xenbus_dev_fatal(dev, err, "creating netdev");
		return err;
	}

#if __FreeBSD_version >= 700000
	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "enable_lro", CTLTYPE_INT|CTLFLAG_RW,
	    &xn_enable_lro, 0, "Large Receive Offload");
#endif

	return 0;
}

/**
 * We are reconnecting to the backend, due to a suspend/resume, or a
 * backend driver restart.  We tear down our netif structure and recreate
 * it, but leave the device-layer structures intact so that this is
 * transparent to the rest of the kernel.
 */
static int
netfront_resume(device_t dev)
{
	struct netfront_info *info = device_get_softc(dev);

	netif_disconnect_backend(info);
	return (0);
}
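/*
 * The xenbus handshake below advertises the frontend's configuration to
 * the backend through nodes under the device's xenstore path:
 * tx-ring-ref and rx-ring-ref (grant references for the shared rings),
 * event-channel (the notification port), request-rx-copy (copy vs.
 * page-flip receive), plus the feature-rx-notify, feature-sg and, on
 * FreeBSD >= 7, feature-gso-tcpv4 flags.
 */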
/* Common code used when first setting up, and when resuming. */
static int
talk_to_backend(device_t dev, struct netfront_info *info)
{
	const char *message;
	struct xs_transaction xst;
	const char *node = xenbus_get_node(dev);
	int err;

	err = xen_net_read_mac(dev, info->mac);
	if (err) {
		xenbus_dev_fatal(dev, err, "parsing %s/mac", node);
		goto out;
	}

	/* Create shared ring, alloc event channel. */
	err = setup_device(dev, info);
	if (err)
		goto out;

 again:
	err = xs_transaction_start(&xst);
	if (err) {
		xenbus_dev_fatal(dev, err, "starting transaction");
		goto destroy_ring;
	}
	err = xs_printf(xst, node, "tx-ring-ref", "%u",
	    info->tx_ring_ref);
	if (err) {
		message = "writing tx ring-ref";
		goto abort_transaction;
	}
	err = xs_printf(xst, node, "rx-ring-ref", "%u",
	    info->rx_ring_ref);
	if (err) {
		message = "writing rx ring-ref";
		goto abort_transaction;
	}
	err = xs_printf(xst, node,
	    "event-channel", "%u", irq_to_evtchn_port(info->irq));
	if (err) {
		message = "writing event-channel";
		goto abort_transaction;
	}
	err = xs_printf(xst, node, "request-rx-copy", "%u",
	    info->copying_receiver);
	if (err) {
		message = "writing request-rx-copy";
		goto abort_transaction;
	}
	err = xs_printf(xst, node, "feature-rx-notify", "%d", 1);
	if (err) {
		message = "writing feature-rx-notify";
		goto abort_transaction;
	}
	err = xs_printf(xst, node, "feature-sg", "%d", 1);
	if (err) {
		message = "writing feature-sg";
		goto abort_transaction;
	}
#if __FreeBSD_version >= 700000
	err = xs_printf(xst, node, "feature-gso-tcpv4", "%d", 1);
	if (err) {
		message = "writing feature-gso-tcpv4";
		goto abort_transaction;
	}
#endif

	err = xs_transaction_end(xst, 0);
	if (err) {
		if (err == EAGAIN)
			goto again;
		xenbus_dev_fatal(dev, err, "completing transaction");
		goto destroy_ring;
	}

	return 0;

 abort_transaction:
	xs_transaction_end(xst, 1);
	xenbus_dev_fatal(dev, err, "%s", message);
 destroy_ring:
	netif_free(info);
 out:
	return err;
}

static int
setup_device(device_t dev, struct netfront_info *info)
{
	netif_tx_sring_t *txs;
	netif_rx_sring_t *rxs;
	int error;
	struct ifnet *ifp;

	ifp = info->xn_ifp;

	info->tx_ring_ref = GRANT_REF_INVALID;
	info->rx_ring_ref = GRANT_REF_INVALID;
	info->rx.sring = NULL;
	info->tx.sring = NULL;
	info->irq = 0;

	txs = (netif_tx_sring_t *)malloc(PAGE_SIZE, M_DEVBUF, M_NOWAIT|M_ZERO);
	if (!txs) {
		error = ENOMEM;
		xenbus_dev_fatal(dev, error, "allocating tx ring page");
		goto fail;
	}
	SHARED_RING_INIT(txs);
	FRONT_RING_INIT(&info->tx, txs, PAGE_SIZE);
	error = xenbus_grant_ring(dev, virt_to_mfn(txs), &info->tx_ring_ref);
	if (error)
		goto fail;

	rxs = (netif_rx_sring_t *)malloc(PAGE_SIZE, M_DEVBUF, M_NOWAIT|M_ZERO);
	if (!rxs) {
		error = ENOMEM;
		xenbus_dev_fatal(dev, error, "allocating rx ring page");
		goto fail;
	}
	SHARED_RING_INIT(rxs);
	FRONT_RING_INIT(&info->rx, rxs, PAGE_SIZE);

	error = xenbus_grant_ring(dev, virt_to_mfn(rxs), &info->rx_ring_ref);
	if (error)
		goto fail;

	error = bind_listening_port_to_irqhandler(xenbus_get_otherend_id(dev),
	    "xn", xn_intr, info, INTR_TYPE_NET | INTR_MPSAFE, &info->irq);

	if (error) {
		xenbus_dev_fatal(dev, error,
		    "bind_evtchn_to_irqhandler failed");
		goto fail;
	}

	show_device(info);

	return (0);

 fail:
	netif_free(info);
	return (error);
}
/**
 * If this interface has an ipv4 address, send an arp for it.  This
 * helps to get the network going again after migrating hosts.
 */
static void
netfront_send_fake_arp(device_t dev, struct netfront_info *info)
{
	struct ifnet *ifp;
	struct ifaddr *ifa;

	ifp = info->xn_ifp;
	TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
		if (ifa->ifa_addr->sa_family == AF_INET) {
			arp_ifinit(ifp, ifa);
		}
	}
}

/**
 * Callback received when the backend's state changes.
 */
static int
netfront_backend_changed(device_t dev, XenbusState newstate)
{
	struct netfront_info *sc = device_get_softc(dev);

	DPRINTK("newstate=%d\n", newstate);

	switch (newstate) {
	case XenbusStateInitialising:
	case XenbusStateInitialised:
	case XenbusStateConnected:
	case XenbusStateUnknown:
	case XenbusStateClosed:
	case XenbusStateReconfigured:
	case XenbusStateReconfiguring:
		break;
	case XenbusStateInitWait:
		if (xenbus_get_state(dev) != XenbusStateInitialising)
			break;
		if (network_connect(sc) != 0)
			break;
		xenbus_set_state(dev, XenbusStateConnected);
		netfront_send_fake_arp(dev, sc);
		break;
	case XenbusStateClosing:
		xenbus_set_state(dev, XenbusStateClosed);
		break;
	}
	return (0);
}

static void
xn_free_rx_ring(struct netfront_info *sc)
{
#if 0
	int i;

	for (i = 0; i < NET_RX_RING_SIZE; i++) {
		if (sc->xn_cdata.rx_mbufs[i] != NULL) {
			m_freem(sc->rx_mbufs[i]);
			sc->rx_mbufs[i] = NULL;
		}
	}

	sc->rx.rsp_cons = 0;
	sc->xn_rx_if->req_prod = 0;
	sc->xn_rx_if->event = sc->rx.rsp_cons;
#endif
}

static void
xn_free_tx_ring(struct netfront_info *sc)
{
#if 0
	int i;

	for (i = 0; i < NET_TX_RING_SIZE; i++) {
		if (sc->tx_mbufs[i] != NULL) {
			m_freem(sc->tx_mbufs[i]);
			sc->xn_cdata.xn_tx_chain[i] = NULL;
		}
	}

	return;
#endif
}

/**
 * \brief Verify that there is sufficient space in the Tx ring
 *        buffer for a maximally sized request to be enqueued.
 *
 * A transmit request requires a transmit descriptor for each packet
 * fragment, plus up to 2 entries for "options" (e.g. TSO).
 */
static inline int
xn_tx_slot_available(struct netfront_info *np)
{
	return (RING_FREE_REQUESTS(&np->tx) > (MAX_TX_REQ_FRAGS + 2));
}
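/*
 * For example, with 4 KB pages MAX_TX_REQ_FRAGS is 65536 / 4096 + 2 = 18,
 * so the test above demands more than 20 free descriptors before another
 * packet is dequeued (illustrative arithmetic assuming the common
 * PAGE_SIZE; see xn_start_locked() and xn_assemble_tx_request()).
 */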
static void
netif_release_tx_bufs(struct netfront_info *np)
{
	int i;

	for (i = 1; i <= NET_TX_RING_SIZE; i++) {
		struct mbuf *m;

		m = np->tx_mbufs[i];

		/*
		 * We assume that no kernel addresses are
		 * less than NET_TX_RING_SIZE.  Any entry
		 * in the table that is below this number
		 * must be an index from free-list tracking.
		 */
		if (((uintptr_t)m) <= NET_TX_RING_SIZE)
			continue;
		gnttab_grant_foreign_access_ref(np->grant_tx_ref[i],
		    xenbus_get_otherend_id(np->xbdev),
		    virt_to_mfn(mtod(m, vm_offset_t)),
		    GNTMAP_readonly);
		gnttab_release_grant_reference(&np->gref_tx_head,
		    np->grant_tx_ref[i]);
		np->grant_tx_ref[i] = GRANT_REF_INVALID;
		add_id_to_freelist(np->tx_mbufs, i);
		np->xn_cdata.xn_tx_chain_cnt--;
		if (np->xn_cdata.xn_tx_chain_cnt < 0) {
			panic("netif_release_tx_bufs: tx_chain_cnt must be >= 0");
		}
		m_freem(m);
	}
}

static void
network_alloc_rx_buffers(struct netfront_info *sc)
{
	int otherend_id = xenbus_get_otherend_id(sc->xbdev);
	unsigned short id;
	struct mbuf *m_new;
	int i, batch_target, notify;
	RING_IDX req_prod;
	struct xen_memory_reservation reservation;
	grant_ref_t ref;
	int nr_flips;
	netif_rx_request_t *req;
	vm_offset_t vaddr;
	u_long pfn;

	req_prod = sc->rx.req_prod_pvt;

	if (unlikely(sc->carrier == 0))
		return;

	/*
	 * Allocate mbufs greedily, even though we batch updates to the
	 * receive ring.  This creates a less bursty demand on the memory
	 * allocator, and so should reduce the chance of failed allocation
	 * requests both for ourself and for other kernel subsystems.
	 *
	 * Here we attempt to maintain rx_target buffers in flight, counting
	 * buffers that we have yet to process in the receive ring.
	 */
	batch_target = sc->rx_target - (req_prod - sc->rx.rsp_cons);
	for (i = mbufq_len(&sc->xn_rx_batch); i < batch_target; i++) {
		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL) {
			printf("%s: MGETHDR failed\n", __func__);
			goto no_mbuf;
		}

		m_cljget(m_new, M_DONTWAIT, MJUMPAGESIZE);
		if ((m_new->m_flags & M_EXT) == 0) {
			printf("%s: m_cljget failed\n", __func__);
			m_freem(m_new);

no_mbuf:
			if (i != 0)
				goto refill;
			/*
			 * XXX set timer
			 */
			break;
		}
		m_new->m_len = m_new->m_pkthdr.len = MJUMPAGESIZE;

		/* queue the mbufs allocated */
		mbufq_tail(&sc->xn_rx_batch, m_new);
	}

	/*
	 * If we've allocated at least half of our target number of entries,
	 * submit them to the backend - we have enough to make the overhead
	 * of submission worthwhile.  Otherwise wait for more mbufs and
	 * request entries to become available.
	 */
	if (i < (sc->rx_target/2)) {
		if (req_prod > sc->rx.sring->req_prod)
			goto push;
		return;
	}

	/*
	 * Double the floating fill target if we risked having the backend
	 * run out of empty buffers for receive traffic.  We define "running
	 * low" as having less than a fourth of our target buffers free
	 * at the time we refilled the queue.
	 */
	if ((req_prod - sc->rx.sring->rsp_prod) < (sc->rx_target / 4)) {
		sc->rx_target *= 2;
		if (sc->rx_target > sc->rx_max_target)
			sc->rx_target = sc->rx_max_target;
	}

refill:
	for (nr_flips = i = 0; ; i++) {
		if ((m_new = mbufq_dequeue(&sc->xn_rx_batch)) == NULL)
			break;

		m_new->m_ext.ext_arg1 = (vm_paddr_t *)(uintptr_t)(
		    vtophys(m_new->m_ext.ext_buf) >> PAGE_SHIFT);

		id = xennet_rxidx(req_prod + i);

		KASSERT(sc->rx_mbufs[id] == NULL, ("non-NULL xm_rx_chain"));
		sc->rx_mbufs[id] = m_new;

		ref = gnttab_claim_grant_reference(&sc->gref_rx_head);
		KASSERT(ref != GNTTAB_LIST_END,
		    ("reserved grant references exhausted"));
		sc->grant_rx_ref[id] = ref;

		vaddr = mtod(m_new, vm_offset_t);
		pfn = vtophys(vaddr) >> PAGE_SHIFT;
		req = RING_GET_REQUEST(&sc->rx, req_prod + i);

		if (sc->copying_receiver == 0) {
			gnttab_grant_foreign_transfer_ref(ref,
			    otherend_id, pfn);
			sc->rx_pfn_array[nr_flips] = PFNTOMFN(pfn);
			if (!xen_feature(XENFEAT_auto_translated_physmap)) {
				/*
				 * Remove this page before passing
				 * back to Xen.
				 */
				set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
				MULTI_update_va_mapping(&sc->rx_mcl[i],
				    vaddr, 0, 0);
			}
			nr_flips++;
		} else {
			gnttab_grant_foreign_access_ref(ref,
			    otherend_id,
			    PFNTOMFN(pfn), 0);
		}
		req->id = id;
		req->gref = ref;

		sc->rx_pfn_array[i] =
		    vtomach(mtod(m_new, vm_offset_t)) >> PAGE_SHIFT;
	}

	KASSERT(i, ("no mbufs processed"));	/* should have returned earlier */
	KASSERT(mbufq_len(&sc->xn_rx_batch) == 0, ("not all mbufs processed"));
	/*
	 * We may have allocated buffers which have entries outstanding
	 * in the page update queue -- make sure we flush those first!
	 */
	PT_UPDATES_FLUSH();
	if (nr_flips != 0) {
#ifdef notyet
		/* Tell the balloon driver what is going on. */
		balloon_update_driver_allowance(i);
#endif
		set_xen_guest_handle(reservation.extent_start, sc->rx_pfn_array);
		reservation.nr_extents   = i;
		reservation.extent_order = 0;
		reservation.address_bits = 0;
		reservation.domid        = DOMID_SELF;

		if (!xen_feature(XENFEAT_auto_translated_physmap)) {

			/* After all PTEs have been zapped, flush the TLB. */
			sc->rx_mcl[i-1].args[MULTI_UVMFLAGS_INDEX] =
			    UVMF_TLB_FLUSH|UVMF_ALL;

			/* Give away a batch of pages. */
			sc->rx_mcl[i].op = __HYPERVISOR_memory_op;
			sc->rx_mcl[i].args[0] = XENMEM_decrease_reservation;
			sc->rx_mcl[i].args[1] = (u_long)&reservation;
			/* Zap PTEs and give away pages in one big multicall. */
			(void)HYPERVISOR_multicall(sc->rx_mcl, i+1);

			/* Check return status of HYPERVISOR_dom_mem_op(). */
			if (unlikely(sc->rx_mcl[i].result != i))
				panic("Unable to reduce memory reservation\n");
		} else {
			if (HYPERVISOR_memory_op(
			    XENMEM_decrease_reservation, &reservation)
			    != i)
				panic("Unable to reduce memory "
				    "reservation\n");
		}
	} else {
		wmb();
	}

	/* Above is a suitable barrier to ensure backend will see requests. */
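	/*
	 * Publish the new requests: advance the ring's private producer
	 * index, then let RING_PUSH_REQUESTS_AND_CHECK_NOTIFY() decide
	 * whether the backend needs an event-channel kick.
	 */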
	sc->rx.req_prod_pvt = req_prod + i;
push:
	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&sc->rx, notify);
	if (notify)
		notify_remote_via_irq(sc->irq);
}

static void
xn_rxeof(struct netfront_info *np)
{
	struct ifnet *ifp;
#if __FreeBSD_version >= 700000
	struct lro_ctrl *lro = &np->xn_lro;
	struct lro_entry *queued;
#endif
	struct netfront_rx_info rinfo;
	struct netif_rx_response *rx = &rinfo.rx;
	struct netif_extra_info *extras = rinfo.extras;
	RING_IDX i, rp;
	multicall_entry_t *mcl;
	struct mbuf *m;
	struct mbuf_head rxq, errq;
	int err, pages_flipped = 0, work_to_do;

	do {
		XN_RX_LOCK_ASSERT(np);
		if (!netfront_carrier_ok(np))
			return;

		mbufq_init(&errq);
		mbufq_init(&rxq);

		ifp = np->xn_ifp;

		rp = np->rx.sring->rsp_prod;
		rmb();	/* Ensure we see queued responses up to 'rp'. */

		i = np->rx.rsp_cons;
		while (i != rp) {
			memcpy(rx, RING_GET_RESPONSE(&np->rx, i), sizeof(*rx));
			memset(extras, 0, sizeof(rinfo.extras));

			m = NULL;
			err = xennet_get_responses(np, &rinfo, rp, &i, &m,
			    &pages_flipped);

			if (unlikely(err)) {
				if (m)
					mbufq_tail(&errq, m);
				np->stats.rx_errors++;
				continue;
			}

			m->m_pkthdr.rcvif = ifp;
			if (rx->flags & NETRXF_data_validated) {
				/*
				 * Tell the stack the checksums are okay.
				 * XXX this isn't necessarily the case - need
				 * to add a check.
				 */
				m->m_pkthdr.csum_flags |=
				    (CSUM_IP_CHECKED | CSUM_IP_VALID |
				     CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
				m->m_pkthdr.csum_data = 0xffff;
			}

			np->stats.rx_packets++;
			np->stats.rx_bytes += m->m_pkthdr.len;

			mbufq_tail(&rxq, m);
			np->rx.rsp_cons = i;
		}

		if (pages_flipped) {
			/* Some pages are no longer absent... */
#ifdef notyet
			balloon_update_driver_allowance(-pages_flipped);
#endif
			/*
			 * Do all the remapping work, and M->P updates, in
			 * one big hypercall.
			 */
			if (!xen_feature(XENFEAT_auto_translated_physmap)) {
				mcl = np->rx_mcl + pages_flipped;
				mcl->op = __HYPERVISOR_mmu_update;
				mcl->args[0] = (u_long)np->rx_mmu;
				mcl->args[1] = pages_flipped;
				mcl->args[2] = 0;
				mcl->args[3] = DOMID_SELF;
				(void)HYPERVISOR_multicall(np->rx_mcl,
				    pages_flipped + 1);
			}
		}

		while ((m = mbufq_dequeue(&errq)))
			m_freem(m);

		/*
		 * Process all the mbufs after the remapping is complete.
		 * Break the mbuf chain first though.
		 */
		while ((m = mbufq_dequeue(&rxq)) != NULL) {
			ifp->if_ipackets++;

			/*
			 * Do we really need to drop the rx lock?
			 */
			XN_RX_UNLOCK(np);
#if __FreeBSD_version >= 700000
			/* Use LRO if possible */
			if ((ifp->if_capenable & IFCAP_LRO) == 0 ||
			    lro->lro_cnt == 0 || tcp_lro_rx(lro, m, 0)) {
				/*
				 * If LRO fails, pass up to the stack
				 * directly.
				 */
				(*ifp->if_input)(ifp, m);
			}
#else
			(*ifp->if_input)(ifp, m);
#endif
			XN_RX_LOCK(np);
		}

		np->rx.rsp_cons = i;

#if __FreeBSD_version >= 700000
		/*
		 * Flush any outstanding LRO work
		 */
		while (!SLIST_EMPTY(&lro->lro_active)) {
			queued = SLIST_FIRST(&lro->lro_active);
			SLIST_REMOVE_HEAD(&lro->lro_active, next);
			tcp_lro_flush(lro, queued);
		}
#endif

#if 0
		/*
		 * If we get a callback with very few responses, reduce fill
		 * target.  NB. Note exponential increase, linear decrease.
		 */
		if (((np->rx.req_prod_pvt - np->rx.sring->rsp_prod) >
		    ((3*np->rx_target) / 4)) &&
		    (--np->rx_target < np->rx_min_target))
			np->rx_target = np->rx_min_target;
#endif

		network_alloc_rx_buffers(np);

		RING_FINAL_CHECK_FOR_RESPONSES(&np->rx, work_to_do);
	} while (work_to_do);
}

static void
xn_txeof(struct netfront_info *np)
{
	RING_IDX i, prod;
	unsigned short id;
	struct ifnet *ifp;
	netif_tx_response_t *txr;
	struct mbuf *m;

	XN_TX_LOCK_ASSERT(np);

	if (!netfront_carrier_ok(np))
		return;

	ifp = np->xn_ifp;

	do {
		prod = np->tx.sring->rsp_prod;
		rmb();	/* Ensure we see responses up to 'prod'. */

		for (i = np->tx.rsp_cons; i != prod; i++) {
			txr = RING_GET_RESPONSE(&np->tx, i);
			if (txr->status == NETIF_RSP_NULL)
				continue;

			if (txr->status != NETIF_RSP_OKAY) {
				printf("%s: WARNING: response is %d!\n",
				    __func__, txr->status);
			}
			id = txr->id;
			m = np->tx_mbufs[id];
			KASSERT(m != NULL, ("mbuf not found in xn_tx_chain"));
			KASSERT((uintptr_t)m > NET_TX_RING_SIZE,
			    ("mbuf already on the free list, but we're "
			    "trying to free it again!"));
			M_ASSERTVALID(m);

			/*
			 * Increment packet count if this is the last
			 * mbuf of the chain.
			 */
			if (!m->m_next)
				ifp->if_opackets++;
			if (unlikely(gnttab_query_foreign_access(
			    np->grant_tx_ref[id]) != 0)) {
				panic("grant id %u still in use by the backend",
				    id);
			}
			gnttab_end_foreign_access_ref(
			    np->grant_tx_ref[id]);
			gnttab_release_grant_reference(
			    &np->gref_tx_head, np->grant_tx_ref[id]);
			np->grant_tx_ref[id] = GRANT_REF_INVALID;

			np->tx_mbufs[id] = NULL;
			add_id_to_freelist(np->tx_mbufs, id);
			np->xn_cdata.xn_tx_chain_cnt--;
			m_free(m);
			/*
			 * Only mark the queue active if we've freed up at
			 * least one slot to try.
			 */
			ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		}
		np->tx.rsp_cons = prod;

		/*
		 * Set a new event, then check for race with update of
		 * tx_cons.  Note that it is essential to schedule a
		 * callback, no matter how few buffers are pending.  Even if
		 * there is space in the transmit ring, higher layers may
		 * be blocked because too much data is outstanding: in such
		 * cases notification from Xen is likely to be the only kick
		 * that we'll get.
		 */
		np->tx.sring->rsp_event =
		    prod + ((np->tx.sring->req_prod - prod) >> 1) + 1;

		mb();
	} while (prod != np->tx.sring->rsp_prod);

	if (np->tx_full &&
	    ((np->tx.sring->req_prod - prod) < NET_TX_RING_SIZE)) {
		np->tx_full = 0;
#if 0
		if (np->user_state == UST_OPEN)
			netif_wake_queue(dev);
#endif
	}

}

static void
xn_intr(void *xsc)
{
	struct netfront_info *np = xsc;
	struct ifnet *ifp = np->xn_ifp;

#if 0
	if (!(np->rx.rsp_cons != np->rx.sring->rsp_prod &&
	    likely(netfront_carrier_ok(np)) &&
	    ifp->if_drv_flags & IFF_DRV_RUNNING))
		return;
#endif
	if (RING_HAS_UNCONSUMED_RESPONSES(&np->tx)) {
		XN_TX_LOCK(np);
		xn_txeof(np);
		XN_TX_UNLOCK(np);
	}

	XN_RX_LOCK(np);
	xn_rxeof(np);
	XN_RX_UNLOCK(np);

	if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
	    !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		xn_start(ifp);
}

static void
xennet_move_rx_slot(struct netfront_info *np, struct mbuf *m,
    grant_ref_t ref)
{
	int new = xennet_rxidx(np->rx.req_prod_pvt);

	KASSERT(np->rx_mbufs[new] == NULL, ("rx_mbufs != NULL"));
	np->rx_mbufs[new] = m;
	np->grant_rx_ref[new] = ref;
	RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->id = new;
	RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->gref = ref;
	np->rx.req_prod_pvt++;
}

static int
xennet_get_extras(struct netfront_info *np,
    struct netif_extra_info *extras, RING_IDX rp, RING_IDX *cons)
{
	struct netif_extra_info *extra;

	int err = 0;

	do {
		struct mbuf *m;
		grant_ref_t ref;

		if (unlikely(*cons + 1 == rp)) {
#if 0
			if (net_ratelimit())
				WPRINTK("Missing extra info\n");
#endif
			err = EINVAL;
			break;
		}

		extra = (struct netif_extra_info *)
		    RING_GET_RESPONSE(&np->rx, ++(*cons));

		if (unlikely(!extra->type ||
		    extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
#if 0
			if (net_ratelimit())
				WPRINTK("Invalid extra type: %d\n",
				    extra->type);
#endif
			err = EINVAL;
		} else {
			memcpy(&extras[extra->type - 1], extra, sizeof(*extra));
		}

		m = xennet_get_rx_mbuf(np, *cons);
		ref = xennet_get_rx_ref(np, *cons);
		xennet_move_rx_slot(np, m, ref);
	} while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE);

	return err;
}

static int
xennet_get_responses(struct netfront_info *np,
    struct netfront_rx_info *rinfo, RING_IDX rp, RING_IDX *cons,
    struct mbuf **list,
    int *pages_flipped_p)
{
	int pages_flipped = *pages_flipped_p;
	struct mmu_update *mmu;
	struct multicall_entry *mcl;
	struct netif_rx_response *rx = &rinfo->rx;
	struct netif_extra_info *extras = rinfo->extras;
	struct mbuf *m, *m0, *m_prev;
	grant_ref_t ref = xennet_get_rx_ref(np, *cons);
	RING_IDX ref_cons = *cons;
	int max = 5 /* MAX_TX_REQ_FRAGS + (rx->status <= RX_COPY_THRESHOLD) */;
	int frags = 1;
	int err = 0;
	u_long ret;

	m0 = m = m_prev = xennet_get_rx_mbuf(np, *cons);

	if (rx->flags & NETRXF_extra_info) {
		err = xennet_get_extras(np, extras, rp, cons);
	}

	if (m0 != NULL) {
		m0->m_pkthdr.len = 0;
		m0->m_next = NULL;
	}
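	/*
	 * Walk the chain of responses for this packet: each response
	 * describes one fragment.  Validate the fragment, reclaim its
	 * grant (or finish the page flip), and link its mbuf onto the
	 * chain headed by m0 until NETRXF_more_data is clear.
	 */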
	for (;;) {
		u_long mfn;

#if 0
		DPRINTK("rx->status=%hd rx->offset=%hu frags=%u\n",
		    rx->status, rx->offset, frags);
#endif
		if (unlikely(rx->status < 0 ||
		    rx->offset + rx->status > PAGE_SIZE)) {

#if 0
			if (net_ratelimit())
				WPRINTK("rx->offset: %x, size: %u\n",
				    rx->offset, rx->status);
#endif
			xennet_move_rx_slot(np, m, ref);
			if (m0 == m)
				m0 = NULL;
			m = NULL;
			err = EINVAL;
			goto next_skip_queue;
		}

		/*
		 * This definitely indicates a bug, either in this driver or in
		 * the backend driver.  In future this should flag the bad
		 * situation to the system controller to reboot the backend.
		 */
		if (ref == GRANT_REF_INVALID) {

#if 0
			if (net_ratelimit())
				WPRINTK("Bad rx response id %d.\n", rx->id);
#endif
			printf("%s: Bad rx response id %d.\n", __func__, rx->id);
			err = EINVAL;
			goto next;
		}

		if (!np->copying_receiver) {
			/*
			 * Memory pressure, insufficient buffer
			 * headroom, ...
			 */
			if (!(mfn = gnttab_end_foreign_transfer_ref(ref))) {
				WPRINTK("Unfulfilled rx req (id=%d, st=%d).\n",
				    rx->id, rx->status);
				xennet_move_rx_slot(np, m, ref);
				err = ENOMEM;
				goto next;
			}

			if (!xen_feature(XENFEAT_auto_translated_physmap)) {
				/* Remap the page. */
				void *vaddr = mtod(m, void *);
				uint32_t pfn;

				mcl = np->rx_mcl + pages_flipped;
				mmu = np->rx_mmu + pages_flipped;

				MULTI_update_va_mapping(mcl, (u_long)vaddr,
				    (((vm_paddr_t)mfn) << PAGE_SHIFT) | PG_RW |
				    PG_V | PG_M | PG_A, 0);
				pfn = (uintptr_t)m->m_ext.ext_arg1;
				mmu->ptr = ((vm_paddr_t)mfn << PAGE_SHIFT) |
				    MMU_MACHPHYS_UPDATE;
				mmu->val = pfn;

				set_phys_to_machine(pfn, mfn);
			}
			pages_flipped++;
		} else {
			ret = gnttab_end_foreign_access_ref(ref);
			KASSERT(ret, ("ret != 0"));
		}

		gnttab_release_grant_reference(&np->gref_rx_head, ref);

next:
		if (m == NULL)
			break;

		m->m_len = rx->status;
		m->m_data += rx->offset;
		m0->m_pkthdr.len += rx->status;

next_skip_queue:
		if (!(rx->flags & NETRXF_more_data))
			break;

		if (*cons + frags == rp) {
			if (net_ratelimit())
				WPRINTK("Need more frags\n");
			err = ENOENT;
			printf("%s: cons %u frags %u rp %u, not enough frags\n",
			    __func__, *cons, frags, rp);
			break;
		}
		/*
		 * Note that m can be NULL, if rx->status < 0 or if
		 * rx->offset + rx->status > PAGE_SIZE above.
		 */
		m_prev = m;

		rx = RING_GET_RESPONSE(&np->rx, *cons + frags);
		m = xennet_get_rx_mbuf(np, *cons + frags);

		/*
		 * m_prev == NULL can happen if rx->status < 0 or if
		 * rx->offset + rx->status > PAGE_SIZE above.
		 */
		if (m_prev != NULL)
			m_prev->m_next = m;

		/*
		 * m0 can be NULL if rx->status < 0 or if rx->offset +
		 * rx->status > PAGE_SIZE above.
		 */
		if (m0 == NULL)
			m0 = m;
		m->m_next = NULL;
		ref = xennet_get_rx_ref(np, *cons + frags);
		ref_cons = *cons + frags;
		frags++;
	}
	*list = m0;

	if (unlikely(frags > max)) {
		if (net_ratelimit())
			WPRINTK("Too many frags\n");
		printf("%s: too many frags %d > max %d\n", __func__, frags,
		    max);
		err = E2BIG;
	}

	*cons += frags;

	*pages_flipped_p = pages_flipped;

	return err;
}

static void
xn_tick_locked(struct netfront_info *sc)
{
	XN_RX_LOCK_ASSERT(sc);
	callout_reset(&sc->xn_stat_ch, hz, xn_tick, sc);

	/* XXX placeholder for printing debug information */

}

static void
xn_tick(void *xsc)
{
	struct netfront_info *sc;

	sc = xsc;
	XN_RX_LOCK(sc);
	xn_tick_locked(sc);
	XN_RX_UNLOCK(sc);

}

/**
 * \brief Count the number of fragments in an mbuf chain.
 *
 * Surprisingly, there isn't an M* macro for this.
 */
static inline int
xn_count_frags(struct mbuf *m)
{
	int nfrags;

	for (nfrags = 0; m != NULL; m = m->m_next)
		nfrags++;

	return (nfrags);
}

/**
 * Given an mbuf chain, make sure we have enough room and then push
 * it onto the transmit ring.
 */
static int
xn_assemble_tx_request(struct netfront_info *sc, struct mbuf *m_head)
{
	struct ifnet *ifp;
	struct mbuf *m;
	u_int nfrags;
	netif_extra_info_t *extra;
	int otherend_id;

	ifp = sc->xn_ifp;

	/**
	 * Defragment the mbuf if necessary.
	 */
	nfrags = xn_count_frags(m_head);

	/*
	 * Check to see whether this request is longer than netback
	 * can handle, and try to defrag it.
	 */
	/**
	 * It is a bit lame, but the netback driver in Linux can't
	 * deal with nfrags > MAX_TX_REQ_FRAGS, which is a quirk of
	 * the Linux network stack.
	 */
	if (nfrags > MAX_TX_REQ_FRAGS) {
		m = m_defrag(m_head, M_DONTWAIT);
		if (!m) {
			/*
			 * Defrag failed, so free the mbuf and
			 * therefore drop the packet.
			 */
			m_freem(m_head);
			return (EMSGSIZE);
		}
		m_head = m;
	}

	/* Determine how many fragments now exist */
	nfrags = xn_count_frags(m_head);

	/*
	 * Check to see whether the defragmented packet has too many
	 * segments for the Linux netback driver.
	 */
	/**
	 * The FreeBSD TCP stack, with TSO enabled, can produce a chain
	 * of mbufs longer than Linux can handle.  Make sure we don't
	 * pass a too-long chain over to the other side by dropping the
	 * packet.  It doesn't look like there is currently a way to
	 * tell the TCP stack to generate a shorter chain of packets.
	 */
	if (nfrags > MAX_TX_REQ_FRAGS) {
#ifdef DEBUG
		printf("%s: nfrags %d > MAX_TX_REQ_FRAGS %d, netback "
		    "won't be able to handle it, dropping\n",
		    __func__, nfrags, MAX_TX_REQ_FRAGS);
#endif
		m_freem(m_head);
		return (EMSGSIZE);
	}

	/*
	 * This check should be redundant.  We've already verified that we
	 * have enough slots in the ring to handle a packet of maximum
	 * size, and that our packet is less than the maximum size.  Keep
	 * it in here as an assert for now just to make certain that
	 * xn_tx_chain_cnt is accurate.
	 */
	KASSERT((sc->xn_cdata.xn_tx_chain_cnt + nfrags) <= NET_TX_RING_SIZE,
	    ("%s: xn_tx_chain_cnt (%d) + nfrags (%d) > NET_TX_RING_SIZE "
	    "(%d)!", __func__, (int) sc->xn_cdata.xn_tx_chain_cnt,
	    (int) nfrags, (int) NET_TX_RING_SIZE));

	/*
	 * Start packing the mbufs in this chain into
	 * the fragment pointers.  Stop when we run out
	 * of fragments or hit the end of the mbuf chain.
	 */
	m = m_head;
	extra = NULL;
	otherend_id = xenbus_get_otherend_id(sc->xbdev);
	for (m = m_head; m; m = m->m_next) {
		netif_tx_request_t *tx;
		uintptr_t id;
		grant_ref_t ref;
		u_long mfn; /* XXX Wrong type? */

		tx = RING_GET_REQUEST(&sc->tx, sc->tx.req_prod_pvt);
		id = get_id_from_freelist(sc->tx_mbufs);
		if (id == 0)
			panic("xn_start_locked: was allocated the freelist head!\n");
		sc->xn_cdata.xn_tx_chain_cnt++;
		if (sc->xn_cdata.xn_tx_chain_cnt > NET_TX_RING_SIZE)
			panic("xn_start_locked: tx_chain_cnt must be <= NET_TX_RING_SIZE\n");
		sc->tx_mbufs[id] = m;
		tx->id = id;
		ref = gnttab_claim_grant_reference(&sc->gref_tx_head);
		KASSERT((short)ref >= 0, ("Negative ref"));
		mfn = virt_to_mfn(mtod(m, vm_offset_t));
		gnttab_grant_foreign_access_ref(ref, otherend_id,
		    mfn, GNTMAP_readonly);
		tx->gref = sc->grant_tx_ref[id] = ref;
		tx->offset = mtod(m, vm_offset_t) & (PAGE_SIZE - 1);
		tx->flags = 0;
		if (m == m_head) {
			/*
			 * The first fragment has the entire packet
			 * size, subsequent fragments have just the
			 * fragment size.  The backend works out the
			 * true size of the first fragment by
			 * subtracting the sizes of the other
			 * fragments.
			 */
			tx->size = m->m_pkthdr.len;

			/*
			 * The first fragment contains the checksum flags
			 * and is optionally followed by extra data for
			 * TSO etc.
			 */
			/**
			 * CSUM_TSO requires checksum offloading.
			 * Some versions of FreeBSD fail to
			 * set CSUM_TCP in the CSUM_TSO case,
			 * so we have to test for CSUM_TSO
			 * explicitly.
			 */
			if (m->m_pkthdr.csum_flags
			    & (CSUM_DELAY_DATA | CSUM_TSO)) {
				tx->flags |= (NETTXF_csum_blank
				    | NETTXF_data_validated);
			}
#if __FreeBSD_version >= 700000
			if (m->m_pkthdr.csum_flags & CSUM_TSO) {
				struct netif_extra_info *gso =
				    (struct netif_extra_info *)
				    RING_GET_REQUEST(&sc->tx,
				    ++sc->tx.req_prod_pvt);

				tx->flags |= NETTXF_extra_info;

				gso->u.gso.size = m->m_pkthdr.tso_segsz;
				gso->u.gso.type =
				    XEN_NETIF_GSO_TYPE_TCPV4;
				gso->u.gso.pad = 0;
				gso->u.gso.features = 0;

				gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
				gso->flags = 0;
			}
#endif
		} else {
			tx->size = m->m_len;
		}
		if (m->m_next)
			tx->flags |= NETTXF_more_data;

		sc->tx.req_prod_pvt++;
	}
	BPF_MTAP(ifp, m_head);

	sc->stats.tx_bytes += m_head->m_pkthdr.len;
	sc->stats.tx_packets++;

	return (0);
}
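/*
 * Note that a TSO packet consumes one ring slot beyond its fragment
 * count: the netif_extra_info descriptor carrying the GSO parameters is
 * claimed from the ring immediately after the first request in
 * xn_assemble_tx_request() above.  The MAX_TX_REQ_FRAGS + 2 headroom
 * demanded by xn_tx_slot_available() accounts for such option entries.
 */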
static void
xn_start_locked(struct ifnet *ifp)
{
	struct netfront_info *sc;
	struct mbuf *m_head;
	int notify;

	sc = ifp->if_softc;

	if (!netfront_carrier_ok(sc))
		return;

	/*
	 * While we have enough transmit slots available for at least one
	 * maximum-sized packet, pull mbufs off the queue and put them on
	 * the transmit ring.
	 */
	while (xn_tx_slot_available(sc)) {
		IF_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		if (xn_assemble_tx_request(sc, m_head) != 0)
			break;
	}

	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&sc->tx, notify);
	if (notify)
		notify_remote_via_irq(sc->irq);

	if (RING_FULL(&sc->tx)) {
		sc->tx_full = 1;
#if 0
		netif_stop_queue(dev);
#endif
	}
}

static void
xn_start(struct ifnet *ifp)
{
	struct netfront_info *sc;
	sc = ifp->if_softc;
	XN_TX_LOCK(sc);
	xn_start_locked(ifp);
	XN_TX_UNLOCK(sc);
}

/* equivalent of network_open() in Linux */
static void
xn_ifinit_locked(struct netfront_info *sc)
{
	struct ifnet *ifp;

	XN_LOCK_ASSERT(sc);

	ifp = sc->xn_ifp;

	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		return;

	xn_stop(sc);

	network_alloc_rx_buffers(sc);
	sc->rx.sring->rsp_event = sc->rx.rsp_cons + 1;

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	if_link_state_change(ifp, LINK_STATE_UP);

	callout_reset(&sc->xn_stat_ch, hz, xn_tick, sc);

}

static void
xn_ifinit(void *xsc)
{
	struct netfront_info *sc = xsc;

	XN_LOCK(sc);
	xn_ifinit_locked(sc);
	XN_UNLOCK(sc);

}

static int
xn_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct netfront_info *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	struct ifaddr *ifa = (struct ifaddr *)data;

	int mask, error = 0;
	switch(cmd) {
	case SIOCSIFADDR:
	case SIOCGIFADDR:
		XN_LOCK(sc);
		if (ifa->ifa_addr->sa_family == AF_INET) {
			ifp->if_flags |= IFF_UP;
			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
				xn_ifinit_locked(sc);
			arp_ifinit(ifp, ifa);
			XN_UNLOCK(sc);
		} else {
			XN_UNLOCK(sc);
			error = ether_ioctl(ifp, cmd, data);
		}
		break;
	case SIOCSIFMTU:
		/* XXX can we alter the MTU on a VN ? */
#ifdef notyet
		if (ifr->ifr_mtu > XN_JUMBO_MTU)
			error = EINVAL;
		else
#endif
		{
			ifp->if_mtu = ifr->ifr_mtu;
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
			xn_ifinit(sc);
		}
		break;
	case SIOCSIFFLAGS:
		XN_LOCK(sc);
		if (ifp->if_flags & IFF_UP) {
			/*
			 * If only the state of the PROMISC flag changed,
			 * then just use the 'set promisc mode' command
			 * instead of reinitializing the entire NIC.  Doing
			 * a full re-init means reloading the firmware and
			 * waiting for it to start up, which may take a
			 * second or two.
			 */
#ifdef notyet
			/* No promiscuous mode with Xen */
			if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
			    ifp->if_flags & IFF_PROMISC &&
			    !(sc->xn_if_flags & IFF_PROMISC)) {
				XN_SETBIT(sc, XN_RX_MODE,
				    XN_RXMODE_RX_PROMISC);
			} else if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
			    !(ifp->if_flags & IFF_PROMISC) &&
			    sc->xn_if_flags & IFF_PROMISC) {
				XN_CLRBIT(sc, XN_RX_MODE,
				    XN_RXMODE_RX_PROMISC);
			} else
#endif
				xn_ifinit_locked(sc);
		} else {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				xn_stop(sc);
			}
		}
		sc->xn_if_flags = ifp->if_flags;
		XN_UNLOCK(sc);
		error = 0;
		break;
	case SIOCSIFCAP:
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if (mask & IFCAP_TXCSUM) {
			if (IFCAP_TXCSUM & ifp->if_capenable) {
				ifp->if_capenable &= ~(IFCAP_TXCSUM|IFCAP_TSO4);
				ifp->if_hwassist &= ~(CSUM_TCP | CSUM_UDP
				    | CSUM_IP | CSUM_TSO);
			} else {
				ifp->if_capenable |= IFCAP_TXCSUM;
				ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP
				    | CSUM_IP);
			}
		}
		if (mask & IFCAP_RXCSUM) {
			ifp->if_capenable ^= IFCAP_RXCSUM;
		}
#if __FreeBSD_version >= 700000
		if (mask & IFCAP_TSO4) {
			if (IFCAP_TSO4 & ifp->if_capenable) {
				ifp->if_capenable &= ~IFCAP_TSO4;
				ifp->if_hwassist &= ~CSUM_TSO;
			} else if (IFCAP_TXCSUM & ifp->if_capenable) {
				ifp->if_capenable |= IFCAP_TSO4;
				ifp->if_hwassist |= CSUM_TSO;
			} else {
				IPRINTK("Xen requires tx checksum offload"
				    " be enabled to use TSO\n");
				error = EINVAL;
			}
		}
		if (mask & IFCAP_LRO) {
			ifp->if_capenable ^= IFCAP_LRO;

		}
#endif
		error = 0;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
#ifdef notyet
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			XN_LOCK(sc);
			xn_setmulti(sc);
			XN_UNLOCK(sc);
			error = 0;
		}
#endif
		/* FALLTHROUGH */
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
	}

	return (error);
}

static void
xn_stop(struct netfront_info *sc)
{
	struct ifnet *ifp;

	XN_LOCK_ASSERT(sc);

	ifp = sc->xn_ifp;

	callout_stop(&sc->xn_stat_ch);

	xn_free_rx_ring(sc);
	xn_free_tx_ring(sc);

	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	if_link_state_change(ifp, LINK_STATE_DOWN);
}
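/*
 * "Copy" and "flip" below are the two ways the backend can hand us a
 * received packet: copying the data into a page we granted it access
 * to, or flipping (transferring) ownership of the backend's page to us
 * via a grant transfer.  Flipping requires the machine-to-physical
 * remapping performed in xennet_get_responses(); copying does not.
 */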
/* START of Xenolinux helper functions adapted to FreeBSD */
int
network_connect(struct netfront_info *np)
{
	int i, requeue_idx, error;
	grant_ref_t ref;
	netif_rx_request_t *req;
	u_int feature_rx_copy, feature_rx_flip;

	error = xs_scanf(XST_NIL, xenbus_get_otherend_path(np->xbdev),
	    "feature-rx-copy", NULL, "%u", &feature_rx_copy);
	if (error)
		feature_rx_copy = 0;
	error = xs_scanf(XST_NIL, xenbus_get_otherend_path(np->xbdev),
	    "feature-rx-flip", NULL, "%u", &feature_rx_flip);
	if (error)
		feature_rx_flip = 1;

	/*
	 * Copy packets on receive path if:
	 *  (a) This was requested by user, and the backend supports it; or
	 *  (b) Flipping was requested, but this is unsupported by the backend.
	 */
	np->copying_receiver = ((MODPARM_rx_copy && feature_rx_copy) ||
	    (MODPARM_rx_flip && !feature_rx_flip));

	/* Recovery procedure: */
	error = talk_to_backend(np->xbdev, np);
	if (error)
		return (error);

	/* Step 1: Reinitialise variables. */
	netif_release_tx_bufs(np);

	/* Step 2: Rebuild the RX buffer freelist and the RX ring itself. */
	for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) {
		struct mbuf *m;
		u_long pfn;

		if (np->rx_mbufs[i] == NULL)
			continue;

		m = np->rx_mbufs[requeue_idx] = xennet_get_rx_mbuf(np, i);
		ref = np->grant_rx_ref[requeue_idx] = xennet_get_rx_ref(np, i);

		req = RING_GET_REQUEST(&np->rx, requeue_idx);
		pfn = vtophys(mtod(m, vm_offset_t)) >> PAGE_SHIFT;

		if (!np->copying_receiver) {
			gnttab_grant_foreign_transfer_ref(ref,
			    xenbus_get_otherend_id(np->xbdev),
			    pfn);
		} else {
			gnttab_grant_foreign_access_ref(ref,
			    xenbus_get_otherend_id(np->xbdev),
			    PFNTOMFN(pfn), 0);
		}
		req->gref = ref;
		req->id   = requeue_idx;

		requeue_idx++;
	}

	np->rx.req_prod_pvt = requeue_idx;

	/*
	 * Step 3: All public and private state should now be sane.  Get
	 * ready to start sending and receiving packets and give the driver
	 * domain a kick because we've probably just requeued some
	 * packets.
	 */
	netfront_carrier_on(np);
	notify_remote_via_irq(np->irq);
	XN_TX_LOCK(np);
	xn_txeof(np);
	XN_TX_UNLOCK(np);
	network_alloc_rx_buffers(np);

	return (0);
}

static void
show_device(struct netfront_info *sc)
{
#ifdef DEBUG
	if (sc) {
		IPRINTK("<vif handle=%u %s(%s) evtchn=%u irq=%u tx=%p rx=%p>\n",
		    sc->xn_ifno,
		    be_state_name[sc->xn_backend_state],
		    sc->xn_user_state ? "open" : "closed",
		    sc->xn_evtchn,
		    sc->xn_irq,
		    sc->xn_tx_if,
		    sc->xn_rx_if);
	} else {
		IPRINTK("<vif NULL>\n");
	}
#endif
}

/**
 * Create a network device.
 * @param handle device handle
 */
int
create_netdev(device_t dev)
{
	int i;
	struct netfront_info *np;
	int err;
	struct ifnet *ifp;

	np = device_get_softc(dev);

	np->xbdev = dev;

	XN_LOCK_INIT(np, xennetif);

	ifmedia_init(&np->sc_media, 0, xn_ifmedia_upd, xn_ifmedia_sts);
	ifmedia_add(&np->sc_media, IFM_ETHER|IFM_MANUAL, 0, NULL);
	ifmedia_set(&np->sc_media, IFM_ETHER|IFM_MANUAL);

	np->rx_target     = RX_MIN_TARGET;
	np->rx_min_target = RX_MIN_TARGET;
	np->rx_max_target = RX_MAX_TARGET;

	/* Initialise {tx,rx}_skbs to be a free chain containing every entry. */
	for (i = 0; i <= NET_TX_RING_SIZE; i++) {
		np->tx_mbufs[i] = (void *) ((u_long) i+1);
		np->grant_tx_ref[i] = GRANT_REF_INVALID;
	}
	np->tx_mbufs[NET_TX_RING_SIZE] = (void *)0;

	for (i = 0; i <= NET_RX_RING_SIZE; i++) {

		np->rx_mbufs[i] = NULL;
		np->grant_rx_ref[i] = GRANT_REF_INVALID;
	}
	/* A grant for every tx ring slot */
	if (gnttab_alloc_grant_references(NET_TX_RING_SIZE,
	    &np->gref_tx_head) != 0) {
		IPRINTK("#### netfront can't alloc tx grant refs\n");
		err = ENOMEM;
		goto exit;
	}
	/* A grant for every rx ring slot */
	if (gnttab_alloc_grant_references(RX_MAX_TARGET,
	    &np->gref_rx_head) != 0) {
		WPRINTK("#### netfront can't alloc rx grant refs\n");
		gnttab_free_grant_references(np->gref_tx_head);
		err = ENOMEM;
		goto exit;
	}

	err = xen_net_read_mac(dev, np->mac);
	if (err) {
		xenbus_dev_fatal(dev, err, "parsing %s/mac",
		    xenbus_get_node(dev));
		goto out;
	}

	/* Set up ifnet structure */
	ifp = np->xn_ifp = if_alloc(IFT_ETHER);
	ifp->if_softc = np;
	if_initname(ifp, "xn", device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = xn_ioctl;
	ifp->if_output = ether_output;
	ifp->if_start = xn_start;
#ifdef notyet
	ifp->if_watchdog = xn_watchdog;
#endif
	ifp->if_init = xn_ifinit;
	ifp->if_mtu = ETHERMTU;
	ifp->if_snd.ifq_maxlen = NET_TX_RING_SIZE - 1;

	ifp->if_hwassist = XN_CSUM_FEATURES;
	ifp->if_capabilities = IFCAP_HWCSUM;
#if __FreeBSD_version >= 700000
	ifp->if_capabilities |= IFCAP_TSO4;
	if (xn_enable_lro) {
		int err = tcp_lro_init(&np->xn_lro);
		if (err) {
			device_printf(dev, "LRO initialization failed\n");
			goto exit;
		}
		np->xn_lro.ifp = ifp;
		ifp->if_capabilities |= IFCAP_LRO;
	}
#endif
	ifp->if_capenable = ifp->if_capabilities;

	ether_ifattach(ifp, np->mac);
	callout_init(&np->xn_stat_ch, CALLOUT_MPSAFE);
	netfront_carrier_off(np);

	return (0);

exit:
	gnttab_free_grant_references(np->gref_tx_head);
out:
	panic("do something smart");

}

/**
 * Handle the change of state of the backend to Closing.  We must delete
 * our device-layer structures now, to ensure that writes are flushed
 * through to the backend.  Once this is done, we can switch to Closed in
 * acknowledgement.
 */
#if 0
static void
netfront_closing(device_t dev)
{
#if 0
	struct netfront_info *info = dev->dev_driver_data;

	DPRINTK("netfront_closing: %s removed\n", dev->nodename);

	close_netdev(info);
#endif
	xenbus_switch_state(dev, XenbusStateClosed);
}
#endif

static int
netfront_detach(device_t dev)
{
	struct netfront_info *info = device_get_softc(dev);

	DPRINTK("%s\n", xenbus_get_node(dev));

	netif_free(info);

	return 0;
}

static void
netif_free(struct netfront_info *info)
{
	netif_disconnect_backend(info);
#if 0
	close_netdev(info);
#endif
}

static void
netif_disconnect_backend(struct netfront_info *info)
{
	XN_RX_LOCK(info);
	XN_TX_LOCK(info);
	netfront_carrier_off(info);
	XN_TX_UNLOCK(info);
	XN_RX_UNLOCK(info);

	end_access(info->tx_ring_ref, info->tx.sring);
	end_access(info->rx_ring_ref, info->rx.sring);
	info->tx_ring_ref = GRANT_REF_INVALID;
	info->rx_ring_ref = GRANT_REF_INVALID;
	info->tx.sring = NULL;
	info->rx.sring = NULL;

	if (info->irq)
		unbind_from_irqhandler(info->irq);

	info->irq = 0;
}

static void
end_access(int ref, void *page)
{
	if (ref != GRANT_REF_INVALID)
		gnttab_end_foreign_access(ref, page);
}

static int
xn_ifmedia_upd(struct ifnet *ifp)
{
	return (0);
}

static void
xn_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	ifmr->ifm_status = IFM_AVALID|IFM_ACTIVE;
	ifmr->ifm_active = IFM_ETHER|IFM_MANUAL;
}

/* ** Driver registration ** */
static device_method_t netfront_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		netfront_probe),
	DEVMETHOD(device_attach,	netfront_attach),
	DEVMETHOD(device_detach,	netfront_detach),
	DEVMETHOD(device_shutdown,	bus_generic_shutdown),
	DEVMETHOD(device_suspend,	bus_generic_suspend),
	DEVMETHOD(device_resume,	netfront_resume),

	/* Xenbus interface */
	DEVMETHOD(xenbus_otherend_changed, netfront_backend_changed),

	{ 0, 0 }
};

static driver_t netfront_driver = {
	"xn",
	netfront_methods,
	sizeof(struct netfront_info),
};
devclass_t netfront_devclass;

DRIVER_MODULE(xe, xenbusb_front, netfront_driver, netfront_devclass, 0, 0);