1 /* $NetBSD: if_tun.c,v 1.14 1994/06/29 06:36:25 cgd Exp $ */ 2 3 /* 4 * Copyright (c) 1988, Julian Onions <jpo@cs.nott.ac.uk> 5 * Nottingham University 1987. 6 * 7 * This source may be freely distributed, however I would be interested 8 * in any changes that are made. 9 * 10 * This driver takes packets off the IP i/f and hands them up to a 11 * user process to have its wicked way with. This driver has it's 12 * roots in a similar driver written by Phil Cockcroft (formerly) at 13 * UCL. This driver is based much more on read/write/poll mode of 14 * operation though. 15 * 16 * $FreeBSD: src/sys/net/if_tun.c,v 1.74.2.8 2002/02/13 00:43:11 dillon Exp $ 17 */ 18 19 #include "use_tun.h" 20 #include "opt_atalk.h" 21 #include "opt_inet.h" 22 #include "opt_inet6.h" 23 #include "opt_ipx.h" 24 25 #include <sys/param.h> 26 #include <sys/proc.h> 27 #include <sys/priv.h> 28 #include <sys/systm.h> 29 #include <sys/mbuf.h> 30 #include <sys/socket.h> 31 #include <sys/conf.h> 32 #include <sys/device.h> 33 #include <sys/filio.h> 34 #include <sys/sockio.h> 35 #include <sys/thread2.h> 36 #include <sys/ttycom.h> 37 #include <sys/signalvar.h> 38 #include <sys/filedesc.h> 39 #include <sys/kernel.h> 40 #include <sys/sysctl.h> 41 #include <sys/uio.h> 42 #include <sys/vnode.h> 43 #include <sys/malloc.h> 44 45 #include <sys/mplock2.h> 46 47 #include <net/if.h> 48 #include <net/if_types.h> 49 #include <net/ifq_var.h> 50 #include <net/netisr.h> 51 #include <net/route.h> 52 #include <sys/devfs.h> 53 54 #ifdef INET 55 #include <netinet/in.h> 56 #endif 57 58 #include <net/bpf.h> 59 60 #include "if_tunvar.h" 61 #include "if_tun.h" 62 63 static MALLOC_DEFINE(M_TUN, "tun", "Tunnel Interface"); 64 65 static void tunattach (void *); 66 PSEUDO_SET(tunattach, if_tun); 67 68 static void tuncreate (cdev_t dev); 69 70 #define TUNDEBUG if (tundebug) if_printf 71 static int tundebug = 0; 72 SYSCTL_INT(_debug, OID_AUTO, if_tun_debug, CTLFLAG_RW, &tundebug, 0, 73 "Enable debug output"); 74 75 static int 
tunoutput (struct ifnet *, struct mbuf *, struct sockaddr *,
	    struct rtentry *rt);
static int tunifioctl (struct ifnet *, u_long, caddr_t, struct ucred *);
static int tuninit (struct ifnet *);
static void tunstart(struct ifnet *);
static void tun_filter_detach(struct knote *);
static int tun_filter_read(struct knote *, long);
static int tun_filter_write(struct knote *, long);

static	d_open_t	tunopen;
static	d_close_t	tunclose;
static	d_read_t	tunread;
static	d_write_t	tunwrite;
static	d_ioctl_t	tunioctl;
static	d_kqfilter_t	tunkqfilter;

static d_clone_t tunclone;
DEVFS_DECLARE_CLONE_BITMAP(tun);

/*
 * Always pre-create a few units even when the kernel config asks for
 * fewer; the clone bitmap hands out higher units on demand.
 */
#if NTUN <= 1
#define TUN_PREALLOCATED_UNITS	4
#else
#define TUN_PREALLOCATED_UNITS	NTUN
#endif

static struct dev_ops tun_ops = {
	{ "tun", 0, 0 },
	.d_open =	tunopen,
	.d_close =	tunclose,
	.d_read =	tunread,
	.d_write =	tunwrite,
	.d_ioctl =	tunioctl,
	.d_kqfilter =	tunkqfilter
};

/*
 * Module attach: register the autoclone handler for /dev/tun and
 * pre-create TUN_PREALLOCATED_UNITS device nodes, marking their unit
 * numbers as taken in the clone bitmap so tunclone() won't reuse them.
 */
static void
tunattach(void *dummy)
{
	int i;
	make_autoclone_dev(&tun_ops, &DEVFS_CLONE_BITMAP(tun),
		tunclone, UID_UUCP, GID_DIALER, 0600, "tun");
	for (i = 0; i < TUN_PREALLOCATED_UNITS; i++) {
		make_dev(&tun_ops, i, UID_UUCP, GID_DIALER, 0600, "tun%d", i);
		devfs_clone_bitmap_set(&DEVFS_CLONE_BITMAP(tun), i);
	}
	/* Doesn't need uninit because unloading is not possible, see PSEUDO_SET */
}

/*
 * devfs clone callback: allocate the lowest free unit number from the
 * bitmap and create a device node for it.  The matching softc is
 * created lazily on first open (see tuncreate()).
 */
static int
tunclone(struct dev_clone_args *ap)
{
	int unit;

	unit = devfs_clone_bitmap_get(&DEVFS_CLONE_BITMAP(tun), 0);
	ap->a_dev = make_only_dev(&tun_ops, unit, UID_UUCP, GID_DIALER, 0600,
		"tun%d", unit);

	return 0;
}

/*
 * Create the software state and network interface for one tun unit and
 * hang the softc off the device node (dev->si_drv1).  Called from
 * tunopen() on first open of a unit.
 */
static void
tuncreate(cdev_t dev)
{
	struct tun_softc *sc;
	struct ifnet *ifp;

#if 0
	dev = make_dev(&tun_ops, minor(dev),
	    UID_UUCP, GID_DIALER, 0600, "tun%d", lminor(dev));
#endif

	/* M_WAITOK: allocation cannot fail; M_ZERO: all flags start clear */
	MALLOC(sc, struct tun_softc *, sizeof(*sc), M_TUN, M_WAITOK | M_ZERO);
	sc->tun_flags = TUN_INITED;

	/* Register the point-to-point network interface for this unit. */
	ifp = &sc->tun_if;
	if_initname(ifp, "tun", lminor(dev));
	ifp->if_mtu = TUNMTU;
	ifp->if_ioctl = tunifioctl;
	ifp->if_output = tunoutput;
	ifp->if_start = tunstart;
	ifp->if_flags = IFF_POINTOPOINT | IFF_MULTICAST;
	ifp->if_type = IFT_PPP;
	ifq_set_maxlen(&ifp->if_snd, ifqmaxlen);
	ifq_set_ready(&ifp->if_snd);
	ifp->if_softc = sc;
	if_attach(ifp, NULL);
	/* DLT_NULL: bpf sees a 4-byte address family, not a link header */
	bpfattach(ifp, DLT_NULL, sizeof(u_int));
	dev->si_drv1 = sc;
}

/*
 * tunnel open - must be superuser & the device must be
 * configured in.  Creates the softc/ifnet lazily on first open and
 * enforces single-open semantics via TUN_OPEN.
 */
static int
tunopen(struct dev_open_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct ifnet *ifp;
	struct tun_softc *tp;
	int error;

	if ((error = priv_check_cred(ap->a_cred, PRIV_ROOT, 0)) != 0)
		return (error);

	tp = dev->si_drv1;
	if (!tp) {
		/* first open of this unit: build softc + ifnet */
		tuncreate(dev);
		tp = dev->si_drv1;
	}
	/* only one controlling process at a time */
	if (tp->tun_flags & TUN_OPEN)
		return EBUSY;
	tp->tun_pid = curproc->p_pid;
	ifp = &tp->tun_if;
	tp->tun_flags |= TUN_OPEN;
	TUNDEBUG(ifp, "open\n");
	return (0);
}

/*
 * tunclose - close the device - mark i/f down & delete
 * routing info.  The ifnet itself survives for the next open.
 */
static int
tunclose(struct dev_close_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct tun_softc *tp;
	struct ifnet *ifp;

	tp = dev->si_drv1;
	ifp = &tp->tun_if;

	tp->tun_flags &= ~TUN_OPEN;
	tp->tun_pid = 0;

	/*
	 * Junk all pending output.
	 */
	ifq_purge(&ifp->if_snd);

	/* Bring the interface down and strip its addresses. */
	if (ifp->if_flags & IFF_UP)
		if_down(ifp);
	ifp->if_flags &= ~IFF_RUNNING;
	if_purgeaddrs_nolink(ifp);

	/* Drop async-I/O ownership and wake any kqueue readers. */
	funsetown(tp->tun_sigio);
	KNOTE(&tp->tun_rkq.ki_note, 0);

	TUNDEBUG(ifp, "closed\n");
#if 0
	if (dev->si_uminor >= TUN_PREALLOCATED_UNITS) {
		devfs_clone_bitmap_put(&DEVFS_CLONE_BITMAP(tun), dev->si_uminor);
	}
#endif
	return (0);
}

/*
 * Mark the interface up/running and, if an IPv4 address with a
 * non-zero host part is configured, set TUN_IASET (part of the
 * TUN_READY condition checked by read/output paths).
 *
 * Returns 0, or EFAULT if an address slot has a NULL ifa_addr
 * (the scan continues anyway; see XXX below).
 */
static int
tuninit(struct ifnet *ifp)
{
	struct tun_softc *tp = ifp->if_softc;
	struct ifaddr_container *ifac;
	int error = 0;

	TUNDEBUG(ifp, "tuninit\n");

	ifp->if_flags |= IFF_UP | IFF_RUNNING;
	getmicrotime(&ifp->if_lastchange);

	TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) {
		struct ifaddr *ifa = ifac->ifa;

		if (ifa->ifa_addr == NULL) {
			error = EFAULT;
			/* XXX: Should maybe return straight off? */
		} else {
#ifdef INET
			if (ifa->ifa_addr->sa_family == AF_INET) {
				struct sockaddr_in *si;

				si = (struct sockaddr_in *)ifa->ifa_addr;
				if (si->sin_addr.s_addr)
					tp->tun_flags |= TUN_IASET;
			}
#endif
		}
	}
	return (error);
}

/*
 * Process an ioctl request.
 *
 * MPSAFE
 */
int
tunifioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr)
{
	struct ifreq *ifr = (struct ifreq *)data;
	struct tun_softc *tp = ifp->if_softc;
	struct ifstat *ifs;
	int error = 0;

	switch(cmd) {
	case SIOCGIFSTATUS:
		/* Append owner PID to the status string, if opened. */
		ifs = (struct ifstat *)data;
		if (tp->tun_pid)
			ksprintf(ifs->ascii + strlen(ifs->ascii),
			    "\tOpened by PID %d\n", tp->tun_pid);
		break;
	case SIOCSIFADDR:
		error = tuninit(ifp);
		TUNDEBUG(ifp, "address set, error=%d\n", error);
		break;
	case SIOCSIFDSTADDR:
		error = tuninit(ifp);
		TUNDEBUG(ifp, "destination address set, error=%d\n", error);
		break;
	case SIOCSIFMTU:
		ifp->if_mtu = ifr->ifr_mtu;
		TUNDEBUG(ifp, "mtu set\n");
		break;
	case SIOCSIFFLAGS:
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/* accepted but nothing to do */
		break;
	default:
		error = EINVAL;
	}
	return (error);
}

/*
 * tunoutput - queue packets from higher level ready to put out.
 *
 * MPSAFE
 */
static int
tunoutput_serialized(struct ifnet *ifp, struct mbuf *m0, struct sockaddr *dst,
		     struct rtentry *rt)
{
	struct tun_softc *tp = ifp->if_softc;
	int error;
	struct altq_pktattr pktattr;

	TUNDEBUG(ifp, "tunoutput\n");

	/* Need both an open device and a configured address (TUN_READY). */
	if ((tp->tun_flags & TUN_READY) != TUN_READY) {
		TUNDEBUG(ifp, "not ready 0%o\n", tp->tun_flags);
		m_freem (m0);
		return EHOSTDOWN;
	}

	/*
	 * if the queueing discipline needs packet classification,
	 * do it before prepending link headers.
	 */
	ifq_classify(&ifp->if_snd, m0, dst->sa_family, &pktattr);

	/*
	 * BPF write needs to be handled specially: a write through bpf
	 * arrives with AF_UNSPEC and the real family stored as a leading
	 * int in the data; pull it out and strip it.
	 */
	if (dst->sa_family == AF_UNSPEC) {
		dst->sa_family = *(mtod(m0, int *));
		m0->m_len -= sizeof(int);
		m0->m_pkthdr.len -= sizeof(int);
		m0->m_data += sizeof(int);
	}

	if (ifp->if_bpf) {
		/*
		 * We need to prepend the address family as
		 * a four byte field.
		 */
		uint32_t af = dst->sa_family;

		bpf_ptap(ifp->if_bpf, m0, &af, sizeof(af));
	}

	/* prepend sockaddr? this may abort if the mbuf allocation fails */
	if (tp->tun_flags & TUN_LMODE) {
		/* allocate space for sockaddr */
		M_PREPEND(m0, dst->sa_len, MB_DONTWAIT);

		/* if allocation failed drop packet */
		if (m0 == NULL){
			IF_DROP(&ifp->if_snd);
			ifp->if_oerrors++;
			return (ENOBUFS);
		} else {
			bcopy(dst, m0->m_data, dst->sa_len);
		}
	}

	if (tp->tun_flags & TUN_IFHEAD) {
		/* Prepend the address family (network byte order) */
		M_PREPEND(m0, 4, MB_DONTWAIT);

		/* if allocation failed drop packet */
		if (m0 == NULL){
			IF_DROP(&ifp->if_snd);
			ifp->if_oerrors++;
			return ENOBUFS;
		} else
			*(u_int32_t *)m0->m_data = htonl(dst->sa_family);
	} else {
		/* Without IFHEAD only IPv4 can be delivered unambiguously. */
#ifdef INET
		if (dst->sa_family != AF_INET)
#endif
		{
			m_freem(m0);
			return EAFNOSUPPORT;
		}
	}

	error = ifq_handoff(ifp, m0, &pktattr);
	if (error) {
		ifp->if_collisions++;
	} else {
		ifp->if_opackets++;
		/* Wake a blocked tunread()... */
		if (tp->tun_flags & TUN_RWAIT) {
			tp->tun_flags &= ~TUN_RWAIT;
			wakeup((caddr_t)tp);
		}
		/* ...signal async listeners (pgsigio needs the MP lock)... */
		get_mplock();
		if (tp->tun_flags & TUN_ASYNC && tp->tun_sigio)
			pgsigio(tp->tun_sigio, SIGIO, 0);
		rel_mplock();
		/* ...and notify kqueue readers outside the ifnet serializer. */
		ifnet_deserialize_all(ifp);
		KNOTE(&tp->tun_rkq.ki_note, 0);
		ifnet_serialize_all(ifp);
	}
	return (error);
}

/*
 * if_output entry point: take the ifnet serializer around the real
 * work in tunoutput_serialized().
 */
static int
tunoutput(struct ifnet *ifp, struct mbuf *m0, struct sockaddr *dst,
	  struct rtentry *rt)
{
	int error;

	ifnet_serialize_all(ifp);
	error = tunoutput_serialized(ifp, m0, dst, rt);
	ifnet_deserialize_all(ifp);

	return error;
}

/*
 * the ops interface is now pretty minimal.
 */
static int
tunioctl(struct dev_ioctl_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct tun_softc *tp = dev->si_drv1;
	struct tuninfo *tunp;

	switch (ap->a_cmd) {
	case TUNSIFINFO:
		/* Set mtu/type/baudrate; reject MTUs below the minimum. */
		tunp = (struct tuninfo *)ap->a_data;
		if (tunp->mtu < IF_MINMTU)
			return (EINVAL);
		tp->tun_if.if_mtu = tunp->mtu;
		tp->tun_if.if_type = tunp->type;
		tp->tun_if.if_baudrate = tunp->baudrate;
		break;
	case TUNGIFINFO:
		tunp = (struct tuninfo *)ap->a_data;
		tunp->mtu = tp->tun_if.if_mtu;
		tunp->type = tp->tun_if.if_type;
		tunp->baudrate = tp->tun_if.if_baudrate;
		break;
	case TUNSDEBUG:
		tundebug = *(int *)ap->a_data;
		break;
	case TUNGDEBUG:
		*(int *)ap->a_data = tundebug;
		break;
	case TUNSLMODE:
		/* LMODE (prepend sockaddr) and IFHEAD are mutually exclusive. */
		if (*(int *)ap->a_data) {
			tp->tun_flags |= TUN_LMODE;
			tp->tun_flags &= ~TUN_IFHEAD;
		} else
			tp->tun_flags &= ~TUN_LMODE;
		break;
	case TUNSIFHEAD:
		if (*(int *)ap->a_data) {
			tp->tun_flags |= TUN_IFHEAD;
			tp->tun_flags &= ~TUN_LMODE;
		} else
			tp->tun_flags &= ~TUN_IFHEAD;
		break;
	case TUNGIFHEAD:
		*(int *)ap->a_data = (tp->tun_flags & TUN_IFHEAD) ?
		    1 : 0;
		break;
	case TUNSIFMODE:
		/* deny this if UP */
		if (tp->tun_if.if_flags & IFF_UP)
			return(EBUSY);

		/* Only point-to-point or broadcast (plus multicast) allowed. */
		switch (*(int *)ap->a_data & ~IFF_MULTICAST) {
		case IFF_POINTOPOINT:
		case IFF_BROADCAST:
			tp->tun_if.if_flags &= ~(IFF_BROADCAST|IFF_POINTOPOINT);
			tp->tun_if.if_flags |= *(int *)ap->a_data;
			break;
		default:
			return(EINVAL);
		}
		break;
	case TUNSIFPID:
		/* Record the caller as the owning process. */
		tp->tun_pid = curproc->p_pid;
		break;
	case FIOASYNC:
		if (*(int *)ap->a_data)
			tp->tun_flags |= TUN_ASYNC;
		else
			tp->tun_flags &= ~TUN_ASYNC;
		break;
	case FIONREAD:
		/* Report the byte length of the next queued packet, or 0. */
		if (!ifq_is_empty(&tp->tun_if.if_snd)) {
			struct mbuf *mb;

			mb = ifq_poll(&tp->tun_if.if_snd);
			for( *(int *)ap->a_data = 0; mb != 0; mb = mb->m_next)
				*(int *)ap->a_data += mb->m_len;
		} else {
			*(int *)ap->a_data = 0;
		}
		break;
	case FIOSETOWN:
		return (fsetown(*(int *)ap->a_data, &tp->tun_sigio));

	case FIOGETOWN:
		*(int *)ap->a_data = fgetown(tp->tun_sigio);
		return (0);

	/* This is deprecated, FIOSETOWN should be used instead. */
	case TIOCSPGRP:
		return (fsetown(-(*(int *)ap->a_data), &tp->tun_sigio));

	/* This is deprecated, FIOGETOWN should be used instead. */
	case TIOCGPGRP:
		*(int *)ap->a_data = -fgetown(tp->tun_sigio);
		return (0);

	default:
		return (ENOTTY);
	}
	return (0);
}

/*
 * The ops read interface - reads a packet at a time, or at
 * least as much of a packet as can be read.
 */
static int
tunread(struct dev_read_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct uio *uio = ap->a_uio;
	struct tun_softc *tp = dev->si_drv1;
	struct ifnet *ifp = &tp->tun_if;
	struct mbuf *m0;
	int error=0, len;

	TUNDEBUG(ifp, "read\n");
	if ((tp->tun_flags & TUN_READY) != TUN_READY) {
		TUNDEBUG(ifp, "not ready 0%o\n", tp->tun_flags);
		return EHOSTDOWN;
	}

	tp->tun_flags &= ~TUN_RWAIT;

	ifnet_serialize_all(ifp);

	/*
	 * Dequeue one packet, blocking (interruptibly) until one is
	 * available unless the caller asked for non-blocking I/O.
	 * The serializer is dropped across tsleep and re-taken before
	 * retrying the dequeue.
	 */
	while ((m0 = ifq_dequeue(&ifp->if_snd, NULL)) == NULL) {
		if (ap->a_ioflag & IO_NDELAY) {
			ifnet_deserialize_all(ifp);
			return EWOULDBLOCK;
		}
		tp->tun_flags |= TUN_RWAIT;
		ifnet_deserialize_all(ifp);
		if ((error = tsleep(tp, PCATCH, "tunread", 0)) != 0)
			return error;
		ifnet_serialize_all(ifp);
	}

	ifnet_deserialize_all(ifp);

	/* Copy out as much of the chain as fits in the caller's buffer. */
	while (m0 && uio->uio_resid > 0 && error == 0) {
		len = (int)szmin(uio->uio_resid, m0->m_len);
		if (len != 0)
			error = uiomove(mtod(m0, caddr_t), (size_t)len, uio);
		m0 = m_free(m0);
	}

	/* Any leftover packet data is silently discarded. */
	if (m0) {
		TUNDEBUG(ifp, "Dropping mbuf\n");
		m_freem(m0);
	}
	return error;
}

/*
 * the ops write interface - an atomic write is a packet - or else!
 */
static int
tunwrite(struct dev_write_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct uio *uio = ap->a_uio;
	struct tun_softc *tp = dev->si_drv1;
	struct ifnet *ifp = &tp->tun_if;
	struct mbuf *top, **mp, *m;
	int error=0;
	size_t tlen, mlen;
	uint32_t family;
	int isr;

	TUNDEBUG(ifp, "tunwrite\n");

	if (uio->uio_resid == 0)
		return 0;

	if (uio->uio_resid > TUNMRU) {
		TUNDEBUG(ifp, "len=%zd!\n", uio->uio_resid);
		return EIO;
	}
	tlen = uio->uio_resid;

	/* get a header mbuf */
	MGETHDR(m, MB_DONTWAIT, MT_DATA);
	if (m == NULL)
		return ENOBUFS;
	mlen = MHLEN;

	/*
	 * Copy the user's buffer into an mbuf chain rooted at 'top',
	 * growing the chain one mbuf at a time until uio is drained.
	 */
	top = 0;
	mp = &top;
	while (error == 0 && uio->uio_resid > 0) {
		m->m_len = (int)szmin(mlen, uio->uio_resid);
		error = uiomove(mtod (m, caddr_t), (size_t)m->m_len, uio);
		*mp = m;
		mp = &m->m_next;
		if (uio->uio_resid > 0) {
			MGET (m, MB_DONTWAIT, MT_DATA);
			if (m == 0) {
				error = ENOBUFS;
				break;
			}
			mlen = MLEN;
		}
	}
	if (error) {
		if (top)
			m_freem (top);
		ifp->if_ierrors++;
		return error;
	}

	top->m_pkthdr.len = (int)tlen;
	top->m_pkthdr.rcvif = ifp;

	if (ifp->if_bpf) {
		if (tp->tun_flags & TUN_IFHEAD) {
			/*
			 * Conveniently, we already have a 4-byte address
			 * family prepended to our packet !
			 * Inconveniently, it's in the wrong byte order !
			 * Flip to host order for bpf, tap, then flip back.
			 */
			if ((top = m_pullup(top, sizeof(family))) == NULL)
				return ENOBUFS;
			*mtod(top, u_int32_t *) =
			    ntohl(*mtod(top, u_int32_t *));
			bpf_mtap(ifp->if_bpf, top);
			*mtod(top, u_int32_t *) =
			    htonl(*mtod(top, u_int32_t *));
		} else {
			/*
			 * We need to prepend the address family as
			 * a four byte field.
654 */ 655 static const uint32_t af = AF_INET; 656 657 bpf_ptap(ifp->if_bpf, top, &af, sizeof(af)); 658 } 659 } 660 661 if (tp->tun_flags & TUN_IFHEAD) { 662 if (top->m_len < sizeof(family) && 663 (top = m_pullup(top, sizeof(family))) == NULL) 664 return ENOBUFS; 665 family = ntohl(*mtod(top, u_int32_t *)); 666 m_adj(top, sizeof(family)); 667 } else 668 family = AF_INET; 669 670 ifp->if_ibytes += top->m_pkthdr.len; 671 ifp->if_ipackets++; 672 673 switch (family) { 674 #ifdef INET 675 case AF_INET: 676 isr = NETISR_IP; 677 break; 678 #endif 679 #ifdef INET6 680 case AF_INET6: 681 isr = NETISR_IPV6; 682 break; 683 #endif 684 #ifdef IPX 685 case AF_IPX: 686 isr = NETISR_IPX; 687 break; 688 #endif 689 #ifdef NETATALK 690 case AF_APPLETALK: 691 isr = NETISR_ATALK2; 692 break; 693 #endif 694 default: 695 m_freem(m); 696 return (EAFNOSUPPORT); 697 } 698 699 netisr_queue(isr, top); 700 return (0); 701 } 702 703 static struct filterops tun_read_filtops = 704 { FILTEROP_ISFD, NULL, tun_filter_detach, tun_filter_read }; 705 static struct filterops tun_write_filtops = 706 { FILTEROP_ISFD, NULL, tun_filter_detach, tun_filter_write }; 707 708 static int 709 tunkqfilter(struct dev_kqfilter_args *ap) 710 { 711 cdev_t dev = ap->a_head.a_dev; 712 struct tun_softc *tp = dev->si_drv1; 713 struct knote *kn = ap->a_kn; 714 struct klist *klist; 715 716 ap->a_result = 0; 717 ifnet_serialize_all(&tp->tun_if); 718 719 switch (kn->kn_filter) { 720 case EVFILT_READ: 721 kn->kn_fop = &tun_read_filtops; 722 kn->kn_hook = (caddr_t)tp; 723 break; 724 case EVFILT_WRITE: 725 kn->kn_fop = &tun_write_filtops; 726 kn->kn_hook = (caddr_t)tp; 727 break; 728 default: 729 ifnet_deserialize_all(&tp->tun_if); 730 ap->a_result = EOPNOTSUPP; 731 return (0); 732 } 733 734 klist = &tp->tun_rkq.ki_note; 735 knote_insert(klist, kn); 736 ifnet_deserialize_all(&tp->tun_if); 737 738 return (0); 739 } 740 741 static void 742 tun_filter_detach(struct knote *kn) 743 { 744 struct tun_softc *tp = (struct tun_softc 
*)kn->kn_hook; 745 struct klist *klist = &tp->tun_rkq.ki_note; 746 747 knote_remove(klist, kn); 748 } 749 750 static int 751 tun_filter_write(struct knote *kn, long hint) 752 { 753 /* Always ready for a write */ 754 return (1); 755 } 756 757 static int 758 tun_filter_read(struct knote *kn, long hint) 759 { 760 struct tun_softc *tp = (struct tun_softc *)kn->kn_hook; 761 int ready = 0; 762 763 ifnet_serialize_all(&tp->tun_if); 764 if (!ifq_is_empty(&tp->tun_if.if_snd)) 765 ready = 1; 766 ifnet_deserialize_all(&tp->tun_if); 767 768 return (ready); 769 } 770 771 /* 772 * Start packet transmission on the interface. 773 * when the interface queue is rate-limited by ALTQ, 774 * if_start is needed to drain packets from the queue in order 775 * to notify readers when outgoing packets become ready. 776 */ 777 static void 778 tunstart(struct ifnet *ifp) 779 { 780 struct tun_softc *tp = ifp->if_softc; 781 struct mbuf *m; 782 783 if (!ifq_is_enabled(&ifp->if_snd)) 784 return; 785 786 m = ifq_poll(&ifp->if_snd); 787 if (m != NULL) { 788 if (tp->tun_flags & TUN_RWAIT) { 789 tp->tun_flags &= ~TUN_RWAIT; 790 wakeup((caddr_t)tp); 791 } 792 if (tp->tun_flags & TUN_ASYNC && tp->tun_sigio) 793 pgsigio(tp->tun_sigio, SIGIO, 0); 794 ifnet_deserialize_tx(ifp); 795 KNOTE(&tp->tun_rkq.ki_note, 0); 796 ifnet_serialize_tx(ifp); 797 } 798 } 799