/*	$OpenBSD: if_pppx.c,v 1.111 2021/07/20 16:44:55 mvs Exp $ */

/*
 * Copyright (c) 2010 Claudio Jeker <claudio@openbsd.org>
 * Copyright (c) 2010 David Gwynne <dlg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*-
 * Copyright (c) 2009 Internet Initiative Japan Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
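/*
 * pppx(4) and pppac(4) are character devices that move PPP/IP packets
 * between userland PPP daemons and the in-kernel pipex code:
 *
 *  - pppx creates one pppx%d interface per pipex session added with the
 *    PIPEXASESSION ioctl.
 *  - pppac creates one pppac%u interface per open device and can carry
 *    several pipex sessions over it.
 */
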
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/device.h>
#include <sys/conf.h>
#include <sys/queue.h>
#include <sys/pool.h>
#include <sys/mbuf.h>
#include <sys/errno.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/vnode.h>
#include <sys/poll.h>
#include <sys/selinfo.h>

#include <net/if.h>
#include <net/if_types.h>
#include <netinet/in.h>
#include <netinet/if_ether.h>
#include <net/if_dl.h>

#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>

#ifdef INET6
#include <netinet6/in6_var.h>
#include <netinet/ip6.h>
#include <netinet6/nd6.h>
#endif /* INET6 */

#include "bpfilter.h"
#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include "pf.h"
#if NPF > 0
#include <net/pfvar.h>
#endif

#include <net/ppp_defs.h>
#include <net/ppp-comp.h>
#include <crypto/arc4.h>

#ifdef PIPEX
#include <net/radix.h>
#include <net/pipex.h>
#include <net/pipex_local.h>
#else
#error PIPEX option not enabled
#endif

#ifdef PPPX_DEBUG
#define PPPX_D_INIT	(1<<0)

int pppxdebug = 0;

#define DPRINTF(_m, _p...)	do { \
	if (ISSET(pppxdebug, (_m))) \
		printf(_p); \
} while (0)
#else
#define DPRINTF(_m, _p...)	/* _m, _p */
#endif


struct pppx_if;

/*
 * Locks used to protect struct members and global data
 *	I	immutable after creation
 *	K	kernel lock
 *	N	net lock
 */

struct pppx_dev {
	LIST_ENTRY(pppx_dev)	pxd_entry;	/* [K] */
	int			pxd_unit;	/* [I] */

	/* kq shizz */
	struct selinfo		pxd_rsel;
	struct mutex		pxd_rsel_mtx;
	struct selinfo		pxd_wsel;
	struct mutex		pxd_wsel_mtx;

	/* queue of packets for userland to service - protected by splnet */
	struct mbuf_queue	pxd_svcq;
	int			pxd_waiting;	/* [N] */
	LIST_HEAD(,pppx_if)	pxd_pxis;	/* [N] */
};

LIST_HEAD(, pppx_dev)		pppx_devs =
    LIST_HEAD_INITIALIZER(pppx_devs);		/* [K] */
struct pool			pppx_if_pl;

struct pppx_dev		*pppx_dev_lookup(dev_t);
struct pppx_dev		*pppx_dev2pxd(dev_t);

struct pppx_if_key {
	int			pxik_session_id;	/* [I] */
	int			pxik_protocol;		/* [I] */
};

struct pppx_if {
	struct pppx_if_key	pxi_key;		/* [I] must be first
							    in the struct */

	RBT_ENTRY(pppx_if)	pxi_entry;		/* [N] */
	LIST_ENTRY(pppx_if)	pxi_list;		/* [N] */

	int			pxi_ready;		/* [N] */

	int			pxi_unit;		/* [I] */
	struct ifnet		pxi_if;
	struct pppx_dev		*pxi_dev;		/* [I] */
	struct pipex_session	*pxi_session;		/* [I] */
};

static inline int
pppx_if_cmp(const struct pppx_if *a, const struct pppx_if *b)
{
	return memcmp(&a->pxi_key, &b->pxi_key, sizeof(a->pxi_key));
}

RBT_HEAD(pppx_ifs, pppx_if) pppx_ifs = RBT_INITIALIZER(&pppx_ifs); /* [N] */
RBT_PROTOTYPE(pppx_ifs, pppx_if, pxi_entry, pppx_if_cmp);

int		pppx_if_next_unit(void);
struct pppx_if *pppx_if_find(struct pppx_dev *, int, int);
int		pppx_add_session(struct pppx_dev *,
		    struct pipex_session_req *);
int		pppx_del_session(struct pppx_dev *,
		    struct pipex_session_close_req *);
int		pppx_set_session_descr(struct pppx_dev *,
		    struct pipex_session_descr_req *);

void		pppx_if_destroy(struct pppx_dev *, struct pppx_if *);
void		pppx_if_qstart(struct ifqueue *);
int		pppx_if_output(struct ifnet *, struct mbuf *,
		    struct sockaddr *, struct rtentry *);
int		pppx_if_ioctl(struct ifnet *, u_long, caddr_t);


void		pppxattach(int);

void		filt_pppx_rdetach(struct knote *);
int		filt_pppx_read(struct knote *, long);

const struct filterops pppx_rd_filtops = {
	.f_flags	= FILTEROP_ISFD,
	.f_attach	= NULL,
	.f_detach	= filt_pppx_rdetach,
	.f_event	= filt_pppx_read,
};

void		filt_pppx_wdetach(struct knote *);
int		filt_pppx_write(struct knote *, long);

const struct filterops pppx_wr_filtops = {
	.f_flags	= FILTEROP_ISFD,
	.f_attach	= NULL,
	.f_detach	= filt_pppx_wdetach,
	.f_event	= filt_pppx_write,
};

struct pppx_dev *
pppx_dev_lookup(dev_t dev)
{
	struct pppx_dev *pxd;
	int unit = minor(dev);

	LIST_FOREACH(pxd, &pppx_devs, pxd_entry) {
		if (pxd->pxd_unit == unit)
			return (pxd);
	}

	return (NULL);
}

struct pppx_dev *
pppx_dev2pxd(dev_t dev)
{
	struct pppx_dev *pxd;

	pxd = pppx_dev_lookup(dev);

	return (pxd);
}

void
pppxattach(int n)
{
	pool_init(&pppx_if_pl, sizeof(struct pppx_if), 0, IPL_NONE,
	    PR_WAITOK, "pppxif", NULL);
	pipex_init();
}

int
pppxopen(dev_t dev, int flags, int mode, struct proc *p)
{
	struct pppx_dev *pxd;

	pxd = malloc(sizeof(*pxd), M_DEVBUF, M_WAITOK | M_ZERO);
	if (pppx_dev_lookup(dev) != NULL) {
		free(pxd, M_DEVBUF, sizeof(*pxd));
		return (EBUSY);
	}

	pxd->pxd_unit = minor(dev);
	mtx_init(&pxd->pxd_rsel_mtx, IPL_NET);
	mtx_init(&pxd->pxd_wsel_mtx, IPL_NET);
	LIST_INIT(&pxd->pxd_pxis);

	mq_init(&pxd->pxd_svcq, 128, IPL_NET);
	LIST_INSERT_HEAD(&pppx_devs, pxd, pxd_entry);

	return 0;
}

int
pppxread(dev_t dev, struct uio *uio, int ioflag)
{
	struct pppx_dev *pxd = pppx_dev2pxd(dev);
	struct mbuf *m, *m0;
	int error = 0;
	size_t len;

	if (!pxd)
		return (ENXIO);

	while ((m0 = mq_dequeue(&pxd->pxd_svcq)) == NULL) {
		if (ISSET(ioflag, IO_NDELAY))
			return (EWOULDBLOCK);

		NET_LOCK();
		pxd->pxd_waiting = 1;
		error = rwsleep_nsec(pxd, &netlock,
		    (PZERO + 1)|PCATCH, "pppxread", INFSLP);
		NET_UNLOCK();
		if (error != 0) {
			return (error);
		}
	}

	while (m0 != NULL && uio->uio_resid > 0 && error == 0) {
		len = ulmin(uio->uio_resid, m0->m_len);
		if (len != 0)
			error = uiomove(mtod(m0, caddr_t), len, uio);
		m = m_free(m0);
		m0 = m;
	}

	m_freem(m0);

	return (error);
}

int
pppxwrite(dev_t dev, struct uio *uio, int ioflag)
{
	struct pppx_dev *pxd = pppx_dev2pxd(dev);
	struct pppx_hdr *th;
	struct pppx_if	*pxi;
	uint32_t proto;
	struct mbuf *top, **mp, *m;
	int tlen;
	int error = 0;
	size_t mlen;

	if (uio->uio_resid < sizeof(*th) + sizeof(uint32_t) ||
	    uio->uio_resid > MCLBYTES)
		return (EMSGSIZE);

	tlen = uio->uio_resid;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);
	mlen = MHLEN;
	if (uio->uio_resid > MHLEN) {
		MCLGET(m, M_DONTWAIT);
		if (!(m->m_flags & M_EXT)) {
			m_free(m);
			return (ENOBUFS);
		}
		mlen = MCLBYTES;
	}

	top = NULL;
	mp = &top;

	while (error == 0 && uio->uio_resid > 0) {
		m->m_len = ulmin(mlen, uio->uio_resid);
		error = uiomove(mtod(m, caddr_t), m->m_len, uio);
		*mp = m;
		mp = &m->m_next;
		if (error == 0 && uio->uio_resid > 0) {
			MGET(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				error = ENOBUFS;
				break;
			}
			mlen = MLEN;
			if (uio->uio_resid >= MINCLSIZE) {
				MCLGET(m, M_DONTWAIT);
				if (!(m->m_flags & M_EXT)) {
					error = ENOBUFS;
					m_free(m);
					break;
				}
				mlen = MCLBYTES;
			}
		}
	}

	if (error) {
		m_freem(top);
		return (error);
	}

	top->m_pkthdr.len = tlen;

	/* Find the interface */
	th = mtod(top, struct pppx_hdr *);
	m_adj(top, sizeof(struct pppx_hdr));

	NET_LOCK();

	pxi = pppx_if_find(pxd, th->pppx_id, th->pppx_proto);
	if (pxi == NULL) {
		NET_UNLOCK();
		m_freem(top);
		return (EINVAL);
	}
	top->m_pkthdr.ph_ifidx = pxi->pxi_if.if_index;

#if NBPFILTER > 0
	if (pxi->pxi_if.if_bpf)
		bpf_mtap(pxi->pxi_if.if_bpf, top, BPF_DIRECTION_IN);
#endif
	/* strip the tunnel header */
	proto = ntohl(*(uint32_t *)(th + 1));
	m_adj(top, sizeof(uint32_t));

	switch (proto) {
	case AF_INET:
		ipv4_input(&pxi->pxi_if, top);
		break;
#ifdef INET6
	case AF_INET6:
		ipv6_input(&pxi->pxi_if, top);
		break;
#endif
	default:
		m_freem(top);
		error = EAFNOSUPPORT;
		break;
	}

	NET_UNLOCK();

	return (error);
}

int
pppxioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct proc *p)
{
	struct pppx_dev *pxd = pppx_dev2pxd(dev);
	int error = 0;

	NET_LOCK();
	switch (cmd) {
	case PIPEXASESSION:
		error = pppx_add_session(pxd,
		    (struct pipex_session_req *)addr);
		break;

	case PIPEXDSESSION:
		error = pppx_del_session(pxd,
		    (struct pipex_session_close_req *)addr);
		break;

	case PIPEXSIFDESCR:
		error = pppx_set_session_descr(pxd,
		    (struct pipex_session_descr_req *)addr);
		break;

	case FIONBIO:
		break;
	case FIONREAD:
		*(int *)addr = mq_hdatalen(&pxd->pxd_svcq);
		break;

	default:
		error = pipex_ioctl(pxd, cmd, addr);
		break;
	}
	NET_UNLOCK();

	return (error);
}

int
pppxpoll(dev_t dev, int events, struct proc *p)
{
	struct pppx_dev *pxd = pppx_dev2pxd(dev);
	int revents = 0;

	if (events & (POLLIN | POLLRDNORM)) {
		if (!mq_empty(&pxd->pxd_svcq))
			revents |= events & (POLLIN | POLLRDNORM);
	}
	if (events & (POLLOUT | POLLWRNORM))
		revents |= events & (POLLOUT | POLLWRNORM);

	if (revents == 0) {
		if (events & (POLLIN | POLLRDNORM))
			selrecord(p, &pxd->pxd_rsel);
	}

	return (revents);
}

int
pppxkqfilter(dev_t dev, struct knote *kn)
{
	struct pppx_dev *pxd = pppx_dev2pxd(dev);
	struct mutex *mtx;
	struct klist *klist;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		mtx = &pxd->pxd_rsel_mtx;
		klist = &pxd->pxd_rsel.si_note;
		kn->kn_fop = &pppx_rd_filtops;
		break;
	case EVFILT_WRITE:
		mtx = &pxd->pxd_wsel_mtx;
		klist = &pxd->pxd_wsel.si_note;
		kn->kn_fop = &pppx_wr_filtops;
		break;
	default:
		return (EINVAL);
	}

	kn->kn_hook = (caddr_t)pxd;

	mtx_enter(mtx);
	klist_insert_locked(klist, kn);
	mtx_leave(mtx);

	return (0);
}

void
filt_pppx_rdetach(struct knote *kn)
{
	struct pppx_dev *pxd = (struct pppx_dev *)kn->kn_hook;
	struct klist *klist = &pxd->pxd_rsel.si_note;

	mtx_enter(&pxd->pxd_rsel_mtx);
	klist_remove_locked(klist, kn);
	mtx_leave(&pxd->pxd_rsel_mtx);
}

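/*
 * kn_data is the number of bytes queued for userland (mq_hdatalen()),
 * so EVFILT_READ fires once pppx_if_output() has queued at least one
 * packet on pxd_svcq.
 */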
int
filt_pppx_read(struct knote *kn, long hint)
{
	struct pppx_dev *pxd = (struct pppx_dev *)kn->kn_hook;

	kn->kn_data = mq_hdatalen(&pxd->pxd_svcq);

	return (kn->kn_data > 0);
}

void
filt_pppx_wdetach(struct knote *kn)
{
	struct pppx_dev *pxd = (struct pppx_dev *)kn->kn_hook;
	struct klist *klist = &pxd->pxd_wsel.si_note;

	mtx_enter(&pxd->pxd_wsel_mtx);
	klist_remove_locked(klist, kn);
	mtx_leave(&pxd->pxd_wsel_mtx);
}

int
filt_pppx_write(struct knote *kn, long hint)
{
	/* We're always ready to accept a write. */
	return (1);
}

int
pppxclose(dev_t dev, int flags, int mode, struct proc *p)
{
	struct pppx_dev *pxd;
	struct pppx_if	*pxi;

	pxd = pppx_dev_lookup(dev);

	/* XXX */
	NET_LOCK();
	while ((pxi = LIST_FIRST(&pxd->pxd_pxis)))
		pppx_if_destroy(pxd, pxi);
	NET_UNLOCK();

	LIST_REMOVE(pxd, pxd_entry);

	mq_purge(&pxd->pxd_svcq);

	free(pxd, M_DEVBUF, sizeof(*pxd));

	return (0);
}

int
pppx_if_next_unit(void)
{
	struct pppx_if *pxi;
	int unit = 0;

	/* this is safe without splnet since we're not modifying it */
	do {
		int found = 0;
		RBT_FOREACH(pxi, pppx_ifs, &pppx_ifs) {
			if (pxi->pxi_unit == unit) {
				found = 1;
				break;
			}
		}

		if (found == 0)
			break;
		unit++;
	} while (unit > 0);

	return (unit);
}

struct pppx_if *
pppx_if_find(struct pppx_dev *pxd, int session_id, int protocol)
{
	struct pppx_if_key key;
	struct pppx_if *pxi;

	memset(&key, 0, sizeof(key));
	key.pxik_session_id = session_id;
	key.pxik_protocol = protocol;

	pxi = RBT_FIND(pppx_ifs, &pppx_ifs, (struct pppx_if *)&key);
	if (pxi && pxi->pxi_ready == 0)
		pxi = NULL;

	return pxi;
}

int
pppx_add_session(struct pppx_dev *pxd, struct pipex_session_req *req)
{
	struct pppx_if *pxi;
	struct pipex_session *session;
	struct ifnet *ifp;
	int unit, error = 0;
	struct in_ifaddr *ia;
	struct sockaddr_in ifaddr;

	/*
	 * XXX: As long as `session' is allocated as part of a `pxi'
	 *	it isn't possible to free it separately.  So disallow
	 *	the timeout feature until this is fixed.
	 */
	if (req->pr_timeout_sec != 0)
		return (EINVAL);

	error = pipex_init_session(&session, req);
	if (error)
		return (error);

	pxi = pool_get(&pppx_if_pl, PR_WAITOK | PR_ZERO);
	ifp = &pxi->pxi_if;

	pxi->pxi_session = session;

	/* try to set the interface up */
	unit = pppx_if_next_unit();
	if (unit < 0) {
		error = ENOMEM;
		goto out;
	}

	pxi->pxi_unit = unit;
	pxi->pxi_key.pxik_session_id = req->pr_session_id;
	pxi->pxi_key.pxik_protocol = req->pr_protocol;
	pxi->pxi_dev = pxd;

	if (RBT_INSERT(pppx_ifs, &pppx_ifs, pxi) != NULL) {
		error = EADDRINUSE;
		goto out;
	}
	LIST_INSERT_HEAD(&pxd->pxd_pxis, pxi, pxi_list);

	snprintf(ifp->if_xname, sizeof(ifp->if_xname), "%s%d", "pppx", unit);
	ifp->if_mtu = req->pr_peer_mru;	/* XXX */
	ifp->if_flags = IFF_POINTOPOINT | IFF_MULTICAST | IFF_UP;
	ifp->if_xflags = IFXF_CLONED | IFXF_MPSAFE;
	ifp->if_qstart = pppx_if_qstart;
	ifp->if_output = pppx_if_output;
	ifp->if_ioctl = pppx_if_ioctl;
	ifp->if_rtrequest = p2p_rtrequest;
	ifp->if_type = IFT_PPP;
	ifp->if_softc = pxi;
	/* ifp->if_rdomain = req->pr_rdomain; */
	if_counters_alloc(ifp);
	/* XXXSMP: be sure pppx_if_qstart() called with NET_LOCK held */
	ifq_set_maxlen(&ifp->if_snd, 1);

	/* XXXSMP breaks atomicity */
	NET_UNLOCK();
	if_attach(ifp);
	NET_LOCK();

	if_addgroup(ifp, "pppx");
	if_alloc_sadl(ifp);

#if NBPFILTER > 0
	bpfattach(&ifp->if_bpf, ifp, DLT_LOOP, sizeof(u_int32_t));
#endif

	/* XXX ipv6 support?  how does the caller indicate it wants ipv6
	 * instead of ipv4?
	 */
	memset(&ifaddr, 0, sizeof(ifaddr));
	ifaddr.sin_family = AF_INET;
	ifaddr.sin_len = sizeof(ifaddr);
	ifaddr.sin_addr = req->pr_ip_srcaddr;

	ia = malloc(sizeof (*ia), M_IFADDR, M_WAITOK | M_ZERO);

	ia->ia_addr.sin_family = AF_INET;
	ia->ia_addr.sin_len = sizeof(struct sockaddr_in);
	ia->ia_addr.sin_addr = req->pr_ip_srcaddr;

	ia->ia_dstaddr.sin_family = AF_INET;
	ia->ia_dstaddr.sin_len = sizeof(struct sockaddr_in);
	ia->ia_dstaddr.sin_addr = req->pr_ip_address;

	ia->ia_sockmask.sin_family = AF_INET;
	ia->ia_sockmask.sin_len = sizeof(struct sockaddr_in);
	ia->ia_sockmask.sin_addr = req->pr_ip_netmask;

	ia->ia_ifa.ifa_addr = sintosa(&ia->ia_addr);
	ia->ia_ifa.ifa_dstaddr = sintosa(&ia->ia_dstaddr);
	ia->ia_ifa.ifa_netmask = sintosa(&ia->ia_sockmask);
	ia->ia_ifa.ifa_ifp = ifp;

	ia->ia_netmask = ia->ia_sockmask.sin_addr.s_addr;

	error = in_ifinit(ifp, ia, &ifaddr, 1);
	if (error) {
		printf("pppx: unable to set addresses for %s, error=%d\n",
		    ifp->if_xname, error);
	} else {
		if_addrhooks_run(ifp);
	}

	error = pipex_link_session(session, ifp, pxd);
	if (error)
		goto detach;

	SET(ifp->if_flags, IFF_RUNNING);
	pxi->pxi_ready = 1;

	return (error);

detach:
	/* XXXSMP breaks atomicity */
	NET_UNLOCK();
	if_detach(ifp);
	NET_LOCK();

	if (RBT_REMOVE(pppx_ifs, &pppx_ifs, pxi) == NULL)
		panic("%s: inconsistent RB tree", __func__);
	LIST_REMOVE(pxi, pxi_list);
out:
	pool_put(&pppx_if_pl, pxi);
	pipex_rele_session(session);

	return (error);
}

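/*
 * Teardown mirrors the setup above: pppx_del_session() exports the
 * session statistics and calls pppx_if_destroy(), which marks the
 * interface down, unlinks the pipex session and detaches the ifnet
 * before releasing both.
 */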
int
pppx_del_session(struct pppx_dev *pxd, struct pipex_session_close_req *req)
{
	struct pppx_if *pxi;

	pxi = pppx_if_find(pxd, req->pcr_session_id, req->pcr_protocol);
	if (pxi == NULL)
		return (EINVAL);

	pipex_export_session_stats(pxi->pxi_session, &req->pcr_stat);
	pppx_if_destroy(pxd, pxi);
	return (0);
}

int
pppx_set_session_descr(struct pppx_dev *pxd,
    struct pipex_session_descr_req *req)
{
	struct pppx_if *pxi;

	pxi = pppx_if_find(pxd, req->pdr_session_id, req->pdr_protocol);
	if (pxi == NULL)
		return (EINVAL);

	(void)memset(pxi->pxi_if.if_description, 0, IFDESCRSIZE);
	strlcpy(pxi->pxi_if.if_description, req->pdr_descr, IFDESCRSIZE);

	return (0);
}

void
pppx_if_destroy(struct pppx_dev *pxd, struct pppx_if *pxi)
{
	struct ifnet *ifp;
	struct pipex_session *session;

	NET_ASSERT_LOCKED();
	session = pxi->pxi_session;
	ifp = &pxi->pxi_if;
	pxi->pxi_ready = 0;
	CLR(ifp->if_flags, IFF_RUNNING);

	pipex_unlink_session(session);

	/* XXXSMP breaks atomicity */
	NET_UNLOCK();
	if_detach(ifp);
	NET_LOCK();

	pipex_rele_session(session);
	if (RBT_REMOVE(pppx_ifs, &pppx_ifs, pxi) == NULL)
		panic("%s: inconsistent RB tree", __func__);
	LIST_REMOVE(pxi, pxi_list);

	pool_put(&pppx_if_pl, pxi);
}

void
pppx_if_qstart(struct ifqueue *ifq)
{
	struct ifnet *ifp = ifq->ifq_if;
	struct pppx_if *pxi = (struct pppx_if *)ifp->if_softc;
	struct mbuf *m;
	int proto;

	NET_ASSERT_LOCKED();
	while ((m = ifq_dequeue(ifq)) != NULL) {
		proto = *mtod(m, int *);
		m_adj(m, sizeof(proto));

		pipex_ppp_output(m, pxi->pxi_session, proto);
	}
}

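/*
 * When pipex is disabled, outgoing packets are handed to userland
 * instead of pipex: the frame queued on pxd_svcq (and read back via
 * pppxread()) starts with a struct pppx_hdr carrying the session's
 * ppp_id, followed by a 4-byte address family, which is the same
 * layout pppxwrite() strips on the way back in.
 */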
int
pppx_if_output(struct ifnet *ifp, struct mbuf *m, struct sockaddr *dst,
    struct rtentry *rt)
{
	struct pppx_if *pxi = (struct pppx_if *)ifp->if_softc;
	struct pppx_hdr *th;
	int error = 0;
	int proto;

	NET_ASSERT_LOCKED();

	if (!ISSET(ifp->if_flags, IFF_RUNNING)) {
		m_freem(m);
		error = ENETDOWN;
		goto out;
	}

#if NBPFILTER > 0
	if (ifp->if_bpf)
		bpf_mtap_af(ifp->if_bpf, dst->sa_family, m, BPF_DIRECTION_OUT);
#endif
	if (pipex_enable) {
		switch (dst->sa_family) {
#ifdef INET6
		case AF_INET6:
			proto = PPP_IPV6;
			break;
#endif
		case AF_INET:
			proto = PPP_IP;
			break;
		default:
			m_freem(m);
			error = EPFNOSUPPORT;
			goto out;
		}
	} else
		proto = htonl(dst->sa_family);

	M_PREPEND(m, sizeof(int), M_DONTWAIT);
	if (m == NULL) {
		error = ENOBUFS;
		goto out;
	}
	*mtod(m, int *) = proto;

	if (pipex_enable)
		error = if_enqueue(ifp, m);
	else {
		M_PREPEND(m, sizeof(struct pppx_hdr), M_DONTWAIT);
		if (m == NULL) {
			error = ENOBUFS;
			goto out;
		}
		th = mtod(m, struct pppx_hdr *);
		th->pppx_proto = 0;	/* not used */
		th->pppx_id = pxi->pxi_session->ppp_id;
		error = mq_enqueue(&pxi->pxi_dev->pxd_svcq, m);
		if (error == 0) {
			if (pxi->pxi_dev->pxd_waiting) {
				wakeup((caddr_t)pxi->pxi_dev);
				pxi->pxi_dev->pxd_waiting = 0;
			}
			selwakeup(&pxi->pxi_dev->pxd_rsel);
		}
	}

out:
	if (error)
		counters_inc(ifp->if_counters, ifc_oerrors);
	return (error);
}

int
pppx_if_ioctl(struct ifnet *ifp, u_long cmd, caddr_t addr)
{
	struct pppx_if *pxi = (struct pppx_if *)ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)addr;
	int error = 0;

	switch (cmd) {
	case SIOCSIFADDR:
		break;

	case SIOCSIFFLAGS:
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		break;

	case SIOCSIFMTU:
		if (ifr->ifr_mtu < 512 ||
		    ifr->ifr_mtu > pxi->pxi_session->peer_mru)
			error = EINVAL;
		else
			ifp->if_mtu = ifr->ifr_mtu;
		break;

	default:
		error = ENOTTY;
		break;
	}

	return (error);
}

RBT_GENERATE(pppx_ifs, pppx_if, pxi_entry, pppx_if_cmp);

/*
 * Locks used to protect struct members and global data
 *	I	immutable after creation
 *	K	kernel lock
 *	N	net lock
 */

struct pppac_softc {
	struct ifnet	sc_if;
	dev_t		sc_dev;		/* [I] */
	LIST_ENTRY(pppac_softc)
			sc_entry;	/* [K] */

	struct mutex	sc_rsel_mtx;
	struct selinfo	sc_rsel;
	struct mutex	sc_wsel_mtx;
	struct selinfo	sc_wsel;

	struct pipex_session
			*sc_multicast_session;

	struct mbuf_queue
			sc_mq;
};

LIST_HEAD(pppac_list, pppac_softc);	/* [K] */

static void	filt_pppac_rdetach(struct knote *);
static int	filt_pppac_read(struct knote *, long);

static const struct filterops pppac_rd_filtops = {
	.f_flags	= FILTEROP_ISFD,
	.f_attach	= NULL,
	.f_detach	= filt_pppac_rdetach,
	.f_event	= filt_pppac_read
};

static void	filt_pppac_wdetach(struct knote *);
static int	filt_pppac_write(struct knote *, long);

static const struct filterops pppac_wr_filtops = {
	.f_flags	= FILTEROP_ISFD,
	.f_attach	= NULL,
	.f_detach	= filt_pppac_wdetach,
	.f_event	= filt_pppac_write
};

static struct pppac_list pppac_devs = LIST_HEAD_INITIALIZER(pppac_devs);

static int	pppac_ioctl(struct ifnet *, u_long, caddr_t);

static int	pppac_add_session(struct pppac_softc *,
		    struct pipex_session_req *);
static int	pppac_del_session(struct pppac_softc *,
		    struct pipex_session_close_req *);
static int	pppac_output(struct ifnet *, struct mbuf *, struct sockaddr *,
		    struct rtentry *);
static void	pppac_qstart(struct ifqueue *);

static inline struct pppac_softc *
pppac_lookup(dev_t dev)
{
	struct pppac_softc *sc;

	LIST_FOREACH(sc, &pppac_devs, sc_entry) {
		if (sc->sc_dev == dev)
			return (sc);
	}

	return (NULL);
}

void
pppacattach(int n)
{
	pipex_init(); /* to be sure, to be sure */
}

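/*
 * Each open of the pppac device gets its own softc and a pppac%u
 * interface named after the device minor, plus a "virtual" multicast
 * pipex session that pppac_qstart() uses to hand a copy of multicast
 * traffic to pipex while the original packet is still queued for
 * userland.
 */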
int
pppacopen(dev_t dev, int flags, int mode, struct proc *p)
{
	struct pppac_softc *sc;
	struct ifnet *ifp;
	struct pipex_session *session;

	sc = malloc(sizeof(*sc), M_DEVBUF, M_WAITOK|M_ZERO);
	if (pppac_lookup(dev) != NULL) {
		free(sc, M_DEVBUF, sizeof(*sc));
		return (EBUSY);
	}

	/* virtual pipex_session entry for multicast */
	session = pool_get(&pipex_session_pool, PR_WAITOK | PR_ZERO);
	session->is_multicast = 1;
	session->ownersc = sc;
	sc->sc_multicast_session = session;

	sc->sc_dev = dev;

	mtx_init(&sc->sc_rsel_mtx, IPL_SOFTNET);
	mtx_init(&sc->sc_wsel_mtx, IPL_SOFTNET);
	mq_init(&sc->sc_mq, IFQ_MAXLEN, IPL_SOFTNET);

	LIST_INSERT_HEAD(&pppac_devs, sc, sc_entry);

	ifp = &sc->sc_if;
	snprintf(ifp->if_xname, sizeof(ifp->if_xname), "pppac%u", minor(dev));

	ifp->if_softc = sc;
	ifp->if_type = IFT_L3IPVLAN;
	ifp->if_hdrlen = sizeof(uint32_t); /* for BPF */;
	ifp->if_mtu = MAXMCLBYTES - sizeof(uint32_t);
	ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST;
	ifp->if_xflags = IFXF_CLONED | IFXF_MPSAFE;
	ifp->if_rtrequest = p2p_rtrequest; /* XXX */
	ifp->if_output = pppac_output;
	ifp->if_qstart = pppac_qstart;
	ifp->if_ioctl = pppac_ioctl;
	/* XXXSMP: be sure pppac_qstart() called with NET_LOCK held */
	ifq_set_maxlen(&ifp->if_snd, 1);

	if_counters_alloc(ifp);
	if_attach(ifp);
	if_alloc_sadl(ifp);

#if NBPFILTER > 0
	bpfattach(&ifp->if_bpf, ifp, DLT_LOOP, sizeof(uint32_t));
#endif

	return (0);
}

int
pppacread(dev_t dev, struct uio *uio, int ioflag)
{
	struct pppac_softc *sc = pppac_lookup(dev);
	struct ifnet *ifp = &sc->sc_if;
	struct mbuf *m0, *m;
	int error = 0;
	size_t len;

	if (!ISSET(ifp->if_flags, IFF_RUNNING))
		return (EHOSTDOWN);

	m0 = mq_dequeue(&sc->sc_mq);
	if (m0 == NULL) {
		if (ISSET(ioflag, IO_NDELAY))
			return (EWOULDBLOCK);

		do {
			error = tsleep_nsec(sc, (PZERO + 1)|PCATCH,
			    "pppacrd", INFSLP);
			if (error != 0)
				return (error);

			m0 = mq_dequeue(&sc->sc_mq);
		} while (m0 == NULL);
	}

	m = m0;
	while (uio->uio_resid > 0) {
		len = ulmin(uio->uio_resid, m->m_len);
		if (len != 0) {
			error = uiomove(mtod(m, caddr_t), len, uio);
			if (error != 0)
				break;
		}

		m = m->m_next;
		if (m == NULL)
			break;
	}
	m_freem(m0);

	return (error);
}

int
pppacwrite(dev_t dev, struct uio *uio, int ioflag)
{
	struct pppac_softc *sc = pppac_lookup(dev);
	struct ifnet *ifp = &sc->sc_if;
	uint32_t proto;
	int error;
	struct mbuf *m;

	if (!ISSET(ifp->if_flags, IFF_RUNNING))
		return (EHOSTDOWN);

	if (uio->uio_resid < ifp->if_hdrlen || uio->uio_resid > MAXMCLBYTES)
		return (EMSGSIZE);

	m = m_gethdr(M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOMEM);

	if (uio->uio_resid > MHLEN) {
		m_clget(m, M_WAITOK, uio->uio_resid);
		if (!ISSET(m->m_flags, M_EXT)) {
			m_free(m);
			return (ENOMEM);
		}
	}

	m->m_pkthdr.len = m->m_len = uio->uio_resid;

	error = uiomove(mtod(m, void *), m->m_len, uio);
	if (error != 0) {
		m_freem(m);
		return (error);
	}

#if NBPFILTER > 0
	if (ifp->if_bpf)
		bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_IN);
#endif

	/* strip the tunnel header */
	proto = ntohl(*mtod(m, uint32_t *));
	m_adj(m, sizeof(uint32_t));

	m->m_flags &= ~(M_MCAST|M_BCAST);
	m->m_pkthdr.ph_ifidx = ifp->if_index;
	m->m_pkthdr.ph_rtableid = ifp->if_rdomain;

#if NPF > 0
	pf_pkt_addr_changed(m);
#endif

	counters_pkt(ifp->if_counters,
	    ifc_ipackets, ifc_ibytes, m->m_pkthdr.len);

	NET_LOCK();

	switch (proto) {
	case AF_INET:
		ipv4_input(ifp, m);
		break;
#ifdef INET6
	case AF_INET6:
		ipv6_input(ifp, m);
		break;
#endif
	default:
		m_freem(m);
		error = EAFNOSUPPORT;
		break;
	}

	NET_UNLOCK();

	return (error);
}

int
pppacioctl(dev_t dev, u_long cmd, caddr_t data, int flags, struct proc *p)
{
	struct pppac_softc *sc = pppac_lookup(dev);
	int error = 0;

	NET_LOCK();
	switch (cmd) {
	case FIONBIO:
		break;
	case FIONREAD:
		*(int *)data = mq_hdatalen(&sc->sc_mq);
		break;

	case PIPEXASESSION:
		error = pppac_add_session(sc, (struct pipex_session_req *)data);
		break;
	case PIPEXDSESSION:
		error = pppac_del_session(sc,
		    (struct pipex_session_close_req *)data);
		break;
	default:
		error = pipex_ioctl(sc, cmd, data);
		break;
	}
	NET_UNLOCK();

	return (error);
}

int
pppacpoll(dev_t dev, int events, struct proc *p)
{
	struct pppac_softc *sc = pppac_lookup(dev);
	int revents = 0;

	if (events & (POLLIN | POLLRDNORM)) {
		if (!mq_empty(&sc->sc_mq))
			revents |= events & (POLLIN | POLLRDNORM);
	}
	if (events & (POLLOUT | POLLWRNORM))
		revents |= events & (POLLOUT | POLLWRNORM);

	if (revents == 0) {
		if (events & (POLLIN | POLLRDNORM))
			selrecord(p, &sc->sc_rsel);
	}

	return (revents);
}

int
pppackqfilter(dev_t dev, struct knote *kn)
{
	struct pppac_softc *sc = pppac_lookup(dev);
	struct mutex *mtx;
	struct klist *klist;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		mtx = &sc->sc_rsel_mtx;
		klist = &sc->sc_rsel.si_note;
		kn->kn_fop = &pppac_rd_filtops;
		break;
	case EVFILT_WRITE:
		mtx = &sc->sc_wsel_mtx;
		klist = &sc->sc_wsel.si_note;
		kn->kn_fop = &pppac_wr_filtops;
		break;
	default:
		return (EINVAL);
	}

	kn->kn_hook = sc;

	mtx_enter(mtx);
	klist_insert_locked(klist, kn);
	mtx_leave(mtx);

	return (0);
}

static void
filt_pppac_rdetach(struct knote *kn)
{
	struct pppac_softc *sc = kn->kn_hook;
	struct klist *klist = &sc->sc_rsel.si_note;

	mtx_enter(&sc->sc_rsel_mtx);
	klist_remove_locked(klist, kn);
	mtx_leave(&sc->sc_rsel_mtx);
}

static int
filt_pppac_read(struct knote *kn, long hint)
{
	struct pppac_softc *sc = kn->kn_hook;

	kn->kn_data = mq_hdatalen(&sc->sc_mq);

	return (kn->kn_data > 0);
}

static void
filt_pppac_wdetach(struct knote *kn)
{
	struct pppac_softc *sc = kn->kn_hook;
	struct klist *klist = &sc->sc_wsel.si_note;

	mtx_enter(&sc->sc_wsel_mtx);
	klist_remove_locked(klist, kn);
	mtx_leave(&sc->sc_wsel_mtx);
}

static int
filt_pppac_write(struct knote *kn, long hint)
{
	/* We're always ready to accept a write. */
	return (1);
}

int
pppacclose(dev_t dev, int flags, int mode, struct proc *p)
{
	struct pppac_softc *sc = pppac_lookup(dev);
	struct ifnet *ifp = &sc->sc_if;
	int s;

	NET_LOCK();
	CLR(ifp->if_flags, IFF_RUNNING);
	NET_UNLOCK();

	if_detach(ifp);

	s = splhigh();
	klist_invalidate(&sc->sc_rsel.si_note);
	klist_invalidate(&sc->sc_wsel.si_note);
	splx(s);

	pool_put(&pipex_session_pool, sc->sc_multicast_session);
	NET_LOCK();
	pipex_destroy_all_sessions(sc);
	NET_UNLOCK();

	LIST_REMOVE(sc, sc_entry);
	free(sc, M_DEVBUF, sizeof(*sc));

	return (0);
}

static int
pppac_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	/* struct ifreq *ifr = (struct ifreq *)data; */
	int error = 0;

	switch (cmd) {
	case SIOCSIFADDR:
		SET(ifp->if_flags, IFF_UP); /* XXX cry cry */
		/* FALLTHROUGH */
	case SIOCSIFFLAGS:
		if (ISSET(ifp->if_flags, IFF_UP))
			SET(ifp->if_flags, IFF_RUNNING);
		else
			CLR(ifp->if_flags, IFF_RUNNING);
		break;
	case SIOCSIFMTU:
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/* XXX */
		break;

	default:
		error = ENOTTY;
		break;
	}

	return (error);
}

static int
pppac_add_session(struct pppac_softc *sc, struct pipex_session_req *req)
{
	int error;
	struct pipex_session *session;

	error = pipex_init_session(&session, req);
	if (error != 0)
		return (error);
	error = pipex_link_session(session, &sc->sc_if, sc);
	if (error != 0)
		pipex_rele_session(session);

	return (error);
}

static int
pppac_del_session(struct pppac_softc *sc, struct pipex_session_close_req *req)
{
	struct pipex_session *session;

	session = pipex_lookup_by_session_id(req->pcr_protocol,
	    req->pcr_session_id);
	if (session == NULL || session->ownersc != sc)
		return (EINVAL);
	pipex_unlink_session(session);
	pipex_rele_session(session);

	return (0);
}

static int
pppac_output(struct ifnet *ifp, struct mbuf *m, struct sockaddr *dst,
    struct rtentry *rt)
{
	int error;

	if (!ISSET(ifp->if_flags, IFF_RUNNING)) {
		error = EHOSTDOWN;
		goto drop;
	}

	switch (dst->sa_family) {
	case AF_INET:
#ifdef INET6
	case AF_INET6:
#endif
		break;
	default:
		error = EAFNOSUPPORT;
		goto drop;
	}

	m->m_pkthdr.ph_family = dst->sa_family;

	return (if_enqueue(ifp, m));

drop:
	m_freem(m);
	return (error);
}

static void
pppac_qstart(struct ifqueue *ifq)
{
	struct ifnet *ifp = ifq->ifq_if;
	struct pppac_softc *sc = ifp->if_softc;
	struct mbuf *m, *m0;
	struct pipex_session *session;
	struct ip ip;
	int rv;

	NET_ASSERT_LOCKED();
	while ((m = ifq_dequeue(ifq)) != NULL) {
#if NBPFILTER > 0
		if (ifp->if_bpf) {
			bpf_mtap_af(ifp->if_bpf, m->m_pkthdr.ph_family, m,
			    BPF_DIRECTION_OUT);
		}
#endif

		switch (m->m_pkthdr.ph_family) {
		case AF_INET:
			if (m->m_pkthdr.len < sizeof(struct ip))
				goto bad;
			m_copydata(m, 0, sizeof(struct ip), &ip);
			if (IN_MULTICAST(ip.ip_dst.s_addr)) {
				/* pass a copy to pipex */
				m0 = m_copym(m, 0, M_COPYALL, M_NOWAIT);
				if (m0 != NULL)
					pipex_ip_output(m0,
					    sc->sc_multicast_session);
				else
					goto bad;
			} else {
				session = pipex_lookup_by_ip_address(ip.ip_dst);
				if (session != NULL) {
					pipex_ip_output(m, session);
					m = NULL;
				}
			}
			break;
		}
		if (m == NULL)	/* handled by pipex */
			continue;

		m = m_prepend(m, sizeof(uint32_t), M_DONTWAIT);
		if (m == NULL)
			goto bad;
		*mtod(m, uint32_t *) = htonl(m->m_pkthdr.ph_family);

		rv = mq_enqueue(&sc->sc_mq, m);
		if (rv == 1)
			counters_inc(ifp->if_counters, ifc_collisions);
		continue;
bad:
		counters_inc(ifp->if_counters, ifc_oerrors);
		if (m != NULL)
			m_freem(m);
		continue;
	}

	if (!mq_empty(&sc->sc_mq)) {
		wakeup(sc);
		selwakeup(&sc->sc_rsel);
	}
}