/*	$OpenBSD: if_pppx.c,v 1.128 2023/12/23 10:52:54 bluhm Exp $ */

/*
 * Copyright (c) 2010 Claudio Jeker <claudio@openbsd.org>
 * Copyright (c) 2010 David Gwynne <dlg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*-
 * Copyright (c) 2009 Internet Initiative Japan Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/device.h>
#include <sys/conf.h>
#include <sys/queue.h>
#include <sys/pool.h>
#include <sys/mbuf.h>
#include <sys/errno.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/vnode.h>
#include <sys/event.h>
#include <sys/mutex.h>
#include <sys/refcnt.h>

#include <net/if.h>
#include <net/if_types.h>
#include <netinet/in.h>
#include <netinet/if_ether.h>
#include <net/if_dl.h>

#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>

#ifdef INET6
#include <netinet6/in6_var.h>
#include <netinet/ip6.h>
#include <netinet6/nd6.h>
#endif /* INET6 */

#include "bpfilter.h"
#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include "pf.h"
#if NPF > 0
#include <net/pfvar.h>
#endif

#include <net/ppp_defs.h>
#include <net/ppp-comp.h>
#include <crypto/arc4.h>

#ifdef PIPEX
#include <net/radix.h>
#include <net/pipex.h>
#include <net/pipex_local.h>
#else
#error PIPEX option not enabled
#endif

#ifdef PPPX_DEBUG
#define PPPX_D_INIT	(1<<0)

int pppxdebug = 0;

#define DPRINTF(_m, _p...)	do { \
	if (ISSET(pppxdebug, (_m))) \
		printf(_p); \
} while (0)
#else
#define DPRINTF(_m, _p...)	/* _m, _p */
#endif

struct pppx_if;

/*
 * Locks used to protect struct members and global data
 *	I	immutable after creation
 *	K	kernel lock
 *	N	net lock
 *	m	pxd_mtx
 */

struct pppx_dev {
	LIST_ENTRY(pppx_dev)	pxd_entry;	/* [K] */
	int			pxd_unit;	/* [I] */

	/* kq shizz */
	struct mutex		pxd_mtx;
	struct klist		pxd_rklist;	/* [m] */
	struct klist		pxd_wklist;	/* [m] */

	/* queue of packets for userland to service - protected by splnet */
	struct mbuf_queue	pxd_svcq;
	int			pxd_waiting;	/* [N] */
	LIST_HEAD(,pppx_if)	pxd_pxis;	/* [K] */
};

LIST_HEAD(, pppx_dev)		pppx_devs =
    LIST_HEAD_INITIALIZER(pppx_devs);		/* [K] */
struct pool			pppx_if_pl;

struct pppx_dev			*pppx_dev_lookup(dev_t);
struct pppx_dev			*pppx_dev2pxd(dev_t);

struct pppx_if_key {
	int			pxik_session_id;	/* [I] */
	int			pxik_protocol;		/* [I] */
};

struct pppx_if {
	struct pppx_if_key	pxi_key;	/* [I] must be first
						    in the struct */
	struct refcnt		pxi_refcnt;

	RBT_ENTRY(pppx_if)	pxi_entry;	/* [K] */
	LIST_ENTRY(pppx_if)	pxi_list;	/* [K] */

	int			pxi_ready;	/* [K] */

	int			pxi_unit;	/* [I] */
	struct ifnet		pxi_if;
	struct pppx_dev		*pxi_dev;	/* [I] */
	struct pipex_session	*pxi_session;	/* [I] */
};

static inline int
pppx_if_cmp(const struct pppx_if *a, const struct pppx_if *b)
{
	return memcmp(&a->pxi_key, &b->pxi_key, sizeof(a->pxi_key));
}

RBT_HEAD(pppx_ifs, pppx_if) pppx_ifs = RBT_INITIALIZER(&pppx_ifs); /* [N] */
RBT_PROTOTYPE(pppx_ifs, pppx_if, pxi_entry, pppx_if_cmp);
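
/*
 * The pppx_ifs tree is keyed by memcmp() over pxi_key, which is why
 * pxi_key has to stay the first member of struct pppx_if and why lookup
 * keys are zeroed before the compare (see pppx_if_find_locked()).
 */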

int		pppx_if_next_unit(void);
struct pppx_if	*pppx_if_find_locked(struct pppx_dev *, int, int);
static inline struct pppx_if	*pppx_if_find(struct pppx_dev *, int, int);
static inline void	pppx_if_rele(struct pppx_if *);
int		pppx_add_session(struct pppx_dev *,
		    struct pipex_session_req *);
int		pppx_del_session(struct pppx_dev *,
		    struct pipex_session_close_req *);
int		pppx_set_session_descr(struct pppx_dev *,
		    struct pipex_session_descr_req *);

void		pppx_if_destroy(struct pppx_dev *, struct pppx_if *);
void		pppx_if_qstart(struct ifqueue *);
int		pppx_if_output(struct ifnet *, struct mbuf *,
		    struct sockaddr *, struct rtentry *);
int		pppx_if_ioctl(struct ifnet *, u_long, caddr_t);

void		pppxattach(int);

void		filt_pppx_rdetach(struct knote *);
int		filt_pppx_read(struct knote *, long);
int		filt_pppx_modify(struct kevent *, struct knote *);
int		filt_pppx_process(struct knote *, struct kevent *);

const struct filterops pppx_rd_filtops = {
	.f_flags	= FILTEROP_ISFD | FILTEROP_MPSAFE,
	.f_attach	= NULL,
	.f_detach	= filt_pppx_rdetach,
	.f_event	= filt_pppx_read,
	.f_modify	= filt_pppx_modify,
	.f_process	= filt_pppx_process,
};

void		filt_pppx_wdetach(struct knote *);
int		filt_pppx_write(struct knote *, long);

const struct filterops pppx_wr_filtops = {
	.f_flags	= FILTEROP_ISFD | FILTEROP_MPSAFE,
	.f_attach	= NULL,
	.f_detach	= filt_pppx_wdetach,
	.f_event	= filt_pppx_write,
	.f_modify	= filt_pppx_modify,
	.f_process	= filt_pppx_process,
};

struct pppx_dev *
pppx_dev_lookup(dev_t dev)
{
	struct pppx_dev *pxd;
	int unit = minor(dev);

	LIST_FOREACH(pxd, &pppx_devs, pxd_entry) {
		if (pxd->pxd_unit == unit)
			return (pxd);
	}

	return (NULL);
}

struct pppx_dev *
pppx_dev2pxd(dev_t dev)
{
	struct pppx_dev *pxd;

	pxd = pppx_dev_lookup(dev);

	return (pxd);
}

void
pppxattach(int n)
{
	pool_init(&pppx_if_pl, sizeof(struct pppx_if), 0, IPL_NONE,
	    PR_WAITOK, "pppxif", NULL);
	pipex_init();
}

int
pppxopen(dev_t dev, int flags, int mode, struct proc *p)
{
	struct pppx_dev *pxd;

	pxd = malloc(sizeof(*pxd), M_DEVBUF, M_WAITOK | M_ZERO);
	if (pppx_dev_lookup(dev) != NULL) {
		free(pxd, M_DEVBUF, sizeof(*pxd));
		return (EBUSY);
	}

	pxd->pxd_unit = minor(dev);
	mtx_init(&pxd->pxd_mtx, IPL_NET);
	klist_init_mutex(&pxd->pxd_rklist, &pxd->pxd_mtx);
	klist_init_mutex(&pxd->pxd_wklist, &pxd->pxd_mtx);
	LIST_INIT(&pxd->pxd_pxis);

	mq_init(&pxd->pxd_svcq, 128, IPL_NET);
	LIST_INSERT_HEAD(&pppx_devs, pxd, pxd_entry);

	return 0;
}

int
pppxread(dev_t dev, struct uio *uio, int ioflag)
{
	struct pppx_dev *pxd = pppx_dev2pxd(dev);
	struct mbuf *m, *m0;
	int error = 0;
	size_t len;

	if (!pxd)
		return (ENXIO);

	while ((m0 = mq_dequeue(&pxd->pxd_svcq)) == NULL) {
		if (ISSET(ioflag, IO_NDELAY))
			return (EWOULDBLOCK);

		NET_LOCK();
		pxd->pxd_waiting = 1;
		error = rwsleep_nsec(pxd, &netlock,
		    (PZERO + 1)|PCATCH, "pppxread", INFSLP);
		NET_UNLOCK();
		if (error != 0) {
			return (error);
		}
	}

	while (m0 != NULL && uio->uio_resid > 0 && error == 0) {
		len = ulmin(uio->uio_resid, m0->m_len);
		if (len != 0)
			error = uiomove(mtod(m0, caddr_t), len, uio);
		m = m_free(m0);
		m0 = m;
	}

	m_freem(m0);

	return (error);
}
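
/*
 * Packets written to /dev/pppx are expected to carry a struct pppx_hdr
 * (session id and protocol, used to locate the pppx(4) interface)
 * followed by a 4-byte address family in network byte order and the IP
 * payload; pppxwrite() strips both headers before handing the packet to
 * the stack.
 */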

int
pppxwrite(dev_t dev, struct uio *uio, int ioflag)
{
	struct pppx_dev *pxd = pppx_dev2pxd(dev);
	struct pppx_hdr *th;
	struct pppx_if	*pxi;
	uint32_t proto;
	struct mbuf *top, **mp, *m;
	int tlen;
	int error = 0;
	size_t mlen;

	if (uio->uio_resid < sizeof(*th) + sizeof(uint32_t) ||
	    uio->uio_resid > MCLBYTES)
		return (EMSGSIZE);

	tlen = uio->uio_resid;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);
	mlen = MHLEN;
	if (uio->uio_resid > MHLEN) {
		MCLGET(m, M_DONTWAIT);
		if (!(m->m_flags & M_EXT)) {
			m_free(m);
			return (ENOBUFS);
		}
		mlen = MCLBYTES;
	}

	top = NULL;
	mp = &top;

	while (error == 0 && uio->uio_resid > 0) {
		m->m_len = ulmin(mlen, uio->uio_resid);
		error = uiomove(mtod(m, caddr_t), m->m_len, uio);
		*mp = m;
		mp = &m->m_next;
		if (error == 0 && uio->uio_resid > 0) {
			MGET(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				error = ENOBUFS;
				break;
			}
			mlen = MLEN;
			if (uio->uio_resid >= MINCLSIZE) {
				MCLGET(m, M_DONTWAIT);
				if (!(m->m_flags & M_EXT)) {
					error = ENOBUFS;
					m_free(m);
					break;
				}
				mlen = MCLBYTES;
			}
		}
	}

	if (error) {
		m_freem(top);
		return (error);
	}

	top->m_pkthdr.len = tlen;

	/* Find the interface */
	th = mtod(top, struct pppx_hdr *);
	m_adj(top, sizeof(struct pppx_hdr));

	pxi = pppx_if_find(pxd, th->pppx_id, th->pppx_proto);
	if (pxi == NULL) {
		m_freem(top);
		return (EINVAL);
	}
	top->m_pkthdr.ph_ifidx = pxi->pxi_if.if_index;

#if NBPFILTER > 0
	if (pxi->pxi_if.if_bpf)
		bpf_mtap(pxi->pxi_if.if_bpf, top, BPF_DIRECTION_IN);
#endif
	/* strip the tunnel header */
	proto = ntohl(*(uint32_t *)(th + 1));
	m_adj(top, sizeof(uint32_t));

	NET_LOCK();

	switch (proto) {
	case AF_INET:
		ipv4_input(&pxi->pxi_if, top);
		break;
#ifdef INET6
	case AF_INET6:
		ipv6_input(&pxi->pxi_if, top);
		break;
#endif
	default:
		m_freem(top);
		error = EAFNOSUPPORT;
		break;
	}

	NET_UNLOCK();

	pppx_if_rele(pxi);

	return (error);
}

int
pppxioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct proc *p)
{
	struct pppx_dev *pxd = pppx_dev2pxd(dev);
	int error = 0;

	switch (cmd) {
	case PIPEXASESSION:
		error = pppx_add_session(pxd,
		    (struct pipex_session_req *)addr);
		break;

	case PIPEXDSESSION:
		error = pppx_del_session(pxd,
		    (struct pipex_session_close_req *)addr);
		break;

	case PIPEXSIFDESCR:
		error = pppx_set_session_descr(pxd,
		    (struct pipex_session_descr_req *)addr);
		break;

	case FIONBIO:
		break;
	case FIONREAD:
		*(int *)addr = mq_hdatalen(&pxd->pxd_svcq);
		break;

	default:
		error = pipex_ioctl(pxd, cmd, addr);
		break;
	}

	return (error);
}

int
pppxkqfilter(dev_t dev, struct knote *kn)
{
	struct pppx_dev *pxd = pppx_dev2pxd(dev);
	struct klist *klist;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		klist = &pxd->pxd_rklist;
		kn->kn_fop = &pppx_rd_filtops;
		break;
	case EVFILT_WRITE:
		klist = &pxd->pxd_wklist;
		kn->kn_fop = &pppx_wr_filtops;
		break;
	default:
		return (EINVAL);
	}

	kn->kn_hook = pxd;

	klist_insert(klist, kn);

	return (0);
}

void
filt_pppx_rdetach(struct knote *kn)
{
	struct pppx_dev *pxd = kn->kn_hook;

	klist_remove(&pxd->pxd_rklist, kn);
}

int
filt_pppx_read(struct knote *kn, long hint)
{
	struct pppx_dev *pxd = kn->kn_hook;

	MUTEX_ASSERT_LOCKED(&pxd->pxd_mtx);

	kn->kn_data = mq_hdatalen(&pxd->pxd_svcq);

	return (kn->kn_data > 0);
}

void
filt_pppx_wdetach(struct knote *kn)
{
	struct pppx_dev *pxd = kn->kn_hook;

	klist_remove(&pxd->pxd_wklist, kn);
}

int
filt_pppx_write(struct knote *kn, long hint)
{
	/* We're always ready to accept a write. */
	return (1);
}

int
filt_pppx_modify(struct kevent *kev, struct knote *kn)
{
	struct pppx_dev *pxd = kn->kn_hook;
	int active;

	mtx_enter(&pxd->pxd_mtx);
	active = knote_modify(kev, kn);
	mtx_leave(&pxd->pxd_mtx);

	return (active);
}

int
filt_pppx_process(struct knote *kn, struct kevent *kev)
{
	struct pppx_dev *pxd = kn->kn_hook;
	int active;

	mtx_enter(&pxd->pxd_mtx);
	active = knote_process(kn, kev);
	mtx_leave(&pxd->pxd_mtx);

	return (active);
}

int
pppxclose(dev_t dev, int flags, int mode, struct proc *p)
{
	struct pppx_dev *pxd;
	struct pppx_if	*pxi;

	pxd = pppx_dev_lookup(dev);

	while ((pxi = LIST_FIRST(&pxd->pxd_pxis))) {
		pxi->pxi_ready = 0;
		pppx_if_destroy(pxd, pxi);
	}

	LIST_REMOVE(pxd, pxd_entry);

	mq_purge(&pxd->pxd_svcq);

	klist_free(&pxd->pxd_rklist);
	klist_free(&pxd->pxd_wklist);

	free(pxd, M_DEVBUF, sizeof(*pxd));

	return (0);
}

int
pppx_if_next_unit(void)
{
	struct pppx_if *pxi;
	int unit = 0;

	/* this is safe without splnet since we're not modifying it */
	do {
		int found = 0;
		RBT_FOREACH(pxi, pppx_ifs, &pppx_ifs) {
			if (pxi->pxi_unit == unit) {
				found = 1;
				break;
			}
		}

		if (found == 0)
			break;
		unit++;
	} while (unit > 0);

	return (unit);
}
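
/*
 * pppx_if_find() takes a reference on the interface it returns; callers
 * drop it again with pppx_if_rele().  Teardown clears pxi_ready first so
 * new lookups fail, then pppx_if_destroy() waits in refcnt_finalize()
 * for outstanding references to drain before detaching and freeing.
 */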

struct pppx_if *
pppx_if_find_locked(struct pppx_dev *pxd, int session_id, int protocol)
{
	struct pppx_if_key key;
	struct pppx_if *pxi;

	memset(&key, 0, sizeof(key));
	key.pxik_session_id = session_id;
	key.pxik_protocol = protocol;

	pxi = RBT_FIND(pppx_ifs, &pppx_ifs, (struct pppx_if *)&key);
	if (pxi && pxi->pxi_ready == 0)
		pxi = NULL;

	return pxi;
}

static inline struct pppx_if *
pppx_if_find(struct pppx_dev *pxd, int session_id, int protocol)
{
	struct pppx_if *pxi;

	if ((pxi = pppx_if_find_locked(pxd, session_id, protocol)))
		refcnt_take(&pxi->pxi_refcnt);

	return pxi;
}

static inline void
pppx_if_rele(struct pppx_if *pxi)
{
	refcnt_rele_wake(&pxi->pxi_refcnt);
}

int
pppx_add_session(struct pppx_dev *pxd, struct pipex_session_req *req)
{
	struct pppx_if *pxi;
	struct pipex_session *session;
	struct ifnet *ifp;
	int unit, error = 0;
	struct in_ifaddr *ia;
	struct sockaddr_in ifaddr;

	/*
	 * XXX: As long as `session' is allocated as part of a `pxi'
	 *	it isn't possible to free it separately.  So disallow
	 *	the timeout feature until this is fixed.
	 */
	if (req->pr_timeout_sec != 0)
		return (EINVAL);

	error = pipex_init_session(&session, req);
	if (error)
		return (error);

	pxi = pool_get(&pppx_if_pl, PR_WAITOK | PR_ZERO);
	ifp = &pxi->pxi_if;

	pxi->pxi_session = session;

	/* try to set the interface up */
	unit = pppx_if_next_unit();
	if (unit < 0) {
		error = ENOMEM;
		goto out;
	}

	refcnt_init(&pxi->pxi_refcnt);
	pxi->pxi_unit = unit;
	pxi->pxi_key.pxik_session_id = req->pr_session_id;
	pxi->pxi_key.pxik_protocol = req->pr_protocol;
	pxi->pxi_dev = pxd;

	if (RBT_INSERT(pppx_ifs, &pppx_ifs, pxi) != NULL) {
		error = EADDRINUSE;
		goto out;
	}
	LIST_INSERT_HEAD(&pxd->pxd_pxis, pxi, pxi_list);

	snprintf(ifp->if_xname, sizeof(ifp->if_xname), "%s%d", "pppx", unit);
	ifp->if_mtu = req->pr_peer_mru; /* XXX */
	ifp->if_flags = IFF_POINTOPOINT | IFF_MULTICAST | IFF_UP;
	ifp->if_xflags = IFXF_CLONED | IFXF_MPSAFE;
	ifp->if_qstart = pppx_if_qstart;
	ifp->if_output = pppx_if_output;
	ifp->if_ioctl = pppx_if_ioctl;
	ifp->if_rtrequest = p2p_rtrequest;
	ifp->if_type = IFT_PPP;
	ifp->if_softc = pxi;
	/* ifp->if_rdomain = req->pr_rdomain; */
	if_counters_alloc(ifp);

	if_attach(ifp);

	NET_LOCK();
	if_addgroup(ifp, "pppx");
	if_alloc_sadl(ifp);
	NET_UNLOCK();

#if NBPFILTER > 0
	bpfattach(&ifp->if_bpf, ifp, DLT_LOOP, sizeof(u_int32_t));
#endif

	/* XXX ipv6 support? how does the caller indicate it wants ipv6
	 * instead of ipv4?
	 */
	memset(&ifaddr, 0, sizeof(ifaddr));
	ifaddr.sin_family = AF_INET;
	ifaddr.sin_len = sizeof(ifaddr);
	ifaddr.sin_addr = req->pr_ip_srcaddr;

	ia = malloc(sizeof (*ia), M_IFADDR, M_WAITOK | M_ZERO);
	refcnt_init_trace(&ia->ia_ifa.ifa_refcnt, DT_REFCNT_IDX_IFADDR);

	ia->ia_addr.sin_family = AF_INET;
	ia->ia_addr.sin_len = sizeof(struct sockaddr_in);
	ia->ia_addr.sin_addr = req->pr_ip_srcaddr;

	ia->ia_dstaddr.sin_family = AF_INET;
	ia->ia_dstaddr.sin_len = sizeof(struct sockaddr_in);
	ia->ia_dstaddr.sin_addr = req->pr_ip_address;

	ia->ia_sockmask.sin_family = AF_INET;
	ia->ia_sockmask.sin_len = sizeof(struct sockaddr_in);
	ia->ia_sockmask.sin_addr = req->pr_ip_netmask;

	ia->ia_ifa.ifa_addr = sintosa(&ia->ia_addr);
	ia->ia_ifa.ifa_dstaddr = sintosa(&ia->ia_dstaddr);
	ia->ia_ifa.ifa_netmask = sintosa(&ia->ia_sockmask);
	ia->ia_ifa.ifa_ifp = ifp;

	ia->ia_netmask = ia->ia_sockmask.sin_addr.s_addr;

	NET_LOCK();
	error = in_ifinit(ifp, ia, &ifaddr, 1);
	if (error) {
		printf("pppx: unable to set addresses for %s, error=%d\n",
		    ifp->if_xname, error);
	} else {
		if_addrhooks_run(ifp);
	}
	NET_UNLOCK();

	error = pipex_link_session(session, ifp, pxd);
	if (error)
		goto detach;

	NET_LOCK();
	SET(ifp->if_flags, IFF_RUNNING);
	NET_UNLOCK();
	pxi->pxi_ready = 1;

	return (error);

detach:
	if_detach(ifp);

	if (RBT_REMOVE(pppx_ifs, &pppx_ifs, pxi) == NULL)
		panic("%s: inconsistent RB tree", __func__);
	LIST_REMOVE(pxi, pxi_list);
out:
	pool_put(&pppx_if_pl, pxi);
	pipex_rele_session(session);

	return (error);
}
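
/*
 * PIPEXDSESSION copies the final session statistics back into the close
 * request before the interface is destroyed, so userland still sees the
 * counters of a session it is tearing down.
 */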
int
pppx_del_session(struct pppx_dev *pxd, struct pipex_session_close_req *req)
{
	struct pppx_if *pxi;

	pxi = pppx_if_find_locked(pxd, req->pcr_session_id, req->pcr_protocol);
	if (pxi == NULL)
		return (EINVAL);

	pxi->pxi_ready = 0;
	pipex_export_session_stats(pxi->pxi_session, &req->pcr_stat);
	pppx_if_destroy(pxd, pxi);
	return (0);
}

int
pppx_set_session_descr(struct pppx_dev *pxd,
    struct pipex_session_descr_req *req)
{
	struct pppx_if *pxi;

	pxi = pppx_if_find(pxd, req->pdr_session_id, req->pdr_protocol);
	if (pxi == NULL)
		return (EINVAL);

	NET_LOCK();
	(void)memset(pxi->pxi_if.if_description, 0, IFDESCRSIZE);
	strlcpy(pxi->pxi_if.if_description, req->pdr_descr, IFDESCRSIZE);
	NET_UNLOCK();

	pppx_if_rele(pxi);

	return (0);
}

void
pppx_if_destroy(struct pppx_dev *pxd, struct pppx_if *pxi)
{
	struct ifnet *ifp;
	struct pipex_session *session;

	session = pxi->pxi_session;
	ifp = &pxi->pxi_if;

	refcnt_finalize(&pxi->pxi_refcnt, "pxifinal");

	NET_LOCK();
	CLR(ifp->if_flags, IFF_RUNNING);
	NET_UNLOCK();

	pipex_unlink_session(session);
	if_detach(ifp);

	pipex_rele_session(session);
	if (RBT_REMOVE(pppx_ifs, &pppx_ifs, pxi) == NULL)
		panic("%s: inconsistent RB tree", __func__);
	LIST_REMOVE(pxi, pxi_list);

	pool_put(&pppx_if_pl, pxi);
}

void
pppx_if_qstart(struct ifqueue *ifq)
{
	struct ifnet *ifp = ifq->ifq_if;
	struct pppx_if *pxi = (struct pppx_if *)ifp->if_softc;
	struct mbuf *m;
	int proto;

	while ((m = ifq_dequeue(ifq)) != NULL) {
		proto = *mtod(m, int *);
		m_adj(m, sizeof(proto));

		pipex_ppp_output(m, pxi->pxi_session, proto);
	}
}
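
/*
 * Output takes one of two paths: with pipex enabled the packet is tagged
 * with its PPP protocol number and queued for pppx_if_qstart() to feed
 * pipex_ppp_output(); with pipex disabled it is prepended with a pppx_hdr
 * plus the address family and queued on pxd_svcq for userland to read,
 * waking any sleeping reader and notifying kqueue.
 */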
int
pppx_if_output(struct ifnet *ifp, struct mbuf *m, struct sockaddr *dst,
    struct rtentry *rt)
{
	struct pppx_if *pxi = (struct pppx_if *)ifp->if_softc;
	struct pppx_hdr *th;
	int error = 0;
	int pipex_enable_local, proto;

	pipex_enable_local = atomic_load_int(&pipex_enable);

	NET_ASSERT_LOCKED();

	if (!ISSET(ifp->if_flags, IFF_RUNNING)) {
		m_freem(m);
		error = ENETDOWN;
		goto out;
	}

#if NBPFILTER > 0
	if (ifp->if_bpf)
		bpf_mtap_af(ifp->if_bpf, dst->sa_family, m, BPF_DIRECTION_OUT);
#endif
	if (pipex_enable_local) {
		switch (dst->sa_family) {
#ifdef INET6
		case AF_INET6:
			proto = PPP_IPV6;
			break;
#endif
		case AF_INET:
			proto = PPP_IP;
			break;
		default:
			m_freem(m);
			error = EPFNOSUPPORT;
			goto out;
		}
	} else
		proto = htonl(dst->sa_family);

	M_PREPEND(m, sizeof(int), M_DONTWAIT);
	if (m == NULL) {
		error = ENOBUFS;
		goto out;
	}
	*mtod(m, int *) = proto;

	if (pipex_enable_local)
		error = if_enqueue(ifp, m);
	else {
		M_PREPEND(m, sizeof(struct pppx_hdr), M_DONTWAIT);
		if (m == NULL) {
			error = ENOBUFS;
			goto out;
		}
		th = mtod(m, struct pppx_hdr *);
		th->pppx_proto = 0;	/* not used */
		th->pppx_id = pxi->pxi_session->ppp_id;
		error = mq_enqueue(&pxi->pxi_dev->pxd_svcq, m);
		if (error == 0) {
			if (pxi->pxi_dev->pxd_waiting) {
				wakeup((caddr_t)pxi->pxi_dev);
				pxi->pxi_dev->pxd_waiting = 0;
			}
			knote(&pxi->pxi_dev->pxd_rklist, 0);
		}
	}

out:
	if (error)
		counters_inc(ifp->if_counters, ifc_oerrors);
	return (error);
}

int
pppx_if_ioctl(struct ifnet *ifp, u_long cmd, caddr_t addr)
{
	struct pppx_if *pxi = (struct pppx_if *)ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)addr;
	int error = 0;

	switch (cmd) {
	case SIOCSIFADDR:
		break;

	case SIOCSIFFLAGS:
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		break;

	case SIOCSIFMTU:
		if (ifr->ifr_mtu < 512 ||
		    ifr->ifr_mtu > pxi->pxi_session->peer_mru)
			error = EINVAL;
		else
			ifp->if_mtu = ifr->ifr_mtu;
		break;

	default:
		error = ENOTTY;
		break;
	}

	return (error);
}

RBT_GENERATE(pppx_ifs, pppx_if, pxi_entry, pppx_if_cmp);

/*
 * Locks used to protect struct members and global data
 *	I	immutable after creation
 *	K	kernel lock
 *	N	net lock
 *	m	sc_mtx
 */

struct pppac_softc {
	struct ifnet	sc_if;
	dev_t		sc_dev;		/* [I] */
	int		sc_ready;	/* [K] */
	LIST_ENTRY(pppac_softc)
			sc_entry;	/* [K] */

	struct mutex	sc_mtx;
	struct klist	sc_rklist;	/* [m] */
	struct klist	sc_wklist;	/* [m] */

	struct pipex_session
			*sc_multicast_session;

	struct mbuf_queue
			sc_mq;
};

LIST_HEAD(pppac_list, pppac_softc);	/* [K] */

static void	filt_pppac_rdetach(struct knote *);
static int	filt_pppac_read(struct knote *, long);
static int	filt_pppac_modify(struct kevent *, struct knote *);
static int	filt_pppac_process(struct knote *, struct kevent *);

static const struct filterops pppac_rd_filtops = {
	.f_flags	= FILTEROP_ISFD | FILTEROP_MPSAFE,
	.f_attach	= NULL,
	.f_detach	= filt_pppac_rdetach,
	.f_event	= filt_pppac_read,
	.f_modify	= filt_pppac_modify,
	.f_process	= filt_pppac_process,
};

static void	filt_pppac_wdetach(struct knote *);
static int	filt_pppac_write(struct knote *, long);

static const struct filterops pppac_wr_filtops = {
	.f_flags	= FILTEROP_ISFD | FILTEROP_MPSAFE,
	.f_attach	= NULL,
	.f_detach	= filt_pppac_wdetach,
	.f_event	= filt_pppac_write,
	.f_modify	= filt_pppac_modify,
	.f_process	= filt_pppac_process,
};

static struct pppac_list pppac_devs = LIST_HEAD_INITIALIZER(pppac_devs);

static int	pppac_ioctl(struct ifnet *, u_long, caddr_t);

static int	pppac_add_session(struct pppac_softc *,
		    struct pipex_session_req *);
static int	pppac_del_session(struct pppac_softc *,
		    struct pipex_session_close_req *);
static int	pppac_output(struct ifnet *, struct mbuf *, struct sockaddr *,
		    struct rtentry *);
static void	pppac_qstart(struct ifqueue *);

static inline struct pppac_softc *
pppac_lookup(dev_t dev)
{
	struct pppac_softc *sc;

	LIST_FOREACH(sc, &pppac_devs, sc_entry) {
		if (sc->sc_dev == dev) {
			if (sc->sc_ready == 0)
				break;

			return (sc);
		}
	}

	return (NULL);
}

void
pppacattach(int n)
{
	pipex_init(); /* to be sure, to be sure */
}
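
/*
 * sc_ready is not set until pppacopen() has completely initialised the
 * softc and attached the interface, and it is cleared at the start of
 * pppacclose(), so pppac_lookup() never hands out a half-constructed or
 * dying device.
 */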
int
pppacopen(dev_t dev, int flags, int mode, struct proc *p)
{
	struct pppac_softc *sc, *tmp;
	struct ifnet *ifp;
	struct pipex_session *session;

	sc = malloc(sizeof(*sc), M_DEVBUF, M_WAITOK|M_ZERO);
	sc->sc_dev = dev;
	LIST_FOREACH(tmp, &pppac_devs, sc_entry) {
		if (tmp->sc_dev == dev) {
			free(sc, M_DEVBUF, sizeof(*sc));
			return (EBUSY);
		}
	}
	LIST_INSERT_HEAD(&pppac_devs, sc, sc_entry);

	/* virtual pipex_session entry for multicast */
	session = pool_get(&pipex_session_pool, PR_WAITOK | PR_ZERO);
	session->flags |= PIPEX_SFLAGS_MULTICAST;
	session->ownersc = sc;
	sc->sc_multicast_session = session;

	mtx_init(&sc->sc_mtx, IPL_SOFTNET);
	klist_init_mutex(&sc->sc_rklist, &sc->sc_mtx);
	klist_init_mutex(&sc->sc_wklist, &sc->sc_mtx);
	mq_init(&sc->sc_mq, IFQ_MAXLEN, IPL_SOFTNET);

	ifp = &sc->sc_if;
	snprintf(ifp->if_xname, sizeof(ifp->if_xname), "pppac%u", minor(dev));

	ifp->if_softc = sc;
	ifp->if_type = IFT_L3IPVLAN;
	ifp->if_hdrlen = sizeof(uint32_t); /* for BPF */
	ifp->if_mtu = MAXMCLBYTES - sizeof(uint32_t);
	ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST;
	ifp->if_xflags = IFXF_CLONED | IFXF_MPSAFE;
	ifp->if_rtrequest = p2p_rtrequest; /* XXX */
	ifp->if_output = pppac_output;
	ifp->if_qstart = pppac_qstart;
	ifp->if_ioctl = pppac_ioctl;

	if_counters_alloc(ifp);
	if_attach(ifp);
	if_alloc_sadl(ifp);

#if NBPFILTER > 0
	bpfattach(&ifp->if_bpf, ifp, DLT_LOOP, sizeof(uint32_t));
#endif

	sc->sc_ready = 1;

	return (0);
}

int
pppacread(dev_t dev, struct uio *uio, int ioflag)
{
	struct pppac_softc *sc = pppac_lookup(dev);
	struct ifnet *ifp = &sc->sc_if;
	struct mbuf *m0, *m;
	int error = 0;
	size_t len;

	if (!ISSET(ifp->if_flags, IFF_RUNNING))
		return (EHOSTDOWN);

	m0 = mq_dequeue(&sc->sc_mq);
	if (m0 == NULL) {
		if (ISSET(ioflag, IO_NDELAY))
			return (EWOULDBLOCK);

		do {
			error = tsleep_nsec(sc, (PZERO + 1)|PCATCH,
			    "pppacrd", INFSLP);
			if (error != 0)
				return (error);

			m0 = mq_dequeue(&sc->sc_mq);
		} while (m0 == NULL);
	}

	m = m0;
	while (uio->uio_resid > 0) {
		len = ulmin(uio->uio_resid, m->m_len);
		if (len != 0) {
			error = uiomove(mtod(m, caddr_t), len, uio);
			if (error != 0)
				break;
		}

		m = m->m_next;
		if (m == NULL)
			break;
	}
	m_freem(m0);

	return (error);
}
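
/*
 * Packets exchanged over /dev/pppac carry only a 4-byte address family
 * in network byte order (the DLT_LOOP framing advertised to BPF) in
 * front of the IP payload; unlike pppx(4) there is no per-session header,
 * outbound sessions are looked up by destination address instead.
 */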
int
pppacwrite(dev_t dev, struct uio *uio, int ioflag)
{
	struct pppac_softc *sc = pppac_lookup(dev);
	struct ifnet *ifp = &sc->sc_if;
	uint32_t proto;
	int error;
	struct mbuf *m;

	if (!ISSET(ifp->if_flags, IFF_RUNNING))
		return (EHOSTDOWN);

	if (uio->uio_resid < ifp->if_hdrlen || uio->uio_resid > MAXMCLBYTES)
		return (EMSGSIZE);

	m = m_gethdr(M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOMEM);

	if (uio->uio_resid > MHLEN) {
		m_clget(m, M_WAITOK, uio->uio_resid);
		if (!ISSET(m->m_flags, M_EXT)) {
			m_free(m);
			return (ENOMEM);
		}
	}

	m->m_pkthdr.len = m->m_len = uio->uio_resid;

	error = uiomove(mtod(m, void *), m->m_len, uio);
	if (error != 0) {
		m_freem(m);
		return (error);
	}

#if NBPFILTER > 0
	if (ifp->if_bpf)
		bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_IN);
#endif

	/* strip the tunnel header */
	proto = ntohl(*mtod(m, uint32_t *));
	m_adj(m, sizeof(uint32_t));

	m->m_flags &= ~(M_MCAST|M_BCAST);
	m->m_pkthdr.ph_ifidx = ifp->if_index;
	m->m_pkthdr.ph_rtableid = ifp->if_rdomain;

#if NPF > 0
	pf_pkt_addr_changed(m);
#endif

	counters_pkt(ifp->if_counters,
	    ifc_ipackets, ifc_ibytes, m->m_pkthdr.len);

	NET_LOCK();

	switch (proto) {
	case AF_INET:
		ipv4_input(ifp, m);
		break;
#ifdef INET6
	case AF_INET6:
		ipv6_input(ifp, m);
		break;
#endif
	default:
		m_freem(m);
		error = EAFNOSUPPORT;
		break;
	}

	NET_UNLOCK();

	return (error);
}

int
pppacioctl(dev_t dev, u_long cmd, caddr_t data, int flags, struct proc *p)
{
	struct pppac_softc *sc = pppac_lookup(dev);
	int error = 0;

	switch (cmd) {
	case FIONBIO:
		break;
	case FIONREAD:
		*(int *)data = mq_hdatalen(&sc->sc_mq);
		break;

	case PIPEXASESSION:
		error = pppac_add_session(sc, (struct pipex_session_req *)data);
		break;
	case PIPEXDSESSION:
		error = pppac_del_session(sc,
		    (struct pipex_session_close_req *)data);
		break;
	default:
		error = pipex_ioctl(sc, cmd, data);
		break;
	}

	return (error);
}

int
pppackqfilter(dev_t dev, struct knote *kn)
{
	struct pppac_softc *sc = pppac_lookup(dev);
	struct klist *klist;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		klist = &sc->sc_rklist;
		kn->kn_fop = &pppac_rd_filtops;
		break;
	case EVFILT_WRITE:
		klist = &sc->sc_wklist;
		kn->kn_fop = &pppac_wr_filtops;
		break;
	default:
		return (EINVAL);
	}

	kn->kn_hook = sc;

	klist_insert(klist, kn);

	return (0);
}

static void
filt_pppac_rdetach(struct knote *kn)
{
	struct pppac_softc *sc = kn->kn_hook;

	klist_remove(&sc->sc_rklist, kn);
}

static int
filt_pppac_read(struct knote *kn, long hint)
{
	struct pppac_softc *sc = kn->kn_hook;

	MUTEX_ASSERT_LOCKED(&sc->sc_mtx);

	kn->kn_data = mq_hdatalen(&sc->sc_mq);

	return (kn->kn_data > 0);
}

static void
filt_pppac_wdetach(struct knote *kn)
{
	struct pppac_softc *sc = kn->kn_hook;

	klist_remove(&sc->sc_wklist, kn);
}

static int
filt_pppac_write(struct knote *kn, long hint)
{
	/* We're always ready to accept a write. */
	return (1);
}
static int
filt_pppac_modify(struct kevent *kev, struct knote *kn)
{
	struct pppac_softc *sc = kn->kn_hook;
	int active;

	mtx_enter(&sc->sc_mtx);
	active = knote_modify(kev, kn);
	mtx_leave(&sc->sc_mtx);

	return (active);
}

static int
filt_pppac_process(struct knote *kn, struct kevent *kev)
{
	struct pppac_softc *sc = kn->kn_hook;
	int active;

	mtx_enter(&sc->sc_mtx);
	active = knote_process(kn, kev);
	mtx_leave(&sc->sc_mtx);

	return (active);
}

int
pppacclose(dev_t dev, int flags, int mode, struct proc *p)
{
	struct pppac_softc *sc = pppac_lookup(dev);
	struct ifnet *ifp = &sc->sc_if;

	sc->sc_ready = 0;

	NET_LOCK();
	CLR(ifp->if_flags, IFF_RUNNING);
	NET_UNLOCK();

	if_detach(ifp);

	klist_free(&sc->sc_rklist);
	klist_free(&sc->sc_wklist);

	pool_put(&pipex_session_pool, sc->sc_multicast_session);
	pipex_destroy_all_sessions(sc);

	LIST_REMOVE(sc, sc_entry);
	free(sc, M_DEVBUF, sizeof(*sc));

	return (0);
}

static int
pppac_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	/* struct ifreq *ifr = (struct ifreq *)data; */
	int error = 0;

	switch (cmd) {
	case SIOCSIFADDR:
		SET(ifp->if_flags, IFF_UP); /* XXX cry cry */
		/* FALLTHROUGH */
	case SIOCSIFFLAGS:
		if (ISSET(ifp->if_flags, IFF_UP))
			SET(ifp->if_flags, IFF_RUNNING);
		else
			CLR(ifp->if_flags, IFF_RUNNING);
		break;
	case SIOCSIFMTU:
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/* XXX */
		break;

	default:
		error = ENOTTY;
		break;
	}

	return (error);
}

static int
pppac_add_session(struct pppac_softc *sc, struct pipex_session_req *req)
{
	int error;
	struct pipex_session *session;

	error = pipex_init_session(&session, req);
	if (error != 0)
		return (error);
	error = pipex_link_session(session, &sc->sc_if, sc);
	if (error != 0)
		pipex_rele_session(session);

	return (error);
}

static int
pppac_del_session(struct pppac_softc *sc, struct pipex_session_close_req *req)
{
	struct pipex_session *session;

	mtx_enter(&pipex_list_mtx);

	session = pipex_lookup_by_session_id_locked(req->pcr_protocol,
	    req->pcr_session_id);
	if (session == NULL || session->ownersc != sc) {
		mtx_leave(&pipex_list_mtx);
		return (EINVAL);
	}
	pipex_unlink_session_locked(session);
	pipex_rele_session(session);

	mtx_leave(&pipex_list_mtx);

	return (0);
}

static int
pppac_output(struct ifnet *ifp, struct mbuf *m, struct sockaddr *dst,
    struct rtentry *rt)
{
	int error;

	if (!ISSET(ifp->if_flags, IFF_RUNNING)) {
		error = EHOSTDOWN;
		goto drop;
	}

	switch (dst->sa_family) {
	case AF_INET:
#ifdef INET6
	case AF_INET6:
#endif
		break;
	default:
		error = EAFNOSUPPORT;
		goto drop;
	}

	m->m_pkthdr.ph_family = dst->sa_family;

	return (if_enqueue(ifp, m));

drop:
	m_freem(m);
	return (error);
}
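
/*
 * pppac_qstart() dispatches each outbound packet: IPv4 multicast is
 * copied to the virtual multicast session, unicast destinations with a
 * matching pipex session are sent through pipex_ip_output(), and
 * everything else is framed with the address family and queued on sc_mq
 * for userland to read.
 */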
static void
pppac_qstart(struct ifqueue *ifq)
{
	struct ifnet *ifp = ifq->ifq_if;
	struct pppac_softc *sc = ifp->if_softc;
	struct mbuf *m, *m0;
	struct pipex_session *session;
	struct ip ip;
	int rv;

	while ((m = ifq_dequeue(ifq)) != NULL) {
#if NBPFILTER > 0
		if (ifp->if_bpf) {
			bpf_mtap_af(ifp->if_bpf, m->m_pkthdr.ph_family, m,
			    BPF_DIRECTION_OUT);
		}
#endif

		switch (m->m_pkthdr.ph_family) {
		case AF_INET:
			if (m->m_pkthdr.len < sizeof(struct ip))
				goto bad;
			m_copydata(m, 0, sizeof(struct ip), &ip);
			if (IN_MULTICAST(ip.ip_dst.s_addr)) {
				/* pass a copy to pipex */
				m0 = m_copym(m, 0, M_COPYALL, M_NOWAIT);
				if (m0 != NULL)
					pipex_ip_output(m0,
					    sc->sc_multicast_session);
				else
					goto bad;
			} else {
				session = pipex_lookup_by_ip_address(ip.ip_dst);
				if (session != NULL) {
					pipex_ip_output(m, session);
					pipex_rele_session(session);
					m = NULL;
				}
			}
			break;
		}
		if (m == NULL)	/* handled by pipex */
			continue;

		m = m_prepend(m, sizeof(uint32_t), M_DONTWAIT);
		if (m == NULL)
			goto bad;
		*mtod(m, uint32_t *) = htonl(m->m_pkthdr.ph_family);

		rv = mq_enqueue(&sc->sc_mq, m);
		if (rv == 1)
			counters_inc(ifp->if_counters, ifc_collisions);
		continue;
bad:
		counters_inc(ifp->if_counters, ifc_oerrors);
		if (m != NULL)
			m_freem(m);
		continue;
	}

	if (!mq_empty(&sc->sc_mq)) {
		wakeup(sc);
		knote(&sc->sc_rklist, 0);
	}
}