/*
 * Copyright (c) 2005 Jeffrey M. Hsu.  All rights reserved.
 * Copyright (c) 1982, 1986, 1988, 1990, 1993
 *        The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the University of
 *        California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)uipc_socket2.c        8.1 (Berkeley) 6/10/93
 * $FreeBSD: src/sys/kern/uipc_socket2.c,v 1.55.2.17 2002/08/31 19:04:55 dwmalone Exp $
 * $DragonFly: src/sys/kern/uipc_socket2.c,v 1.33 2008/09/02 16:17:52 dillon Exp $
 */

#include "opt_param.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/domain.h>
#include <sys/file.h>        /* for maxfiles */
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/protosw.h>
#include <sys/resourcevar.h>
#include <sys/stat.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/socketops.h>
#include <sys/signalvar.h>
#include <sys/sysctl.h>
#include <sys/event.h>

#include <sys/thread2.h>
#include <sys/msgport2.h>
#include <sys/socketvar2.h>

int maxsockets;

/*
 * Primitive routines for operating on sockets and socket buffers
 */

u_long  sb_max = SB_MAX;
u_long  sb_max_adj =
    SB_MAX * MCLBYTES / (MSIZE + MCLBYTES);        /* adjusted sb_max */

static u_long sb_efficiency = 8;        /* parameter for sbreserve() */

/************************************************************************
 *                        signalsockbuf procedures                      *
 ************************************************************************/

/*
 * Wait for data to arrive at/drain from a socket buffer.
 *
 * NOTE: Caller must generally hold the ssb_lock (client side lock) since
 *       WAIT/WAKEUP only works for one client at a time.
 *
 * NOTE: Caller always retries whatever operation it was waiting on.
 */
int
ssb_wait(struct signalsockbuf *ssb)
{
        uint32_t flags;
        int pflags;
        int error;

        pflags = (ssb->ssb_flags & SSB_NOINTR) ? 0 : PCATCH;

        for (;;) {
                flags = ssb->ssb_flags;
                cpu_ccfence();

                /*
                 * WAKEUP and WAIT interlock each other.  We can catch the
                 * race by checking to see if WAKEUP has already been set,
                 * and only setting WAIT if WAKEUP is clear.
                 */
                if (flags & SSB_WAKEUP) {
                        if (atomic_cmpset_int(&ssb->ssb_flags, flags,
                                              flags & ~SSB_WAKEUP)) {
                                error = 0;
                                break;
                        }
                        continue;
                }

                /*
                 * Only set WAIT if WAKEUP is clear.
                 */
                tsleep_interlock(&ssb->ssb_cc, pflags);
                if (atomic_cmpset_int(&ssb->ssb_flags, flags,
                                      flags | SSB_WAIT)) {
                        error = tsleep(&ssb->ssb_cc, pflags | PINTERLOCKED,
                                       "sbwait", ssb->ssb_timeo);
                        break;
                }
        }
        return (error);
}

/*
 * Lock a sockbuf already known to be locked by someone else (the
 * contested path for ssb_lock()); return any error returned from
 * the sleep (EINTR).
 */
int
_ssb_lock(struct signalsockbuf *ssb)
{
        uint32_t flags;
        int pflags;
        int error;

        pflags = (ssb->ssb_flags & SSB_NOINTR) ? 0 : PCATCH;

        for (;;) {
                flags = ssb->ssb_flags;
                cpu_ccfence();
                if (flags & SSB_LOCK) {
                        tsleep_interlock(&ssb->ssb_flags, pflags);
                        if (atomic_cmpset_int(&ssb->ssb_flags, flags,
                                              flags | SSB_WANT)) {
                                error = tsleep(&ssb->ssb_flags,
                                               pflags | PINTERLOCKED,
                                               "sblock", 0);
                                if (error)
                                        break;
                        }
                } else {
                        if (atomic_cmpset_int(&ssb->ssb_flags, flags,
                                              flags | SSB_LOCK)) {
                                lwkt_gettoken(&ssb->ssb_token);
                                error = 0;
                                break;
                        }
                }
        }
        return (error);
}
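
/*
 * Illustrative sketch (not compiled): the canonical consumer pattern for
 * the two routines above.  A reader takes the client-side lock, tests its
 * condition, and calls ssb_wait() in a retry loop, re-testing the
 * condition after every wakeup (per the NOTE on ssb_wait()).  This
 * assumes ssb_lock()/ssb_unlock() helpers taking the buffer and a wait
 * flag; example_wait_for_data() is a hypothetical name.
 */
#if 0
static int
example_wait_for_data(struct socket *so)
{
        struct signalsockbuf *ssb = &so->so_rcv;
        int error;

        error = ssb_lock(ssb, M_WAITOK);        /* client-side lock */
        if (error)
                return (error);
        while (ssb->ssb_cc == 0) {              /* test the condition... */
                error = ssb_wait(ssb);          /* ...sleep, then retry */
                if (error)
                        break;
        }
        ssb_unlock(ssb);
        return (error);
}
#endif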
/*
 * This does the same for sockbufs.  Note that the xsockbuf structure,
 * since it is always embedded in a socket, does not include a self
 * pointer nor a length.  We make this entry point public in case
 * some other mechanism needs it.
 */
void
ssbtoxsockbuf(struct signalsockbuf *ssb, struct xsockbuf *xsb)
{
        xsb->sb_cc = ssb->ssb_cc;
        xsb->sb_hiwat = ssb->ssb_hiwat;
        xsb->sb_mbcnt = ssb->ssb_mbcnt;
        xsb->sb_mbmax = ssb->ssb_mbmax;
        xsb->sb_lowat = ssb->ssb_lowat;
        xsb->sb_flags = ssb->ssb_flags;
        xsb->sb_timeo = ssb->ssb_timeo;
}

/************************************************************************
 *      Procedures which manipulate socket state flags, wakeups, etc.  *
 ************************************************************************
 *
 * The normal sequence from the active (originating) side is that
 * soisconnecting() is called during processing of the connect() call,
 * resulting in an eventual call to soisconnected() if/when the
 * connection is established.  When the connection is torn down
 * soisdisconnecting() is called during processing of the disconnect()
 * call, and soisdisconnected() is called when the connection to the
 * peer is totally severed.
 *
 * The semantics of these routines are such that connectionless protocols
 * can call soisconnected() and soisdisconnected() only, bypassing the
 * in-progress calls when setting up a ``connection'' takes no time.
 *
 * From the passive side, a socket is created with two queues of sockets:
 * so_incomp for connections in progress and so_comp for connections
 * already made and awaiting user acceptance.  As a protocol is preparing
 * incoming connections, it creates a socket structure queued on so_incomp
 * by calling sonewconn().  When the connection is established,
 * soisconnected() is called, and transfers the socket structure to
 * so_comp, making it available to accept().
 *
 * If a socket is closed with sockets on either so_incomp or so_comp,
 * these sockets are dropped.
 *
 * If higher level protocols are implemented in the kernel, the wakeups
 * done here will sometimes cause software-interrupt process scheduling.
 */

void
soisconnecting(struct socket *so)
{
        soclrstate(so, SS_ISCONNECTED | SS_ISDISCONNECTING);
        sosetstate(so, SS_ISCONNECTING);
}

void
soisconnected(struct socket *so)
{
        struct socket *head;

        while ((head = so->so_head) != NULL) {
                lwkt_getpooltoken(head);
                if (so->so_head == head)
                        break;
                lwkt_relpooltoken(head);
        }

        soclrstate(so, SS_ISCONNECTING | SS_ISDISCONNECTING | SS_ISCONFIRMING);
        sosetstate(so, SS_ISCONNECTED);
        if (head && (so->so_state & SS_INCOMP)) {
                if ((so->so_options & SO_ACCEPTFILTER) != 0) {
                        so->so_upcall =
                            head->so_accf->so_accept_filter->accf_callback;
                        so->so_upcallarg = head->so_accf->so_accept_filter_arg;
                        atomic_set_int(&so->so_rcv.ssb_flags, SSB_UPCALL);
                        so->so_options &= ~SO_ACCEPTFILTER;
                        so->so_upcall(so, so->so_upcallarg, 0);
                        lwkt_relpooltoken(head);
                        return;
                }

                /*
                 * Listen sockets are not per-cpu.
                 */
                TAILQ_REMOVE(&head->so_incomp, so, so_list);
                head->so_incqlen--;
                TAILQ_INSERT_TAIL(&head->so_comp, so, so_list);
                head->so_qlen++;
                sosetstate(so, SS_COMP);
                soclrstate(so, SS_INCOMP);

                /*
                 * XXX head may be on a different protocol thread.
                 *     sorwakeup()->sowakeup() is hacked atm.
                 */
                sorwakeup(head);
                wakeup_one(&head->so_timeo);
        } else {
                wakeup(&so->so_timeo);
                sorwakeup(so);
                sowwakeup(so);
        }
        if (head)
                lwkt_relpooltoken(head);
}

void
soisdisconnecting(struct socket *so)
{
        soclrstate(so, SS_ISCONNECTING);
        sosetstate(so, SS_ISDISCONNECTING | SS_CANTRCVMORE | SS_CANTSENDMORE);
        wakeup((caddr_t)&so->so_timeo);
        sowwakeup(so);
        sorwakeup(so);
}

void
soisdisconnected(struct socket *so)
{
        soclrstate(so, SS_ISCONNECTING | SS_ISCONNECTED | SS_ISDISCONNECTING);
        sosetstate(so, SS_CANTRCVMORE | SS_CANTSENDMORE | SS_ISDISCONNECTED);
        wakeup((caddr_t)&so->so_timeo);
        sbdrop(&so->so_snd.sb, so->so_snd.ssb_cc);
        sowwakeup(so);
        sorwakeup(so);
}

void
soisreconnecting(struct socket *so)
{
        soclrstate(so, SS_ISDISCONNECTING | SS_ISDISCONNECTED |
                       SS_CANTRCVMORE | SS_CANTSENDMORE);
        sosetstate(so, SS_ISCONNECTING);
}

void
soisreconnected(struct socket *so)
{
        soclrstate(so, SS_ISDISCONNECTED | SS_CANTRCVMORE | SS_CANTSENDMORE);
        soisconnected(so);
}

/*
 * Set or change the message port a socket receives commands on.
 *
 * XXX
 */
void
sosetport(struct socket *so, lwkt_port_t port)
{
        so->so_port = port;
}
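
/*
 * Illustrative sketch (not compiled): how a hypothetical stream protocol
 * would drive the active-side transitions described above.  A datagram
 * protocol would skip straight to soisconnected().  All names other than
 * the soisxxx() calls are made up for the example.
 */
#if 0
static void
example_proto_connect(struct socket *so)
{
        soisconnecting(so);     /* connect() issued, handshake in flight */
        /* ... initiate the handshake, return to the caller ... */
}

static void
example_proto_handshake_done(struct socket *so)
{
        soisconnected(so);      /* wakes up a blocked connect() */
}
#endif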
/*
 * When an attempt at a new connection is noted on a socket
 * which accepts connections, sonewconn() is called.  If the
 * connection is possible (subject to space constraints, etc.)
 * then we allocate a new structure, properly linked into the
 * data structure of the original socket, and return this.
 * Connstatus may be 0, SS_ISCONFIRMING, or SS_ISCONNECTED.
 *
 * The new socket is returned with one ref and so_pcb assigned.
 * The reference is implied by so_pcb.
 */
struct socket *
sonewconn_faddr(struct socket *head, int connstatus,
                const struct sockaddr *faddr)
{
        struct socket *so;
        struct socket *sp;
        struct pru_attach_info ai;

        if (head->so_qlen > 3 * head->so_qlimit / 2)
                return (NULL);
        so = soalloc(1, head->so_proto);
        if (so == NULL)
                return (NULL);

        /*
         * Set the port prior to attaching the inpcb to the current
         * cpu's protocol thread (which should be the current thread
         * but might not be in all cases).  This serializes any pcb ops
         * which occur to our cpu allowing us to complete the attachment
         * without racing anything.
         */
        if (head->so_proto->pr_flags & PR_SYNC_PORT)
                sosetport(so, &netisr_sync_port);
        else
                sosetport(so, netisr_portfn(mycpu->gd_cpuid));
        if ((head->so_options & SO_ACCEPTFILTER) != 0)
                connstatus = 0;
        so->so_head = head;
        so->so_type = head->so_type;
        so->so_options = head->so_options &~ SO_ACCEPTCONN;
        so->so_linger = head->so_linger;

        /*
         * NOTE: Clearing NOFDREF implies referencing the so with
         *       soreference().
         */
        so->so_state = head->so_state | SS_NOFDREF | SS_ASSERTINPROG;
        so->so_cred = crhold(head->so_cred);
        ai.sb_rlimit = NULL;
        ai.p_ucred = NULL;
        ai.fd_rdir = NULL;              /* jail code cruft XXX JH */

        /*
         * Reserve space and call pru_attach.  We can direct-call the
         * function since we're already in the protocol thread.
         */
        if (soreserve(so, head->so_snd.ssb_hiwat,
                      head->so_rcv.ssb_hiwat, NULL) ||
            so_pru_attach_direct(so, 0, &ai)) {
                so->so_head = NULL;
                soclrstate(so, SS_ASSERTINPROG);
                sofree(so);             /* remove implied pcb ref */
                return (NULL);
        }
        KKASSERT(((so->so_proto->pr_flags & PR_ASYNC_RCVD) == 0 &&
                  so->so_refs == 2) ||  /* attach + our base ref */
                 ((so->so_proto->pr_flags & PR_ASYNC_RCVD) &&
                  so->so_refs == 3));   /* + async rcvd ref */
        sofree(so);
        KKASSERT(so->so_port != NULL);
        so->so_rcv.ssb_lowat = head->so_rcv.ssb_lowat;
        so->so_snd.ssb_lowat = head->so_snd.ssb_lowat;
        so->so_rcv.ssb_timeo = head->so_rcv.ssb_timeo;
        so->so_snd.ssb_timeo = head->so_snd.ssb_timeo;

        if (head->so_rcv.ssb_flags & SSB_AUTOLOWAT)
                so->so_rcv.ssb_flags |= SSB_AUTOLOWAT;
        else
                so->so_rcv.ssb_flags &= ~SSB_AUTOLOWAT;

        if (head->so_snd.ssb_flags & SSB_AUTOLOWAT)
                so->so_snd.ssb_flags |= SSB_AUTOLOWAT;
        else
                so->so_snd.ssb_flags &= ~SSB_AUTOLOWAT;

        if (head->so_rcv.ssb_flags & SSB_AUTOSIZE)
                so->so_rcv.ssb_flags |= SSB_AUTOSIZE;
        else
                so->so_rcv.ssb_flags &= ~SSB_AUTOSIZE;

        if (head->so_snd.ssb_flags & SSB_AUTOSIZE)
                so->so_snd.ssb_flags |= SSB_AUTOSIZE;
        else
                so->so_snd.ssb_flags &= ~SSB_AUTOSIZE;

        /*
         * Save the faddr, if the information is provided and
         * the protocol can perform the saving operation.
         */
        if (faddr != NULL && so->so_proto->pr_usrreqs->pru_savefaddr != NULL)
                so->so_proto->pr_usrreqs->pru_savefaddr(so, faddr);

        lwkt_getpooltoken(head);
        if (connstatus) {
                TAILQ_INSERT_TAIL(&head->so_comp, so, so_list);
                sosetstate(so, SS_COMP);
                head->so_qlen++;
        } else {
                if (head->so_incqlen > head->so_qlimit) {
                        sp = TAILQ_FIRST(&head->so_incomp);
                        TAILQ_REMOVE(&head->so_incomp, sp, so_list);
                        head->so_incqlen--;
                        soclrstate(sp, SS_INCOMP);
                        sp->so_head = NULL;
                        soaborta(sp);
                }
                TAILQ_INSERT_TAIL(&head->so_incomp, so, so_list);
                sosetstate(so, SS_INCOMP);
                head->so_incqlen++;
        }
        lwkt_relpooltoken(head);
        if (connstatus) {
                /*
                 * XXX head may be on a different protocol thread.
                 *     sorwakeup()->sowakeup() is hacked atm.
                 */
                sorwakeup(head);
                wakeup((caddr_t)&head->so_timeo);
                sosetstate(so, connstatus);
        }
        soclrstate(so, SS_ASSERTINPROG);
        return (so);
}

struct socket *
sonewconn(struct socket *head, int connstatus)
{
        return sonewconn_faddr(head, connstatus, NULL);
}

/*
 * socantsendmore() indicates that no more data will be sent on the
 * socket; it would normally be applied to a socket by the protocol
 * code when the user informs the system that no more data is to be
 * sent (in the case of PRU_SHUTDOWN).  socantrcvmore() indicates that
 * no more data will be received, and will normally be applied to the
 * socket by a protocol when it detects that the peer will send no more
 * data.  Data queued for reading in the socket may yet be read.
 */
void
socantsendmore(struct socket *so)
{
        sosetstate(so, SS_CANTSENDMORE);
        sowwakeup(so);
}

void
socantrcvmore(struct socket *so)
{
        sosetstate(so, SS_CANTRCVMORE);
        sorwakeup(so);
}
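
/*
 * Illustrative sketch (not compiled): how a protocol's input path would
 * use sonewconn() as described above.  TCP, for example, creates the
 * embryonic socket with connstatus 0 while the handshake is in flight
 * and calls soisconnected() later; a protocol whose connection is
 * complete on arrival could pass SS_ISCONNECTED directly.
 * example_input() is a hypothetical name.
 */
#if 0
static void
example_input(struct socket *head, const struct sockaddr *faddr)
{
        struct socket *so;

        so = sonewconn_faddr(head, 0, faddr);   /* queued on so_incomp */
        if (so == NULL)
                return;                 /* listen queue full, drop */
        /* ... run the handshake; on completion: */
        soisconnected(so);      /* moves so to so_comp for accept() */
}
#endif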
/*
 * Wakeup processes waiting on a socket buffer.  Do asynchronous notification
 * via SIGIO if the socket has the SS_ASYNC flag set.
 *
 * For users waiting on send/recv, try to avoid unnecessary context-switch
 * thrashing, particularly for senders of large buffers (needs to be
 * extended to sel and aio? XXX)
 *
 * WARNING! Can be called on a foreign socket from the wrong protocol
 *          thread.  I.e. it is called on the 'head' listen socket when
 *          a new connection comes in.
 */
void
sowakeup(struct socket *so, struct signalsockbuf *ssb)
{
        struct kqinfo *kqinfo = &ssb->ssb_kq;
        uint32_t flags;

        /*
         * Check conditions, set the WAKEUP flag, and clear and signal if
         * the WAIT flag is found to be set.  This interlocks against the
         * client side.
         */
        for (;;) {
                flags = ssb->ssb_flags;
                cpu_ccfence();

                if ((ssb == &so->so_snd && ssb_space(ssb) >= ssb->ssb_lowat) ||
                    (ssb == &so->so_rcv && ssb->ssb_cc >= ssb->ssb_lowat) ||
                    (ssb == &so->so_snd && (so->so_state & SS_CANTSENDMORE)) ||
                    (ssb == &so->so_rcv && (so->so_state & SS_CANTRCVMORE))
                ) {
                        if (atomic_cmpset_int(&ssb->ssb_flags, flags,
                                        (flags | SSB_WAKEUP) & ~SSB_WAIT)) {
                                if (flags & SSB_WAIT)
                                        wakeup(&ssb->ssb_cc);
                                break;
                        }
                } else {
                        break;
                }
        }

        /*
         * Misc other events
         */
        if ((so->so_state & SS_ASYNC) && so->so_sigio != NULL)
                pgsigio(so->so_sigio, SIGIO, 0);
        if (ssb->ssb_flags & SSB_UPCALL)
                (*so->so_upcall)(so, so->so_upcallarg, MB_DONTWAIT);
        KNOTE(&kqinfo->ki_note, 0);

        /*
         * This is a bit of a hack.  Multiple threads can wind up scanning
         * ki_mlist concurrently due to the fact that this function can be
         * called on a foreign socket, so we can't afford to block here.
         *
         * We need the pool token for (so) (likely the listen socket if
         * SSB_MEVENT is set) because the predicate function may have
         * to access the accept queue.
         */
        if (ssb->ssb_flags & SSB_MEVENT) {
                struct netmsg_so_notify *msg, *nmsg;

                lwkt_getpooltoken(so);
                TAILQ_FOREACH_MUTABLE(msg, &kqinfo->ki_mlist, nm_list, nmsg) {
                        if (msg->nm_predicate(msg)) {
                                TAILQ_REMOVE(&kqinfo->ki_mlist, msg, nm_list);
                                lwkt_replymsg(&msg->base.lmsg,
                                              msg->base.lmsg.ms_error);
                        }
                }
                if (TAILQ_EMPTY(&ssb->ssb_kq.ki_mlist))
                        atomic_clear_int(&ssb->ssb_flags, SSB_MEVENT);
                lwkt_relpooltoken(so);
        }
}
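
/*
 * Illustrative sketch (not compiled): the producer side of the
 * WAKEUP/WAIT interlock above.  A protocol queues data to the receive
 * buffer and only then calls sorwakeup(), which resolves to
 * sowakeup(so, &so->so_rcv).  This assumes the sbappend() helper;
 * example_deliver() is a hypothetical name.
 */
#if 0
static void
example_deliver(struct socket *so, struct mbuf *m)
{
        sbappend(&so->so_rcv.sb, m);    /* queue the data first */
        sorwakeup(so);                  /* then wake/notify readers */
}
#endif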
/*
 * Socket buffer (struct signalsockbuf) utility routines.
 *
 * Each socket contains two socket buffers: one for sending data and
 * one for receiving data.  Each buffer contains a queue of mbufs,
 * information about the number of mbufs and amount of data in the
 * queue, and other fields allowing kevent()/select()/poll() statements
 * and notification on data availability to be implemented.
 *
 * Data stored in a socket buffer is maintained as a list of records.
 * Each record is a list of mbufs chained together with the m_next
 * field.  Records are chained together with the m_nextpkt field.  The
 * upper level routine soreceive() expects the following conventions to
 * be observed when placing information in the receive buffer:
 *
 * 1. If the protocol requires each message be preceded by the sender's
 *    name, then a record containing that name must be present before
 *    any associated data (mbuf's must be of type MT_SONAME).
 * 2. If the protocol supports the exchange of ``access rights'' (really
 *    just additional data associated with the message), and there are
 *    ``rights'' to be received, then a record containing this data
 *    should be present (mbuf's must be of type MT_RIGHTS).
 * 3. If a name or rights record exists, then it must be followed by
 *    a data record, perhaps of zero length.
 *
 * Before using a new socket structure it is first necessary to reserve
 * buffer space for the socket, by calling soreserve().  This should commit
 * some of the available buffer space in the system buffer pool for the
 * socket (currently, it does nothing but enforce limits).  The space
 * should be released by calling ssb_release() when the socket is destroyed.
 */
int
soreserve(struct socket *so, u_long sndcc, u_long rcvcc, struct rlimit *rl)
{
        if (so->so_snd.ssb_lowat == 0)
                atomic_set_int(&so->so_snd.ssb_flags, SSB_AUTOLOWAT);
        if (ssb_reserve(&so->so_snd, sndcc, so, rl) == 0)
                goto bad;
        if (ssb_reserve(&so->so_rcv, rcvcc, so, rl) == 0)
                goto bad2;
        if (so->so_rcv.ssb_lowat == 0)
                so->so_rcv.ssb_lowat = 1;
        if (so->so_snd.ssb_lowat == 0)
                so->so_snd.ssb_lowat = MCLBYTES;
        if (so->so_snd.ssb_lowat > so->so_snd.ssb_hiwat)
                so->so_snd.ssb_lowat = so->so_snd.ssb_hiwat;
        return (0);
bad2:
        ssb_release(&so->so_snd, so);
bad:
        return (ENOBUFS);
}

static int
sysctl_handle_sb_max(SYSCTL_HANDLER_ARGS)
{
        int error = 0;
        u_long old_sb_max = sb_max;

        error = SYSCTL_OUT(req, arg1, sizeof(int));
        if (error || !req->newptr)
                return (error);
        error = SYSCTL_IN(req, arg1, sizeof(int));
        if (error)
                return (error);
        if (sb_max < MSIZE + MCLBYTES) {
                sb_max = old_sb_max;
                return (EINVAL);
        }
        sb_max_adj = (u_quad_t)sb_max * MCLBYTES / (MSIZE + MCLBYTES);
        return (0);
}

/*
 * Allot mbufs to a signalsockbuf.
 *
 * Attempt to scale mbmax so that mbcnt doesn't become limiting
 * if buffering efficiency is near the normal case.
 *
 * sb_max only applies to user-sockets (where rl != NULL).  It does
 * not apply to kernel sockets or kernel-controlled sockets.  Note
 * that NFS overrides the sockbuf limits created when nfsd creates
 * a socket.
 */
int
ssb_reserve(struct signalsockbuf *ssb, u_long cc, struct socket *so,
            struct rlimit *rl)
{
        /*
         * rl will only be NULL when we're in an interrupt (e.g. in
         * tcp_input) or when called from netgraph (i.e. ngd_attach)
         */
        if (rl && cc > sb_max_adj)
                cc = sb_max_adj;
        if (!chgsbsize(so->so_cred->cr_uidinfo, &ssb->ssb_hiwat, cc,
                       rl ? rl->rlim_cur : RLIM_INFINITY)) {
                return (0);
        }
        if (rl)
                ssb->ssb_mbmax = min(cc * sb_efficiency, sb_max);
        else
                ssb->ssb_mbmax = cc * sb_efficiency;

        /*
         * AUTOLOWAT is set on send buffers and prevents large writes
         * from generating a huge number of context switches.
         */
        if (ssb->ssb_flags & SSB_AUTOLOWAT) {
                ssb->ssb_lowat = ssb->ssb_hiwat / 2;
                if (ssb->ssb_lowat < MCLBYTES)
                        ssb->ssb_lowat = MCLBYTES;
        }
        if (ssb->ssb_lowat > ssb->ssb_hiwat)
                ssb->ssb_lowat = ssb->ssb_hiwat;
        return (1);
}

/*
 * Free mbufs held by a socket, and reserved mbuf space.
 */
void
ssb_release(struct signalsockbuf *ssb, struct socket *so)
{
        sbflush(&ssb->sb);
        (void)chgsbsize(so->so_cred->cr_uidinfo, &ssb->ssb_hiwat, 0,
                        RLIM_INFINITY);
        ssb->ssb_mbmax = 0;
}
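
/*
 * Worked example of the limits above, assuming the traditional values
 * MSIZE = 256, MCLBYTES = 2048, and SB_MAX = 256*1024 (these vary by
 * platform and kernel configuration):
 *
 *      sb_max_adj = 262144 * 2048 / (256 + 2048) = 233016
 *
 * so a user socket asking for the full 256KB of data space is trimmed
 * to ~227KB, leaving headroom for mbuf overhead.  With the default
 * sb_efficiency of 8, a 64KB reservation gets
 * ssb_mbmax = min(65536 * 8, sb_max) = 262144 bytes of mbuf accounting
 * space.
 */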
/*
 * Some routines that return EOPNOTSUPP for entry points that are not
 * supported by a protocol.  Fill in as needed.
 */
void
pr_generic_notsupp(netmsg_t msg)
{
        lwkt_replymsg(&msg->lmsg, EOPNOTSUPP);
}

int
pru_sosend_notsupp(struct socket *so, struct sockaddr *addr, struct uio *uio,
                   struct mbuf *top, struct mbuf *control, int flags,
                   struct thread *td)
{
        if (top)
                m_freem(top);
        if (control)
                m_freem(control);
        return (EOPNOTSUPP);
}

int
pru_soreceive_notsupp(struct socket *so, struct sockaddr **paddr,
                      struct uio *uio, struct sockbuf *sio,
                      struct mbuf **controlp, int *flagsp)
{
        return (EOPNOTSUPP);
}

/*
 * This isn't really a ``null'' operation, but it's the default one
 * and doesn't do anything destructive.
 */
void
pru_sense_null(netmsg_t msg)
{
        msg->sense.nm_stat->st_blksize = msg->base.nm_so->so_snd.ssb_hiwat;
        lwkt_replymsg(&msg->lmsg, 0);
}

/*
 * Make a copy of a sockaddr in a malloced buffer of type M_SONAME.  Callers
 * of this routine assume that it always succeeds, so we have to use a
 * blockable allocation even though we might be called from a critical thread.
 */
struct sockaddr *
dup_sockaddr(const struct sockaddr *sa)
{
        struct sockaddr *sa2;

        sa2 = kmalloc(sa->sa_len, M_SONAME, M_INTWAIT);
        bcopy(sa, sa2, sa->sa_len);
        return (sa2);
}

/*
 * Create an external-format (``xsocket'') structure using the information
 * in the kernel-format socket structure pointed to by so.  This is done
 * to reduce the spew of irrelevant information over this interface,
 * to isolate user code from changes in the kernel structure, and
 * potentially to provide information-hiding if we decide that
 * some of this information should be hidden from users.
 */
void
sotoxsocket(struct socket *so, struct xsocket *xso)
{
        xso->xso_len = sizeof *xso;
        xso->xso_so = so;
        xso->so_type = so->so_type;
        xso->so_options = so->so_options;
        xso->so_linger = so->so_linger;
        xso->so_state = so->so_state;
        xso->so_pcb = so->so_pcb;
        xso->xso_protocol = so->so_proto->pr_protocol;
        xso->xso_family = so->so_proto->pr_domain->dom_family;
        xso->so_qlen = so->so_qlen;
        xso->so_incqlen = so->so_incqlen;
        xso->so_qlimit = so->so_qlimit;
        xso->so_timeo = so->so_timeo;
        xso->so_error = so->so_error;
        xso->so_pgid = so->so_sigio ? so->so_sigio->sio_pgid : 0;
        xso->so_oobmark = so->so_oobmark;
        ssbtoxsockbuf(&so->so_snd, &xso->so_snd);
        ssbtoxsockbuf(&so->so_rcv, &xso->so_rcv);
        xso->so_uid = so->so_cred->cr_uid;
}

/*
 * Here is the definition of some of the basic objects in the kern.ipc
 * branch of the MIB.
 */
SYSCTL_NODE(_kern, KERN_IPC, ipc, CTLFLAG_RW, 0, "IPC");
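
/*
 * Illustrative sketch (not compiled): the typical consumer of
 * sotoxsocket() is a pcb-list sysctl handler that exports each socket
 * to userland.  example_export() is a hypothetical name.
 */
#if 0
static int
example_export(struct socket *so, struct sysctl_req *req)
{
        struct xsocket xso;

        sotoxsocket(so, &xso);
        return (SYSCTL_OUT(req, &xso, sizeof(xso)));
}
#endif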
/*
 * This takes the place of kern.maxsockbuf, which moved to kern.ipc.
 *
 * NOTE! sb_max only applies to user-created socket buffers.
 */
static int dummy;
SYSCTL_INT(_kern, KERN_DUMMY, dummy, CTLFLAG_RW, &dummy, 0, "");
SYSCTL_OID(_kern_ipc, KIPC_MAXSOCKBUF, maxsockbuf, CTLTYPE_INT|CTLFLAG_RW,
    &sb_max, 0, sysctl_handle_sb_max, "I", "Maximum socket buffer size");
SYSCTL_INT(_kern_ipc, OID_AUTO, maxsockets, CTLFLAG_RD,
    &maxsockets, 0, "Maximum number of sockets available");
SYSCTL_INT(_kern_ipc, KIPC_SOCKBUF_WASTE, sockbuf_waste_factor, CTLFLAG_RW,
    &sb_efficiency, 0,
    "Socket buffer limit scaler");

/*
 * Initialize maxsockets
 */
static void
init_maxsockets(void *ignored)
{
        TUNABLE_INT_FETCH("kern.ipc.maxsockets", &maxsockets);
        maxsockets = imax(maxsockets, imax(maxfiles, nmbclusters));
}
SYSINIT(param, SI_BOOT1_TUNABLES, SI_ORDER_ANY,
        init_maxsockets, NULL);
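
/*
 * Usage note (illustrative): since maxsockets is fetched from a loader
 * tunable at SI_BOOT1_TUNABLES time and the oid above is CTLFLAG_RD, it
 * can only be raised from /boot/loader.conf, e.g.:
 *
 *      kern.ipc.maxsockets="131072"
 *
 * The effective floor is max(maxfiles, nmbclusters); the "131072" value
 * is just an example.
 */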