/*
 * Copyright (c) 2005 Jeffrey M. Hsu.  All rights reserved.
 * Copyright (c) 1982, 1986, 1988, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)uipc_socket2.c	8.1 (Berkeley) 6/10/93
 * $FreeBSD: src/sys/kern/uipc_socket2.c,v 1.55.2.17 2002/08/31 19:04:55 dwmalone Exp $
 * $DragonFly: src/sys/kern/uipc_socket2.c,v 1.33 2008/09/02 16:17:52 dillon Exp $
 */

#include "opt_param.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/domain.h>
#include <sys/file.h>		/* for maxfiles */
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/protosw.h>
#include <sys/resourcevar.h>
#include <sys/stat.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/socketops.h>
#include <sys/signalvar.h>
#include <sys/sysctl.h>
#include <sys/event.h>

#include <sys/thread2.h>
#include <sys/msgport2.h>
#include <sys/socketvar2.h>

#include <net/netisr2.h>

int maxsockets;

/*
 * Primitive routines for operating on sockets and socket buffers
 */

u_long	sb_max = SB_MAX;
u_long	sb_max_adj =
    SB_MAX * MCLBYTES / (MSIZE + MCLBYTES);	/* adjusted sb_max */

static	u_long sb_efficiency = 8;	/* parameter for sbreserve() */

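/*
 * Worked example (the MSIZE/MCLBYTES values are machine-dependent
 * assumptions from <machine/param.h>): sb_max_adj discounts sb_max by the
 * worst-case accounting cost of cluster mbufs, where each MCLBYTES of data
 * also consumes MSIZE of mbuf header space.  With MSIZE = 256 and
 * MCLBYTES = 2048:
 *
 *	sb_max_adj = sb_max * 2048 / (256 + 2048)	(about 8/9 of sb_max)
 */
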
/************************************************************************
 *			signalsockbuf procedures			*
 ************************************************************************/

/*
 * Wait for data to arrive at/drain from a socket buffer.
 *
 * NOTE: Caller must generally hold the ssb_lock (client side lock) since
 *	 WAIT/WAKEUP only works for one client at a time.
 *
 * NOTE: Caller always retries whatever operation it was waiting on.
 */
int
ssb_wait(struct signalsockbuf *ssb)
{
	uint32_t flags;
	int pflags;
	int error;

	pflags = (ssb->ssb_flags & SSB_NOINTR) ? 0 : PCATCH;

	for (;;) {
		flags = ssb->ssb_flags;
		cpu_ccfence();

		/*
		 * WAKEUP and WAIT interlock each other.  We can catch the
		 * race by checking to see if WAKEUP has already been set,
		 * and only setting WAIT if WAKEUP is clear.
		 */
		if (flags & SSB_WAKEUP) {
			if (atomic_cmpset_int(&ssb->ssb_flags, flags,
					      flags & ~SSB_WAKEUP)) {
				error = 0;
				break;
			}
			continue;
		}

		/*
		 * Only set WAIT if WAKEUP is clear.
		 */
		tsleep_interlock(&ssb->ssb_cc, pflags);
		if (atomic_cmpset_int(&ssb->ssb_flags, flags,
				      flags | SSB_WAIT)) {
			error = tsleep(&ssb->ssb_cc, pflags | PINTERLOCKED,
				       "sbwait", ssb->ssb_timeo);
			break;
		}
	}
	return (error);
}

/*
 * Slow-path lock acquisition for a sockbuf that was found to be locked
 * already; sleep until the lock can be obtained and return any error
 * returned from the sleep (e.g. EINTR).
 */
int
_ssb_lock(struct signalsockbuf *ssb)
{
	uint32_t flags;
	int pflags;
	int error;

	pflags = (ssb->ssb_flags & SSB_NOINTR) ? 0 : PCATCH;

	for (;;) {
		flags = ssb->ssb_flags;
		cpu_ccfence();
		if (flags & SSB_LOCK) {
			tsleep_interlock(&ssb->ssb_flags, pflags);
			if (atomic_cmpset_int(&ssb->ssb_flags, flags,
					      flags | SSB_WANT)) {
				error = tsleep(&ssb->ssb_flags,
					       pflags | PINTERLOCKED,
					       "sblock", 0);
				if (error)
					break;
			}
		} else {
			if (atomic_cmpset_int(&ssb->ssb_flags, flags,
					      flags | SSB_LOCK)) {
				lwkt_gettoken(&ssb->ssb_token);
				error = 0;
				break;
			}
		}
	}
	return (error);
}

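/*
 * Illustrative sketch (not compiled): the client-side pattern the two
 * routines above support, using the ssb_lock()/ssb_unlock() wrappers from
 * sys/socketvar.h.  The SBLOCKWAIT(flags) argument is an assumption based
 * on soreceive()-style callers.
 *
 *	error = ssb_lock(&so->so_rcv, SBLOCKWAIT(flags));
 *	if (error)
 *		return (error);
 *	while (so->so_rcv.ssb_cc == 0 && no-error-or-EOF-condition) {
 *		error = ssb_wait(&so->so_rcv);
 *		if (error)
 *			break;
 *	}
 *	ssb_unlock(&so->so_rcv);
 *
 * A zero return from ssb_wait() only means a wakeup was delivered; the
 * caller must re-test whatever condition it was waiting on.
 */
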
/*
 * Fetch the externalized form of a sockbuf, analogous to what sotoxsocket()
 * does for sockets.  Note that the xsockbuf structure, since it is always
 * embedded in a socket, does not include a self pointer nor a length.  We
 * make this entry point public in case some other mechanism needs it.
 */
void
ssbtoxsockbuf(struct signalsockbuf *ssb, struct xsockbuf *xsb)
{
	xsb->sb_cc = ssb->ssb_cc;
	xsb->sb_hiwat = ssb->ssb_hiwat;
	xsb->sb_mbcnt = ssb->ssb_mbcnt;
	xsb->sb_mbmax = ssb->ssb_mbmax;
	xsb->sb_lowat = ssb->ssb_lowat;
	xsb->sb_flags = ssb->ssb_flags;
	xsb->sb_timeo = ssb->ssb_timeo;
}

/************************************************************************
 *	Procedures which manipulate socket state flags, wakeups, etc.	*
 ************************************************************************
 *
 * Normal sequence from the active (originating) side is that
 * soisconnecting() is called during processing of a connect() call,
 * resulting in an eventual call to soisconnected() if/when the connection
 * is established.  When the connection is torn down soisdisconnecting() is
 * called during processing of a disconnect() call, and soisdisconnected()
 * is called when the connection to the peer is totally severed.
 *
 * The semantics of these routines are such that connectionless protocols
 * can call soisconnected() and soisdisconnected() only, bypassing the
 * in-progress calls when setting up a ``connection'' takes no time.
 *
 * From the passive side, a socket is created with two queues of sockets:
 * so_incomp for connections in progress and so_comp for connections
 * already made and awaiting user acceptance.  As a protocol is preparing
 * incoming connections, it creates a socket structure queued on so_incomp
 * by calling sonewconn().  When the connection is established,
 * soisconnected() is called, and transfers the socket structure to so_comp,
 * making it available to accept().
 *
 * If a socket is closed with sockets on either so_incomp or so_comp, these
 * sockets are dropped.
 *
 * If higher level protocols are implemented in the kernel, the wakeups
 * done here will sometimes cause software-interrupt process scheduling.
 */

void
soisconnecting(struct socket *so)
{
	soclrstate(so, SS_ISCONNECTED | SS_ISDISCONNECTING);
	sosetstate(so, SS_ISCONNECTING);
}

void
soisconnected(struct socket *so)
{
	struct socket *head;

	while ((head = so->so_head) != NULL) {
		lwkt_getpooltoken(head);
		if (so->so_head == head)
			break;
		lwkt_relpooltoken(head);
	}

	soclrstate(so, SS_ISCONNECTING | SS_ISDISCONNECTING | SS_ISCONFIRMING);
	sosetstate(so, SS_ISCONNECTED);
	if (head && (so->so_state & SS_INCOMP)) {
		if ((so->so_options & SO_ACCEPTFILTER) != 0) {
			so->so_upcall = head->so_accf->so_accept_filter->accf_callback;
			so->so_upcallarg = head->so_accf->so_accept_filter_arg;
			atomic_set_int(&so->so_rcv.ssb_flags, SSB_UPCALL);
			so->so_options &= ~SO_ACCEPTFILTER;
			so->so_upcall(so, so->so_upcallarg, 0);
			lwkt_relpooltoken(head);
			return;
		}

		/*
		 * Listen sockets are not per-cpu.
		 */
		TAILQ_REMOVE(&head->so_incomp, so, so_list);
		head->so_incqlen--;
		TAILQ_INSERT_TAIL(&head->so_comp, so, so_list);
		head->so_qlen++;
		sosetstate(so, SS_COMP);
		soclrstate(so, SS_INCOMP);

		/*
		 * XXX head may be on a different protocol thread.
		 *     sorwakeup()->sowakeup() is hacked atm.
		 */
		sorwakeup(head);
		wakeup_one(&head->so_timeo);
	} else {
		wakeup(&so->so_timeo);
		sorwakeup(so);
		sowwakeup(so);
	}
	if (head)
		lwkt_relpooltoken(head);
}

void
soisdisconnecting(struct socket *so)
{
	soclrstate(so, SS_ISCONNECTING);
	sosetstate(so, SS_ISDISCONNECTING | SS_CANTRCVMORE | SS_CANTSENDMORE);
	wakeup((caddr_t)&so->so_timeo);
	sowwakeup(so);
	sorwakeup(so);
}

void
soisdisconnected(struct socket *so)
{
	soclrstate(so, SS_ISCONNECTING | SS_ISCONNECTED | SS_ISDISCONNECTING);
	sosetstate(so, SS_CANTRCVMORE | SS_CANTSENDMORE | SS_ISDISCONNECTED);
	wakeup((caddr_t)&so->so_timeo);
	sbdrop(&so->so_snd.sb, so->so_snd.ssb_cc);
	sowwakeup(so);
	sorwakeup(so);
}

void
soisreconnecting(struct socket *so)
{
	soclrstate(so, SS_ISDISCONNECTING | SS_ISDISCONNECTED |
		       SS_CANTRCVMORE | SS_CANTSENDMORE);
	sosetstate(so, SS_ISCONNECTING);
}

void
soisreconnected(struct socket *so)
{
	soclrstate(so, SS_ISDISCONNECTED | SS_CANTRCVMORE | SS_CANTSENDMORE);
	soisconnected(so);
}

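/*
 * Illustrative call sequences (sketch only) for the state-change routines
 * above, assuming a generic connection-oriented protocol:
 *
 *	active open:	soisconnecting(so);  ...handshake...  soisconnected(so);
 *	teardown:	soisdisconnecting(so);  ...peer done...  soisdisconnected(so);
 *
 * A connectionless protocol may call only soisconnected() and
 * soisdisconnected(), as described in the block comment above.
 */
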
/*
 * Set or change the message port a socket receives commands on.
 *
 * XXX
 */
void
sosetport(struct socket *so, lwkt_port_t port)
{
	so->so_port = port;
}

/*
 * When an attempt at a new connection is noted on a socket
 * which accepts connections, sonewconn is called.  If the
 * connection is possible (subject to space constraints, etc.)
 * then we allocate a new structure, properly linked into the
 * data structure of the original socket, and return this.
 * Connstatus may be 0, SS_ISCONFIRMING, or SS_ISCONNECTED.
 *
 * The new socket is returned with one ref and so_pcb assigned.
 * The reference is implied by so_pcb.
 */
struct socket *
sonewconn_faddr(struct socket *head, int connstatus,
		const struct sockaddr *faddr)
{
	struct socket *so;
	struct socket *sp;
	struct pru_attach_info ai;

	if (head->so_qlen > 3 * head->so_qlimit / 2)
		return (NULL);
	so = soalloc(1, head->so_proto);
	if (so == NULL)
		return (NULL);

	/*
	 * Set the port prior to attaching the inpcb to the current
	 * cpu's protocol thread (which should be the current thread
	 * but might not be in all cases).  This serializes any pcb ops
	 * which occur to our cpu allowing us to complete the attachment
	 * without racing anything.
	 */
	if (head->so_proto->pr_flags & PR_SYNC_PORT)
		sosetport(so, &netisr_sync_port);
	else
		sosetport(so, netisr_cpuport(mycpuid));
	if ((head->so_options & SO_ACCEPTFILTER) != 0)
		connstatus = 0;
	so->so_head = head;
	so->so_type = head->so_type;
	so->so_options = head->so_options &~ SO_ACCEPTCONN;
	so->so_linger = head->so_linger;

	/*
	 * NOTE: Clearing NOFDREF implies referencing the so with
	 *	 soreference().
	 */
	so->so_state = head->so_state | SS_NOFDREF | SS_ASSERTINPROG;
	so->so_cred = crhold(head->so_cred);
	ai.sb_rlimit = NULL;
	ai.p_ucred = NULL;
	ai.fd_rdir = NULL;		/* jail code cruft XXX JH */

	/*
	 * Reserve space and call pru_attach.  We can direct-call the
	 * function since we're already in the protocol thread.
	 */
	if (soreserve(so, head->so_snd.ssb_hiwat,
		      head->so_rcv.ssb_hiwat, NULL) ||
	    so_pru_attach_direct(so, 0, &ai)) {
		so->so_head = NULL;
		soclrstate(so, SS_ASSERTINPROG);
		sofree(so);		/* remove implied pcb ref */
		return (NULL);
	}
	KKASSERT(((so->so_proto->pr_flags & PR_ASYNC_RCVD) == 0 &&
		  so->so_refs == 2) ||		/* attach + our base ref */
		 ((so->so_proto->pr_flags & PR_ASYNC_RCVD) &&
		  so->so_refs == 3));		/* + async rcvd ref */
	sofree(so);
	KKASSERT(so->so_port != NULL);
	so->so_rcv.ssb_lowat = head->so_rcv.ssb_lowat;
	so->so_snd.ssb_lowat = head->so_snd.ssb_lowat;
	so->so_rcv.ssb_timeo = head->so_rcv.ssb_timeo;
	so->so_snd.ssb_timeo = head->so_snd.ssb_timeo;

	if (head->so_rcv.ssb_flags & SSB_AUTOLOWAT)
		so->so_rcv.ssb_flags |= SSB_AUTOLOWAT;
	else
		so->so_rcv.ssb_flags &= ~SSB_AUTOLOWAT;

	if (head->so_snd.ssb_flags & SSB_AUTOLOWAT)
		so->so_snd.ssb_flags |= SSB_AUTOLOWAT;
	else
		so->so_snd.ssb_flags &= ~SSB_AUTOLOWAT;

	if (head->so_rcv.ssb_flags & SSB_AUTOSIZE)
		so->so_rcv.ssb_flags |= SSB_AUTOSIZE;
	else
		so->so_rcv.ssb_flags &= ~SSB_AUTOSIZE;

	if (head->so_snd.ssb_flags & SSB_AUTOSIZE)
		so->so_snd.ssb_flags |= SSB_AUTOSIZE;
	else
		so->so_snd.ssb_flags &= ~SSB_AUTOSIZE;

	/*
	 * Save the faddr, if the information is provided and
	 * the protocol can perform the saving operation.
	 */
	if (faddr != NULL && so->so_proto->pr_usrreqs->pru_savefaddr != NULL)
		so->so_proto->pr_usrreqs->pru_savefaddr(so, faddr);

	lwkt_getpooltoken(head);
	if (connstatus) {
		TAILQ_INSERT_TAIL(&head->so_comp, so, so_list);
		sosetstate(so, SS_COMP);
		head->so_qlen++;
	} else {
		if (head->so_incqlen > head->so_qlimit) {
			sp = TAILQ_FIRST(&head->so_incomp);
			TAILQ_REMOVE(&head->so_incomp, sp, so_list);
			head->so_incqlen--;
			soclrstate(sp, SS_INCOMP);
			sp->so_head = NULL;
			soaborta(sp);
		}
		TAILQ_INSERT_TAIL(&head->so_incomp, so, so_list);
		sosetstate(so, SS_INCOMP);
		head->so_incqlen++;
	}
	lwkt_relpooltoken(head);
	if (connstatus) {
		/*
		 * XXX head may be on a different protocol thread.
		 *     sorwakeup()->sowakeup() is hacked atm.
		 */
		sorwakeup(head);
		wakeup((caddr_t)&head->so_timeo);
		sosetstate(so, connstatus);
	}
	soclrstate(so, SS_ASSERTINPROG);
	return (so);
}

struct socket *
sonewconn(struct socket *head, int connstatus)
{
	return sonewconn_faddr(head, connstatus, NULL);
}

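/*
 * Illustrative sketch (not compiled): a protocol's passive-open path
 * typically clones its listen socket when an incoming connection completes.
 * The names used here are hypothetical.
 *
 *	struct socket *nso;
 *
 *	nso = sonewconn(lso, SS_ISCONNECTED);	(lso is the listen socket)
 *	if (nso == NULL)
 *		drop the connection; the queue was full or attach failed
 *
 * A non-zero connstatus queues the new socket directly on so_comp; a zero
 * connstatus queues it on so_incomp until soisconnected() moves it.
 */
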
/*
 * Socantsendmore indicates that no more data will be sent on the
 * socket; it would normally be applied to a socket when the user
 * informs the system that no more data is to be sent, by the protocol
 * code (in the case of PRU_SHUTDOWN).  Socantrcvmore indicates that no
 * more data will be received, and will normally be applied to the socket
 * by a protocol when it detects that the peer will send no more data.
 * Data queued for reading in the socket may yet be read.
 */
void
socantsendmore(struct socket *so)
{
	sosetstate(so, SS_CANTSENDMORE);
	sowwakeup(so);
}

void
socantrcvmore(struct socket *so)
{
	sosetstate(so, SS_CANTRCVMORE);
	sorwakeup(so);
}

/*
 * Wakeup processes waiting on a socket buffer.  Do asynchronous notification
 * via SIGIO if the socket has the SS_ASYNC flag set.
 *
 * For users waiting on send/recv try to avoid unnecessary context switch
 * thrashing.  Particularly for senders of large buffers (needs to be
 * extended to sel and aio? XXX)
 *
 * WARNING! Can be called on a foreign socket from the wrong protocol
 *	    thread.  i.e. it is called on the 'head' listen socket when
 *	    a new connection comes in.
 */
void
sowakeup(struct socket *so, struct signalsockbuf *ssb)
{
	struct kqinfo *kqinfo = &ssb->ssb_kq;
	uint32_t flags;

	/*
	 * Check conditions, set the WAKEUP flag, and clear and signal if
	 * the WAIT flag is found to be set.  This interlocks against the
	 * client side.
	 */
	for (;;) {
		flags = ssb->ssb_flags;
		cpu_ccfence();

		if ((ssb == &so->so_snd && ssb_space(ssb) >= ssb->ssb_lowat) ||
		    (ssb == &so->so_rcv && ssb->ssb_cc >= ssb->ssb_lowat) ||
		    (ssb == &so->so_snd && (so->so_state & SS_CANTSENDMORE)) ||
		    (ssb == &so->so_rcv && (so->so_state & SS_CANTRCVMORE))
		) {
			if (atomic_cmpset_int(&ssb->ssb_flags, flags,
					      (flags | SSB_WAKEUP) & ~SSB_WAIT)) {
				if (flags & SSB_WAIT)
					wakeup(&ssb->ssb_cc);
				break;
			}
		} else {
			break;
		}
	}

	/*
	 * Misc other events
	 */
	if ((so->so_state & SS_ASYNC) && so->so_sigio != NULL)
		pgsigio(so->so_sigio, SIGIO, 0);
	if (ssb->ssb_flags & SSB_UPCALL)
		(*so->so_upcall)(so, so->so_upcallarg, MB_DONTWAIT);
	KNOTE(&kqinfo->ki_note, 0);

	/*
	 * This is a bit of a hack.  Multiple threads can wind up scanning
	 * ki_mlist concurrently due to the fact that this function can be
	 * called on a foreign socket, so we can't afford to block here.
	 *
	 * We need the pool token for (so) (likely the listen socket if
	 * SSB_MEVENT is set) because the predicate function may have
	 * to access the accept queue.
	 */
	if (ssb->ssb_flags & SSB_MEVENT) {
		struct netmsg_so_notify *msg, *nmsg;

		lwkt_getpooltoken(so);
		TAILQ_FOREACH_MUTABLE(msg, &kqinfo->ki_mlist, nm_list, nmsg) {
			if (msg->nm_predicate(msg)) {
				TAILQ_REMOVE(&kqinfo->ki_mlist, msg, nm_list);
				lwkt_replymsg(&msg->base.lmsg,
					      msg->base.lmsg.ms_error);
			}
		}
		if (TAILQ_EMPTY(&ssb->ssb_kq.ki_mlist))
			atomic_clear_int(&ssb->ssb_flags, SSB_MEVENT);
		lwkt_relpooltoken(so);
	}
}

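/*
 * Note (assumption; see sys/socketvar.h for the authoritative definitions):
 * callers normally reach sowakeup() through the sorwakeup()/sowwakeup()
 * wrappers, which supply the receive or send buffer respectively, roughly:
 *
 *	sorwakeup(so);		wake readers: sowakeup(so, &so->so_rcv)
 *	sowwakeup(so);		wake writers: sowakeup(so, &so->so_snd)
 */
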
/*
 * Socket buffer (struct signalsockbuf) utility routines.
 *
 * Each socket contains two socket buffers: one for sending data and
 * one for receiving data.  Each buffer contains a queue of mbufs,
 * information about the number of mbufs and amount of data in the
 * queue, and other fields allowing kevent()/select()/poll() and
 * notification on data availability to be implemented.
 *
 * Data stored in a socket buffer is maintained as a list of records.
 * Each record is a list of mbufs chained together with the m_next
 * field.  Records are chained together with the m_nextpkt field.  The upper
 * level routine soreceive() expects the following conventions to be
 * observed when placing information in the receive buffer:
 *
 * 1. If the protocol requires each message be preceded by the sender's
 *    name, then a record containing that name must be present before
 *    any associated data (mbuf's must be of type MT_SONAME).
 * 2. If the protocol supports the exchange of ``access rights'' (really
 *    just additional data associated with the message), and there are
 *    ``rights'' to be received, then a record containing this data
 *    should be present (mbuf's must be of type MT_RIGHTS).
 * 3. If a name or rights record exists, then it must be followed by
 *    a data record, perhaps of zero length.
 *
 * Before using a new socket structure it is first necessary to reserve
 * buffer space for the socket, by calling ssb_reserve().  This should commit
 * some of the available buffer space in the system buffer pool for the
 * socket (currently, it does nothing but enforce limits).  The space
 * should be released by calling ssb_release() when the socket is destroyed.
 */
int
soreserve(struct socket *so, u_long sndcc, u_long rcvcc, struct rlimit *rl)
{
	if (so->so_snd.ssb_lowat == 0)
		atomic_set_int(&so->so_snd.ssb_flags, SSB_AUTOLOWAT);
	if (ssb_reserve(&so->so_snd, sndcc, so, rl) == 0)
		goto bad;
	if (ssb_reserve(&so->so_rcv, rcvcc, so, rl) == 0)
		goto bad2;
	if (so->so_rcv.ssb_lowat == 0)
		so->so_rcv.ssb_lowat = 1;
	if (so->so_snd.ssb_lowat == 0)
		so->so_snd.ssb_lowat = MCLBYTES;
	if (so->so_snd.ssb_lowat > so->so_snd.ssb_hiwat)
		so->so_snd.ssb_lowat = so->so_snd.ssb_hiwat;
	return (0);
bad2:
	ssb_release(&so->so_snd, so);
bad:
	return (ENOBUFS);
}

static int
sysctl_handle_sb_max(SYSCTL_HANDLER_ARGS)
{
	int error = 0;
	u_long old_sb_max = sb_max;

	error = SYSCTL_OUT(req, arg1, sizeof(int));
	if (error || !req->newptr)
		return (error);
	error = SYSCTL_IN(req, arg1, sizeof(int));
	if (error)
		return (error);
	if (sb_max < MSIZE + MCLBYTES) {
		sb_max = old_sb_max;
		return (EINVAL);
	}
	sb_max_adj = (u_quad_t)sb_max * MCLBYTES / (MSIZE + MCLBYTES);
	return (0);
}

/*
 * Allot mbufs to a signalsockbuf.
 *
 * Attempt to scale mbmax so that mbcnt doesn't become limiting
 * if buffering efficiency is near the normal case.
 *
 * sb_max only applies to user-sockets (where rl != NULL).  It does
 * not apply to kernel sockets or kernel-controlled sockets.  Note
 * that NFS overrides the sockbuf limits created when nfsd creates
 * a socket.
 */
int
ssb_reserve(struct signalsockbuf *ssb, u_long cc, struct socket *so,
	    struct rlimit *rl)
{
	/*
	 * rl will only be NULL when we're in an interrupt (eg, in tcp_input)
	 * or when called from netgraph (ie, ngd_attach)
	 */
	if (rl && cc > sb_max_adj)
		cc = sb_max_adj;
	if (!chgsbsize(so->so_cred->cr_uidinfo, &ssb->ssb_hiwat, cc,
		       rl ? rl->rlim_cur : RLIM_INFINITY)) {
		return (0);
	}
	if (rl)
		ssb->ssb_mbmax = min(cc * sb_efficiency, sb_max);
	else
		ssb->ssb_mbmax = cc * sb_efficiency;

	/*
	 * AUTOLOWAT is set on send buffers and prevents large writes
	 * from generating a huge number of context switches.
	 */
	if (ssb->ssb_flags & SSB_AUTOLOWAT) {
		ssb->ssb_lowat = ssb->ssb_hiwat / 2;
		if (ssb->ssb_lowat < MCLBYTES)
			ssb->ssb_lowat = MCLBYTES;
	}
	if (ssb->ssb_lowat > ssb->ssb_hiwat)
		ssb->ssb_lowat = ssb->ssb_hiwat;
	return (1);
}

/*
 * Free mbufs held by a socket, and reserved mbuf space.
 */
void
ssb_release(struct signalsockbuf *ssb, struct socket *so)
{
	sbflush(&ssb->sb);
	(void)chgsbsize(so->so_cred->cr_uidinfo, &ssb->ssb_hiwat, 0,
			RLIM_INFINITY);
	ssb->ssb_mbmax = 0;
}

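/*
 * Illustrative sketch (not compiled): typical reservation lifecycle.  The
 * space constants are hypothetical; each protocol supplies its own defaults,
 * and the attach hook receives the resource limit via struct pru_attach_info.
 *
 *	in the protocol attach routine:
 *		error = soreserve(so, xxx_sendspace, xxx_recvspace,
 *				  ai->sb_rlimit);
 *		if (error)
 *			return (error);
 *
 *	on final teardown the space is returned per buffer with
 *	ssb_release(&so->so_snd, so) and ssb_release(&so->so_rcv, so).
 */
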
/*
 * Some routines that return EOPNOTSUPP for entry points that are not
 * supported by a protocol.  Fill in as needed.
 */
void
pr_generic_notsupp(netmsg_t msg)
{
	lwkt_replymsg(&msg->lmsg, EOPNOTSUPP);
}

int
pru_sosend_notsupp(struct socket *so, struct sockaddr *addr, struct uio *uio,
		   struct mbuf *top, struct mbuf *control, int flags,
		   struct thread *td)
{
	if (top)
		m_freem(top);
	if (control)
		m_freem(control);
	return (EOPNOTSUPP);
}

int
pru_soreceive_notsupp(struct socket *so, struct sockaddr **paddr,
		      struct uio *uio, struct sockbuf *sio,
		      struct mbuf **controlp, int *flagsp)
{
	return (EOPNOTSUPP);
}

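/*
 * Illustrative sketch (not compiled; the structure and field names are an
 * assumption -- see struct pr_usrreqs in sys/protosw.h for the authoritative
 * layout): a protocol that does not implement an entry point can point it at
 * one of the defaults above instead of leaving it NULL, e.g.
 *
 *	struct pr_usrreqs xxx_usrreqs = {
 *		...
 *		.pru_sosend = pru_sosend_notsupp,
 *		.pru_soreceive = pru_soreceive_notsupp,
 *		...
 *	};
 */
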
/*
 * This isn't really a ``null'' operation, but it's the default one
 * and doesn't do anything destructive.
 */
void
pru_sense_null(netmsg_t msg)
{
	msg->sense.nm_stat->st_blksize = msg->base.nm_so->so_snd.ssb_hiwat;
	lwkt_replymsg(&msg->lmsg, 0);
}

/*
 * Make a copy of a sockaddr in a malloced buffer of type M_SONAME.  Callers
 * of this routine assume that it always succeeds, so we have to use a
 * blockable allocation even though we might be called from a critical thread.
 */
struct sockaddr *
dup_sockaddr(const struct sockaddr *sa)
{
	struct sockaddr *sa2;

	sa2 = kmalloc(sa->sa_len, M_SONAME, M_INTWAIT);
	bcopy(sa, sa2, sa->sa_len);
	return (sa2);
}

/*
 * Create an external-format (``xsocket'') structure using the information
 * in the kernel-format socket structure pointed to by so.  This is done
 * to reduce the spew of irrelevant information over this interface,
 * to isolate user code from changes in the kernel structure, and
 * potentially to provide information-hiding if we decide that
 * some of this information should be hidden from users.
 */
void
sotoxsocket(struct socket *so, struct xsocket *xso)
{
	xso->xso_len = sizeof *xso;
	xso->xso_so = so;
	xso->so_type = so->so_type;
	xso->so_options = so->so_options;
	xso->so_linger = so->so_linger;
	xso->so_state = so->so_state;
	xso->so_pcb = so->so_pcb;
	xso->xso_protocol = so->so_proto->pr_protocol;
	xso->xso_family = so->so_proto->pr_domain->dom_family;
	xso->so_qlen = so->so_qlen;
	xso->so_incqlen = so->so_incqlen;
	xso->so_qlimit = so->so_qlimit;
	xso->so_timeo = so->so_timeo;
	xso->so_error = so->so_error;
	xso->so_pgid = so->so_sigio ? so->so_sigio->sio_pgid : 0;
	xso->so_oobmark = so->so_oobmark;
	ssbtoxsockbuf(&so->so_snd, &xso->so_snd);
	ssbtoxsockbuf(&so->so_rcv, &xso->so_rcv);
	xso->so_uid = so->so_cred->cr_uid;
}

/*
 * Here is the definition of some of the basic objects in the kern.ipc
 * branch of the MIB.
 */
SYSCTL_NODE(_kern, KERN_IPC, ipc, CTLFLAG_RW, 0, "IPC");

/*
 * This takes the place of kern.maxsockbuf, which moved to kern.ipc.
 *
 * NOTE! sb_max only applies to user-created socket buffers.
 */
static int dummy;
SYSCTL_INT(_kern, KERN_DUMMY, dummy, CTLFLAG_RW, &dummy, 0, "");
SYSCTL_OID(_kern_ipc, KIPC_MAXSOCKBUF, maxsockbuf, CTLTYPE_INT|CTLFLAG_RW,
    &sb_max, 0, sysctl_handle_sb_max, "I", "Maximum socket buffer size");
SYSCTL_INT(_kern_ipc, OID_AUTO, maxsockets, CTLFLAG_RD,
    &maxsockets, 0, "Maximum number of sockets available");
SYSCTL_INT(_kern_ipc, KIPC_SOCKBUF_WASTE, sockbuf_waste_factor, CTLFLAG_RW,
    &sb_efficiency, 0,
    "Socket buffer limit scaler");

/*
 * Initialize maxsockets
 */
static void
init_maxsockets(void *ignored)
{
	TUNABLE_INT_FETCH("kern.ipc.maxsockets", &maxsockets);
	maxsockets = imax(maxsockets, imax(maxfiles, nmbclusters));
}
SYSINIT(param, SI_BOOT1_TUNABLES, SI_ORDER_ANY,
	init_maxsockets, NULL);

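/*
 * Tuning note (assumption about the standard tunable/sysctl mechanisms; the
 * value shown is only an example): maxsockets is fetched with
 * TUNABLE_INT_FETCH() at boot and its sysctl is read-only, so it can only be
 * set from loader.conf, e.g.
 *
 *	kern.ipc.maxsockets="131072"
 *
 * kern.ipc.maxsockbuf and kern.ipc.sockbuf_waste_factor are read/write and
 * may be adjusted at runtime with sysctl(8).
 */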