/*
 * Copyright (c) 2005 Jeffrey M. Hsu.  All rights reserved.
 * Copyright (c) 1982, 1986, 1988, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)uipc_socket2.c	8.1 (Berkeley) 6/10/93
 * $FreeBSD: src/sys/kern/uipc_socket2.c,v 1.55.2.17 2002/08/31 19:04:55 dwmalone Exp $
 * $DragonFly: src/sys/kern/uipc_socket2.c,v 1.33 2008/09/02 16:17:52 dillon Exp $
 */

#include "opt_param.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/domain.h>
#include <sys/file.h>	/* for maxfiles */
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/protosw.h>
#include <sys/resourcevar.h>
#include <sys/stat.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/socketops.h>
#include <sys/signalvar.h>
#include <sys/sysctl.h>
#include <sys/event.h>

#include <sys/thread2.h>
#include <sys/msgport2.h>
#include <sys/socketvar2.h>

#include <net/netisr2.h>

int maxsockets;

/*
 * Primitive routines for operating on sockets and socket buffers
 */

u_long	sb_max = SB_MAX;
u_long	sb_max_adj =
    SB_MAX * MCLBYTES / (MSIZE + MCLBYTES);	/* adjusted sb_max */

static	u_long sb_efficiency = 8;	/* parameter for sbreserve() */

/************************************************************************
 *			signalsockbuf procedures			*
 ************************************************************************/
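/*
 * For illustration, the client-side pattern the routines below implement
 * (a sketch only, not a verbatim caller; it assumes the ssb_lock()/
 * ssb_unlock() wrappers from sys/socketvar.h, and the space test stands
 * in for whatever condition the caller is really waiting on):
 *
 *	if ((error = ssb_lock(ssb, M_WAITOK)) != 0)
 *		return (error);
 *	while (ssb_space(ssb) < resid) {
 *		if ((error = ssb_wait(ssb)) != 0)
 *			break;
 *	}
 *	(retest the condition, then perform the queued operation)
 *	ssb_unlock(ssb);
 */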
85 * 86 * NOTE: Caller always retries whatever operation it was waiting on. 87 */ 88 int 89 ssb_wait(struct signalsockbuf *ssb) 90 { 91 uint32_t flags; 92 int pflags; 93 int error; 94 95 pflags = (ssb->ssb_flags & SSB_NOINTR) ? 0 : PCATCH; 96 97 for (;;) { 98 flags = ssb->ssb_flags; 99 cpu_ccfence(); 100 101 /* 102 * WAKEUP and WAIT interlock eachother. We can catch the 103 * race by checking to see if WAKEUP has already been set, 104 * and only setting WAIT if WAKEUP is clear. 105 */ 106 if (flags & SSB_WAKEUP) { 107 if (atomic_cmpset_int(&ssb->ssb_flags, flags, 108 flags & ~SSB_WAKEUP)) { 109 error = 0; 110 break; 111 } 112 continue; 113 } 114 115 /* 116 * Only set WAIT if WAKEUP is clear. 117 */ 118 tsleep_interlock(&ssb->ssb_cc, pflags); 119 if (atomic_cmpset_int(&ssb->ssb_flags, flags, 120 flags | SSB_WAIT)) { 121 error = tsleep(&ssb->ssb_cc, pflags | PINTERLOCKED, 122 "sbwait", ssb->ssb_timeo); 123 break; 124 } 125 } 126 return (error); 127 } 128 129 /* 130 * Lock a sockbuf already known to be locked; 131 * return any error returned from sleep (EINTR). 132 */ 133 int 134 _ssb_lock(struct signalsockbuf *ssb) 135 { 136 uint32_t flags; 137 int pflags; 138 int error; 139 140 pflags = (ssb->ssb_flags & SSB_NOINTR) ? 0 : PCATCH; 141 142 for (;;) { 143 flags = ssb->ssb_flags; 144 cpu_ccfence(); 145 if (flags & SSB_LOCK) { 146 tsleep_interlock(&ssb->ssb_flags, pflags); 147 if (atomic_cmpset_int(&ssb->ssb_flags, flags, 148 flags | SSB_WANT)) { 149 error = tsleep(&ssb->ssb_flags, 150 pflags | PINTERLOCKED, 151 "sblock", 0); 152 if (error) 153 break; 154 } 155 } else { 156 if (atomic_cmpset_int(&ssb->ssb_flags, flags, 157 flags | SSB_LOCK)) { 158 lwkt_gettoken(&ssb->ssb_token); 159 error = 0; 160 break; 161 } 162 } 163 } 164 return (error); 165 } 166 167 /* 168 * This does the same for sockbufs. Note that the xsockbuf structure, 169 * since it is always embedded in a socket, does not include a self 170 * pointer nor a length. We make this entry point public in case 171 * some other mechanism needs it. 172 */ 173 void 174 ssbtoxsockbuf(struct signalsockbuf *ssb, struct xsockbuf *xsb) 175 { 176 xsb->sb_cc = ssb->ssb_cc; 177 xsb->sb_hiwat = ssb->ssb_hiwat; 178 xsb->sb_mbcnt = ssb->ssb_mbcnt; 179 xsb->sb_mbmax = ssb->ssb_mbmax; 180 xsb->sb_lowat = ssb->ssb_lowat; 181 xsb->sb_flags = ssb->ssb_flags; 182 xsb->sb_timeo = ssb->ssb_timeo; 183 } 184 185 186 /************************************************************************ 187 * Procedures which manipulate socket state flags, wakeups, etc. * 188 ************************************************************************ 189 * 190 * Normal sequence from the active (originating) side is that 191 * soisconnecting() is called during processing of connect() call, resulting 192 * in an eventual call to soisconnected() if/when the connection is 193 * established. When the connection is torn down soisdisconnecting() is 194 * called during processing of disconnect() call, and soisdisconnected() is 195 * called when the connection to the peer is totally severed. 196 * 197 * The semantics of these routines are such that connectionless protocols 198 * can call soisconnected() and soisdisconnected() only, bypassing the 199 * in-progress calls when setting up a ``connection'' takes no time. 200 * 201 * From the passive side, a socket is created with two queues of sockets: 202 * so_incomp for connections in progress and so_comp for connections 203 * already made and awaiting user acceptance. 
void
soisconnecting(struct socket *so)
{
	soclrstate(so, SS_ISCONNECTED | SS_ISDISCONNECTING);
	sosetstate(so, SS_ISCONNECTING);
}

void
soisconnected(struct socket *so)
{
	struct socket *head;

	while ((head = so->so_head) != NULL) {
		lwkt_getpooltoken(head);
		if (so->so_head == head)
			break;
		lwkt_relpooltoken(head);
	}

	soclrstate(so, SS_ISCONNECTING | SS_ISDISCONNECTING | SS_ISCONFIRMING);
	sosetstate(so, SS_ISCONNECTED);
	if (head && (so->so_state & SS_INCOMP)) {
		if ((so->so_options & SO_ACCEPTFILTER) != 0) {
			so->so_upcall =
			    head->so_accf->so_accept_filter->accf_callback;
			so->so_upcallarg = head->so_accf->so_accept_filter_arg;
			atomic_set_int(&so->so_rcv.ssb_flags, SSB_UPCALL);
			so->so_options &= ~SO_ACCEPTFILTER;
			so->so_upcall(so, so->so_upcallarg, 0);
			lwkt_relpooltoken(head);
			return;
		}

		/*
		 * Listen sockets are not per-cpu.
		 */
		TAILQ_REMOVE(&head->so_incomp, so, so_list);
		head->so_incqlen--;
		TAILQ_INSERT_TAIL(&head->so_comp, so, so_list);
		head->so_qlen++;
		sosetstate(so, SS_COMP);
		soclrstate(so, SS_INCOMP);

		/*
		 * XXX head may be on a different protocol thread.
		 *     sorwakeup()->sowakeup() is hacked atm.
		 */
		sorwakeup(head);
		wakeup_one(&head->so_timeo);
	} else {
		wakeup(&so->so_timeo);
		sorwakeup(so);
		sowwakeup(so);
	}
	if (head)
		lwkt_relpooltoken(head);
}

void
soisdisconnecting(struct socket *so)
{
	soclrstate(so, SS_ISCONNECTING);
	sosetstate(so, SS_ISDISCONNECTING | SS_CANTRCVMORE | SS_CANTSENDMORE);
	wakeup((caddr_t)&so->so_timeo);
	sowwakeup(so);
	sorwakeup(so);
}

void
soisdisconnected(struct socket *so)
{
	soclrstate(so, SS_ISCONNECTING | SS_ISCONNECTED | SS_ISDISCONNECTING);
	sosetstate(so, SS_CANTRCVMORE | SS_CANTSENDMORE | SS_ISDISCONNECTED);
	wakeup((caddr_t)&so->so_timeo);
	sbdrop(&so->so_snd.sb, so->so_snd.ssb_cc);
	sowwakeup(so);
	sorwakeup(so);
}

void
soisreconnecting(struct socket *so)
{
	soclrstate(so, SS_ISDISCONNECTING | SS_ISDISCONNECTED |
		       SS_CANTRCVMORE | SS_CANTSENDMORE);
	sosetstate(so, SS_ISCONNECTING);
}

void
soisreconnected(struct socket *so)
{
	soclrstate(so, SS_ISDISCONNECTED | SS_CANTRCVMORE | SS_CANTSENDMORE);
	soisconnected(so);
}

/*
 * Set or change the message port a socket receives commands on.
 *
 * XXX
 */
void
sosetport(struct socket *so, lwkt_port_t port)
{
	so->so_port = port;
}
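/*
 * For illustration, a TCP-style passive open exercises the machinery
 * above and below roughly as follows (a sketch; the trigger events are
 * assumptions about the protocol, not part of this file's contract):
 *
 *	SYN received:		so = sonewconn(head, 0);
 *				(so is queued on head->so_incomp)
 *	handshake completes:	soisconnected(so);
 *				(so moves to head->so_comp, SS_COMP set)
 *	accept():		dequeues so from head->so_comp.
 */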
/*
 * When an attempt at a new connection is noted on a socket
 * which accepts connections, sonewconn() is called.  If the
 * connection is possible (subject to space constraints, etc.)
 * then we allocate a new structure, properly linked into the
 * data structure of the original socket, and return this.
 * Connstatus may be 0, SS_ISCONFIRMING, or SS_ISCONNECTED.
 *
 * The new socket is returned with one ref and so_pcb assigned.
 * The reference is implied by so_pcb.
 */
struct socket *
sonewconn_faddr(struct socket *head, int connstatus,
		const struct sockaddr *faddr)
{
	struct socket *so;
	struct socket *sp;
	struct pru_attach_info ai;

	if (head->so_qlen > 3 * head->so_qlimit / 2)
		return (NULL);
	so = soalloc(1, head->so_proto);
	if (so == NULL)
		return (NULL);

	/*
	 * Set the port prior to attaching the inpcb to the current
	 * cpu's protocol thread (which should be the current thread
	 * but might not be in all cases).  This serializes any pcb ops
	 * which occur to our cpu allowing us to complete the attachment
	 * without racing anything.
	 */
	if (head->so_proto->pr_flags & PR_SYNC_PORT)
		sosetport(so, &netisr_sync_port);
	else
		sosetport(so, netisr_cpuport(mycpuid));
	if ((head->so_options & SO_ACCEPTFILTER) != 0)
		connstatus = 0;
	so->so_head = head;
	so->so_type = head->so_type;
	so->so_options = head->so_options &~ SO_ACCEPTCONN;
	so->so_linger = head->so_linger;

	/*
	 * NOTE: Clearing NOFDREF implies referencing the so with
	 *	 soreference().
	 */
	so->so_state = head->so_state | SS_NOFDREF | SS_ASSERTINPROG;
	so->so_cred = crhold(head->so_cred);
	ai.sb_rlimit = NULL;
	ai.p_ucred = NULL;
	ai.fd_rdir = NULL;		/* jail code cruft XXX JH */

	/*
	 * Reserve space and call pru_attach.  We can direct-call the
	 * function since we're already in the protocol thread.
	 */
	if (soreserve(so, head->so_snd.ssb_hiwat,
		      head->so_rcv.ssb_hiwat, NULL) ||
	    so_pru_attach_direct(so, 0, &ai)) {
		so->so_head = NULL;
		soclrstate(so, SS_ASSERTINPROG);
		sofree(so);		/* remove implied pcb ref */
		return (NULL);
	}
	KKASSERT(((so->so_proto->pr_flags & PR_ASYNC_RCVD) == 0 &&
		  so->so_refs == 2) ||	/* attach + our base ref */
		 ((so->so_proto->pr_flags & PR_ASYNC_RCVD) &&
		  so->so_refs == 3));	/* + async rcvd ref */
	sofree(so);
	KKASSERT(so->so_port != NULL);
	so->so_rcv.ssb_lowat = head->so_rcv.ssb_lowat;
	so->so_snd.ssb_lowat = head->so_snd.ssb_lowat;
	so->so_rcv.ssb_timeo = head->so_rcv.ssb_timeo;
	so->so_snd.ssb_timeo = head->so_snd.ssb_timeo;

	if (head->so_rcv.ssb_flags & SSB_AUTOLOWAT)
		so->so_rcv.ssb_flags |= SSB_AUTOLOWAT;
	else
		so->so_rcv.ssb_flags &= ~SSB_AUTOLOWAT;

	if (head->so_snd.ssb_flags & SSB_AUTOLOWAT)
		so->so_snd.ssb_flags |= SSB_AUTOLOWAT;
	else
		so->so_snd.ssb_flags &= ~SSB_AUTOLOWAT;

	if (head->so_rcv.ssb_flags & SSB_AUTOSIZE)
		so->so_rcv.ssb_flags |= SSB_AUTOSIZE;
	else
		so->so_rcv.ssb_flags &= ~SSB_AUTOSIZE;

	if (head->so_snd.ssb_flags & SSB_AUTOSIZE)
		so->so_snd.ssb_flags |= SSB_AUTOSIZE;
	else
		so->so_snd.ssb_flags &= ~SSB_AUTOSIZE;
419 */ 420 if (faddr != NULL && so->so_proto->pr_usrreqs->pru_savefaddr != NULL) 421 so->so_proto->pr_usrreqs->pru_savefaddr(so, faddr); 422 423 lwkt_getpooltoken(head); 424 if (connstatus) { 425 TAILQ_INSERT_TAIL(&head->so_comp, so, so_list); 426 sosetstate(so, SS_COMP); 427 head->so_qlen++; 428 } else { 429 if (head->so_incqlen > head->so_qlimit) { 430 sp = TAILQ_FIRST(&head->so_incomp); 431 TAILQ_REMOVE(&head->so_incomp, sp, so_list); 432 head->so_incqlen--; 433 soclrstate(sp, SS_INCOMP); 434 sp->so_head = NULL; 435 soaborta(sp); 436 } 437 TAILQ_INSERT_TAIL(&head->so_incomp, so, so_list); 438 sosetstate(so, SS_INCOMP); 439 head->so_incqlen++; 440 } 441 lwkt_relpooltoken(head); 442 if (connstatus) { 443 /* 444 * XXX head may be on a different protocol thread. 445 * sorwakeup()->sowakeup() is hacked atm. 446 */ 447 sorwakeup(head); 448 wakeup((caddr_t)&head->so_timeo); 449 sosetstate(so, connstatus); 450 } 451 soclrstate(so, SS_ASSERTINPROG); 452 return (so); 453 } 454 455 struct socket * 456 sonewconn(struct socket *head, int connstatus) 457 { 458 return sonewconn_faddr(head, connstatus, NULL); 459 } 460 461 /* 462 * Socantsendmore indicates that no more data will be sent on the 463 * socket; it would normally be applied to a socket when the user 464 * informs the system that no more data is to be sent, by the protocol 465 * code (in case PRU_SHUTDOWN). Socantrcvmore indicates that no more data 466 * will be received, and will normally be applied to the socket by a 467 * protocol when it detects that the peer will send no more data. 468 * Data queued for reading in the socket may yet be read. 469 */ 470 void 471 socantsendmore(struct socket *so) 472 { 473 sosetstate(so, SS_CANTSENDMORE); 474 sowwakeup(so); 475 } 476 477 void 478 socantrcvmore(struct socket *so) 479 { 480 sosetstate(so, SS_CANTRCVMORE); 481 sorwakeup(so); 482 } 483 484 /* 485 * Wakeup processes waiting on a socket buffer. Do asynchronous notification 486 * via SIGIO if the socket has the SS_ASYNC flag set. 487 * 488 * For users waiting on send/recv try to avoid unnecessary context switch 489 * thrashing. Particularly for senders of large buffers (needs to be 490 * extended to sel and aio? XXX) 491 * 492 * WARNING! Can be called on a foreign socket from the wrong protocol 493 * thread. aka is called on the 'head' listen socket when 494 * a new connection comes in. 495 */ 496 void 497 sowakeup(struct socket *so, struct signalsockbuf *ssb) 498 { 499 struct kqinfo *kqinfo = &ssb->ssb_kq; 500 uint32_t flags; 501 502 /* 503 * Check conditions, set the WAKEUP flag, and clear and signal if 504 * the WAIT flag is found to be set. This interlocks against the 505 * client side. 506 */ 507 for (;;) { 508 flags = ssb->ssb_flags; 509 cpu_ccfence(); 510 511 if ((ssb == &so->so_snd && ssb_space(ssb) >= ssb->ssb_lowat) || 512 (ssb == &so->so_rcv && ssb->ssb_cc >= ssb->ssb_lowat) || 513 (ssb == &so->so_snd && (so->so_state & SS_CANTSENDMORE)) || 514 (ssb == &so->so_rcv && (so->so_state & SS_CANTRCVMORE)) 515 ) { 516 if (atomic_cmpset_int(&ssb->ssb_flags, flags, 517 (flags | SSB_WAKEUP) & ~SSB_WAIT)) { 518 if (flags & SSB_WAIT) 519 wakeup(&ssb->ssb_cc); 520 break; 521 } 522 } else { 523 break; 524 } 525 } 526 527 /* 528 * Misc other events 529 */ 530 if ((so->so_state & SS_ASYNC) && so->so_sigio != NULL) 531 pgsigio(so->so_sigio, SIGIO, 0); 532 if (ssb->ssb_flags & SSB_UPCALL) 533 (*so->so_upcall)(so, so->so_upcallarg, MB_DONTWAIT); 534 KNOTE(&kqinfo->ki_note, 0); 535 536 /* 537 * This is a bit of a hack. 
/*
 * Socket buffer (struct signalsockbuf) utility routines.
 *
 * Each socket contains two socket buffers: one for sending data and
 * one for receiving data.  Each buffer contains a queue of mbufs,
 * information about the number of mbufs and amount of data in the
 * queue, and other fields allowing kevent()/select()/poll() and
 * notification on data availability to be implemented.
 *
 * Data stored in a socket buffer is maintained as a list of records.
 * Each record is a list of mbufs chained together with the m_next
 * field.  Records are chained together with the m_nextpkt field.  The upper
 * level routine soreceive() expects the following conventions to be
 * observed when placing information in the receive buffer:
 *
 * 1. If the protocol requires each message be preceded by the sender's
 *    name, then a record containing that name must be present before
 *    any associated data (mbufs must be of type MT_SONAME).
 * 2. If the protocol supports the exchange of ``access rights'' (really
 *    just additional data associated with the message), and there are
 *    ``rights'' to be received, then a record containing this data
 *    should be present (mbufs must be of type MT_RIGHTS).
 * 3. If a name or rights record exists, then it must be followed by
 *    a data record, perhaps of zero length.
 *
 * Before using a new socket structure it is first necessary to reserve
 * buffer space for the socket, by calling soreserve().  This should commit
 * some of the available buffer space in the system buffer pool for the
 * socket (currently, it does nothing but enforce limits).  The space
 * should be released by calling ssb_release() when the socket is destroyed.
 */
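/*
 * For illustration, a protocol attach routine typically reserves both
 * buffers in one call (a sketch; the sendspace/recvspace values are
 * hypothetical per-protocol defaults, not constants from this file):
 *
 *	static u_long sendspace = 32768;
 *	static u_long recvspace = 65536;
 *
 *	error = soreserve(so, sendspace, recvspace, ai->sb_rlimit);
 *	if (error)
 *		return (error);
 */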
592 */ 593 int 594 soreserve(struct socket *so, u_long sndcc, u_long rcvcc, struct rlimit *rl) 595 { 596 if (so->so_snd.ssb_lowat == 0) 597 atomic_set_int(&so->so_snd.ssb_flags, SSB_AUTOLOWAT); 598 if (ssb_reserve(&so->so_snd, sndcc, so, rl) == 0) 599 goto bad; 600 if (ssb_reserve(&so->so_rcv, rcvcc, so, rl) == 0) 601 goto bad2; 602 if (so->so_rcv.ssb_lowat == 0) 603 so->so_rcv.ssb_lowat = 1; 604 if (so->so_snd.ssb_lowat == 0) 605 so->so_snd.ssb_lowat = MCLBYTES; 606 if (so->so_snd.ssb_lowat > so->so_snd.ssb_hiwat) 607 so->so_snd.ssb_lowat = so->so_snd.ssb_hiwat; 608 return (0); 609 bad2: 610 ssb_release(&so->so_snd, so); 611 bad: 612 return (ENOBUFS); 613 } 614 615 static int 616 sysctl_handle_sb_max(SYSCTL_HANDLER_ARGS) 617 { 618 int error = 0; 619 u_long old_sb_max = sb_max; 620 621 error = SYSCTL_OUT(req, arg1, sizeof(int)); 622 if (error || !req->newptr) 623 return (error); 624 error = SYSCTL_IN(req, arg1, sizeof(int)); 625 if (error) 626 return (error); 627 if (sb_max < MSIZE + MCLBYTES) { 628 sb_max = old_sb_max; 629 return (EINVAL); 630 } 631 sb_max_adj = (u_quad_t)sb_max * MCLBYTES / (MSIZE + MCLBYTES); 632 return (0); 633 } 634 635 /* 636 * Allot mbufs to a signalsockbuf. 637 * 638 * Attempt to scale mbmax so that mbcnt doesn't become limiting 639 * if buffering efficiency is near the normal case. 640 * 641 * sb_max only applies to user-sockets (where rl != NULL). It does 642 * not apply to kernel sockets or kernel-controlled sockets. Note 643 * that NFS overrides the sockbuf limits created when nfsd creates 644 * a socket. 645 */ 646 int 647 ssb_reserve(struct signalsockbuf *ssb, u_long cc, struct socket *so, 648 struct rlimit *rl) 649 { 650 /* 651 * rl will only be NULL when we're in an interrupt (eg, in tcp_input) 652 * or when called from netgraph (ie, ngd_attach) 653 */ 654 if (rl && cc > sb_max_adj) 655 cc = sb_max_adj; 656 if (!chgsbsize(so->so_cred->cr_uidinfo, &ssb->ssb_hiwat, cc, 657 rl ? rl->rlim_cur : RLIM_INFINITY)) { 658 return (0); 659 } 660 if (rl) 661 ssb->ssb_mbmax = min(cc * sb_efficiency, sb_max); 662 else 663 ssb->ssb_mbmax = cc * sb_efficiency; 664 665 /* 666 * AUTOLOWAT is set on send buffers and prevents large writes 667 * from generating a huge number of context switches. 668 */ 669 if (ssb->ssb_flags & SSB_AUTOLOWAT) { 670 ssb->ssb_lowat = ssb->ssb_hiwat / 2; 671 if (ssb->ssb_lowat < MCLBYTES) 672 ssb->ssb_lowat = MCLBYTES; 673 } 674 if (ssb->ssb_lowat > ssb->ssb_hiwat) 675 ssb->ssb_lowat = ssb->ssb_hiwat; 676 return (1); 677 } 678 679 /* 680 * Free mbufs held by a socket, and reserved mbuf space. 681 */ 682 void 683 ssb_release(struct signalsockbuf *ssb, struct socket *so) 684 { 685 sbflush(&ssb->sb); 686 (void)chgsbsize(so->so_cred->cr_uidinfo, &ssb->ssb_hiwat, 0, 687 RLIM_INFINITY); 688 ssb->ssb_mbmax = 0; 689 } 690 691 /* 692 * Some routines that return EOPNOTSUPP for entry points that are not 693 * supported by a protocol. Fill in as needed. 
694 */ 695 void 696 pr_generic_notsupp(netmsg_t msg) 697 { 698 lwkt_replymsg(&msg->lmsg, EOPNOTSUPP); 699 } 700 701 int 702 pru_sosend_notsupp(struct socket *so, struct sockaddr *addr, struct uio *uio, 703 struct mbuf *top, struct mbuf *control, int flags, 704 struct thread *td) 705 { 706 if (top) 707 m_freem(top); 708 if (control) 709 m_freem(control); 710 return (EOPNOTSUPP); 711 } 712 713 int 714 pru_soreceive_notsupp(struct socket *so, struct sockaddr **paddr, 715 struct uio *uio, struct sockbuf *sio, 716 struct mbuf **controlp, int *flagsp) 717 { 718 return (EOPNOTSUPP); 719 } 720 721 /* 722 * This isn't really a ``null'' operation, but it's the default one 723 * and doesn't do anything destructive. 724 */ 725 void 726 pru_sense_null(netmsg_t msg) 727 { 728 msg->sense.nm_stat->st_blksize = msg->base.nm_so->so_snd.ssb_hiwat; 729 lwkt_replymsg(&msg->lmsg, 0); 730 } 731 732 /* 733 * Make a copy of a sockaddr in a malloced buffer of type M_SONAME. Callers 734 * of this routine assume that it always succeeds, so we have to use a 735 * blockable allocation even though we might be called from a critical thread. 736 */ 737 struct sockaddr * 738 dup_sockaddr(const struct sockaddr *sa) 739 { 740 struct sockaddr *sa2; 741 742 sa2 = kmalloc(sa->sa_len, M_SONAME, M_INTWAIT); 743 bcopy(sa, sa2, sa->sa_len); 744 return (sa2); 745 } 746 747 /* 748 * Create an external-format (``xsocket'') structure using the information 749 * in the kernel-format socket structure pointed to by so. This is done 750 * to reduce the spew of irrelevant information over this interface, 751 * to isolate user code from changes in the kernel structure, and 752 * potentially to provide information-hiding if we decide that 753 * some of this information should be hidden from users. 754 */ 755 void 756 sotoxsocket(struct socket *so, struct xsocket *xso) 757 { 758 xso->xso_len = sizeof *xso; 759 xso->xso_so = so; 760 xso->so_type = so->so_type; 761 xso->so_options = so->so_options; 762 xso->so_linger = so->so_linger; 763 xso->so_state = so->so_state; 764 xso->so_pcb = so->so_pcb; 765 xso->xso_protocol = so->so_proto->pr_protocol; 766 xso->xso_family = so->so_proto->pr_domain->dom_family; 767 xso->so_qlen = so->so_qlen; 768 xso->so_incqlen = so->so_incqlen; 769 xso->so_qlimit = so->so_qlimit; 770 xso->so_timeo = so->so_timeo; 771 xso->so_error = so->so_error; 772 xso->so_pgid = so->so_sigio ? so->so_sigio->sio_pgid : 0; 773 xso->so_oobmark = so->so_oobmark; 774 ssbtoxsockbuf(&so->so_snd, &xso->so_snd); 775 ssbtoxsockbuf(&so->so_rcv, &xso->so_rcv); 776 xso->so_uid = so->so_cred->cr_uid; 777 } 778 779 /* 780 * Here is the definition of some of the basic objects in the kern.ipc 781 * branch of the MIB. 782 */ 783 SYSCTL_NODE(_kern, KERN_IPC, ipc, CTLFLAG_RW, 0, "IPC"); 784 785 /* 786 * This takes the place of kern.maxsockbuf, which moved to kern.ipc. 787 * 788 * NOTE! sb_max only applies to user-created socket buffers. 
789 */ 790 static int dummy; 791 SYSCTL_INT(_kern, KERN_DUMMY, dummy, CTLFLAG_RW, &dummy, 0, ""); 792 SYSCTL_OID(_kern_ipc, KIPC_MAXSOCKBUF, maxsockbuf, CTLTYPE_INT|CTLFLAG_RW, 793 &sb_max, 0, sysctl_handle_sb_max, "I", "Maximum socket buffer size"); 794 SYSCTL_INT(_kern_ipc, OID_AUTO, maxsockets, CTLFLAG_RD, 795 &maxsockets, 0, "Maximum number of sockets available"); 796 SYSCTL_INT(_kern_ipc, KIPC_SOCKBUF_WASTE, sockbuf_waste_factor, CTLFLAG_RW, 797 &sb_efficiency, 0, 798 "Socket buffer limit scaler"); 799 800 /* 801 * Initialize maxsockets 802 */ 803 static void 804 init_maxsockets(void *ignored) 805 { 806 TUNABLE_INT_FETCH("kern.ipc.maxsockets", &maxsockets); 807 maxsockets = imax(maxsockets, imax(maxfiles, nmbclusters)); 808 } 809 SYSINIT(param, SI_BOOT1_TUNABLES, SI_ORDER_ANY, 810 init_maxsockets, NULL); 811 812