/*
 * Copyright (c) 1989, 1991, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Rick Macklem at The University of Guelph.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)nfs_socket.c	8.5 (Berkeley) 3/30/95
 * $FreeBSD: src/sys/nfs/nfs_socket.c,v 1.60.2.6 2003/03/26 01:44:46 alfred Exp $
 */

/*
 * Socket operations for use by nfs
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/vnode.h>
#include <sys/fcntl.h>
#include <sys/protosw.h>
#include <sys/resourcevar.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/socketops.h>
#include <sys/syslog.h>
#include <sys/thread.h>
#include <sys/tprintf.h>
#include <sys/sysctl.h>
#include <sys/signalvar.h>

#include <sys/signal2.h>
#include <sys/mutex2.h>
#include <sys/socketvar2.h>

#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/thread2.h>

#include "rpcv2.h"
#include "nfsproto.h"
#include "nfs.h"
#include "xdr_subs.h"
#include "nfsm_subs.h"
#include "nfsmount.h"
#include "nfsnode.h"
#include "nfsrtt.h"

#define	TRUE	1
#define	FALSE	0

/*
 * RTT calculations are scaled by 256 (8 bits).  A proper fractional
 * RTT will still be calculated even with a slow NFS timer.
 */
#define	NFS_SRTT(r)	(r)->r_nmp->nm_srtt[proct[(r)->r_procnum]]
#define	NFS_SDRTT(r)	(r)->r_nmp->nm_sdrtt[proct[(r)->r_procnum]]
#define	NFS_RTT_SCALE_BITS	8	/* bits */
#define	NFS_RTT_SCALE		256	/* value */

/*
 * Defines which timer to use for the procnum.
 * 0 - default
 * 1 - getattr
 * 2 - lookup
 * 3 - read
 * 4 - write
 * 5 - commit
 */
static int proct[NFS_NPROCS] = {
	0, 1, 0, 2, 1, 3, 3, 4, 0, 0,	/* 00-09 */
	0, 0, 0, 0, 0, 0, 3, 3, 0, 0,	/* 10-19 */
	0, 5, 0, 0, 0, 0,		/* 20-25 */
};

static int multt[NFS_NPROCS] = {
	1, 1, 1, 1, 1, 1, 1, 1, 1, 1,	/* 00-09 */
	1, 1, 1, 1, 1, 1, 1, 1, 1, 1,	/* 10-19 */
	1, 2, 1, 1, 1, 1,		/* 20-25 */
};

static int nfs_backoff[8] = { 2, 3, 5, 8, 13, 21, 34, 55 };
static int nfs_realign_test;
static int nfs_realign_count;
static int nfs_showrtt;
static int nfs_showrexmit;
int nfs_maxasyncbio = NFS_MAXASYNCBIO;

SYSCTL_DECL(_vfs_nfs);

SYSCTL_INT(_vfs_nfs, OID_AUTO, realign_test, CTLFLAG_RW, &nfs_realign_test, 0,
    "Number of times mbufs have been tested for bad alignment");
SYSCTL_INT(_vfs_nfs, OID_AUTO, realign_count, CTLFLAG_RW, &nfs_realign_count, 0,
    "Number of realignments for badly aligned mbuf data");
SYSCTL_INT(_vfs_nfs, OID_AUTO, showrtt, CTLFLAG_RW, &nfs_showrtt, 0,
    "Show round trip time output");
SYSCTL_INT(_vfs_nfs, OID_AUTO, showrexmit, CTLFLAG_RW, &nfs_showrexmit, 0,
    "Show retransmits info");
SYSCTL_INT(_vfs_nfs, OID_AUTO, maxasyncbio, CTLFLAG_RW, &nfs_maxasyncbio, 0,
    "Max number of asynchronous bio's");

static int nfs_request_setup(nfsm_info_t info);
static int nfs_request_auth(struct nfsreq *rep);
static int nfs_request_try(struct nfsreq *rep);
static int nfs_request_waitreply(struct nfsreq *rep);
static int nfs_request_processreply(nfsm_info_t info, int);

int nfsrtton = 0;
struct nfsrtt nfsrtt;
struct callout nfs_timer_handle;

static int nfs_msg(struct thread *, char *, char *);
static int nfs_rcvlock(struct nfsmount *nmp, struct nfsreq *myreq);
static void nfs_rcvunlock(struct nfsmount *nmp);
static void nfs_realign(struct mbuf **pm, int hsiz);
static int nfs_receive(struct nfsmount *nmp, struct nfsreq *rep,
			struct sockaddr **aname, struct mbuf **mp);
static void nfs_softterm(struct nfsreq *rep, int islocked);
static void nfs_hardterm(struct nfsreq *rep, int islocked);
static int nfs_reconnect(struct nfsmount *nmp, struct nfsreq *rep);
#ifndef NFS_NOSERVER
static int nfsrv_getstream(struct nfssvc_sock *, int, int *);
static void nfs_timer_req(struct nfsreq *req);
static void nfs_checkpkt(struct mbuf *m, int len);

int (*nfsrv3_procs[NFS_NPROCS])(struct nfsrv_descript *nd,
				struct nfssvc_sock *slp,
				struct thread *td,
				struct mbuf **mreqp) = {
	nfsrv_null,
	nfsrv_getattr,
	nfsrv_setattr,
	nfsrv_lookup,
	nfsrv3_access,
	nfsrv_readlink,
	nfsrv_read,
	nfsrv_write,
	nfsrv_create,
	nfsrv_mkdir,
	nfsrv_symlink,
	nfsrv_mknod,
	nfsrv_remove,
	nfsrv_rmdir,
	nfsrv_rename,
	nfsrv_link,
	nfsrv_readdir,
	nfsrv_readdirplus,
	nfsrv_statfs,
	nfsrv_fsinfo,
	nfsrv_pathconf,
	nfsrv_commit,
	nfsrv_noop,
	nfsrv_noop,
	nfsrv_noop,
	nfsrv_noop
};
#endif /* NFS_NOSERVER */

/*
 * Initialize sockets and congestion for a new NFS connection.
 * We do not free the sockaddr if error.
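 *
 * The sequence is: create the socket, optionally bind it to a reserved
 * port (some servers insist on one), connect unless NFSMNT_NOCONN is
 * set, apply socket options and buffer reservations, and only then
 * publish nm_so so the timer never sees a half-initialized socket.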
 */
int
nfs_connect(struct nfsmount *nmp, struct nfsreq *rep)
{
	struct socket *so;
	int error;
	struct sockaddr *saddr;
	struct sockaddr_in *sin;
	struct thread *td = &thread0; /* only used for socreate and sobind */

	nmp->nm_so = so = NULL;
	if (nmp->nm_flag & NFSMNT_FORCE)
		return (EINVAL);
	saddr = nmp->nm_nam;
	error = socreate(saddr->sa_family, &so, nmp->nm_sotype,
			 nmp->nm_soproto, td);
	if (error)
		goto bad;
	nmp->nm_soflags = so->so_proto->pr_flags;

	/*
	 * Some servers require that the client port be a reserved port
	 * number.
	 */
	if (saddr->sa_family == AF_INET && (nmp->nm_flag & NFSMNT_RESVPORT)) {
		struct sockopt sopt;
		int ip;
		struct sockaddr_in ssin;

		bzero(&sopt, sizeof sopt);
		ip = IP_PORTRANGE_LOW;
		sopt.sopt_level = IPPROTO_IP;
		sopt.sopt_name = IP_PORTRANGE;
		sopt.sopt_val = (void *)&ip;
		sopt.sopt_valsize = sizeof(ip);
		sopt.sopt_td = NULL;
		error = sosetopt(so, &sopt);
		if (error)
			goto bad;
		bzero(&ssin, sizeof ssin);
		sin = &ssin;
		sin->sin_len = sizeof (struct sockaddr_in);
		sin->sin_family = AF_INET;
		sin->sin_addr.s_addr = INADDR_ANY;
		sin->sin_port = htons(0);
		error = sobind(so, (struct sockaddr *)sin, td);
		if (error)
			goto bad;
		bzero(&sopt, sizeof sopt);
		ip = IP_PORTRANGE_DEFAULT;
		sopt.sopt_level = IPPROTO_IP;
		sopt.sopt_name = IP_PORTRANGE;
		sopt.sopt_val = (void *)&ip;
		sopt.sopt_valsize = sizeof(ip);
		sopt.sopt_td = NULL;
		error = sosetopt(so, &sopt);
		if (error)
			goto bad;
	}

	/*
	 * Protocols that do not require connections may be optionally left
	 * unconnected for servers that reply from a port other than
	 * NFS_PORT.
	 */
	if (nmp->nm_flag & NFSMNT_NOCONN) {
		if (nmp->nm_soflags & PR_CONNREQUIRED) {
			error = ENOTCONN;
			goto bad;
		}
	} else {
		error = soconnect(so, nmp->nm_nam, td);
		if (error)
			goto bad;

		/*
		 * Wait for the connection to complete.  Cribbed from the
		 * connect system call but with the wait timing out so
		 * that interruptible mounts don't hang here for a long time.
		 */
		crit_enter();
		while ((so->so_state & SS_ISCONNECTING) && so->so_error == 0) {
			(void) tsleep((caddr_t)&so->so_timeo, 0,
				      "nfscon", 2 * hz);
			if ((so->so_state & SS_ISCONNECTING) &&
			    so->so_error == 0 && rep &&
			    (error = nfs_sigintr(nmp, rep, rep->r_td)) != 0) {
				soclrstate(so, SS_ISCONNECTING);
				crit_exit();
				goto bad;
			}
		}
		if (so->so_error) {
			error = so->so_error;
			so->so_error = 0;
			crit_exit();
			goto bad;
		}
		crit_exit();
	}
	so->so_rcv.ssb_timeo = (5 * hz);
	so->so_snd.ssb_timeo = (5 * hz);

	/*
	 * Get buffer reservation size from sysctl, but impose reasonable
	 * limits.
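	 *
	 * Both directions are reserved from the same knob
	 * (vfs.nfs.soreserve).  If it is sized too small relative to
	 * the amount of queued async I/O, UDP sends can fail with
	 * ENOBUFS and nfs_send() will log a warning suggesting that
	 * the knob be raised or vfs.nfs.maxasyncbio be lowered.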
	 */
	if (nmp->nm_sotype == SOCK_STREAM) {
		if (so->so_proto->pr_flags & PR_CONNREQUIRED) {
			struct sockopt sopt;
			int val;

			bzero(&sopt, sizeof sopt);
			sopt.sopt_level = SOL_SOCKET;
			sopt.sopt_name = SO_KEEPALIVE;
			sopt.sopt_val = &val;
			sopt.sopt_valsize = sizeof val;
			val = 1;
			sosetopt(so, &sopt);
		}
		if (so->so_proto->pr_protocol == IPPROTO_TCP) {
			struct sockopt sopt;
			int val;

			bzero(&sopt, sizeof sopt);
			sopt.sopt_level = IPPROTO_TCP;
			sopt.sopt_name = TCP_NODELAY;
			sopt.sopt_val = &val;
			sopt.sopt_valsize = sizeof val;
			val = 1;
			sosetopt(so, &sopt);

			bzero(&sopt, sizeof sopt);
			sopt.sopt_level = IPPROTO_TCP;
			sopt.sopt_name = TCP_FASTKEEP;
			sopt.sopt_val = &val;
			sopt.sopt_valsize = sizeof val;
			val = 1;
			sosetopt(so, &sopt);
		}
	}
	error = soreserve(so, nfs_soreserve, nfs_soreserve, NULL);
	if (error)
		goto bad;
	atomic_set_int(&so->so_rcv.ssb_flags, SSB_NOINTR);
	atomic_set_int(&so->so_snd.ssb_flags, SSB_NOINTR);

	/* Initialize other non-zero congestion variables */
	nmp->nm_srtt[0] = nmp->nm_srtt[1] = nmp->nm_srtt[2] =
		nmp->nm_srtt[3] = (NFS_TIMEO << NFS_RTT_SCALE_BITS);
	nmp->nm_sdrtt[0] = nmp->nm_sdrtt[1] = nmp->nm_sdrtt[2] =
		nmp->nm_sdrtt[3] = 0;
	nmp->nm_maxasync_scaled = NFS_MINASYNC_SCALED;
	nmp->nm_timeouts = 0;

	/*
	 * Assign nm_so last.  The moment nm_so is assigned the nfs_timer()
	 * can mess with the socket.
	 */
	nmp->nm_so = so;
	return (0);

bad:
	if (so) {
		soshutdown(so, SHUT_RDWR);
		soclose(so, FNONBLOCK);
	}
	return (error);
}

/*
 * Reconnect routine:
 * Called when a connection is broken on a reliable protocol.
 * - clean up the old socket
 * - nfs_connect() again
 * - set R_NEEDSXMIT for all outstanding requests on mount point
 * If this fails the mount point is DEAD!
 * nb: Must be called with the nfs_sndlock() set on the mount point.
 */
static int
nfs_reconnect(struct nfsmount *nmp, struct nfsreq *rep)
{
	struct nfsreq *req;
	int error;

	nfs_disconnect(nmp);
	if (nmp->nm_rxstate >= NFSSVC_STOPPING)
		return (EINTR);
	while ((error = nfs_connect(nmp, rep)) != 0) {
		if (error == EINTR || error == ERESTART)
			return (EINTR);
		if (error == EINVAL)
			return (error);
		if (nmp->nm_rxstate >= NFSSVC_STOPPING)
			return (EINTR);
		(void) tsleep((caddr_t)&lbolt, 0, "nfscon", 0);
	}

	/*
	 * Loop through outstanding request list and fix up all requests
	 * on old socket.
	 */
	crit_enter();
	TAILQ_FOREACH(req, &nmp->nm_reqq, r_chain) {
		KKASSERT(req->r_nmp == nmp);
		req->r_flags |= R_NEEDSXMIT;
	}
	crit_exit();
	return (0);
}

/*
 * NFS disconnect.  Clean up and unlink.
 */
void
nfs_disconnect(struct nfsmount *nmp)
{
	struct socket *so;

	if (nmp->nm_so) {
		so = nmp->nm_so;
		nmp->nm_so = NULL;
		soshutdown(so, SHUT_RDWR);
		soclose(so, FNONBLOCK);
	}
}

void
nfs_safedisconnect(struct nfsmount *nmp)
{
	nfs_rcvlock(nmp, NULL);
	nfs_disconnect(nmp);
	nfs_rcvunlock(nmp);
}

/*
 * This is the nfs send routine.  For connection based socket types, it
 * must be called with an nfs_sndlock() on the socket.
 * "rep == NULL" indicates that it has been called from a server.
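 * In both cases only EINTR, ERESTART, EWOULDBLOCK, and EPIPE are ever
 * passed back to the caller; all other socket errors are logged and
 * then squelched to 0 on the assumption that they are transient.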
 * For the client side:
 * - return EINTR if the RPC is terminated, 0 otherwise
 * - set R_NEEDSXMIT if the send fails for any reason
 * - do any cleanup required by recoverable socket errors (?)
 * For the server side:
 * - return EINTR or ERESTART if interrupted by a signal
 * - return EPIPE if a connection is lost for connection based sockets (TCP...)
 * - do any cleanup required by recoverable socket errors (?)
 */
int
nfs_send(struct socket *so, struct sockaddr *nam, struct mbuf *top,
	 struct nfsreq *rep)
{
	struct sockaddr *sendnam;
	int error, soflags, flags;

	if (rep) {
		if (rep->r_flags & R_SOFTTERM) {
			m_freem(top);
			return (EINTR);
		}
		if ((so = rep->r_nmp->nm_so) == NULL) {
			rep->r_flags |= R_NEEDSXMIT;
			m_freem(top);
			return (0);
		}
		rep->r_flags &= ~R_NEEDSXMIT;
		soflags = rep->r_nmp->nm_soflags;
	} else {
		soflags = so->so_proto->pr_flags;
	}
	if ((soflags & PR_CONNREQUIRED) || (so->so_state & SS_ISCONNECTED))
		sendnam = NULL;
	else
		sendnam = nam;
	if (so->so_type == SOCK_SEQPACKET)
		flags = MSG_EOR;
	else
		flags = 0;

	/*
	 * calls pru_sosend -> sosend -> so_pru_send -> netrpc
	 */
	error = so_pru_sosend(so, sendnam, NULL, top, NULL, flags,
			      curthread /*XXX*/);

	/*
	 * ENOBUFS for dgram sockets is transient and non-fatal.
	 * No need to log, and no need to break a soft mount.
	 */
	if (error == ENOBUFS && so->so_type == SOCK_DGRAM) {
		error = 0;
		/*
		 * do backoff retransmit on client
		 */
		if (rep) {
			if ((rep->r_nmp->nm_state & NFSSTA_SENDSPACE) == 0) {
				rep->r_nmp->nm_state |= NFSSTA_SENDSPACE;
				kprintf("Warning: NFS: Insufficient sendspace "
					"(%lu),\n"
					"\t You must increase vfs.nfs.soreserve "
					"or decrease vfs.nfs.maxasyncbio\n",
					so->so_snd.ssb_hiwat);
			}
			rep->r_flags |= R_NEEDSXMIT;
		}
	}

	if (error) {
		if (rep) {
			log(LOG_INFO, "nfs send error %d for server %s\n",
			    error,
			    rep->r_nmp->nm_mountp->mnt_stat.f_mntfromname);
			/*
			 * Deal with errors for the client side.
			 */
			if (rep->r_flags & R_SOFTTERM)
				error = EINTR;
			else
				rep->r_flags |= R_NEEDSXMIT;
		} else {
			log(LOG_INFO, "nfsd send error %d\n", error);
		}

		/*
		 * Handle any recoverable (soft) socket errors here. (?)
		 */
		if (error != EINTR && error != ERESTART &&
		    error != EWOULDBLOCK && error != EPIPE)
			error = 0;
	}
	return (error);
}

/*
 * Receive a Sun RPC Request/Reply.  For SOCK_DGRAM, the work is all
 * done by soreceive(), but for SOCK_STREAM we must deal with the Record
 * Mark and consolidate the data into a new mbuf list.
 * nb: Sometimes TCP passes the data up to soreceive() in long lists of
 * small mbufs.
 * For SOCK_STREAM we must be very careful to read an entire record once
 * we have read any of it, even if the system call has been interrupted.
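 *
 * The Record Mark is a 32-bit big-endian word whose high bit flags the
 * final fragment of a record and whose low 31 bits give the fragment
 * length; e.g. a marker of 0x800001d0 announces a final fragment of
 * 0x1d0 (464) bytes.  That is why the length read below is masked
 * with ~0x80000000 after the ntohl().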
 */
static int
nfs_receive(struct nfsmount *nmp, struct nfsreq *rep,
	    struct sockaddr **aname, struct mbuf **mp)
{
	struct socket *so;
	struct sockbuf sio;
	struct uio auio;
	struct iovec aio;
	struct mbuf *m;
	struct mbuf *control;
	u_int32_t len;
	struct sockaddr **getnam;
	int error, sotype, rcvflg;
	struct thread *td = curthread;	/* XXX */

	/*
	 * Set up arguments for soreceive()
	 */
	*mp = NULL;
	*aname = NULL;
	sotype = nmp->nm_sotype;

	/*
	 * For reliable protocols, lock against other senders/receivers
	 * in case a reconnect is necessary.
	 * For SOCK_STREAM, first get the Record Mark to find out how much
	 * more there is to get.
	 * We must lock the socket against other receivers
	 * until we have an entire rpc request/reply.
	 */
	if (sotype != SOCK_DGRAM) {
		error = nfs_sndlock(nmp, rep);
		if (error)
			return (error);
tryagain:
		/*
		 * Check for fatal errors and resending request.
		 */
		/*
		 * Ugh: If a reconnect attempt just happened, nm_so
		 * would have changed.  NULL indicates a failed
		 * attempt that has essentially shut down this
		 * mount point.
		 */
		if (rep && (rep->r_mrep || (rep->r_flags & R_SOFTTERM))) {
			nfs_sndunlock(nmp);
			return (EINTR);
		}
		so = nmp->nm_so;
		if (so == NULL) {
			error = nfs_reconnect(nmp, rep);
			if (error) {
				nfs_sndunlock(nmp);
				return (error);
			}
			goto tryagain;
		}
		while (rep && (rep->r_flags & R_NEEDSXMIT)) {
			m = m_copym(rep->r_mreq, 0, M_COPYALL, MB_WAIT);
			nfsstats.rpcretries++;
			error = nfs_send(so, rep->r_nmp->nm_nam, m, rep);
			if (error) {
				if (error == EINTR || error == ERESTART ||
				    (error = nfs_reconnect(nmp, rep)) != 0) {
					nfs_sndunlock(nmp);
					return (error);
				}
				goto tryagain;
			}
		}
		nfs_sndunlock(nmp);
		if (sotype == SOCK_STREAM) {
			/*
			 * Get the length marker from the stream
			 */
			aio.iov_base = (caddr_t)&len;
			aio.iov_len = sizeof(u_int32_t);
			auio.uio_iov = &aio;
			auio.uio_iovcnt = 1;
			auio.uio_segflg = UIO_SYSSPACE;
			auio.uio_rw = UIO_READ;
			auio.uio_offset = 0;
			auio.uio_resid = sizeof(u_int32_t);
			auio.uio_td = td;
			do {
				rcvflg = MSG_WAITALL;
				error = so_pru_soreceive(so, NULL, &auio, NULL,
							 NULL, &rcvflg);
				if (error == EWOULDBLOCK && rep) {
					if (rep->r_flags & R_SOFTTERM)
						return (EINTR);
				}
			} while (error == EWOULDBLOCK);

			if (error == 0 && auio.uio_resid > 0) {
				/*
				 * Only log short packets if not EOF
				 */
				if (auio.uio_resid != sizeof(u_int32_t))
					log(LOG_INFO,
					    "short receive (%d/%d) from "
					    "nfs server %s\n",
					    (int)(sizeof(u_int32_t) -
						  auio.uio_resid),
					    (int)sizeof(u_int32_t),
					    nmp->nm_mountp->mnt_stat.f_mntfromname);
				error = EPIPE;
			}
			if (error)
				goto errout;
			len = ntohl(len) & ~0x80000000;
			/*
			 * This is SERIOUS!  We are out of sync with the sender
			 * and forcing a disconnect/reconnect is all I can do.
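			 * A length beyond NFS_MAXPACKET almost
			 * certainly means the record mark framing has
			 * been lost; the stream cannot be resynced in
			 * place, so return EFBIG and let the errout
			 * path reconnect.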
			 */
			if (len > NFS_MAXPACKET) {
				log(LOG_ERR, "%s (%d) from nfs server %s\n",
				    "impossible packet length",
				    len,
				    nmp->nm_mountp->mnt_stat.f_mntfromname);
				error = EFBIG;
				goto errout;
			}

			/*
			 * Get the rest of the packet as an mbuf chain
			 */
			sbinit(&sio, len);
			do {
				rcvflg = MSG_WAITALL;
				error = so_pru_soreceive(so, NULL, NULL, &sio,
							 NULL, &rcvflg);
			} while (error == EWOULDBLOCK || error == EINTR ||
				 error == ERESTART);
			if (error == 0 && sio.sb_cc != len) {
				if (sio.sb_cc != 0)
					log(LOG_INFO,
					    "short receive (%zu/%d) from "
					    "nfs server %s\n",
					    (size_t)sio.sb_cc, len,
					    nmp->nm_mountp->mnt_stat.f_mntfromname);
				error = EPIPE;
			}
			*mp = sio.sb_mb;
		} else {
			/*
			 * Non-stream, so get the whole packet by not
			 * specifying MSG_WAITALL and by specifying a large
			 * length.
			 *
			 * We have no use for control msg., but must grab them
			 * and then throw them away so we know what is going
			 * on.
			 */
			sbinit(&sio, 100000000);
			do {
				rcvflg = 0;
				error = so_pru_soreceive(so, NULL, NULL, &sio,
							 &control, &rcvflg);
				if (control)
					m_freem(control);
				if (error == EWOULDBLOCK && rep) {
					if (rep->r_flags & R_SOFTTERM) {
						m_freem(sio.sb_mb);
						return (EINTR);
					}
				}
			} while (error == EWOULDBLOCK ||
				 (error == 0 && sio.sb_mb == NULL && control));
			if ((rcvflg & MSG_EOR) == 0)
				kprintf("Egad!!\n");
			if (error == 0 && sio.sb_mb == NULL)
				error = EPIPE;
			len = sio.sb_cc;
			*mp = sio.sb_mb;
		}
errout:
		if (error && error != EINTR && error != ERESTART) {
			m_freem(*mp);
			*mp = NULL;
			if (error != EPIPE) {
				log(LOG_INFO,
				    "receive error %d from nfs server %s\n",
				    error,
				    nmp->nm_mountp->mnt_stat.f_mntfromname);
			}
			error = nfs_sndlock(nmp, rep);
			if (!error) {
				error = nfs_reconnect(nmp, rep);
				if (!error)
					goto tryagain;
				else
					nfs_sndunlock(nmp);
			}
		}
	} else {
		if ((so = nmp->nm_so) == NULL)
			return (EACCES);
		if (so->so_state & SS_ISCONNECTED)
			getnam = NULL;
		else
			getnam = aname;
		sbinit(&sio, 100000000);
		do {
			rcvflg = 0;
			error = so_pru_soreceive(so, getnam, NULL, &sio,
						 NULL, &rcvflg);
			if (error == EWOULDBLOCK && rep &&
			    (rep->r_flags & R_SOFTTERM)) {
				m_freem(sio.sb_mb);
				return (EINTR);
			}
		} while (error == EWOULDBLOCK);

		len = sio.sb_cc;
		*mp = sio.sb_mb;

		/*
		 * A shutdown may result in no error and no mbuf.
		 * Convert to EPIPE.
		 */
		if (*mp == NULL && error == 0)
			error = EPIPE;
	}
	if (error) {
		m_freem(*mp);
		*mp = NULL;
	}

	/*
	 * Search for any mbufs that are not a multiple of 4 bytes long
	 * or with m_data not longword aligned.
	 * These could cause pointer alignment problems, so copy them to
	 * well aligned mbufs.
	 */
	nfs_realign(mp, 5 * NFSX_UNSIGNED);
	return (error);
}

/*
 * Implement receipt of reply on a socket.
 *
 * We must search through the list of received datagrams matching them
 * with outstanding requests using the xid, until ours is found.
 *
 * If myrep is NULL we process packets on the socket until
 * interrupted or until nm_reqrxq is non-empty.
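 *
 * Each pass takes the receive lock, pulls one complete RPC off the
 * socket, and matches its xid against nm_reqq.  Replies that match no
 * outstanding request are counted in nfsstats.rpcunexpected and
 * dropped.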
 */
/* ARGSUSED */
int
nfs_reply(struct nfsmount *nmp, struct nfsreq *myrep)
{
	struct nfsreq *rep;
	struct sockaddr *nam;
	u_int32_t rxid;
	u_int32_t *tl;
	int error;
	struct nfsm_info info;

	/*
	 * Loop around until we get our own reply
	 */
	for (;;) {
		/*
		 * Lock against other receivers so that I don't get stuck in
		 * sbwait() after someone else has received my reply for me.
		 * Also necessary for connection based protocols to avoid
		 * race conditions during a reconnect.
		 *
		 * If nfs_rcvlock() returns EALREADY, that means that
		 * the reply has already been received by another
		 * process and we can return immediately.  In this
		 * case, the lock is not taken to avoid races with
		 * other processes.
		 */
		info.mrep = NULL;

		error = nfs_rcvlock(nmp, myrep);
		if (error == EALREADY)
			return (0);
		if (error)
			return (error);

		/*
		 * If myrep is NULL we are the receiver helper thread.
		 * Stop waiting for incoming replies if there are
		 * messages sitting on reqrxq that we need to process,
		 * or if a shutdown request is pending.
		 */
		if (myrep == NULL && (TAILQ_FIRST(&nmp->nm_reqrxq) ||
		    nmp->nm_rxstate > NFSSVC_PENDING)) {
			nfs_rcvunlock(nmp);
			return (EWOULDBLOCK);
		}

		/*
		 * Get the next Rpc reply off the socket
		 *
		 * We cannot release the receive lock until we've
		 * filled in rep->r_mrep, otherwise a waiting
		 * thread may deadlock in soreceive with no incoming
		 * packets expected.
		 */
		error = nfs_receive(nmp, myrep, &nam, &info.mrep);
		if (error) {
			/*
			 * Ignore routing errors on connectionless protocols??
			 */
			nfs_rcvunlock(nmp);
			if (NFSIGNORE_SOERROR(nmp->nm_soflags, error)) {
				if (nmp->nm_so == NULL)
					return (error);
				nmp->nm_so->so_error = 0;
				continue;
			}
			return (error);
		}
		if (nam)
			kfree(nam, M_SONAME);

		/*
		 * Get the xid and check that it is an rpc reply
		 */
		info.md = info.mrep;
		info.dpos = mtod(info.md, caddr_t);
		NULLOUT(tl = nfsm_dissect(&info, 2 * NFSX_UNSIGNED));
		rxid = *tl++;
		if (*tl != rpc_reply) {
			nfsstats.rpcinvalid++;
			m_freem(info.mrep);
			info.mrep = NULL;
nfsmout:
			nfs_rcvunlock(nmp);
			continue;
		}

		/*
		 * Loop through the request list to match up the reply.
		 * Iff no match, just drop the datagram.  On match, set
		 * r_mrep atomically to prevent the timer from messing
		 * around with the request after we have exited the critical
		 * section.
		 */
		crit_enter();
		TAILQ_FOREACH(rep, &nmp->nm_reqq, r_chain) {
			if (rep->r_mrep == NULL && rxid == rep->r_xid)
				break;
		}

		/*
		 * Fill in the rest of the reply if we found a match.
		 *
		 * Deal with duplicate responses if there was no match.
		 */
		if (rep) {
			rep->r_md = info.md;
			rep->r_dpos = info.dpos;
			if (nfsrtton) {
				struct rttl *rt;

				rt = &nfsrtt.rttl[nfsrtt.pos];
				rt->proc = rep->r_procnum;
				rt->rto = 0;
				rt->sent = 0;
				rt->cwnd = nmp->nm_maxasync_scaled;
				rt->srtt = nmp->nm_srtt[proct[rep->r_procnum] - 1];
				rt->sdrtt = nmp->nm_sdrtt[proct[rep->r_procnum] - 1];
				rt->fsid = nmp->nm_mountp->mnt_stat.f_fsid;
				getmicrotime(&rt->tstamp);
				if (rep->r_flags & R_TIMING)
					rt->rtt = rep->r_rtt;
				else
					rt->rtt = 1000000;
				nfsrtt.pos = (nfsrtt.pos + 1) % NFSRTTLOGSIZ;
			}

			/*
			 * New congestion control is based only on async
			 * requests.
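			 * The window (nm_maxasync_scaled) opens
			 * additively, one scaled unit per reply
			 * received here, and is halved by
			 * nfs_timer_req() when a retransmit fires,
			 * giving TCP-like AIMD behavior.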
			 */
			if (nmp->nm_maxasync_scaled < NFS_MAXASYNC_SCALED)
				++nmp->nm_maxasync_scaled;
			if (rep->r_flags & R_SENT) {
				rep->r_flags &= ~R_SENT;
			}
			/*
			 * Update rtt using a gain of 0.125 on the mean
			 * and a gain of 0.25 on the deviation.  In fixed
			 * point this is srtt = (7*srtt + rtt*256) / 8;
			 * the deviation ramps up with a gain of 0.25 but
			 * decays with a gain of only 1/16, see below.
			 *
			 * NOTE SRTT/SDRTT are only good if R_TIMING is set.
			 */
			if ((rep->r_flags & R_TIMING) && rep->r_rexmit == 0) {
				/*
				 * Since the timer resolution of
				 * NFS_HZ is so coarse, it can often
				 * result in r_rtt == 0.  Since
				 * r_rtt == N means that the actual
				 * rtt is between N+dt and N+2-dt ticks,
				 * add 1.
				 */
				int n;
				int d;

#define NFSRSB	NFS_RTT_SCALE_BITS
				n = ((NFS_SRTT(rep) * 7) +
				     (rep->r_rtt << NFSRSB)) >> 3;
				d = n - NFS_SRTT(rep);
				NFS_SRTT(rep) = n;

				/*
				 * Don't let the jitter calculation decay
				 * too quickly, but we want a fast rampup.
				 */
				if (d < 0)
					d = -d;
				d <<= NFSRSB;
				if (d < NFS_SDRTT(rep))
					n = ((NFS_SDRTT(rep) * 15) + d) >> 4;
				else
					n = ((NFS_SDRTT(rep) * 3) + d) >> 2;
				NFS_SDRTT(rep) = n;
#undef NFSRSB
			}
			nmp->nm_timeouts = 0;
			rep->r_mrep = info.mrep;
			nfs_hardterm(rep, 0);
		} else {
			/*
			 * Extract vers, prog, nfsver, procnum.  A duplicate
			 * response means we didn't wait long enough so
			 * we increase the SRTT to avoid future spurious
			 * timeouts.
			 */
			u_int procnum = nmp->nm_lastreprocnum;
			int n;

			if (procnum < NFS_NPROCS && proct[procnum]) {
				if (nfs_showrexmit)
					kprintf("D");
				n = nmp->nm_srtt[proct[procnum]];
				n += NFS_ASYSCALE * NFS_HZ;
				if (n < NFS_ASYSCALE * NFS_HZ * 10)
					n = NFS_ASYSCALE * NFS_HZ * 10;
				nmp->nm_srtt[proct[procnum]] = n;
			}
		}
		nfs_rcvunlock(nmp);
		crit_exit();

		/*
		 * If not matched to a request, drop it.
		 * If it's mine, get out.
		 */
		if (rep == NULL) {
			nfsstats.rpcunexpected++;
			m_freem(info.mrep);
			info.mrep = NULL;
		} else if (rep == myrep) {
			if (rep->r_mrep == NULL)
				panic("nfsreply nil");
			return (0);
		}
	}
}

/*
 * Run the request state machine until the target state is reached
 * or a fatal error occurs.  The target state is not run.  Specifying
 * a target of NFSM_STATE_DONE runs the state machine until the rpc
 * is complete.
 *
 * EINPROGRESS is returned for all states other than the DONE state,
 * indicating that the rpc is still in progress.
 */
int
nfs_request(struct nfsm_info *info, nfsm_state_t bstate, nfsm_state_t estate)
{
	struct nfsreq *req;

	while (info->state >= bstate && info->state < estate) {
		switch (info->state) {
		case NFSM_STATE_SETUP:
			/*
			 * Setup the nfsreq.  Any error which occurs during
			 * this state is fatal.
			 */
			info->error = nfs_request_setup(info);
			if (info->error) {
				info->state = NFSM_STATE_DONE;
				return (info->error);
			} else {
				req = info->req;
				req->r_mrp = &info->mrep;
				req->r_mdp = &info->md;
				req->r_dposp = &info->dpos;
				info->state = NFSM_STATE_AUTH;
			}
			break;
		case NFSM_STATE_AUTH:
			/*
			 * Authenticate the nfsreq.  Any error which occurs
			 * during this state is fatal.
			 */
			info->error = nfs_request_auth(info->req);
			if (info->error) {
				info->state = NFSM_STATE_DONE;
				return (info->error);
			} else {
				info->state = NFSM_STATE_TRY;
			}
			break;
		case NFSM_STATE_TRY:
			/*
			 * Transmit or retransmit attempt.
			 * An error in this
			 * state is ignored and we always move on to the
			 * next state.
			 *
			 * This can trivially race the receiver if the
			 * request is asynchronous.  nfs_request_try()
			 * will thus set the state for us and we
			 * must also return immediately if we are
			 * running an async state machine, because
			 * info can become invalid due to races after
			 * try() returns.
			 */
			if (info->req->r_flags & R_ASYNC) {
				nfs_request_try(info->req);
				if (estate == NFSM_STATE_WAITREPLY)
					return (EINPROGRESS);
			} else {
				nfs_request_try(info->req);
				info->state = NFSM_STATE_WAITREPLY;
			}
			break;
		case NFSM_STATE_WAITREPLY:
			/*
			 * Wait for a reply or timeout and move on to the
			 * next state.  The error returned by this state
			 * is passed to the processing code in the next
			 * state.
			 */
			info->error = nfs_request_waitreply(info->req);
			info->state = NFSM_STATE_PROCESSREPLY;
			break;
		case NFSM_STATE_PROCESSREPLY:
			/*
			 * Process the reply or timeout.  Errors which occur
			 * in this state may cause the state machine to
			 * go back to an earlier state, and are fatal
			 * otherwise.
			 */
			info->error = nfs_request_processreply(info,
							       info->error);
			switch (info->error) {
			case ENEEDAUTH:
				info->state = NFSM_STATE_AUTH;
				break;
			case EAGAIN:
				info->state = NFSM_STATE_TRY;
				break;
			default:
				/*
				 * Operation complete, with or without an
				 * error.  We are done.
				 */
				info->req = NULL;
				info->state = NFSM_STATE_DONE;
				return (info->error);
			}
			break;
		case NFSM_STATE_DONE:
			/*
			 * Shouldn't be reached
			 */
			return (info->error);
			/* NOT REACHED */
		}
	}

	/*
	 * If we are done return the error code (if any).
	 * Otherwise return EINPROGRESS.
	 */
	if (info->state == NFSM_STATE_DONE)
		return (info->error);
	return (EINPROGRESS);
}

/*
 * nfs_request - goes something like this
 *	- fill in request struct
 *	- links it into list
 *	- calls nfs_send() for first transmit
 *	- calls nfs_receive() to get reply
 *	- break down rpc header and return with nfs reply pointed to
 *	  by mrep or error
 * nb: always frees up mreq mbuf list
 */
static int
nfs_request_setup(nfsm_info_t info)
{
	struct nfsreq *req;
	struct nfsmount *nmp;
	struct mbuf *m;
	int i;

	/*
	 * Reject requests while attempting a forced unmount.
	 */
	if (info->vp->v_mount->mnt_kern_flag & MNTK_UNMOUNTF) {
		m_freem(info->mreq);
		info->mreq = NULL;
		return (EIO);
	}
	nmp = VFSTONFS(info->vp->v_mount);
	req = kmalloc(sizeof(struct nfsreq), M_NFSREQ, M_WAITOK);
	req->r_nmp = nmp;
	req->r_vp = info->vp;
	req->r_td = info->td;
	req->r_procnum = info->procnum;
	req->r_mreq = NULL;
	req->r_cred = info->cred;

	i = 0;
	m = info->mreq;
	while (m) {
		i += m->m_len;
		m = m->m_next;
	}
	req->r_mrest = info->mreq;
	req->r_mrest_len = i;

	/*
	 * The presence of a non-NULL r_info in req indicates
	 * async completion via our helper threads.  See the receiver
	 * code.
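	 *
	 * Async requests (those carrying a bio) are never waited on
	 * here; they are completed by the nfssvc iod reader thread,
	 * which picks them up from nm_reqrxq after nfs_hardterm()
	 * moves them there.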
	 */
	if (info->bio) {
		req->r_info = info;
		req->r_flags = R_ASYNC;
	} else {
		req->r_info = NULL;
		req->r_flags = 0;
	}
	info->req = req;
	return (0);
}

static int
nfs_request_auth(struct nfsreq *rep)
{
	struct nfsmount *nmp = rep->r_nmp;
	struct mbuf *m;
	char nickv[RPCX_NICKVERF];
	int error = 0, auth_len, auth_type;
	int verf_len;
	u_int32_t xid;
	char *auth_str, *verf_str;
	struct ucred *cred;

	cred = rep->r_cred;
	rep->r_failed_auth = 0;

	/*
	 * Get the RPC header with authorization.
	 */
	verf_str = auth_str = NULL;
	if (nmp->nm_flag & NFSMNT_KERB) {
		verf_str = nickv;
		verf_len = sizeof(nickv);
		auth_type = RPCAUTH_KERB4;
		bzero((caddr_t)rep->r_key, sizeof(rep->r_key));
		if (rep->r_failed_auth ||
		    nfs_getnickauth(nmp, cred, &auth_str, &auth_len,
				    verf_str, verf_len)) {
			error = nfs_getauth(nmp, rep, cred, &auth_str,
					    &auth_len, verf_str, &verf_len,
					    rep->r_key);
			if (error) {
				m_freem(rep->r_mrest);
				rep->r_mrest = NULL;
				kfree((caddr_t)rep, M_NFSREQ);
				return (error);
			}
		}
	} else {
		auth_type = RPCAUTH_UNIX;
		if (cred->cr_ngroups < 1)
			panic("nfsreq nogrps");
		auth_len = ((((cred->cr_ngroups - 1) > nmp->nm_numgrps) ?
			nmp->nm_numgrps : (cred->cr_ngroups - 1)) << 2) +
			5 * NFSX_UNSIGNED;
	}
	if (rep->r_mrest)
		nfs_checkpkt(rep->r_mrest, rep->r_mrest_len);
	m = nfsm_rpchead(cred, nmp->nm_flag, rep->r_procnum, auth_type,
			 auth_len, auth_str, verf_len, verf_str,
			 rep->r_mrest, rep->r_mrest_len, &rep->r_mheadend,
			 &xid);
	rep->r_mrest = NULL;
	if (auth_str)
		kfree(auth_str, M_TEMP);

	/*
	 * For stream protocols, insert a Sun RPC Record Mark.
	 */
	if (nmp->nm_sotype == SOCK_STREAM) {
		M_PREPEND(m, NFSX_UNSIGNED, MB_WAIT);
		if (m == NULL) {
			kfree(rep, M_NFSREQ);
			return (ENOBUFS);
		}
		*mtod(m, u_int32_t *) = htonl(0x80000000 |
			(m->m_pkthdr.len - NFSX_UNSIGNED));
	}

	nfs_checkpkt(m, m->m_pkthdr.len);

	rep->r_mreq = m;
	rep->r_xid = xid;
	return (0);
}

static int
nfs_request_try(struct nfsreq *rep)
{
	struct nfsmount *nmp = rep->r_nmp;
	struct mbuf *m2;
	int error;

	/*
	 * Request is not on any queue, only the owner has access to it
	 * so it should not be locked by anyone atm.
	 *
	 * Interlock to prevent races.  While locked the only remote
	 * action possible is for r_mrep to be set (once we enqueue it).
	 */
	if (rep->r_flags == 0xdeadc0de) {
		print_backtrace(-1);
		panic("flags nbad\n");
	}
	KKASSERT((rep->r_flags & (R_LOCKED | R_ONREQQ)) == 0);
	if (nmp->nm_flag & NFSMNT_SOFT)
		rep->r_retry = nmp->nm_retry;
	else
		rep->r_retry = NFS_MAXREXMIT + 1;	/* past clip limit */
	rep->r_rtt = rep->r_rexmit = 0;
	if (proct[rep->r_procnum] > 0)
		rep->r_flags |= R_TIMING | R_LOCKED;
	else
		rep->r_flags |= R_LOCKED;
	rep->r_mrep = NULL;

	nfsstats.rpcrequests++;

	if (nmp->nm_flag & NFSMNT_FORCE) {
		rep->r_flags |= R_SOFTTERM;
		rep->r_flags &= ~R_LOCKED;
		return (0);
	}
	rep->r_flags |= R_NEEDSXMIT;	/* in case send lock races us */

	/*
	 * Do the client side RPC.
	 *
	 * Chain request into list of outstanding requests.  Be sure
	 * to put it LAST so timer finds oldest requests first.
	 * Note that our control of R_LOCKED prevents the request from
	 * getting ripped out from under us or transmitted by the
	 * timer code.
	 *
	 * For requests with info structures we must atomically set the
	 * info's state because the structure could become invalid upon
	 * return due to races (i.e., if async)
	 */
	crit_enter();
	mtx_link_init(&rep->r_link);
	KKASSERT((rep->r_flags & R_ONREQQ) == 0);
	TAILQ_INSERT_TAIL(&nmp->nm_reqq, rep, r_chain);
	rep->r_flags |= R_ONREQQ;
	++nmp->nm_reqqlen;
	if (rep->r_flags & R_ASYNC)
		rep->r_info->state = NFSM_STATE_WAITREPLY;
	crit_exit();

	error = 0;

	/*
	 * Send if we can.  Congestion control is not handled here any more
	 * because trying to defer the initial send based on the nfs_timer
	 * requires having a very fast nfs_timer, which is silly.
	 */
	if (nmp->nm_so) {
		if (nmp->nm_soflags & PR_CONNREQUIRED)
			error = nfs_sndlock(nmp, rep);
		if (error == 0 && (rep->r_flags & R_NEEDSXMIT)) {
			m2 = m_copym(rep->r_mreq, 0, M_COPYALL, MB_WAIT);
			error = nfs_send(nmp->nm_so, nmp->nm_nam, m2, rep);
			rep->r_flags &= ~R_NEEDSXMIT;
			if ((rep->r_flags & R_SENT) == 0) {
				rep->r_flags |= R_SENT;
			}
			if (nmp->nm_soflags & PR_CONNREQUIRED)
				nfs_sndunlock(nmp);
		}
	} else {
		rep->r_rtt = -1;
	}
	if (error == EPIPE)
		error = 0;

	/*
	 * Release the lock.  The only remote action that may have occurred
	 * would have been the setting of rep->r_mrep.  If this occurred
	 * and the request was async we have to move it to the reader
	 * thread's queue for action.
	 *
	 * For async requests also make sure the reader is woken up so
	 * it gets on the socket to read responses.
	 */
	crit_enter();
	if (rep->r_flags & R_ASYNC) {
		if (rep->r_mrep)
			nfs_hardterm(rep, 1);
		rep->r_flags &= ~R_LOCKED;
		nfssvc_iod_reader_wakeup(nmp);
	} else {
		rep->r_flags &= ~R_LOCKED;
	}
	if (rep->r_flags & R_WANTED) {
		rep->r_flags &= ~R_WANTED;
		wakeup(rep);
	}
	crit_exit();
	return (error);
}

/*
 * This code is only called for synchronous requests.  Completed synchronous
 * requests are left on reqq and we remove them before moving on to the
 * processing state.
 */
static int
nfs_request_waitreply(struct nfsreq *rep)
{
	struct nfsmount *nmp = rep->r_nmp;
	int error;

	KKASSERT((rep->r_flags & R_ASYNC) == 0);

	/*
	 * Wait until the request is finished.
	 */
	error = nfs_reply(nmp, rep);

	/*
	 * RPC done, unlink the request, but don't rip it out from under
	 * the callout timer.
	 *
	 * Once unlinked no other receiver or the timer will have
	 * visibility, so we do not have to set R_LOCKED.
	 */
	crit_enter();
	while (rep->r_flags & R_LOCKED) {
		rep->r_flags |= R_WANTED;
		tsleep(rep, 0, "nfstrac", 0);
	}
	KKASSERT(rep->r_flags & R_ONREQQ);
	TAILQ_REMOVE(&nmp->nm_reqq, rep, r_chain);
	rep->r_flags &= ~R_ONREQQ;
	--nmp->nm_reqqlen;
	if (TAILQ_FIRST(&nmp->nm_bioq) &&
	    nmp->nm_reqqlen <= nfs_maxasyncbio * 2 / 3) {
		nfssvc_iod_writer_wakeup(nmp);
	}
	crit_exit();

	/*
	 * Decrement the outstanding request count.
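	 * (Historically this adjusted a per-mount send count used for
	 * congestion control; in the current scheme only the R_SENT
	 * flag needs to be cleared.)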
	 */
	if (rep->r_flags & R_SENT) {
		rep->r_flags &= ~R_SENT;
	}
	return (error);
}

/*
 * Process reply with error returned from nfs_request_waitreply().
 *
 * Returns EAGAIN if it wants us to loop up to nfs_request_try() again.
 * Returns ENEEDAUTH if it wants us to loop up to nfs_request_auth() again.
 */
static int
nfs_request_processreply(nfsm_info_t info, int error)
{
	struct nfsreq *req = info->req;
	struct nfsmount *nmp = req->r_nmp;
	u_int32_t *tl;
	int verf_type;
	int i;

	/*
	 * If there was a successful reply and a tprintf msg.
	 * tprintf a response.
	 */
	if (error == 0 && (req->r_flags & R_TPRINTFMSG)) {
		nfs_msg(req->r_td, nmp->nm_mountp->mnt_stat.f_mntfromname,
			"is alive again");
	}
	info->mrep = req->r_mrep;
	info->md = req->r_md;
	info->dpos = req->r_dpos;
	if (error) {
		m_freem(req->r_mreq);
		req->r_mreq = NULL;
		kfree(req, M_NFSREQ);
		info->req = NULL;
		return (error);
	}

	/*
	 * break down the rpc header and check if ok
	 */
	NULLOUT(tl = nfsm_dissect(info, 3 * NFSX_UNSIGNED));
	if (*tl++ == rpc_msgdenied) {
		if (*tl == rpc_mismatch) {
			error = EOPNOTSUPP;
		} else if ((nmp->nm_flag & NFSMNT_KERB) &&
			   *tl++ == rpc_autherr) {
			if (req->r_failed_auth == 0) {
				req->r_failed_auth++;
				req->r_mheadend->m_next = NULL;
				m_freem(info->mrep);
				info->mrep = NULL;
				m_freem(req->r_mreq);
				req->r_mreq = NULL;
				return (ENEEDAUTH);
			} else {
				error = EAUTH;
			}
		} else {
			error = EACCES;
		}
		m_freem(info->mrep);
		info->mrep = NULL;
		m_freem(req->r_mreq);
		req->r_mreq = NULL;
		kfree(req, M_NFSREQ);
		info->req = NULL;
		return (error);
	}

	/*
	 * Grab any Kerberos verifier, otherwise just throw it away.
	 */
	verf_type = fxdr_unsigned(int, *tl++);
	i = fxdr_unsigned(int32_t, *tl);
	if ((nmp->nm_flag & NFSMNT_KERB) && verf_type == RPCAUTH_KERB4) {
		error = nfs_savenickauth(nmp, req->r_cred, i, req->r_key,
					 &info->md, &info->dpos, info->mrep);
		if (error)
			goto nfsmout;
	} else if (i > 0) {
		ERROROUT(nfsm_adv(info, nfsm_rndup(i)));
	}
	NULLOUT(tl = nfsm_dissect(info, NFSX_UNSIGNED));
	/* 0 == ok */
	if (*tl == 0) {
		NULLOUT(tl = nfsm_dissect(info, NFSX_UNSIGNED));
		if (*tl != 0) {
			error = fxdr_unsigned(int, *tl);

			/*
			 * Does anyone even implement this?  Just impose
			 * a 1-second delay.
			 */
			if ((nmp->nm_flag & NFSMNT_NFSV3) &&
			    error == NFSERR_TRYLATER) {
				m_freem(info->mrep);
				info->mrep = NULL;
				error = 0;

				tsleep((caddr_t)&lbolt, 0, "nqnfstry", 0);
				return (EAGAIN);	/* goto tryagain */
			}

			/*
			 * If the File Handle was stale, invalidate the
			 * lookup cache, just in case.
			 *
			 * To avoid namecache<->vnode deadlocks we must
			 * release the vnode lock if we hold it.
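			 *
			 * cache_inval_vp(vp, CINV_CHILDREN) discards
			 * the namecache entries under this vnode so
			 * that later lookups consult the server again
			 * rather than trusting names resolved through
			 * the now-stale handle.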
			 */
			if (error == ESTALE) {
				struct vnode *vp = req->r_vp;
				int ltype;

				ltype = lockstatus(&vp->v_lock, curthread);
				if (ltype == LK_EXCLUSIVE || ltype == LK_SHARED)
					lockmgr(&vp->v_lock, LK_RELEASE);
				cache_inval_vp(vp, CINV_CHILDREN);
				if (ltype == LK_EXCLUSIVE || ltype == LK_SHARED)
					lockmgr(&vp->v_lock, ltype);
			}
			if (nmp->nm_flag & NFSMNT_NFSV3) {
				KKASSERT(*req->r_mrp == info->mrep);
				KKASSERT(*req->r_mdp == info->md);
				KKASSERT(*req->r_dposp == info->dpos);
				error |= NFSERR_RETERR;
			} else {
				m_freem(info->mrep);
				info->mrep = NULL;
			}
			m_freem(req->r_mreq);
			req->r_mreq = NULL;
			kfree(req, M_NFSREQ);
			info->req = NULL;
			return (error);
		}

		KKASSERT(*req->r_mrp == info->mrep);
		KKASSERT(*req->r_mdp == info->md);
		KKASSERT(*req->r_dposp == info->dpos);
		m_freem(req->r_mreq);
		req->r_mreq = NULL;
		kfree(req, M_NFSREQ);
		return (0);
	}
	m_freem(info->mrep);
	info->mrep = NULL;
	error = EPROTONOSUPPORT;
nfsmout:
	m_freem(req->r_mreq);
	req->r_mreq = NULL;
	kfree(req, M_NFSREQ);
	info->req = NULL;
	return (error);
}

#ifndef NFS_NOSERVER
/*
 * Generate the rpc reply header
 * siz arg. is used to decide if adding a cluster is worthwhile
 */
int
nfs_rephead(int siz, struct nfsrv_descript *nd, struct nfssvc_sock *slp,
	    int err, struct mbuf **mrq, struct mbuf **mbp, caddr_t *bposp)
{
	u_int32_t *tl;
	struct nfsm_info info;

	siz += RPC_REPLYSIZ;
	info.mb = m_getl(max_hdr + siz, MB_WAIT, MT_DATA, M_PKTHDR, NULL);
	info.mreq = info.mb;
	info.mreq->m_pkthdr.len = 0;
	/*
	 * If this is not a cluster, try and leave leading space
	 * for the lower level headers.
	 */
	if ((max_hdr + siz) < MINCLSIZE)
		info.mreq->m_data += max_hdr;
	tl = mtod(info.mreq, u_int32_t *);
	info.mreq->m_len = 6 * NFSX_UNSIGNED;
	info.bpos = ((caddr_t)tl) + info.mreq->m_len;
	*tl++ = txdr_unsigned(nd->nd_retxid);
	*tl++ = rpc_reply;
	if (err == ERPCMISMATCH || (err & NFSERR_AUTHERR)) {
		*tl++ = rpc_msgdenied;
		if (err & NFSERR_AUTHERR) {
			*tl++ = rpc_autherr;
			*tl = txdr_unsigned(err & ~NFSERR_AUTHERR);
			info.mreq->m_len -= NFSX_UNSIGNED;
			info.bpos -= NFSX_UNSIGNED;
		} else {
			*tl++ = rpc_mismatch;
			*tl++ = txdr_unsigned(RPC_VER2);
			*tl = txdr_unsigned(RPC_VER2);
		}
	} else {
		*tl++ = rpc_msgaccepted;

		/*
		 * For Kerberos authentication, we must send the nickname
		 * verifier back, otherwise just RPCAUTH_NULL.
		 */
		if (nd->nd_flag & ND_KERBFULL) {
			struct nfsuid *nuidp;
			struct timeval ktvin, ktvout;

			for (nuidp = NUIDHASH(slp, nd->nd_cr.cr_uid)->lh_first;
			     nuidp != 0; nuidp = nuidp->nu_hash.le_next) {
				if (nuidp->nu_cr.cr_uid == nd->nd_cr.cr_uid &&
				    (!nd->nd_nam2 ||
				     netaddr_match(NU_NETFAM(nuidp),
						   &nuidp->nu_haddr,
						   nd->nd_nam2)))
					break;
			}
			if (nuidp) {
				ktvin.tv_sec =
				    txdr_unsigned(nuidp->nu_timestamp.tv_sec
						  - 1);
				ktvin.tv_usec =
				    txdr_unsigned(nuidp->nu_timestamp.tv_usec);

				/*
				 * Encrypt the timestamp in ecb mode using the
				 * session key.
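				 * (In this tree NFSKERB is a stub:
				 * with it undefined the #else branch
				 * below simply zeroes the verifier
				 * timestamp.)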
				 */
#ifdef NFSKERB
				XXX
#else
				ktvout.tv_sec = 0;
				ktvout.tv_usec = 0;
#endif

				*tl++ = rpc_auth_kerb;
				*tl++ = txdr_unsigned(3 * NFSX_UNSIGNED);
				*tl = ktvout.tv_sec;
				tl = nfsm_build(&info, 3 * NFSX_UNSIGNED);
				*tl++ = ktvout.tv_usec;
				*tl++ = txdr_unsigned(nuidp->nu_cr.cr_uid);
			} else {
				*tl++ = 0;
				*tl++ = 0;
			}
		} else {
			*tl++ = 0;
			*tl++ = 0;
		}
		switch (err) {
		case EPROGUNAVAIL:
			*tl = txdr_unsigned(RPC_PROGUNAVAIL);
			break;
		case EPROGMISMATCH:
			*tl = txdr_unsigned(RPC_PROGMISMATCH);
			tl = nfsm_build(&info, 2 * NFSX_UNSIGNED);
			*tl++ = txdr_unsigned(2);
			*tl = txdr_unsigned(3);
			break;
		case EPROCUNAVAIL:
			*tl = txdr_unsigned(RPC_PROCUNAVAIL);
			break;
		case EBADRPC:
			*tl = txdr_unsigned(RPC_GARBAGE);
			break;
		default:
			*tl = 0;
			if (err != NFSERR_RETVOID) {
				tl = nfsm_build(&info, NFSX_UNSIGNED);
				if (err)
					*tl = txdr_unsigned(nfsrv_errmap(nd, err));
				else
					*tl = 0;
			}
			break;
		}
	}

	if (mrq != NULL)
		*mrq = info.mreq;
	*mbp = info.mb;
	*bposp = info.bpos;
	if (err != 0 && err != NFSERR_RETVOID)
		nfsstats.srvrpc_errs++;
	return (0);
}

#endif /* NFS_NOSERVER */

/*
 * Nfs timer routine.
 *
 * Scan the nfsreq list and retransmit any requests that have timed out.
 * To avoid retransmission attempts on STREAM sockets (in the future) make
 * sure to set the r_retry field to 0 (implies nm_retry == 0).
 *
 * Requests with attached responses, terminated requests, and
 * locked requests are ignored.  Locked requests will be picked up
 * in a later timer call.
 */
void
nfs_timer_callout(void *arg /* never used */)
{
	struct nfsmount *nmp;
	struct nfsreq *req;
#ifndef NFS_NOSERVER
	struct nfssvc_sock *slp;
	u_quad_t cur_usec;
#endif /* NFS_NOSERVER */

	lwkt_gettoken(&nfs_token);
	TAILQ_FOREACH(nmp, &nfs_mountq, nm_entry) {
		lwkt_gettoken(&nmp->nm_token);
		TAILQ_FOREACH(req, &nmp->nm_reqq, r_chain) {
			KKASSERT(nmp == req->r_nmp);
			if (req->r_mrep)
				continue;
			if (req->r_flags & (R_SOFTTERM | R_LOCKED))
				continue;

			/*
			 * Handle timeout/retry.  Be sure to process r_mrep
			 * for async requests that completed while we had
			 * the request locked or they will hang in the reqq
			 * forever.
			 */
			req->r_flags |= R_LOCKED;
			if (nfs_sigintr(nmp, req, req->r_td)) {
				nfs_softterm(req, 1);
				req->r_flags &= ~R_LOCKED;
			} else {
				nfs_timer_req(req);
				if (req->r_flags & R_ASYNC) {
					if (req->r_mrep)
						nfs_hardterm(req, 1);
					req->r_flags &= ~R_LOCKED;
					nfssvc_iod_reader_wakeup(nmp);
				} else {
					req->r_flags &= ~R_LOCKED;
				}
			}
			if (req->r_flags & R_WANTED) {
				req->r_flags &= ~R_WANTED;
				wakeup(req);
			}
		}
		lwkt_reltoken(&nmp->nm_token);
	}
#ifndef NFS_NOSERVER

	/*
	 * Scan the write gathering queues for writes that need to be
	 * completed now.
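	 *
	 * Each nfssvc_sock keeps its pending writes ordered by nd_time
	 * (a deadline in microseconds); when the queue head's deadline
	 * is at or before nfs_curusec(), an nfsd thread is woken to
	 * flush the gathered writes.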
	 */
	cur_usec = nfs_curusec();

	TAILQ_FOREACH(slp, &nfssvc_sockhead, ns_chain) {
		/* XXX race against removal */
		if (lwkt_trytoken(&slp->ns_token)) {
			if (slp->ns_tq.lh_first &&
			    (slp->ns_tq.lh_first->nd_time <= cur_usec)) {
				nfsrv_wakenfsd(slp, 1);
			}
			lwkt_reltoken(&slp->ns_token);
		}
	}
#endif /* NFS_NOSERVER */

	callout_reset(&nfs_timer_handle, nfs_ticks, nfs_timer_callout, NULL);
	lwkt_reltoken(&nfs_token);
}

static
void
nfs_timer_req(struct nfsreq *req)
{
	struct thread *td = &thread0; /* XXX for creds, will break if sleep */
	struct nfsmount *nmp = req->r_nmp;
	struct mbuf *m;
	struct socket *so;
	int timeo;
	int error;

	/*
	 * rtt ticks and timeout calculation.  Return if the timeout
	 * has not been reached yet, unless the packet is flagged
	 * for an immediate send.
	 *
	 * The mean rtt doesn't help when we get random I/Os, we have
	 * to multiply by fairly large numbers.
	 */
	if (req->r_rtt >= 0) {
		/*
		 * Calculate the timeout to test against.
		 */
		req->r_rtt++;
		if (nmp->nm_flag & NFSMNT_DUMBTIMR) {
			timeo = nmp->nm_timeo << NFS_RTT_SCALE_BITS;
		} else if (req->r_flags & R_TIMING) {
			timeo = NFS_SRTT(req) + NFS_SDRTT(req);
		} else {
			timeo = nmp->nm_timeo << NFS_RTT_SCALE_BITS;
		}
		timeo *= multt[req->r_procnum];
		/* timeo is still scaled by SCALE_BITS */

#define NFSFS	(NFS_RTT_SCALE * NFS_HZ)
		if (req->r_flags & R_TIMING) {
			static long last_time;
			if (nfs_showrtt && last_time != time_second) {
				kprintf("rpccmd %d NFS SRTT %d SDRTT %d "
					"timeo %d.%03d\n",
					proct[req->r_procnum],
					NFS_SRTT(req), NFS_SDRTT(req),
					timeo / NFSFS,
					timeo % NFSFS * 1000 / NFSFS);
				last_time = time_second;
			}
		}
#undef NFSFS

		/*
		 * deal with nfs_timer jitter.
		 */
		timeo = (timeo >> NFS_RTT_SCALE_BITS) + 1;
		if (timeo < 2)
			timeo = 2;

		/*
		 * Successive timeouts back the next timeout off along
		 * nfs_backoff[] (2, 3, 5, 8, 13, ... 55), clamped at
		 * NFS_MAXTIMEO.
		 */
		if (nmp->nm_timeouts > 0)
			timeo *= nfs_backoff[nmp->nm_timeouts - 1];
		if (timeo > NFS_MAXTIMEO)
			timeo = NFS_MAXTIMEO;
		if (req->r_rtt <= timeo) {
			if ((req->r_flags & R_NEEDSXMIT) == 0)
				return;
		} else if (nmp->nm_timeouts < 8) {
			nmp->nm_timeouts++;
		}
	}

	/*
	 * Check for server not responding
	 */
	if ((req->r_flags & R_TPRINTFMSG) == 0 &&
	    req->r_rexmit > nmp->nm_deadthresh) {
		nfs_msg(req->r_td, nmp->nm_mountp->mnt_stat.f_mntfromname,
			"not responding");
		req->r_flags |= R_TPRINTFMSG;
	}
	if (req->r_rexmit >= req->r_retry) {	/* too many */
		nfsstats.rpctimeouts++;
		nfs_softterm(req, 1);
		return;
	}

	/*
	 * Generally disable retransmission on reliable sockets,
	 * unless the request is flagged for immediate send.
	 */
	if (nmp->nm_sotype != SOCK_DGRAM) {
		if (++req->r_rexmit > NFS_MAXREXMIT)
			req->r_rexmit = NFS_MAXREXMIT;
		if ((req->r_flags & R_NEEDSXMIT) == 0)
			return;
	}

	/*
	 * Stop here if we do not have a socket!
	 */
	if ((so = nmp->nm_so) == NULL)
		return;

	/*
	 * If there is enough space and the window allows.. resend it.
	 *
	 * r_rtt is left intact in case we get an answer after the
	 * retry that was a reply to the original packet.
	 *
	 * NOTE: so_pru_send()
	 */
	if (ssb_space(&so->so_snd) >= req->r_mreq->m_pkthdr.len &&
	    (req->r_flags & (R_SENT | R_NEEDSXMIT)) &&
	    (m = m_copym(req->r_mreq, 0, M_COPYALL, MB_DONTWAIT))) {
		if ((nmp->nm_flag & NFSMNT_NOCONN) == 0)
			error = so_pru_send(so, 0, m, NULL, NULL, td);
		else
			error = so_pru_send(so, 0, m, nmp->nm_nam, NULL, td);
		if (error) {
			if (NFSIGNORE_SOERROR(nmp->nm_soflags, error))
				so->so_error = 0;
			req->r_flags |= R_NEEDSXMIT;
		} else if (req->r_mrep == NULL) {
			/*
			 * Iff first send, start timing
			 * else turn timing off, backoff timer
			 * and divide congestion window by 2.
			 *
			 * It is possible for the so_pru_send() to
			 * block and for us to race a reply so we
			 * only do this if the reply field has not
			 * been filled in.  R_LOCKED will prevent
			 * the request from being ripped out from under
			 * us entirely.
			 *
			 * Record the last resent procnum to aid us
			 * in duplicate detection on receive.
			 */
			if ((req->r_flags & R_NEEDSXMIT) == 0) {
				if (nfs_showrexmit)
					kprintf("X");
				if (++req->r_rexmit > NFS_MAXREXMIT)
					req->r_rexmit = NFS_MAXREXMIT;
				nmp->nm_maxasync_scaled >>= 1;
				if (nmp->nm_maxasync_scaled < NFS_MINASYNC_SCALED)
					nmp->nm_maxasync_scaled = NFS_MINASYNC_SCALED;
				nfsstats.rpcretries++;
				nmp->nm_lastreprocnum = req->r_procnum;
			} else {
				req->r_flags |= R_SENT;
				req->r_flags &= ~R_NEEDSXMIT;
			}
		}
	}
}

/*
 * Mark all of an nfs mount's outstanding requests with R_SOFTTERM and
 * wait for all requests to complete.  This is used by forced unmounts
 * to terminate any outstanding RPCs.
 *
 * Locked requests cannot be canceled but will be marked for
 * soft-termination.
 */
int
nfs_nmcancelreqs(struct nfsmount *nmp)
{
	struct nfsreq *req;
	int i;

	crit_enter();
	TAILQ_FOREACH(req, &nmp->nm_reqq, r_chain) {
		if (req->r_mrep != NULL || (req->r_flags & R_SOFTTERM))
			continue;
		nfs_softterm(req, 0);
	}
	/* XXX the other two queues as well */
	crit_exit();

	for (i = 0; i < 30; i++) {
		crit_enter();
		TAILQ_FOREACH(req, &nmp->nm_reqq, r_chain) {
			if (nmp == req->r_nmp)
				break;
		}
		crit_exit();
		if (req == NULL)
			return (0);
		tsleep(&lbolt, 0, "nfscancel", 0);
	}
	return (EBUSY);
}

/*
 * Soft-terminate a request, effectively marking it as failed.
 *
 * Must be called from within a critical section.
 */
static void
nfs_softterm(struct nfsreq *rep, int islocked)
{
	rep->r_flags |= R_SOFTTERM;
	nfs_hardterm(rep, islocked);
}

/*
 * Hard-terminate a request, typically after getting a response.
 *
 * The state machine can still decide to re-issue it later if necessary.
 *
 * Must be called from within a critical section.
 */
static void
nfs_hardterm(struct nfsreq *rep, int islocked)
{
	struct nfsmount *nmp = rep->r_nmp;

	/*
	 * The nm_send count is decremented now to avoid deadlocks
	 * when the process in soreceive() hasn't yet managed to send
	 * its own request.
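	 *
	 * Aborting rep->r_link below also kicks any thread sleeping in
	 * nfs_rcvlock() on this request; it wakes, observes r_mrep,
	 * and returns EALREADY instead of waiting for a reply that has
	 * already arrived.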
	 */
	if (rep->r_flags & R_SENT) {
		rep->r_flags &= ~R_SENT;
	}

	/*
	 * If we locked the request or nobody else has locked the request,
	 * and the request is async, we can move it to the reader thread's
	 * queue now and fix up the state.
	 *
	 * If we locked the request or nobody else has locked the request,
	 * we can wake up anyone blocked waiting for a response on the
	 * request.
	 */
	if (islocked || (rep->r_flags & R_LOCKED) == 0) {
		if ((rep->r_flags & (R_ONREQQ | R_ASYNC)) ==
		    (R_ONREQQ | R_ASYNC)) {
			rep->r_flags &= ~R_ONREQQ;
			TAILQ_REMOVE(&nmp->nm_reqq, rep, r_chain);
			--nmp->nm_reqqlen;
			TAILQ_INSERT_TAIL(&nmp->nm_reqrxq, rep, r_chain);
			KKASSERT(rep->r_info->state == NFSM_STATE_TRY ||
				 rep->r_info->state == NFSM_STATE_WAITREPLY);
			rep->r_info->state = NFSM_STATE_PROCESSREPLY;
			nfssvc_iod_reader_wakeup(nmp);
			if (TAILQ_FIRST(&nmp->nm_bioq) &&
			    nmp->nm_reqqlen <= nfs_maxasyncbio * 2 / 3) {
				nfssvc_iod_writer_wakeup(nmp);
			}
		}
		mtx_abort_ex_link(&nmp->nm_rxlock, &rep->r_link);
	}
}

/*
 * Test for a termination condition pending on the process.
 * This is used for NFSMNT_INT mounts.
 */
int
nfs_sigintr(struct nfsmount *nmp, struct nfsreq *rep, struct thread *td)
{
	sigset_t tmpset;
	struct proc *p;
	struct lwp *lp;

	if (rep && (rep->r_flags & R_SOFTTERM))
		return (EINTR);
	/* Terminate all requests while attempting a forced unmount. */
	if (nmp->nm_mountp->mnt_kern_flag & MNTK_UNMOUNTF)
		return (EINTR);
	if (!(nmp->nm_flag & NFSMNT_INT))
		return (0);
	/* td might be NULL YYY */
	if (td == NULL || (p = td->td_proc) == NULL)
		return (0);

	lp = td->td_lwp;
	tmpset = lwp_sigpend(lp);
	SIGSETNAND(tmpset, lp->lwp_sigmask);
	SIGSETNAND(tmpset, p->p_sigignore);
	if (SIGNOTEMPTY(tmpset) && NFSINT_SIGMASK(tmpset))
		return (EINTR);

	return (0);
}

/*
 * Lock a socket against others.
 * Necessary for STREAM sockets to ensure you get an entire rpc request/reply
 * and also to avoid race conditions between the processes with nfs requests
 * in progress when a reconnect is necessary.
 */
int
nfs_sndlock(struct nfsmount *nmp, struct nfsreq *rep)
{
	mtx_t mtx = &nmp->nm_txlock;
	struct thread *td;
	int slptimeo;
	int slpflag;
	int error;

	slpflag = 0;
	slptimeo = 0;
	td = rep ? rep->r_td : NULL;
	if (nmp->nm_flag & NFSMNT_INT)
		slpflag = PCATCH;

	while ((error = mtx_lock_ex_try(mtx)) != 0) {
		if (nfs_sigintr(nmp, rep, td)) {
			error = EINTR;
			break;
		}
		error = mtx_lock_ex(mtx, "nfsndlck", slpflag, slptimeo);
		if (error == 0)
			break;
		if (slpflag == PCATCH) {
			slpflag = 0;
			slptimeo = 2 * hz;
		}
	}
	/* Always fail if our request has been cancelled. */
	if (rep && (rep->r_flags & R_SOFTTERM)) {
		if (error == 0)
			mtx_unlock(mtx);
		error = EINTR;
	}
	return (error);
}

/*
 * Unlock the stream socket for others.
 */
void
nfs_sndunlock(struct nfsmount *nmp)
{
	mtx_unlock(&nmp->nm_txlock);
}

/*
 * Lock the receiver side of the socket.
 *
 * rep may be NULL.
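 *
 * Returns EALREADY, without acquiring the lock, when the reply for
 * rep has already been filled in; the caller can then return success
 * immediately.  nfs_reply() relies on this convention.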
2118  */
2119 static int
2120 nfs_rcvlock(struct nfsmount *nmp, struct nfsreq *rep)
2121 {
2122 	mtx_t mtx = &nmp->nm_rxlock;
2123 	int slpflag;
2124 	int slptimeo;
2125 	int error;
2126 
2127 	/*
2128 	 * Unconditionally check for completion in case another nfsiod
2129 	 * got the packet while the caller was blocked, before the caller
2130 	 * called us.  Packet reception is handled by mainline code which
2131 	 * is protected by the BGL at the moment.
2132 	 *
2133 	 * We do not strictly need the second check just before the
2134 	 * tsleep(), but it's good defensive programming.
2135 	 */
2136 	if (rep && rep->r_mrep != NULL)
2137 		return (EALREADY);
2138 
2139 	if (nmp->nm_flag & NFSMNT_INT)
2140 		slpflag = PCATCH;
2141 	else
2142 		slpflag = 0;
2143 	slptimeo = 0;
2144 
2145 	while ((error = mtx_lock_ex_try(mtx)) != 0) {
2146 		if (nfs_sigintr(nmp, rep, (rep ? rep->r_td : NULL))) {
2147 			error = EINTR;
2148 			break;
2149 		}
2150 		if (rep && rep->r_mrep != NULL) {
2151 			error = EALREADY;
2152 			break;
2153 		}
2154 
2155 		/*
2156 		 * NOTE: can return ENOLCK, but in that case rep->r_mrep
2157 		 *	 will already be set.
2158 		 */
2159 		if (rep) {
2160 			error = mtx_lock_ex_link(mtx, &rep->r_link,
2161 						 "nfsrcvlk",
2162 						 slpflag, slptimeo);
2163 		} else {
2164 			error = mtx_lock_ex(mtx, "nfsrcvlk", slpflag, slptimeo);
2165 		}
2166 		if (error == 0)
2167 			break;
2168 
2169 		/*
2170 		 * If our reply was received while we were sleeping,
2171 		 * then just return without taking the lock to avoid a
2172 		 * situation where a single iod could 'capture' the
2173 		 * receive lock.
2174 		 */
2175 		if (rep && rep->r_mrep != NULL) {
2176 			error = EALREADY;
2177 			break;
2178 		}
2179 		if (slpflag == PCATCH) {
2180 			slpflag = 0;
2181 			slptimeo = 2 * hz;
2182 		}
2183 	}
2184 	if (error == 0) {
2185 		if (rep && rep->r_mrep != NULL) {
2186 			error = EALREADY;
2187 			mtx_unlock(mtx);
2188 		}
2189 	}
2190 	return (error);
2191 }
2192 
2193 /*
2194  * Unlock the receiver side of the socket for others.
2195  */
2196 static void
2197 nfs_rcvunlock(struct nfsmount *nmp)
2198 {
2199 	mtx_unlock(&nmp->nm_rxlock);
2200 }
2201 
2202 /*
2203  * nfs_realign:
2204  *
2205  * Check for badly aligned mbuf data and realign by copying the unaligned
2206  * portion of the data into a new mbuf chain and freeing the portions
2207  * of the old chain that were replaced.
2208  *
2209  * We cannot simply realign the data within the existing mbuf chain
2210  * because the underlying buffers may contain other rpc commands and
2211  * we cannot afford to overwrite them.
2212  *
2213  * We would prefer to avoid this situation entirely.  The situation does
2214  * not occur with NFS/UDP and is supposed to occur only occasionally
2215  * with TCP.  Use vfs.nfs.realign_count and realign_test to check this.
2216  *
2217  * NOTE!  MB_DONTWAIT cannot be used here.  The mbufs must be acquired
2218  *	  because the rpc request OR reply cannot be thrown away.  TCP NFS
2219  *	  mounts do not retry their RPCs unless the TCP connection itself
2220  *	  is dropped so throwing away an RPC will basically cause the NFS
2221  *	  operation to lock up indefinitely.
2222  */
2223 static void
2224 nfs_realign(struct mbuf **pm, int hsiz)
2225 {
2226 	struct mbuf *m;
2227 	struct mbuf *n = NULL;
2228 
2229 	/*
2230 	 * Check for misalignment
2231 	 */
2232 	++nfs_realign_test;
2233 	while ((m = *pm) != NULL) {
2234 		if ((m->m_len & 0x3) || (mtod(m, intptr_t) & 0x3))
2235 			break;
2236 		pm = &m->m_next;
2237 	}
2238 
2239 	/*
2240 	 * If misalignment is found, make a completely new copy.
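 *
 * The alignment test in the loop above is cheap; data is considered
 * aligned only when both the length and the data pointer of every mbuf
 * are 32-bit multiples.  Restated as a sketch (not additional code):
 *
 *	aligned = ((m->m_len & 0x3) == 0 &&
 *		   (mtod(m, intptr_t) & 0x3) == 0);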
2241 */ 2242 if (m) { 2243 ++nfs_realign_count; 2244 n = m_dup_data(m, MB_WAIT); 2245 m_freem(*pm); 2246 *pm = n; 2247 } 2248 } 2249 2250 #ifndef NFS_NOSERVER 2251 2252 /* 2253 * Parse an RPC request 2254 * - verify it 2255 * - fill in the cred struct. 2256 */ 2257 int 2258 nfs_getreq(struct nfsrv_descript *nd, struct nfsd *nfsd, int has_header) 2259 { 2260 int len, i; 2261 u_int32_t *tl; 2262 struct uio uio; 2263 struct iovec iov; 2264 caddr_t cp; 2265 u_int32_t nfsvers, auth_type; 2266 uid_t nickuid; 2267 int error = 0, ticklen; 2268 struct nfsuid *nuidp; 2269 struct timeval tvin, tvout; 2270 struct nfsm_info info; 2271 #if 0 /* until encrypted keys are implemented */ 2272 NFSKERBKEYSCHED_T keys; /* stores key schedule */ 2273 #endif 2274 2275 info.mrep = nd->nd_mrep; 2276 info.md = nd->nd_md; 2277 info.dpos = nd->nd_dpos; 2278 2279 if (has_header) { 2280 NULLOUT(tl = nfsm_dissect(&info, 10 * NFSX_UNSIGNED)); 2281 nd->nd_retxid = fxdr_unsigned(u_int32_t, *tl++); 2282 if (*tl++ != rpc_call) { 2283 m_freem(info.mrep); 2284 return (EBADRPC); 2285 } 2286 } else { 2287 NULLOUT(tl = nfsm_dissect(&info, 8 * NFSX_UNSIGNED)); 2288 } 2289 nd->nd_repstat = 0; 2290 nd->nd_flag = 0; 2291 if (*tl++ != rpc_vers) { 2292 nd->nd_repstat = ERPCMISMATCH; 2293 nd->nd_procnum = NFSPROC_NOOP; 2294 return (0); 2295 } 2296 if (*tl != nfs_prog) { 2297 nd->nd_repstat = EPROGUNAVAIL; 2298 nd->nd_procnum = NFSPROC_NOOP; 2299 return (0); 2300 } 2301 tl++; 2302 nfsvers = fxdr_unsigned(u_int32_t, *tl++); 2303 if (nfsvers < NFS_VER2 || nfsvers > NFS_VER3) { 2304 nd->nd_repstat = EPROGMISMATCH; 2305 nd->nd_procnum = NFSPROC_NOOP; 2306 return (0); 2307 } 2308 if (nfsvers == NFS_VER3) 2309 nd->nd_flag = ND_NFSV3; 2310 nd->nd_procnum = fxdr_unsigned(u_int32_t, *tl++); 2311 if (nd->nd_procnum == NFSPROC_NULL) 2312 return (0); 2313 if (nd->nd_procnum >= NFS_NPROCS || 2314 (nd->nd_procnum >= NQNFSPROC_GETLEASE) || 2315 (!nd->nd_flag && nd->nd_procnum > NFSV2PROC_STATFS)) { 2316 nd->nd_repstat = EPROCUNAVAIL; 2317 nd->nd_procnum = NFSPROC_NOOP; 2318 return (0); 2319 } 2320 if ((nd->nd_flag & ND_NFSV3) == 0) 2321 nd->nd_procnum = nfsv3_procid[nd->nd_procnum]; 2322 auth_type = *tl++; 2323 len = fxdr_unsigned(int, *tl++); 2324 if (len < 0 || len > RPCAUTH_MAXSIZ) { 2325 m_freem(info.mrep); 2326 return (EBADRPC); 2327 } 2328 2329 nd->nd_flag &= ~ND_KERBAUTH; 2330 /* 2331 * Handle auth_unix or auth_kerb. 2332 */ 2333 if (auth_type == rpc_auth_unix) { 2334 len = fxdr_unsigned(int, *++tl); 2335 if (len < 0 || len > NFS_MAXNAMLEN) { 2336 m_freem(info.mrep); 2337 return (EBADRPC); 2338 } 2339 ERROROUT(nfsm_adv(&info, nfsm_rndup(len))); 2340 NULLOUT(tl = nfsm_dissect(&info, 3 * NFSX_UNSIGNED)); 2341 bzero((caddr_t)&nd->nd_cr, sizeof (struct ucred)); 2342 nd->nd_cr.cr_ref = 1; 2343 nd->nd_cr.cr_uid = fxdr_unsigned(uid_t, *tl++); 2344 nd->nd_cr.cr_ruid = nd->nd_cr.cr_svuid = nd->nd_cr.cr_uid; 2345 nd->nd_cr.cr_gid = fxdr_unsigned(gid_t, *tl++); 2346 nd->nd_cr.cr_rgid = nd->nd_cr.cr_svgid = nd->nd_cr.cr_gid; 2347 len = fxdr_unsigned(int, *tl); 2348 if (len < 0 || len > RPCAUTH_UNIXGIDS) { 2349 m_freem(info.mrep); 2350 return (EBADRPC); 2351 } 2352 NULLOUT(tl = nfsm_dissect(&info, (len + 2) * NFSX_UNSIGNED)); 2353 for (i = 1; i <= len; i++) 2354 if (i < NGROUPS) 2355 nd->nd_cr.cr_groups[i] = fxdr_unsigned(gid_t, *tl++); 2356 else 2357 tl++; 2358 nd->nd_cr.cr_ngroups = (len >= NGROUPS) ? 
NGROUPS : (len + 1); 2359 if (nd->nd_cr.cr_ngroups > 1) 2360 nfsrvw_sort(nd->nd_cr.cr_groups, nd->nd_cr.cr_ngroups); 2361 len = fxdr_unsigned(int, *++tl); 2362 if (len < 0 || len > RPCAUTH_MAXSIZ) { 2363 m_freem(info.mrep); 2364 return (EBADRPC); 2365 } 2366 if (len > 0) { 2367 ERROROUT(nfsm_adv(&info, nfsm_rndup(len))); 2368 } 2369 } else if (auth_type == rpc_auth_kerb) { 2370 switch (fxdr_unsigned(int, *tl++)) { 2371 case RPCAKN_FULLNAME: 2372 ticklen = fxdr_unsigned(int, *tl); 2373 *((u_int32_t *)nfsd->nfsd_authstr) = *tl; 2374 uio.uio_resid = nfsm_rndup(ticklen) + NFSX_UNSIGNED; 2375 nfsd->nfsd_authlen = uio.uio_resid + NFSX_UNSIGNED; 2376 if (uio.uio_resid > (len - 2 * NFSX_UNSIGNED)) { 2377 m_freem(info.mrep); 2378 return (EBADRPC); 2379 } 2380 uio.uio_offset = 0; 2381 uio.uio_iov = &iov; 2382 uio.uio_iovcnt = 1; 2383 uio.uio_segflg = UIO_SYSSPACE; 2384 iov.iov_base = (caddr_t)&nfsd->nfsd_authstr[4]; 2385 iov.iov_len = RPCAUTH_MAXSIZ - 4; 2386 ERROROUT(nfsm_mtouio(&info, &uio, uio.uio_resid)); 2387 NULLOUT(tl = nfsm_dissect(&info, 2 * NFSX_UNSIGNED)); 2388 if (*tl++ != rpc_auth_kerb || 2389 fxdr_unsigned(int, *tl) != 4 * NFSX_UNSIGNED) { 2390 kprintf("Bad kerb verifier\n"); 2391 nd->nd_repstat = (NFSERR_AUTHERR|AUTH_BADVERF); 2392 nd->nd_procnum = NFSPROC_NOOP; 2393 return (0); 2394 } 2395 NULLOUT(cp = nfsm_dissect(&info, 4 * NFSX_UNSIGNED)); 2396 tl = (u_int32_t *)cp; 2397 if (fxdr_unsigned(int, *tl) != RPCAKN_FULLNAME) { 2398 kprintf("Not fullname kerb verifier\n"); 2399 nd->nd_repstat = (NFSERR_AUTHERR|AUTH_BADVERF); 2400 nd->nd_procnum = NFSPROC_NOOP; 2401 return (0); 2402 } 2403 cp += NFSX_UNSIGNED; 2404 bcopy(cp, nfsd->nfsd_verfstr, 3 * NFSX_UNSIGNED); 2405 nfsd->nfsd_verflen = 3 * NFSX_UNSIGNED; 2406 nd->nd_flag |= ND_KERBFULL; 2407 nfsd->nfsd_flag |= NFSD_NEEDAUTH; 2408 break; 2409 case RPCAKN_NICKNAME: 2410 if (len != 2 * NFSX_UNSIGNED) { 2411 kprintf("Kerb nickname short\n"); 2412 nd->nd_repstat = (NFSERR_AUTHERR|AUTH_BADCRED); 2413 nd->nd_procnum = NFSPROC_NOOP; 2414 return (0); 2415 } 2416 nickuid = fxdr_unsigned(uid_t, *tl); 2417 NULLOUT(tl = nfsm_dissect(&info, 2 * NFSX_UNSIGNED)); 2418 if (*tl++ != rpc_auth_kerb || 2419 fxdr_unsigned(int, *tl) != 3 * NFSX_UNSIGNED) { 2420 kprintf("Kerb nick verifier bad\n"); 2421 nd->nd_repstat = (NFSERR_AUTHERR|AUTH_BADVERF); 2422 nd->nd_procnum = NFSPROC_NOOP; 2423 return (0); 2424 } 2425 NULLOUT(tl = nfsm_dissect(&info, 3 * NFSX_UNSIGNED)); 2426 tvin.tv_sec = *tl++; 2427 tvin.tv_usec = *tl; 2428 2429 for (nuidp = NUIDHASH(nfsd->nfsd_slp,nickuid)->lh_first; 2430 nuidp != 0; nuidp = nuidp->nu_hash.le_next) { 2431 if (nuidp->nu_cr.cr_uid == nickuid && 2432 (!nd->nd_nam2 || 2433 netaddr_match(NU_NETFAM(nuidp), 2434 &nuidp->nu_haddr, nd->nd_nam2))) 2435 break; 2436 } 2437 if (!nuidp) { 2438 nd->nd_repstat = 2439 (NFSERR_AUTHERR|AUTH_REJECTCRED); 2440 nd->nd_procnum = NFSPROC_NOOP; 2441 return (0); 2442 } 2443 2444 /* 2445 * Now, decrypt the timestamp using the session key 2446 * and validate it. 
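 *
 * The verifier is accepted only while the session is unexpired and
 * the decrypted timestamp does not move backwards.  As a sketch of
 * the check performed below (timevalcmp() shown purely for
 * illustration):
 *
 *	ok = (nuidp->nu_expire >= time_second &&
 *	      timevalcmp(&tvout, &nuidp->nu_timestamp, >=));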
2447 	 */
2448 #ifdef NFSKERB
2449 			XXX
2450 #else
2451 			tvout.tv_sec = 0;
2452 			tvout.tv_usec = 0;
2453 #endif
2454 
2455 			tvout.tv_sec = fxdr_unsigned(long, tvout.tv_sec);
2456 			tvout.tv_usec = fxdr_unsigned(long, tvout.tv_usec);
2457 			if (nuidp->nu_expire < time_second ||
2458 			    nuidp->nu_timestamp.tv_sec > tvout.tv_sec ||
2459 			    (nuidp->nu_timestamp.tv_sec == tvout.tv_sec &&
2460 			     nuidp->nu_timestamp.tv_usec > tvout.tv_usec)) {
2461 				nuidp->nu_expire = 0;
2462 				nd->nd_repstat =
2463 				    (NFSERR_AUTHERR|AUTH_REJECTVERF);
2464 				nd->nd_procnum = NFSPROC_NOOP;
2465 				return (0);
2466 			}
2467 			nfsrv_setcred(&nuidp->nu_cr, &nd->nd_cr);
2468 			nd->nd_flag |= ND_KERBNICK;
2469 		}
2470 	} else {
2471 		nd->nd_repstat = (NFSERR_AUTHERR | AUTH_REJECTCRED);
2472 		nd->nd_procnum = NFSPROC_NOOP;
2473 		return (0);
2474 	}
2475 
2476 	nd->nd_md = info.md;
2477 	nd->nd_dpos = info.dpos;
2478 	return (0);
2479 nfsmout:
2480 	return (error);
2481 }
2482 
2483 #endif
2484 
2485 /*
2486  * Send a message to the originating process's terminal.  The thread and/or
2487  * process may be NULL.  YYY the thread should not be NULL but there may
2488  * still be some uio_td's that are being passed as NULL through to
2489  * nfsm_request().
2490  */
2491 static int
2492 nfs_msg(struct thread *td, char *server, char *msg)
2493 {
2494 	tpr_t tpr;
2495 
2496 	if (td && td->td_proc)
2497 		tpr = tprintf_open(td->td_proc);
2498 	else
2499 		tpr = NULL;
2500 	tprintf(tpr, "nfs server %s: %s\n", server, msg);
2501 	tprintf_close(tpr);
2502 	return (0);
2503 }
2504 
2505 #ifndef NFS_NOSERVER
2506 
2507 /*
2508  * Socket upcall routine for nfsd sockets.  This runs in the protocol
2509  * thread and passes waitflag == MB_DONTWAIT.
2510  */
2511 void
2512 nfsrv_rcv_upcall(struct socket *so, void *arg, int waitflag)
2513 {
2514 	struct nfssvc_sock *slp = (struct nfssvc_sock *)arg;
2515 
2516 	if (slp->ns_needq_upcall == 0) {
2517 		slp->ns_needq_upcall = 1;	/* ok to race */
2518 		lwkt_gettoken(&nfs_token);
2519 		nfsrv_wakenfsd(slp, 1);
2520 		lwkt_reltoken(&nfs_token);
2521 	}
2522 #if 0
2523 	lwkt_gettoken(&slp->ns_token);
2524 	slp->ns_flag |= SLP_NEEDQ;
2525 	nfsrv_rcv(so, arg, waitflag);
2526 	lwkt_reltoken(&slp->ns_token);
2527 #endif
2528 }
2529 
2530 /*
2531  * Process new data on a receive socket.  Essentially do as much as we can
2532  * non-blocking; otherwise punt and it will be called with MB_WAIT from an nfsd.
2533  *
2534  * slp->ns_token is held on call
2535  */
2536 void
2537 nfsrv_rcv(struct socket *so, void *arg, int waitflag)
2538 {
2539 	struct nfssvc_sock *slp = (struct nfssvc_sock *)arg;
2540 	struct mbuf *m;
2541 	struct sockaddr *nam;
2542 	struct sockbuf sio;
2543 	int flags, error;
2544 	int nparallel_wakeup = 0;
2545 
2546 	ASSERT_LWKT_TOKEN_HELD(&slp->ns_token);
2547 
2548 	if ((slp->ns_flag & SLP_VALID) == 0)
2549 		return;
2550 
2551 	/*
2552 	 * Do not allow an infinite number of completed RPC records to build
2553 	 * up before we stop reading data from the socket.  Otherwise we could
2554 	 * end up holding onto an unreasonable number of mbufs for requests
2555 	 * waiting for service.
2556 	 *
2557 	 * This should give pretty good feedback to the TCP layer and
2558 	 * prevent a memory crunch for other protocols.
2559 	 *
2560 	 * Note that the same service socket can be dispatched to several
2561 	 * nfs servers simultaneously.  The tcp protocol callback calls us
2562 	 * with MB_DONTWAIT.  nfsd calls us with MB_WAIT (typically).
2563 	 */
2564 	if (NFSRV_RECLIMIT(slp))
2565 		return;
2566 
2567 	/*
2568 	 * Handle protocol specifics to parse an RPC request.  We always
2569 	 * pull from the socket using non-blocking I/O.
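 *
 * Both branches below pull with the same shape; as a sketch (the
 * sockbuf limit shown is the one this file actually uses):
 *
 *	sbinit(&sio, 1000000000);
 *	flags = MSG_DONTWAIT;
 *	error = so_pru_soreceive(so, &nam, NULL, &sio, NULL, &flags);
 *
 * An EWOULDBLOCK return with no mbufs means the socket ran dry, not
 * that the connection failed.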
2570 */ 2571 if (so->so_type == SOCK_STREAM) { 2572 /* 2573 * The data has to be read in an orderly fashion from a TCP 2574 * stream, unlike a UDP socket. It is possible for soreceive 2575 * and/or nfsrv_getstream() to block, so make sure only one 2576 * entity is messing around with the TCP stream at any given 2577 * moment. The receive sockbuf's lock in soreceive is not 2578 * sufficient. 2579 */ 2580 if (slp->ns_flag & SLP_GETSTREAM) 2581 return; 2582 slp->ns_flag |= SLP_GETSTREAM; 2583 2584 /* 2585 * Do soreceive(). Pull out as much data as possible without 2586 * blocking. 2587 */ 2588 sbinit(&sio, 1000000000); 2589 flags = MSG_DONTWAIT; 2590 error = so_pru_soreceive(so, &nam, NULL, &sio, NULL, &flags); 2591 if (error || sio.sb_mb == NULL) { 2592 if (error != EWOULDBLOCK) 2593 slp->ns_flag |= SLP_DISCONN; 2594 slp->ns_flag &= ~(SLP_GETSTREAM | SLP_NEEDQ); 2595 goto done; 2596 } 2597 m = sio.sb_mb; 2598 if (slp->ns_rawend) { 2599 slp->ns_rawend->m_next = m; 2600 slp->ns_cc += sio.sb_cc; 2601 } else { 2602 slp->ns_raw = m; 2603 slp->ns_cc = sio.sb_cc; 2604 } 2605 while (m->m_next) 2606 m = m->m_next; 2607 slp->ns_rawend = m; 2608 2609 /* 2610 * Now try and parse as many record(s) as we can out of the 2611 * raw stream data. This will set SLP_DOREC. 2612 */ 2613 error = nfsrv_getstream(slp, waitflag, &nparallel_wakeup); 2614 if (error && error != EWOULDBLOCK) 2615 slp->ns_flag |= SLP_DISCONN; 2616 slp->ns_flag &= ~SLP_GETSTREAM; 2617 } else { 2618 /* 2619 * For UDP soreceive typically pulls just one packet, loop 2620 * to get the whole batch. 2621 */ 2622 do { 2623 sbinit(&sio, 1000000000); 2624 flags = MSG_DONTWAIT; 2625 error = so_pru_soreceive(so, &nam, NULL, &sio, 2626 NULL, &flags); 2627 if (sio.sb_mb) { 2628 struct nfsrv_rec *rec; 2629 int mf = (waitflag & MB_DONTWAIT) ? 2630 M_NOWAIT : M_WAITOK; 2631 rec = kmalloc(sizeof(struct nfsrv_rec), 2632 M_NFSRVDESC, mf); 2633 if (!rec) { 2634 if (nam) 2635 kfree(nam, M_SONAME); 2636 m_freem(sio.sb_mb); 2637 continue; 2638 } 2639 nfs_realign(&sio.sb_mb, 10 * NFSX_UNSIGNED); 2640 rec->nr_address = nam; 2641 rec->nr_packet = sio.sb_mb; 2642 STAILQ_INSERT_TAIL(&slp->ns_rec, rec, nr_link); 2643 ++slp->ns_numrec; 2644 slp->ns_flag |= SLP_DOREC; 2645 ++nparallel_wakeup; 2646 } else { 2647 slp->ns_flag &= ~SLP_NEEDQ; 2648 } 2649 if (error) { 2650 if ((so->so_proto->pr_flags & PR_CONNREQUIRED) 2651 && error != EWOULDBLOCK) { 2652 slp->ns_flag |= SLP_DISCONN; 2653 break; 2654 } 2655 } 2656 if (NFSRV_RECLIMIT(slp)) 2657 break; 2658 } while (sio.sb_mb); 2659 } 2660 2661 /* 2662 * If we were upcalled from the tcp protocol layer and we have 2663 * fully parsed records ready to go, or there is new data pending, 2664 * or something went wrong, try to wake up a nfsd thread to deal 2665 * with it. 2666 */ 2667 done: 2668 /* XXX this code is currently not executed (nfsrv_rcv_upcall) */ 2669 if (waitflag == MB_DONTWAIT && (slp->ns_flag & SLP_ACTION_MASK)) { 2670 lwkt_gettoken(&nfs_token); 2671 nfsrv_wakenfsd(slp, nparallel_wakeup); 2672 lwkt_reltoken(&nfs_token); 2673 } 2674 } 2675 2676 /* 2677 * Try and extract an RPC request from the mbuf data list received on a 2678 * stream socket. The "waitflag" argument indicates whether or not it 2679 * can sleep. 
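 *
 * Stream records are delimited by the standard Sun RPC record mark:
 * a 32-bit big-endian word whose high bit flags the final fragment
 * and whose low 31 bits give the fragment length.  Decoding it looks
 * like this (a restatement of the code below, not additional logic):
 *
 *	recmark = ntohl(recmark);
 *	reclen  = recmark & ~0x80000000;	(fragment length)
 *	last    = recmark &  0x80000000;	(final fragment?)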
2680 */ 2681 static int 2682 nfsrv_getstream(struct nfssvc_sock *slp, int waitflag, int *countp) 2683 { 2684 struct mbuf *m, **mpp; 2685 char *cp1, *cp2; 2686 int len; 2687 struct mbuf *om, *m2, *recm; 2688 u_int32_t recmark; 2689 2690 for (;;) { 2691 if (slp->ns_reclen == 0) { 2692 if (slp->ns_cc < NFSX_UNSIGNED) 2693 return (0); 2694 m = slp->ns_raw; 2695 if (m->m_len >= NFSX_UNSIGNED) { 2696 bcopy(mtod(m, caddr_t), (caddr_t)&recmark, NFSX_UNSIGNED); 2697 m->m_data += NFSX_UNSIGNED; 2698 m->m_len -= NFSX_UNSIGNED; 2699 } else { 2700 cp1 = (caddr_t)&recmark; 2701 cp2 = mtod(m, caddr_t); 2702 while (cp1 < ((caddr_t)&recmark) + NFSX_UNSIGNED) { 2703 while (m->m_len == 0) { 2704 m = m->m_next; 2705 cp2 = mtod(m, caddr_t); 2706 } 2707 *cp1++ = *cp2++; 2708 m->m_data++; 2709 m->m_len--; 2710 } 2711 } 2712 slp->ns_cc -= NFSX_UNSIGNED; 2713 recmark = ntohl(recmark); 2714 slp->ns_reclen = recmark & ~0x80000000; 2715 if (recmark & 0x80000000) 2716 slp->ns_flag |= SLP_LASTFRAG; 2717 else 2718 slp->ns_flag &= ~SLP_LASTFRAG; 2719 if (slp->ns_reclen > NFS_MAXPACKET || slp->ns_reclen <= 0) { 2720 log(LOG_ERR, "%s (%d) from nfs client\n", 2721 "impossible packet length", 2722 slp->ns_reclen); 2723 return (EPERM); 2724 } 2725 } 2726 2727 /* 2728 * Now get the record part. 2729 * 2730 * Note that slp->ns_reclen may be 0. Linux sometimes 2731 * generates 0-length RPCs 2732 */ 2733 recm = NULL; 2734 if (slp->ns_cc == slp->ns_reclen) { 2735 recm = slp->ns_raw; 2736 slp->ns_raw = slp->ns_rawend = NULL; 2737 slp->ns_cc = slp->ns_reclen = 0; 2738 } else if (slp->ns_cc > slp->ns_reclen) { 2739 len = 0; 2740 m = slp->ns_raw; 2741 om = NULL; 2742 2743 while (len < slp->ns_reclen) { 2744 if ((len + m->m_len) > slp->ns_reclen) { 2745 m2 = m_copym(m, 0, slp->ns_reclen - len, 2746 waitflag); 2747 if (m2) { 2748 if (om) { 2749 om->m_next = m2; 2750 recm = slp->ns_raw; 2751 } else 2752 recm = m2; 2753 m->m_data += slp->ns_reclen - len; 2754 m->m_len -= slp->ns_reclen - len; 2755 len = slp->ns_reclen; 2756 } else { 2757 return (EWOULDBLOCK); 2758 } 2759 } else if ((len + m->m_len) == slp->ns_reclen) { 2760 om = m; 2761 len += m->m_len; 2762 m = m->m_next; 2763 recm = slp->ns_raw; 2764 om->m_next = NULL; 2765 } else { 2766 om = m; 2767 len += m->m_len; 2768 m = m->m_next; 2769 } 2770 } 2771 slp->ns_raw = m; 2772 slp->ns_cc -= len; 2773 slp->ns_reclen = 0; 2774 } else { 2775 return (0); 2776 } 2777 2778 /* 2779 * Accumulate the fragments into a record. 2780 */ 2781 mpp = &slp->ns_frag; 2782 while (*mpp) 2783 mpp = &((*mpp)->m_next); 2784 *mpp = recm; 2785 if (slp->ns_flag & SLP_LASTFRAG) { 2786 struct nfsrv_rec *rec; 2787 int mf = (waitflag & MB_DONTWAIT) ? M_NOWAIT : M_WAITOK; 2788 rec = kmalloc(sizeof(struct nfsrv_rec), M_NFSRVDESC, mf); 2789 if (!rec) { 2790 m_freem(slp->ns_frag); 2791 } else { 2792 nfs_realign(&slp->ns_frag, 10 * NFSX_UNSIGNED); 2793 rec->nr_address = NULL; 2794 rec->nr_packet = slp->ns_frag; 2795 STAILQ_INSERT_TAIL(&slp->ns_rec, rec, nr_link); 2796 ++slp->ns_numrec; 2797 slp->ns_flag |= SLP_DOREC; 2798 ++*countp; 2799 } 2800 slp->ns_frag = NULL; 2801 } 2802 } 2803 } 2804 2805 #ifdef INVARIANTS 2806 2807 /* 2808 * Sanity check our mbuf chain. 
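 *
 * Compiled in only under INVARIANTS.  A typical (hypothetical) call
 * site verifies that a chain about to be transmitted still agrees
 * with its packet header:
 *
 *	nfs_checkpkt(m, m->m_pkthdr.len);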
2809  */
2810 static void
2811 nfs_checkpkt(struct mbuf *m, int len)
2812 {
2813 	int xlen = 0;
2814 	while (m) {
2815 		xlen += m->m_len;
2816 		m = m->m_next;
2817 	}
2818 	if (xlen != len) {
2819 		/* m is always NULL here, so it carries no information */
2820 		panic("nfs_checkpkt: len mismatch %d/%d", xlen, len);
2821 	}
2822 }
2823 
2824 #else
2825 
2826 static void
2827 nfs_checkpkt(struct mbuf *m __unused, int len __unused)
2828 {
2829 }
2830 
2831 #endif
2832 
2833 /*
2834  * Parse an RPC header.
2835  *
2836  * If the socket is invalid or no records are pending we return ENOBUFS.
2837  * The caller must deal with NEEDQ races.
2838  */
2839 int
2840 nfsrv_dorec(struct nfssvc_sock *slp, struct nfsd *nfsd,
2841 	    struct nfsrv_descript **ndp)
2842 {
2843 	struct nfsrv_rec *rec;
2844 	struct mbuf *m;
2845 	struct sockaddr *nam;
2846 	struct nfsrv_descript *nd;
2847 	int error;
2848 
2849 	*ndp = NULL;
2850 	if ((slp->ns_flag & SLP_VALID) == 0 || !STAILQ_FIRST(&slp->ns_rec))
2851 		return (ENOBUFS);
2852 	rec = STAILQ_FIRST(&slp->ns_rec);
2853 	STAILQ_REMOVE_HEAD(&slp->ns_rec, nr_link);
2854 	KKASSERT(slp->ns_numrec > 0);
2855 	if (--slp->ns_numrec == 0)
2856 		slp->ns_flag &= ~SLP_DOREC;
2857 	nam = rec->nr_address;
2858 	m = rec->nr_packet;
2859 	kfree(rec, M_NFSRVDESC);
2860 	nd = kmalloc(sizeof(struct nfsrv_descript), M_NFSRVDESC, M_WAITOK);
2861 	nd->nd_md = nd->nd_mrep = m;
2862 	nd->nd_nam2 = nam;
2863 	nd->nd_dpos = mtod(m, caddr_t);
2864 	error = nfs_getreq(nd, nfsd, TRUE);
2865 	if (error) {
2866 		if (nam) {
2867 			kfree(nam, M_SONAME);
2868 		}
2869 		kfree((caddr_t)nd, M_NFSRVDESC);
2870 		return (error);
2871 	}
2872 	*ndp = nd;
2873 	nfsd->nfsd_nd = nd;
2874 	return (0);
2875 }
2876 
2877 /*
2878  * Try to assign service sockets to nfsd threads based on the number
2879  * of new rpc requests that have been queued on the service socket.
2880  *
2881  * If no nfsd's are available or additional requests are pending, set the
2882  * NFSD_CHECKSLP flag so that one of the running nfsds will go look for
2883  * the work in the nfssvc_sock list when it is finished processing its
2884  * current work.  This flag is only cleared when an nfsd cannot find
2885  * any new work to perform.
2886  */
2887 void
2888 nfsrv_wakenfsd(struct nfssvc_sock *slp, int nparallel)
2889 {
2890 	struct nfsd *nd;
2891 
2892 	if ((slp->ns_flag & SLP_VALID) == 0)
2893 		return;
2894 	if (nparallel <= 1)
2895 		nparallel = 1;
2896 	TAILQ_FOREACH(nd, &nfsd_head, nfsd_chain) {
2897 		if (nd->nfsd_flag & NFSD_WAITING) {
2898 			nd->nfsd_flag &= ~NFSD_WAITING;
2899 			if (nd->nfsd_slp)
2900 				panic("nfsd wakeup");
2901 			nfsrv_slpref(slp);
2902 			nd->nfsd_slp = slp;
2903 			wakeup((caddr_t)nd);
2904 			if (--nparallel == 0)
2905 				break;
2906 		}
2907 	}
2908 
2909 	/*
2910 	 * If we couldn't assign slp then the NFSDs are all busy and
2911 	 * we set a flag indicating that there is pending work.
2912 	 */
2913 	if (nparallel)
2914 		nfsd_head_flag |= NFSD_CHECKSLP;
2915 }
2916 #endif /* NFS_NOSERVER */
2917 
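#if 0
/*
 * Illustrative userland sketch (not part of the kernel build): frame an
 * already-built RPC message with the record mark that nfsrv_getstream()
 * expects on a SOCK_STREAM transport.  The helper name and buffer
 * handling here are hypothetical.
 */
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

static size_t
rpc_frame_record(uint8_t *dst, const uint8_t *msg, uint32_t len)
{
	/* high bit set: this is the last (and only) fragment */
	uint32_t mark = htonl(0x80000000 | len);

	memcpy(dst, &mark, sizeof(mark));
	memcpy(dst + sizeof(mark), msg, len);
	return (sizeof(mark) + len);
}
#endif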