/*
 * Copyright (c) 1989, 1991, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Rick Macklem at The University of Guelph.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)nfs_socket.c	8.5 (Berkeley) 3/30/95
 * $FreeBSD: src/sys/nfs/nfs_socket.c,v 1.60.2.6 2003/03/26 01:44:46 alfred Exp $
 * $DragonFly: src/sys/vfs/nfs/nfs_socket.c,v 1.29 2005/06/09 18:39:05 hsu Exp $
 */

/*
 * Socket operations for use by nfs
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/vnode.h>
#include <sys/protosw.h>
#include <sys/resourcevar.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/socketops.h>
#include <sys/syslog.h>
#include <sys/thread.h>
#include <sys/tprintf.h>
#include <sys/sysctl.h>
#include <sys/signalvar.h>

#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/thread2.h>

#include "rpcv2.h"
#include "nfsproto.h"
#include "nfs.h"
#include "xdr_subs.h"
#include "nfsm_subs.h"
#include "nfsmount.h"
#include "nfsnode.h"
#include "nfsrtt.h"
#include "nqnfs.h"

#define	TRUE	1
#define	FALSE	0

/*
 * Estimate rto for an nfs rpc sent via an unreliable datagram.
 * Use the mean and mean deviation of rtt for the appropriate type of rpc
 * for the frequent rpcs and a default for the others.
 * The justification for doing "other" this way is that these rpcs
 * happen so infrequently that the timer estimate would probably be stale.
 * Also, since many of these rpcs are
 * non-idempotent, a conservative timeout is desired.
 * getattr, lookup - A+2D
 * read, write - A+4D
 * other - nm_timeo
 */
#define	NFS_RTO(n, t) \
	((t) == 0 ? (n)->nm_timeo : \
	 ((t) < 3 ? \
	  (((((n)->nm_srtt[t-1] + 3) >> 2) + (n)->nm_sdrtt[t-1] + 1) >> 1) : \
	  ((((n)->nm_srtt[t-1] + 7) >> 3) + (n)->nm_sdrtt[t-1] + 1)))
#define	NFS_SRTT(r)	(r)->r_nmp->nm_srtt[proct[(r)->r_procnum] - 1]
#define	NFS_SDRTT(r)	(r)->r_nmp->nm_sdrtt[proct[(r)->r_procnum] - 1]
/*
 * External data, mostly RPC constants in XDR form
 */
extern u_int32_t rpc_reply, rpc_msgdenied, rpc_mismatch, rpc_vers,
	rpc_auth_unix, rpc_msgaccepted, rpc_call, rpc_autherr,
	rpc_auth_kerb;
extern u_int32_t nfs_prog, nqnfs_prog;
extern time_t nqnfsstarttime;
extern struct nfsstats nfsstats;
extern int nfsv3_procid[NFS_NPROCS];
extern int nfs_ticks;

/*
 * Defines which timer to use for the procnum.
 * 0 - default
 * 1 - getattr
 * 2 - lookup
 * 3 - read
 * 4 - write
 */
static int proct[NFS_NPROCS] = {
	0, 1, 0, 2, 1, 3, 3, 4, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 0, 0, 0, 0, 0,
	0, 0, 0,
};

static int nfs_realign_test;
static int nfs_realign_count;
static int nfs_bufpackets = 4;

SYSCTL_DECL(_vfs_nfs);

SYSCTL_INT(_vfs_nfs, OID_AUTO, realign_test, CTLFLAG_RW, &nfs_realign_test, 0, "");
SYSCTL_INT(_vfs_nfs, OID_AUTO, realign_count, CTLFLAG_RW, &nfs_realign_count, 0, "");
SYSCTL_INT(_vfs_nfs, OID_AUTO, bufpackets, CTLFLAG_RW, &nfs_bufpackets, 0, "");


/*
 * There is a congestion window for outstanding rpcs maintained per mount
 * point. The cwnd size is adjusted in roughly the way that:
 * Van Jacobson, Congestion avoidance and Control, In "Proceedings of
 * SIGCOMM '88". ACM, August 1988.
 * describes for TCP. The cwnd size is chopped in half on a retransmit timeout
 * and incremented by 1/cwnd when each rpc reply is received and a full cwnd
 * of rpcs is in progress.
 * (The sent count and cwnd are scaled for integer arith.)
 * Variants of "slow start" were tried and were found to be too much of a
 * performance hit (ave. rtt 3 times larger),
 * I suspect due to the large rtt that nfs rpcs have.
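 *
 * A rough worked example of the scaled arithmetic, assuming the constants
 * defined just below: with NFS_CWNDSCALE 256 each outstanding rpc accounts
 * for 256 units of nm_sent, and the additive increase done in nfs_reply()
 * is (NFS_CWNDSCALE * NFS_CWNDSCALE + cwnd/2) / cwnd, i.e. roughly one
 * rpc's worth of window growth per full window of replies.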
 */
#define	NFS_CWNDSCALE	256
#define	NFS_MAXCWND	(NFS_CWNDSCALE * 32)
static int nfs_backoff[8] = { 2, 4, 8, 16, 32, 64, 128, 256, };
int nfsrtton = 0;
struct nfsrtt nfsrtt;
struct callout	nfs_timer_handle;

static int	nfs_msg (struct thread *,char *,char *);
static int	nfs_rcvlock (struct nfsreq *);
static void	nfs_rcvunlock (struct nfsreq *);
static void	nfs_realign (struct mbuf **pm, int hsiz);
static int	nfs_receive (struct nfsreq *rep, struct sockaddr **aname,
				 struct mbuf **mp);
static void	nfs_softterm (struct nfsreq *rep);
static int	nfs_reconnect (struct nfsreq *rep);
#ifndef NFS_NOSERVER
static int	nfsrv_getstream (struct nfssvc_sock *, int, int *);

int (*nfsrv3_procs[NFS_NPROCS]) (struct nfsrv_descript *nd,
				    struct nfssvc_sock *slp,
				    struct thread *td,
				    struct mbuf **mreqp) = {
	nfsrv_null,
	nfsrv_getattr,
	nfsrv_setattr,
	nfsrv_lookup,
	nfsrv3_access,
	nfsrv_readlink,
	nfsrv_read,
	nfsrv_write,
	nfsrv_create,
	nfsrv_mkdir,
	nfsrv_symlink,
	nfsrv_mknod,
	nfsrv_remove,
	nfsrv_rmdir,
	nfsrv_rename,
	nfsrv_link,
	nfsrv_readdir,
	nfsrv_readdirplus,
	nfsrv_statfs,
	nfsrv_fsinfo,
	nfsrv_pathconf,
	nfsrv_commit,
	nqnfsrv_getlease,
	nqnfsrv_vacated,
	nfsrv_noop,
	nfsrv_noop
};
#endif /* NFS_NOSERVER */

/*
 * Initialize sockets and congestion for a new NFS connection.
 * We do not free the sockaddr if error.
 */
int
nfs_connect(struct nfsmount *nmp, struct nfsreq *rep)
{
	struct socket *so;
	int error, rcvreserve, sndreserve;
	int pktscale;
	struct sockaddr *saddr;
	struct sockaddr_in *sin;
	struct thread *td = &thread0; /* only used for socreate and sobind */

	nmp->nm_so = (struct socket *)0;
	saddr = nmp->nm_nam;
	error = socreate(saddr->sa_family, &nmp->nm_so, nmp->nm_sotype,
		nmp->nm_soproto, td);
	if (error)
		goto bad;
	so = nmp->nm_so;
	nmp->nm_soflags = so->so_proto->pr_flags;

	/*
	 * Some servers require that the client port be a reserved port number.
	 */
	if (saddr->sa_family == AF_INET && (nmp->nm_flag & NFSMNT_RESVPORT)) {
		struct sockopt sopt;
		int ip;
		struct sockaddr_in ssin;

		bzero(&sopt, sizeof sopt);
		ip = IP_PORTRANGE_LOW;
		sopt.sopt_level = IPPROTO_IP;
		sopt.sopt_name = IP_PORTRANGE;
		sopt.sopt_val = (void *)&ip;
		sopt.sopt_valsize = sizeof(ip);
		sopt.sopt_td = NULL;
		error = sosetopt(so, &sopt);
		if (error)
			goto bad;
		bzero(&ssin, sizeof ssin);
		sin = &ssin;
		sin->sin_len = sizeof (struct sockaddr_in);
		sin->sin_family = AF_INET;
		sin->sin_addr.s_addr = INADDR_ANY;
		sin->sin_port = htons(0);
		error = sobind(so, (struct sockaddr *)sin, td);
		if (error)
			goto bad;
		bzero(&sopt, sizeof sopt);
		ip = IP_PORTRANGE_DEFAULT;
		sopt.sopt_level = IPPROTO_IP;
		sopt.sopt_name = IP_PORTRANGE;
		sopt.sopt_val = (void *)&ip;
		sopt.sopt_valsize = sizeof(ip);
		sopt.sopt_td = NULL;
		error = sosetopt(so, &sopt);
		if (error)
			goto bad;
	}

	/*
	 * Protocols that do not require connections may be optionally left
	 * unconnected for servers that reply from a port other than NFS_PORT.
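	 * (With NFSMNT_NOCONN the UDP socket is left unconnected, so replies
	 * are matched to requests purely by xid in nfs_reply() and the peer
	 * address, when needed, is returned by nfs_receive().)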
	 */
	if (nmp->nm_flag & NFSMNT_NOCONN) {
		if (nmp->nm_soflags & PR_CONNREQUIRED) {
			error = ENOTCONN;
			goto bad;
		}
	} else {
		error = soconnect(so, nmp->nm_nam, td);
		if (error)
			goto bad;

		/*
		 * Wait for the connection to complete. Cribbed from the
		 * connect system call but with the wait timing out so
		 * that interruptible mounts don't hang here for a long time.
		 */
		crit_enter();
		while ((so->so_state & SS_ISCONNECTING) && so->so_error == 0) {
			(void) tsleep((caddr_t)&so->so_timeo, 0,
				"nfscon", 2 * hz);
			if ((so->so_state & SS_ISCONNECTING) &&
			    so->so_error == 0 && rep &&
			    (error = nfs_sigintr(nmp, rep, rep->r_td)) != 0){
				so->so_state &= ~SS_ISCONNECTING;
				crit_exit();
				goto bad;
			}
		}
		if (so->so_error) {
			error = so->so_error;
			so->so_error = 0;
			crit_exit();
			goto bad;
		}
		crit_exit();
	}
	so->so_rcv.sb_timeo = (5 * hz);
	so->so_snd.sb_timeo = (5 * hz);

	/*
	 * Get buffer reservation size from sysctl, but impose reasonable
	 * limits.
	 */
	pktscale = nfs_bufpackets;
	if (pktscale < 2)
		pktscale = 2;
	if (pktscale > 64)
		pktscale = 64;

	if (nmp->nm_sotype == SOCK_DGRAM) {
		sndreserve = (nmp->nm_wsize + NFS_MAXPKTHDR) * pktscale;
		rcvreserve = (max(nmp->nm_rsize, nmp->nm_readdirsize) +
		    NFS_MAXPKTHDR) * pktscale;
	} else if (nmp->nm_sotype == SOCK_SEQPACKET) {
		sndreserve = (nmp->nm_wsize + NFS_MAXPKTHDR) * pktscale;
		rcvreserve = (max(nmp->nm_rsize, nmp->nm_readdirsize) +
		    NFS_MAXPKTHDR) * pktscale;
	} else {
		if (nmp->nm_sotype != SOCK_STREAM)
			panic("nfscon sotype");
		if (so->so_proto->pr_flags & PR_CONNREQUIRED) {
			struct sockopt sopt;
			int val;

			bzero(&sopt, sizeof sopt);
			sopt.sopt_level = SOL_SOCKET;
			sopt.sopt_name = SO_KEEPALIVE;
			sopt.sopt_val = &val;
			sopt.sopt_valsize = sizeof val;
			val = 1;
			sosetopt(so, &sopt);
		}
		if (so->so_proto->pr_protocol == IPPROTO_TCP) {
			struct sockopt sopt;
			int val;

			bzero(&sopt, sizeof sopt);
			sopt.sopt_level = IPPROTO_TCP;
			sopt.sopt_name = TCP_NODELAY;
			sopt.sopt_val = &val;
			sopt.sopt_valsize = sizeof val;
			val = 1;
			sosetopt(so, &sopt);
		}
		sndreserve = (nmp->nm_wsize + NFS_MAXPKTHDR +
		    sizeof (u_int32_t)) * pktscale;
		rcvreserve = (nmp->nm_rsize + NFS_MAXPKTHDR +
		    sizeof (u_int32_t)) * pktscale;
	}
	error = soreserve(so, sndreserve, rcvreserve,
			  &td->td_proc->p_rlimit[RLIMIT_SBSIZE]);
	if (error)
		goto bad;
	so->so_rcv.sb_flags |= SB_NOINTR;
	so->so_snd.sb_flags |= SB_NOINTR;

	/* Initialize other non-zero congestion variables */
	nmp->nm_srtt[0] = nmp->nm_srtt[1] = nmp->nm_srtt[2] =
		nmp->nm_srtt[3] = (NFS_TIMEO << 3);
	nmp->nm_sdrtt[0] = nmp->nm_sdrtt[1] = nmp->nm_sdrtt[2] =
		nmp->nm_sdrtt[3] = 0;
	nmp->nm_cwnd = NFS_MAXCWND / 2;	    /* Initial send window */
	nmp->nm_sent = 0;
	nmp->nm_timeouts = 0;
	return (0);

bad:
	nfs_disconnect(nmp);
	return (error);
}

/*
 * Reconnect routine:
 * Called when a connection is broken on a reliable protocol.
 * - clean up the old socket
 * - nfs_connect() again
 * - set R_MUSTRESEND for all outstanding requests on mount point
 * If this fails the mount point is DEAD!
 * nb: Must be called with the nfs_sndlock() set on the mount point.
 */
static int
nfs_reconnect(struct nfsreq *rep)
{
	struct nfsreq *rp;
	struct nfsmount *nmp = rep->r_nmp;
	int error;

	nfs_disconnect(nmp);
	while ((error = nfs_connect(nmp, rep)) != 0) {
		if (error == EINTR || error == ERESTART)
			return (EINTR);
		(void) tsleep((caddr_t)&lbolt, 0, "nfscon", 0);
	}

	/*
	 * Loop through outstanding request list and fix up all requests
	 * on old socket.
	 */
	TAILQ_FOREACH(rp, &nfs_reqq, r_chain) {
		if (rp->r_nmp == nmp)
			rp->r_flags |= R_MUSTRESEND;
	}
	return (0);
}

/*
 * NFS disconnect. Clean up and unlink.
 */
void
nfs_disconnect(struct nfsmount *nmp)
{
	struct socket *so;

	if (nmp->nm_so) {
		so = nmp->nm_so;
		nmp->nm_so = (struct socket *)0;
		soshutdown(so, 2);
		soclose(so);
	}
}

void
nfs_safedisconnect(struct nfsmount *nmp)
{
	struct nfsreq dummyreq;

	bzero(&dummyreq, sizeof(dummyreq));
	dummyreq.r_nmp = nmp;
	dummyreq.r_td = NULL;
	nfs_rcvlock(&dummyreq);
	nfs_disconnect(nmp);
	nfs_rcvunlock(&dummyreq);
}

/*
 * This is the nfs send routine. For connection based socket types, it
 * must be called with an nfs_sndlock() on the socket.
 * "rep == NULL" indicates that it has been called from a server.
 * For the client side:
 * - return EINTR if the RPC is terminated, 0 otherwise
 * - set R_MUSTRESEND if the send fails for any reason
 * - do any cleanup required by recoverable socket errors (?)
 * For the server side:
 * - return EINTR or ERESTART if interrupted by a signal
 * - return EPIPE if a connection is lost for connection based sockets (TCP...)
 * - do any cleanup required by recoverable socket errors (?)
 */
int
nfs_send(struct socket *so, struct sockaddr *nam, struct mbuf *top,
	 struct nfsreq *rep)
{
	struct sockaddr *sendnam;
	int error, soflags, flags;

	if (rep) {
		if (rep->r_flags & R_SOFTTERM) {
			m_freem(top);
			return (EINTR);
		}
		if ((so = rep->r_nmp->nm_so) == NULL) {
			rep->r_flags |= R_MUSTRESEND;
			m_freem(top);
			return (0);
		}
		rep->r_flags &= ~R_MUSTRESEND;
		soflags = rep->r_nmp->nm_soflags;
	} else
		soflags = so->so_proto->pr_flags;
	if ((soflags & PR_CONNREQUIRED) || (so->so_state & SS_ISCONNECTED))
		sendnam = (struct sockaddr *)0;
	else
		sendnam = nam;
	if (so->so_type == SOCK_SEQPACKET)
		flags = MSG_EOR;
	else
		flags = 0;

	error = so_pru_sosend(so, sendnam, NULL, top, NULL, flags,
	    curthread /*XXX*/);
	/*
	 * ENOBUFS for dgram sockets is transient and non fatal.
	 * No need to log, and no need to break a soft mount.
	 */
	if (error == ENOBUFS && so->so_type == SOCK_DGRAM) {
		error = 0;
		if (rep)		/* do backoff retransmit on client */
			rep->r_flags |= R_MUSTRESEND;
	}

	if (error) {
		if (rep) {
			log(LOG_INFO, "nfs send error %d for server %s\n",error,
			    rep->r_nmp->nm_mountp->mnt_stat.f_mntfromname);
			/*
			 * Deal with errors for the client side.
			 */
			if (rep->r_flags & R_SOFTTERM)
				error = EINTR;
			else
				rep->r_flags |= R_MUSTRESEND;
		} else
			log(LOG_INFO, "nfsd send error %d\n", error);

		/*
		 * Handle any recoverable (soft) socket errors here. (?)
		 */
		if (error != EINTR && error != ERESTART &&
		    error != EWOULDBLOCK && error != EPIPE)
			error = 0;
	}
	return (error);
}

/*
 * Receive a Sun RPC Request/Reply.  For SOCK_DGRAM, the work is all
 * done by soreceive(), but for SOCK_STREAM we must deal with the Record
 * Mark and consolidate the data into a new mbuf list.
 * nb: Sometimes TCP passes the data up to soreceive() in long lists of
 *     small mbufs.
 * For SOCK_STREAM we must be very careful to read an entire record once
 * we have read any of it, even if the system call has been interrupted.
 */
static int
nfs_receive(struct nfsreq *rep, struct sockaddr **aname, struct mbuf **mp)
{
	struct socket *so;
	struct uio auio;
	struct iovec aio;
	struct mbuf *m;
	struct mbuf *control;
	u_int32_t len;
	struct sockaddr **getnam;
	int error, sotype, rcvflg;
	struct thread *td = curthread;	/* XXX */

	/*
	 * Set up arguments for soreceive()
	 */
	*mp = (struct mbuf *)0;
	*aname = (struct sockaddr *)0;
	sotype = rep->r_nmp->nm_sotype;

	/*
	 * For reliable protocols, lock against other senders/receivers
	 * in case a reconnect is necessary.
	 * For SOCK_STREAM, first get the Record Mark to find out how much
	 * more there is to get.
	 * We must lock the socket against other receivers
	 * until we have an entire rpc request/reply.
	 */
	if (sotype != SOCK_DGRAM) {
		error = nfs_sndlock(rep);
		if (error)
			return (error);
tryagain:
		/*
		 * Check for fatal errors and resending request.
		 */
		/*
		 * Ugh: If a reconnect attempt just happened, nm_so
		 * would have changed. NULL indicates a failed
		 * attempt that has essentially shut down this
		 * mount point.
		 */
		if (rep->r_mrep || (rep->r_flags & R_SOFTTERM)) {
			nfs_sndunlock(rep);
			return (EINTR);
		}
		so = rep->r_nmp->nm_so;
		if (!so) {
			error = nfs_reconnect(rep);
			if (error) {
				nfs_sndunlock(rep);
				return (error);
			}
			goto tryagain;
		}
		while (rep->r_flags & R_MUSTRESEND) {
			m = m_copym(rep->r_mreq, 0, M_COPYALL, MB_WAIT);
			nfsstats.rpcretries++;
			error = nfs_send(so, rep->r_nmp->nm_nam, m, rep);
			if (error) {
				if (error == EINTR || error == ERESTART ||
				    (error = nfs_reconnect(rep)) != 0) {
					nfs_sndunlock(rep);
					return (error);
				}
				goto tryagain;
			}
		}
		nfs_sndunlock(rep);
		if (sotype == SOCK_STREAM) {
			aio.iov_base = (caddr_t) &len;
			aio.iov_len = sizeof(u_int32_t);
			auio.uio_iov = &aio;
			auio.uio_iovcnt = 1;
			auio.uio_segflg = UIO_SYSSPACE;
			auio.uio_rw = UIO_READ;
			auio.uio_offset = 0;
			auio.uio_resid = sizeof(u_int32_t);
			auio.uio_td = td;
			do {
				rcvflg = MSG_WAITALL;
				error = so_pru_soreceive(so, NULL, &auio, NULL,
				    NULL, &rcvflg);
				if (error == EWOULDBLOCK && rep) {
					if (rep->r_flags & R_SOFTTERM)
						return (EINTR);
				}
			} while (error == EWOULDBLOCK);
			if (!error && auio.uio_resid > 0) {
				/*
				 * Don't log a 0 byte receive; it means
				 * that the socket has been closed, and
				 * can happen during normal operation
				 * (forcible unmount or Solaris server).
				 */
				if (auio.uio_resid != sizeof (u_int32_t))
					log(LOG_INFO,
					    "short receive (%d/%d) from nfs server %s\n",
					    (int)(sizeof(u_int32_t) - auio.uio_resid),
					    (int)sizeof(u_int32_t),
					    rep->r_nmp->nm_mountp->mnt_stat.f_mntfromname);
				error = EPIPE;
			}
			if (error)
				goto errout;
			len = ntohl(len) & ~0x80000000;
			/*
			 * This is SERIOUS! We are out of sync with the sender
			 * and forcing a disconnect/reconnect is all I can do.
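			 *
			 * (For reference: the 32-bit value read above is a
			 * Sun RPC record mark; the high bit flags the final
			 * fragment of a record and the low 31 bits give the
			 * fragment length, so the length test below is the
			 * only sanity check we can apply to it.)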
			 */
			if (len > NFS_MAXPACKET) {
				log(LOG_ERR, "%s (%d) from nfs server %s\n",
				    "impossible packet length",
				    len,
				    rep->r_nmp->nm_mountp->mnt_stat.f_mntfromname);
				error = EFBIG;
				goto errout;
			}
			auio.uio_resid = len;
			do {
				rcvflg = MSG_WAITALL;
				error = so_pru_soreceive(so, NULL, &auio, mp,
				    NULL, &rcvflg);
			} while (error == EWOULDBLOCK || error == EINTR ||
				 error == ERESTART);
			if (!error && auio.uio_resid > 0) {
				if (len != auio.uio_resid)
					log(LOG_INFO,
					    "short receive (%d/%d) from nfs server %s\n",
					    len - auio.uio_resid, len,
					    rep->r_nmp->nm_mountp->mnt_stat.f_mntfromname);
				error = EPIPE;
			}
		} else {
			/*
			 * NB: Since uio_resid is big, MSG_WAITALL is ignored
			 * and soreceive() will return when it has either a
			 * control msg or a data msg.
			 * We have no use for control msg., but must grab them
			 * and then throw them away so we know what is going
			 * on.
			 */
			auio.uio_resid = len = 100000000;   /* Anything Big */
			auio.uio_td = td;
			do {
				rcvflg = 0;
				error = so_pru_soreceive(so, NULL, &auio, mp,
				    &control, &rcvflg);
				if (control)
					m_freem(control);
				if (error == EWOULDBLOCK && rep) {
					if (rep->r_flags & R_SOFTTERM)
						return (EINTR);
				}
			} while (error == EWOULDBLOCK ||
				 (!error && *mp == NULL && control));
			if ((rcvflg & MSG_EOR) == 0)
				printf("Egad!!\n");
			if (!error && *mp == NULL)
				error = EPIPE;
			len -= auio.uio_resid;
		}
errout:
		if (error && error != EINTR && error != ERESTART) {
			m_freem(*mp);
			*mp = (struct mbuf *)0;
			if (error != EPIPE)
				log(LOG_INFO,
				    "receive error %d from nfs server %s\n",
				    error,
				    rep->r_nmp->nm_mountp->mnt_stat.f_mntfromname);
			error = nfs_sndlock(rep);
			if (!error) {
				error = nfs_reconnect(rep);
				if (!error)
					goto tryagain;
				else
					nfs_sndunlock(rep);
			}
		}
	} else {
		if ((so = rep->r_nmp->nm_so) == NULL)
			return (EACCES);
		if (so->so_state & SS_ISCONNECTED)
			getnam = (struct sockaddr **)0;
		else
			getnam = aname;
		auio.uio_resid = len = 1000000;
		auio.uio_td = td;
		do {
			rcvflg = 0;
			error = so_pru_soreceive(so, getnam, &auio, mp, NULL,
			    &rcvflg);
			if (error == EWOULDBLOCK &&
			    (rep->r_flags & R_SOFTTERM))
				return (EINTR);
		} while (error == EWOULDBLOCK);
		len -= auio.uio_resid;
	}
	if (error) {
		m_freem(*mp);
		*mp = (struct mbuf *)0;
	}
	/*
	 * Search for any mbufs that are not a multiple of 4 bytes long
	 * or with m_data not longword aligned.
	 * These could cause pointer alignment problems, so copy them to
	 * well aligned mbufs.
	 */
	nfs_realign(mp, 5 * NFSX_UNSIGNED);
	return (error);
}

/*
 * Implement receipt of reply on a socket.
 * We must search through the list of received datagrams matching them
 * with outstanding requests using the xid, until ours is found.
 */
/* ARGSUSED */
int
nfs_reply(struct nfsreq *myrep)
{
	struct nfsreq *rep;
	struct nfsmount *nmp = myrep->r_nmp;
	int32_t t1;
	struct mbuf *mrep, *md;
	struct sockaddr *nam;
	u_int32_t rxid, *tl;
	caddr_t dpos, cp2;
	int error;

	/*
	 * Loop around until we get our own reply
	 */
	for (;;) {
		/*
		 * Lock against other receivers so that I don't get stuck in
		 * sbwait() after someone else has received my reply for me.
		 * Also necessary for connection based protocols to avoid
		 * race conditions during a reconnect.
		 * If nfs_rcvlock() returns EALREADY, that means that
		 * the reply has already been received by another
		 * process and we can return immediately. In this
		 * case, the lock is not taken to avoid races with
		 * other processes.
		 */
		error = nfs_rcvlock(myrep);
		if (error == EALREADY)
			return (0);
		if (error)
			return (error);
		/*
		 * Get the next Rpc reply off the socket
		 */
		error = nfs_receive(myrep, &nam, &mrep);
		nfs_rcvunlock(myrep);
		if (error) {
			/*
			 * Ignore routing errors on connectionless protocols??
			 */
			if (NFSIGNORE_SOERROR(nmp->nm_soflags, error)) {
				nmp->nm_so->so_error = 0;
				if (myrep->r_flags & R_GETONEREP)
					return (0);
				continue;
			}
			return (error);
		}
		if (nam)
			FREE(nam, M_SONAME);

		/*
		 * Get the xid and check that it is an rpc reply
		 */
		md = mrep;
		dpos = mtod(md, caddr_t);
		nfsm_dissect(tl, u_int32_t *, 2*NFSX_UNSIGNED);
		rxid = *tl++;
		if (*tl != rpc_reply) {
#ifndef NFS_NOSERVER
			if (nmp->nm_flag & NFSMNT_NQNFS) {
				if (nqnfs_callback(nmp, mrep, md, dpos))
					nfsstats.rpcinvalid++;
			} else {
				nfsstats.rpcinvalid++;
				m_freem(mrep);
			}
#else
			nfsstats.rpcinvalid++;
			m_freem(mrep);
#endif
nfsmout:
			if (myrep->r_flags & R_GETONEREP)
				return (0);
			continue;
		}

		/*
		 * Loop through the request list to match up the reply
		 * Iff no match, just drop the datagram
		 */
		TAILQ_FOREACH(rep, &nfs_reqq, r_chain) {
			if (rep->r_mrep == NULL && rxid == rep->r_xid) {
				/* Found it.. */
				rep->r_mrep = mrep;
				rep->r_md = md;
				rep->r_dpos = dpos;
				if (nfsrtton) {
					struct rttl *rt;

					rt = &nfsrtt.rttl[nfsrtt.pos];
					rt->proc = rep->r_procnum;
					rt->rto = NFS_RTO(nmp, proct[rep->r_procnum]);
					rt->sent = nmp->nm_sent;
					rt->cwnd = nmp->nm_cwnd;
					rt->srtt = nmp->nm_srtt[proct[rep->r_procnum] - 1];
					rt->sdrtt = nmp->nm_sdrtt[proct[rep->r_procnum] - 1];
					rt->fsid = nmp->nm_mountp->mnt_stat.f_fsid;
					getmicrotime(&rt->tstamp);
					if (rep->r_flags & R_TIMING)
						rt->rtt = rep->r_rtt;
					else
						rt->rtt = 1000000;
					nfsrtt.pos = (nfsrtt.pos + 1) % NFSRTTLOGSIZ;
				}
				/*
				 * Update congestion window.
				 * Do the additive increase of
				 * one rpc/rtt.
				 */
				if (nmp->nm_cwnd <= nmp->nm_sent) {
					nmp->nm_cwnd +=
					    (NFS_CWNDSCALE * NFS_CWNDSCALE +
					     (nmp->nm_cwnd >> 1)) / nmp->nm_cwnd;
					if (nmp->nm_cwnd > NFS_MAXCWND)
						nmp->nm_cwnd = NFS_MAXCWND;
				}
				crit_enter();	/* nfs_timer interlock*/
				if (rep->r_flags & R_SENT) {
					rep->r_flags &= ~R_SENT;
					nmp->nm_sent -= NFS_CWNDSCALE;
				}
				crit_exit();
				/*
				 * Update rtt using a gain of 0.125 on the mean
				 * and a gain of 0.25 on the deviation.
				 */
				if (rep->r_flags & R_TIMING) {
					/*
					 * Since the timer resolution of
					 * NFS_HZ is so coarse, it can often
					 * result in r_rtt == 0. Since
					 * r_rtt == N means that the actual
					 * rtt is between N+dt and N+2-dt ticks,
					 * add 1.
					 */
					t1 = rep->r_rtt + 1;
					t1 -= (NFS_SRTT(rep) >> 3);
					NFS_SRTT(rep) += t1;
					if (t1 < 0)
						t1 = -t1;
					t1 -= (NFS_SDRTT(rep) >> 2);
					NFS_SDRTT(rep) += t1;
				}
				nmp->nm_timeouts = 0;
				break;
			}
		}
		/*
		 * If not matched to a request, drop it.
		 * If it's mine, get out.
		 */
		if (rep == 0) {
			nfsstats.rpcunexpected++;
			m_freem(mrep);
		} else if (rep == myrep) {
			if (rep->r_mrep == NULL)
				panic("nfsreply nil");
			return (0);
		}
		if (myrep->r_flags & R_GETONEREP)
			return (0);
	}
}

/*
 * nfs_request - goes something like this
 *	- fill in request struct
 *	- links it into list
 *	- calls nfs_send() for first transmit
 *	- calls nfs_receive() to get reply
 *	- break down rpc header and return with nfs reply pointed to
 *	  by mrep or error
 * nb: always frees up mreq mbuf list
 */
int
nfs_request(struct vnode *vp, struct mbuf *mrest, int procnum,
	struct thread *td, struct ucred *cred, struct mbuf **mrp,
	struct mbuf **mdp, caddr_t *dposp)
{
	struct mbuf *mrep, *m2;
	struct nfsreq *rep;
	u_int32_t *tl;
	int i;
	struct nfsmount *nmp;
	struct mbuf *m, *md, *mheadend;
	struct nfsnode *np;
	char nickv[RPCX_NICKVERF];
	time_t reqtime, waituntil;
	caddr_t dpos, cp2;
	int t1, nqlflag, cachable, error = 0, mrest_len, auth_len, auth_type;
	int trylater_delay = NQ_TRYLATERDEL, trylater_cnt = 0, failed_auth = 0;
	int verf_len, verf_type;
	u_int32_t xid;
	u_quad_t frev;
	char *auth_str, *verf_str;
	NFSKERBKEY_T key;		/* save session key */

	/* Reject requests while attempting a forced unmount. */
	if (vp->v_mount->mnt_kern_flag & MNTK_UNMOUNTF) {
		m_freem(mrest);
		return (ESTALE);
	}
	nmp = VFSTONFS(vp->v_mount);
	MALLOC(rep, struct nfsreq *, sizeof(struct nfsreq), M_NFSREQ, M_WAITOK);
	rep->r_nmp = nmp;
	rep->r_vp = vp;
	rep->r_td = td;
	rep->r_procnum = procnum;
	i = 0;
	m = mrest;
	while (m) {
		i += m->m_len;
		m = m->m_next;
	}
	mrest_len = i;

	/*
	 * Get the RPC header with authorization.
	 */
kerbauth:
	verf_str = auth_str = (char *)0;
	if (nmp->nm_flag & NFSMNT_KERB) {
		verf_str = nickv;
		verf_len = sizeof (nickv);
		auth_type = RPCAUTH_KERB4;
		bzero((caddr_t)key, sizeof (key));
		if (failed_auth || nfs_getnickauth(nmp, cred, &auth_str,
			&auth_len, verf_str, verf_len)) {
			error = nfs_getauth(nmp, rep, cred, &auth_str,
				&auth_len, verf_str, &verf_len, key);
			if (error) {
				free((caddr_t)rep, M_NFSREQ);
				m_freem(mrest);
				return (error);
			}
		}
	} else {
		auth_type = RPCAUTH_UNIX;
		if (cred->cr_ngroups < 1)
			panic("nfsreq nogrps");
		auth_len = ((((cred->cr_ngroups - 1) > nmp->nm_numgrps) ?
			nmp->nm_numgrps : (cred->cr_ngroups - 1)) << 2) +
			5 * NFSX_UNSIGNED;
	}
	m = nfsm_rpchead(cred, nmp->nm_flag, procnum, auth_type, auth_len,
	     auth_str, verf_len, verf_str, mrest, mrest_len, &mheadend, &xid);
	if (auth_str)
		free(auth_str, M_TEMP);

	/*
	 * For stream protocols, insert a Sun RPC Record Mark.
	 */
	if (nmp->nm_sotype == SOCK_STREAM) {
		M_PREPEND(m, NFSX_UNSIGNED, MB_WAIT);
		if (m == NULL) {
			free(rep, M_NFSREQ);
			return (ENOBUFS);
		}
		*mtod(m, u_int32_t *) = htonl(0x80000000 |
			 (m->m_pkthdr.len - NFSX_UNSIGNED));
	}
	rep->r_mreq = m;
	rep->r_xid = xid;
tryagain:
	if (nmp->nm_flag & NFSMNT_SOFT)
		rep->r_retry = nmp->nm_retry;
	else
		rep->r_retry = NFS_MAXREXMIT + 1;	/* past clip limit */
	rep->r_rtt = rep->r_rexmit = 0;
	if (proct[procnum] > 0)
		rep->r_flags = R_TIMING | R_MASKTIMER;
	else
		rep->r_flags = R_MASKTIMER;
	rep->r_mrep = NULL;

	/*
	 * Do the client side RPC.
	 */
	nfsstats.rpcrequests++;

	/*
	 * Chain request into list of outstanding requests. Be sure
	 * to put it LAST so timer finds oldest requests first. Note
	 * that R_MASKTIMER is set at the moment to prevent any timer
	 * action on this request while we are still doing processing on
	 * it below. splsoftclock() primarily protects nm_sent. Note
	 * that we may block in this code so there is no atomicity guarantee.
	 */
	crit_enter();
	TAILQ_INSERT_TAIL(&nfs_reqq, rep, r_chain);

	/* Get send time for nqnfs */
	reqtime = time_second;

	/*
	 * If backing off another request or avoiding congestion, don't
	 * send this one now but let timer do it. If not timing a request,
	 * do it now.
	 */
	if (nmp->nm_so && (nmp->nm_sotype != SOCK_DGRAM ||
		(nmp->nm_flag & NFSMNT_DUMBTIMR) ||
		nmp->nm_sent < nmp->nm_cwnd)) {
		if (nmp->nm_soflags & PR_CONNREQUIRED)
			error = nfs_sndlock(rep);
		if (!error) {
			m2 = m_copym(m, 0, M_COPYALL, MB_WAIT);
			error = nfs_send(nmp->nm_so, nmp->nm_nam, m2, rep);
			if (nmp->nm_soflags & PR_CONNREQUIRED)
				nfs_sndunlock(rep);
		}
		if (!error && (rep->r_flags & R_MUSTRESEND) == 0) {
			nmp->nm_sent += NFS_CWNDSCALE;
			rep->r_flags |= R_SENT;
		}
	} else {
		rep->r_rtt = -1;
	}

	/*
	 * Let the timer do what it will with the request, then
	 * wait for the reply from our send or the timer's.
	 */
	rep->r_flags &= ~R_MASKTIMER;
	crit_exit();
	if (!error || error == EPIPE)
		error = nfs_reply(rep);

	/*
	 * RPC done, unlink the request.
	 */
	crit_enter();
	TAILQ_REMOVE(&nfs_reqq, rep, r_chain);

	/*
	 * Decrement the outstanding request count.
	 */
	if (rep->r_flags & R_SENT) {
		rep->r_flags &= ~R_SENT;
		nmp->nm_sent -= NFS_CWNDSCALE;
	}
	crit_exit();

	/*
	 * If there was a successful reply and a tprintf msg,
	 * tprintf a response.
	 */
	if (!error && (rep->r_flags & R_TPRINTFMSG))
		nfs_msg(rep->r_td, nmp->nm_mountp->mnt_stat.f_mntfromname,
		    "is alive again");
	mrep = rep->r_mrep;
	md = rep->r_md;
	dpos = rep->r_dpos;
	if (error) {
		m_freem(rep->r_mreq);
		free((caddr_t)rep, M_NFSREQ);
		return (error);
	}

	/*
	 * break down the rpc header and check if ok
	 */
	nfsm_dissect(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
	if (*tl++ == rpc_msgdenied) {
		if (*tl == rpc_mismatch)
			error = EOPNOTSUPP;
		else if ((nmp->nm_flag & NFSMNT_KERB) && *tl++ == rpc_autherr) {
			if (!failed_auth) {
				failed_auth++;
				mheadend->m_next = (struct mbuf *)0;
				m_freem(mrep);
				m_freem(rep->r_mreq);
				goto kerbauth;
			} else
				error = EAUTH;
		} else
			error = EACCES;
		m_freem(mrep);
		m_freem(rep->r_mreq);
		free((caddr_t)rep, M_NFSREQ);
		return (error);
	}

	/*
	 * Grab any Kerberos verifier, otherwise just throw it away.
	 */
	verf_type = fxdr_unsigned(int, *tl++);
	i = fxdr_unsigned(int32_t, *tl);
	if ((nmp->nm_flag & NFSMNT_KERB) && verf_type == RPCAUTH_KERB4) {
		error = nfs_savenickauth(nmp, cred, i, key, &md, &dpos, mrep);
		if (error)
			goto nfsmout;
	} else if (i > 0)
		nfsm_adv(nfsm_rndup(i));
	nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
	/* 0 == ok */
	if (*tl == 0) {
		nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
		if (*tl != 0) {
			error = fxdr_unsigned(int, *tl);
			if ((nmp->nm_flag & NFSMNT_NFSV3) &&
				error == NFSERR_TRYLATER) {
				m_freem(mrep);
				error = 0;
				waituntil = time_second + trylater_delay;
				while (time_second < waituntil)
					(void) tsleep((caddr_t)&lbolt,
						0, "nqnfstry", 0);
				trylater_delay *= nfs_backoff[trylater_cnt];
				if (trylater_cnt < 7)
					trylater_cnt++;
				goto tryagain;
			}

			/*
			 * If the File Handle was stale, invalidate the
			 * lookup cache, just in case.
			 */
			if (error == ESTALE)
				cache_inval_vp(vp, CINV_CHILDREN);
			if (nmp->nm_flag & NFSMNT_NFSV3) {
				*mrp = mrep;
				*mdp = md;
				*dposp = dpos;
				error |= NFSERR_RETERR;
			} else
				m_freem(mrep);
			m_freem(rep->r_mreq);
			free((caddr_t)rep, M_NFSREQ);
			return (error);
		}

		/*
		 * For nqnfs, get any lease in reply
		 */
		if (nmp->nm_flag & NFSMNT_NQNFS) {
			nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
			if (*tl) {
				np = VTONFS(vp);
				nqlflag = fxdr_unsigned(int, *tl);
				nfsm_dissect(tl, u_int32_t *, 4*NFSX_UNSIGNED);
				cachable = fxdr_unsigned(int, *tl++);
				reqtime += fxdr_unsigned(int, *tl++);
				if (reqtime > time_second) {
					frev = fxdr_hyper(tl);
					nqnfs_clientlease(nmp, np, nqlflag,
					    cachable, reqtime, frev);
				}
			}
		}
		*mrp = mrep;
		*mdp = md;
		*dposp = dpos;
		m_freem(rep->r_mreq);
		FREE((caddr_t)rep, M_NFSREQ);
		return (0);
	}
	m_freem(mrep);
	error = EPROTONOSUPPORT;
nfsmout:
	m_freem(rep->r_mreq);
	free((caddr_t)rep, M_NFSREQ);
	return (error);
}

#ifndef NFS_NOSERVER
/*
 * Generate the rpc reply header
 * siz arg. is used to decide if adding a cluster is worthwhile
 */
int
nfs_rephead(int siz, struct nfsrv_descript *nd, struct nfssvc_sock *slp,
	    int err, int cache, u_quad_t *frev, struct mbuf **mrq,
	    struct mbuf **mbp, caddr_t *bposp)
{
	u_int32_t *tl;
	struct mbuf *mreq;
	caddr_t bpos;
	struct mbuf *mb, *mb2;

	siz += RPC_REPLYSIZ;
	mb = mreq = m_getl(max_hdr + siz, MB_WAIT, MT_DATA, M_PKTHDR, NULL);
	mreq->m_pkthdr.len = 0;
	/*
	 * If this is not a cluster, try and leave leading space
	 * for the lower level headers.
	 */
	if ((max_hdr + siz) < MINCLSIZE)
		mreq->m_data += max_hdr;
	tl = mtod(mreq, u_int32_t *);
	mreq->m_len = 6 * NFSX_UNSIGNED;
	bpos = ((caddr_t)tl) + mreq->m_len;
	*tl++ = txdr_unsigned(nd->nd_retxid);
	*tl++ = rpc_reply;
	if (err == ERPCMISMATCH || (err & NFSERR_AUTHERR)) {
		*tl++ = rpc_msgdenied;
		if (err & NFSERR_AUTHERR) {
			*tl++ = rpc_autherr;
			*tl = txdr_unsigned(err & ~NFSERR_AUTHERR);
			mreq->m_len -= NFSX_UNSIGNED;
			bpos -= NFSX_UNSIGNED;
		} else {
			*tl++ = rpc_mismatch;
			*tl++ = txdr_unsigned(RPC_VER2);
			*tl = txdr_unsigned(RPC_VER2);
		}
	} else {
		*tl++ = rpc_msgaccepted;

		/*
		 * For Kerberos authentication, we must send the nickname
		 * verifier back, otherwise just RPCAUTH_NULL.
		 */
		if (nd->nd_flag & ND_KERBFULL) {
			struct nfsuid *nuidp;
			struct timeval ktvin, ktvout;

			for (nuidp = NUIDHASH(slp, nd->nd_cr.cr_uid)->lh_first;
			    nuidp != 0; nuidp = nuidp->nu_hash.le_next) {
				if (nuidp->nu_cr.cr_uid == nd->nd_cr.cr_uid &&
				    (!nd->nd_nam2 || netaddr_match(NU_NETFAM(nuidp),
				     &nuidp->nu_haddr, nd->nd_nam2)))
					break;
			}
			if (nuidp) {
				ktvin.tv_sec =
				    txdr_unsigned(nuidp->nu_timestamp.tv_sec - 1);
				ktvin.tv_usec =
				    txdr_unsigned(nuidp->nu_timestamp.tv_usec);

				/*
				 * Encrypt the timestamp in ecb mode using the
				 * session key.
				 */
#ifdef NFSKERB
				XXX
#endif

				*tl++ = rpc_auth_kerb;
				*tl++ = txdr_unsigned(3 * NFSX_UNSIGNED);
				*tl = ktvout.tv_sec;
				nfsm_build(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
				*tl++ = ktvout.tv_usec;
				*tl++ = txdr_unsigned(nuidp->nu_cr.cr_uid);
			} else {
				*tl++ = 0;
				*tl++ = 0;
			}
		} else {
			*tl++ = 0;
			*tl++ = 0;
		}
		switch (err) {
		case EPROGUNAVAIL:
			*tl = txdr_unsigned(RPC_PROGUNAVAIL);
			break;
		case EPROGMISMATCH:
			*tl = txdr_unsigned(RPC_PROGMISMATCH);
			nfsm_build(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
			if (nd->nd_flag & ND_NQNFS) {
				*tl++ = txdr_unsigned(3);
				*tl = txdr_unsigned(3);
			} else {
				*tl++ = txdr_unsigned(2);
				*tl = txdr_unsigned(3);
			}
			break;
		case EPROCUNAVAIL:
			*tl = txdr_unsigned(RPC_PROCUNAVAIL);
			break;
		case EBADRPC:
			*tl = txdr_unsigned(RPC_GARBAGE);
			break;
		default:
			*tl = 0;
			if (err != NFSERR_RETVOID) {
				nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED);
				if (err)
					*tl = txdr_unsigned(nfsrv_errmap(nd, err));
				else
					*tl = 0;
			}
			break;
		};
	}

	/*
	 * For nqnfs, piggyback lease as requested.
	 */
	if ((nd->nd_flag & ND_NQNFS) && err == 0) {
		if (nd->nd_flag & ND_LEASE) {
			nfsm_build(tl, u_int32_t *, 5 * NFSX_UNSIGNED);
			*tl++ = txdr_unsigned(nd->nd_flag & ND_LEASE);
			*tl++ = txdr_unsigned(cache);
			*tl++ = txdr_unsigned(nd->nd_duration);
			txdr_hyper(*frev, tl);
		} else {
			nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED);
			*tl = 0;
		}
	}
	if (mrq != NULL)
		*mrq = mreq;
	*mbp = mb;
	*bposp = bpos;
	if (err != 0 && err != NFSERR_RETVOID)
		nfsstats.srvrpc_errs++;
	return (0);
}


#endif /* NFS_NOSERVER */
/*
 * Nfs timer routine
 * Scan the nfsreq list and retransmit any requests that have timed out
 * To avoid retransmission attempts on STREAM sockets (in the future) make
 * sure to set the r_retry field to 0 (implies nm_retry == 0).
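 *
 * Rough sketch of the retransmit policy below: once a UDP request's rtt
 * exceeds its rto, the timeout is scaled by nfs_backoff[nm_timeouts - 1]
 * (2, 4, ... 256), and when an already-sent request is actually resent the
 * congestion window is halved, mirroring the additive increase performed
 * in nfs_reply().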
 */
void
nfs_timer(void *arg /* never used */)
{
	struct nfsreq *rep;
	struct mbuf *m;
	struct socket *so;
	struct nfsmount *nmp;
	int timeo;
	int error;
#ifndef NFS_NOSERVER
	static long lasttime = 0;
	struct nfssvc_sock *slp;
	u_quad_t cur_usec;
#endif /* NFS_NOSERVER */
	struct thread *td = &thread0; /* XXX for credentials, will break if sleep */

	crit_enter();
	TAILQ_FOREACH(rep, &nfs_reqq, r_chain) {
		nmp = rep->r_nmp;
		if (rep->r_mrep || (rep->r_flags & (R_SOFTTERM|R_MASKTIMER)))
			continue;
		if (nfs_sigintr(nmp, rep, rep->r_td)) {
			nfs_softterm(rep);
			continue;
		}
		if (rep->r_rtt >= 0) {
			rep->r_rtt++;
			if (nmp->nm_flag & NFSMNT_DUMBTIMR)
				timeo = nmp->nm_timeo;
			else
				timeo = NFS_RTO(nmp, proct[rep->r_procnum]);
			if (nmp->nm_timeouts > 0)
				timeo *= nfs_backoff[nmp->nm_timeouts - 1];
			if (rep->r_rtt <= timeo)
				continue;
			if (nmp->nm_timeouts < 8)
				nmp->nm_timeouts++;
		}
		/*
		 * Check for server not responding
		 */
		if ((rep->r_flags & R_TPRINTFMSG) == 0 &&
		     rep->r_rexmit > nmp->nm_deadthresh) {
			nfs_msg(rep->r_td,
			    nmp->nm_mountp->mnt_stat.f_mntfromname,
			    "not responding");
			rep->r_flags |= R_TPRINTFMSG;
		}
		if (rep->r_rexmit >= rep->r_retry) {	/* too many */
			nfsstats.rpctimeouts++;
			nfs_softterm(rep);
			continue;
		}
		if (nmp->nm_sotype != SOCK_DGRAM) {
			if (++rep->r_rexmit > NFS_MAXREXMIT)
				rep->r_rexmit = NFS_MAXREXMIT;
			continue;
		}
		if ((so = nmp->nm_so) == NULL)
			continue;

		/*
		 * If there is enough space and the window allows..
		 *	Resend it
		 * Set r_rtt to -1 in case we fail to send it now.
		 */
		rep->r_rtt = -1;
		if (sbspace(&so->so_snd) >= rep->r_mreq->m_pkthdr.len &&
		   ((nmp->nm_flag & NFSMNT_DUMBTIMR) ||
		    (rep->r_flags & R_SENT) ||
		    nmp->nm_sent < nmp->nm_cwnd) &&
		   (m = m_copym(rep->r_mreq, 0, M_COPYALL, MB_DONTWAIT))){
			if ((nmp->nm_flag & NFSMNT_NOCONN) == 0)
				error = so_pru_send(so, 0, m, (struct sockaddr *)0,
				    (struct mbuf *)0, td);
			else
				error = so_pru_send(so, 0, m, nmp->nm_nam,
				    (struct mbuf *)0, td);
			if (error) {
				if (NFSIGNORE_SOERROR(nmp->nm_soflags, error))
					so->so_error = 0;
			} else {
				/*
				 * Iff first send, start timing
				 * else turn timing off, backoff timer
				 * and divide congestion window by 2.
				 */
				if (rep->r_flags & R_SENT) {
					rep->r_flags &= ~R_TIMING;
					if (++rep->r_rexmit > NFS_MAXREXMIT)
						rep->r_rexmit = NFS_MAXREXMIT;
					nmp->nm_cwnd >>= 1;
					if (nmp->nm_cwnd < NFS_CWNDSCALE)
						nmp->nm_cwnd = NFS_CWNDSCALE;
					nfsstats.rpcretries++;
				} else {
					rep->r_flags |= R_SENT;
					nmp->nm_sent += NFS_CWNDSCALE;
				}
				rep->r_rtt = 0;
			}
		}
	}
#ifndef NFS_NOSERVER
	/*
	 * Call the nqnfs server timer once a second to handle leases.
	 */
	if (lasttime != time_second) {
		lasttime = time_second;
		nqnfs_serverd();
	}

	/*
	 * Scan the write gathering queues for writes that need to be
	 * completed now.
	 */
	cur_usec = nfs_curusec();
	TAILQ_FOREACH(slp, &nfssvc_sockhead, ns_chain) {
		if (slp->ns_tq.lh_first && slp->ns_tq.lh_first->nd_time <= cur_usec)
			nfsrv_wakenfsd(slp, 1);
	}
#endif /* NFS_NOSERVER */
	crit_exit();
	callout_reset(&nfs_timer_handle, nfs_ticks, nfs_timer, NULL);
}

/*
 * Mark all of an nfs mount's outstanding requests with R_SOFTTERM and
 * wait for all requests to complete. This is used by forced unmounts
 * to terminate any outstanding RPCs.
 */
int
nfs_nmcancelreqs(struct nfsmount *nmp)
{
	struct nfsreq *req;
	int i;

	crit_enter();
	TAILQ_FOREACH(req, &nfs_reqq, r_chain) {
		if (nmp != req->r_nmp || req->r_mrep != NULL ||
		    (req->r_flags & R_SOFTTERM))
			continue;
		nfs_softterm(req);
	}
	crit_exit();

	for (i = 0; i < 30; i++) {
		crit_enter();
		TAILQ_FOREACH(req, &nfs_reqq, r_chain) {
			if (nmp == req->r_nmp)
				break;
		}
		crit_exit();
		if (req == NULL)
			return (0);
		tsleep(&lbolt, 0, "nfscancel", 0);
	}
	return (EBUSY);
}

/*
 * Flag a request as being about to terminate (due to NFSMNT_INT/NFSMNT_SOFT).
 * The nm_sent count is decremented now to avoid deadlocks when the process in
 * soreceive() hasn't yet managed to send its own request.
 *
 * This routine must be called at splsoftclock() to protect r_flags and
 * nm_sent.
 */

static void
nfs_softterm(struct nfsreq *rep)
{
	rep->r_flags |= R_SOFTTERM;

	if (rep->r_flags & R_SENT) {
		rep->r_nmp->nm_sent -= NFS_CWNDSCALE;
		rep->r_flags &= ~R_SENT;
	}
}

/*
 * Test for a termination condition pending on the process.
 * This is used for NFSMNT_INT mounts.
 */
int
nfs_sigintr(struct nfsmount *nmp, struct nfsreq *rep, struct thread *td)
{
	sigset_t tmpset;
	struct proc *p;

	if (rep && (rep->r_flags & R_SOFTTERM))
		return (EINTR);
	/* Terminate all requests while attempting a forced unmount. */
	if (nmp->nm_mountp->mnt_kern_flag & MNTK_UNMOUNTF)
		return (EINTR);
	if (!(nmp->nm_flag & NFSMNT_INT))
		return (0);
	/* td might be NULL YYY */
	if (td == NULL || (p = td->td_proc) == NULL)
		return (0);

	tmpset = p->p_siglist;
	SIGSETNAND(tmpset, p->p_sigmask);
	SIGSETNAND(tmpset, p->p_sigignore);
	if (SIGNOTEMPTY(p->p_siglist) && NFSINT_SIGMASK(tmpset))
		return (EINTR);

	return (0);
}

/*
 * Lock a socket against others.
 * Necessary for STREAM sockets to ensure you get an entire rpc request/reply
 * and also to avoid race conditions between the processes with nfs requests
 * in progress when a reconnect is necessary.
 */
int
nfs_sndlock(struct nfsreq *rep)
{
	int *statep = &rep->r_nmp->nm_state;
	struct thread *td;
	int slptimeo;
	int slpflag;
	int error;

	slpflag = 0;
	slptimeo = 0;
	td = rep->r_td;
	if (rep->r_nmp->nm_flag & NFSMNT_INT)
		slpflag = PCATCH;

	error = 0;
	crit_enter();
	while (*statep & NFSSTA_SNDLOCK) {
		*statep |= NFSSTA_WANTSND;
		if (nfs_sigintr(rep->r_nmp, rep, td)) {
			error = EINTR;
			break;
		}
		tsleep((caddr_t)statep, slpflag, "nfsndlck", slptimeo);
		if (slpflag == PCATCH) {
			slpflag = 0;
			slptimeo = 2 * hz;
		}
	}
	/* Always fail if our request has been cancelled. */
	if ((rep->r_flags & R_SOFTTERM))
		error = EINTR;
	if (error == 0)
		*statep |= NFSSTA_SNDLOCK;
	crit_exit();
	return (error);
}

/*
 * Unlock the stream socket for others.
 */
void
nfs_sndunlock(struct nfsreq *rep)
{
	int *statep = &rep->r_nmp->nm_state;

	if ((*statep & NFSSTA_SNDLOCK) == 0)
		panic("nfs sndunlock");
	crit_enter();
	*statep &= ~NFSSTA_SNDLOCK;
	if (*statep & NFSSTA_WANTSND) {
		*statep &= ~NFSSTA_WANTSND;
		wakeup((caddr_t)statep);
	}
	crit_exit();
}

static int
nfs_rcvlock(struct nfsreq *rep)
{
	int *statep = &rep->r_nmp->nm_state;
	int slpflag;
	int slptimeo;
	int error;

	/*
	 * Unconditionally check for completion in case another nfsiod
	 * got the packet while the caller was blocked, before the caller
	 * called us. Packet reception is handled by mainline code which
	 * is protected by the BGL at the moment.
	 *
	 * We do not strictly need the second check just before the
	 * tsleep(), but it's good defensive programming.
	 */
	if (rep->r_mrep != NULL)
		return (EALREADY);

	if (rep->r_nmp->nm_flag & NFSMNT_INT)
		slpflag = PCATCH;
	else
		slpflag = 0;
	slptimeo = 0;
	error = 0;
	crit_enter();
	while (*statep & NFSSTA_RCVLOCK) {
		if (nfs_sigintr(rep->r_nmp, rep, rep->r_td)) {
			error = EINTR;
			break;
		}
		if (rep->r_mrep != NULL) {
			error = EALREADY;
			break;
		}
		*statep |= NFSSTA_WANTRCV;
		tsleep((caddr_t)statep, slpflag, "nfsrcvlk", slptimeo);
		/*
		 * If our reply was received while we were sleeping,
		 * then just return without taking the lock to avoid a
		 * situation where a single iod could 'capture' the
		 * receive lock.
		 */
		if (rep->r_mrep != NULL) {
			error = EALREADY;
			break;
		}
		if (slpflag == PCATCH) {
			slpflag = 0;
			slptimeo = 2 * hz;
		}
	}
	if (error == 0) {
		*statep |= NFSSTA_RCVLOCK;
		rep->r_nmp->nm_rcvlock_td = curthread;	/* DEBUGGING */
	}
	crit_exit();
	return (error);
}

/*
 * Unlock the stream socket for others.
 */
static void
nfs_rcvunlock(struct nfsreq *rep)
{
	int *statep = &rep->r_nmp->nm_state;

	if ((*statep & NFSSTA_RCVLOCK) == 0)
		panic("nfs rcvunlock");
	crit_enter();
	rep->r_nmp->nm_rcvlock_td = (void *)-1;	/* DEBUGGING */
	*statep &= ~NFSSTA_RCVLOCK;
	if (*statep & NFSSTA_WANTRCV) {
		*statep &= ~NFSSTA_WANTRCV;
		wakeup((caddr_t)statep);
	}
	crit_exit();
}

/*
 * nfs_realign:
 *
 * Check for badly aligned mbuf data and realign by copying the unaligned
 * portion of the data into a new mbuf chain and freeing the portions
 * of the old chain that were replaced.
 *
 * We cannot simply realign the data within the existing mbuf chain
 * because the underlying buffers may contain other rpc commands and
 * we cannot afford to overwrite them.
 *
 * We would prefer to avoid this situation entirely.  The situation does
 * not occur with NFS/UDP and is supposed to only occasionally occur
 * with TCP.  Use vfs.nfs.realign_count and realign_test to check this.
 */
static void
nfs_realign(struct mbuf **pm, int hsiz)
{
	struct mbuf *m;
	struct mbuf *n = NULL;
	int off = 0;

	++nfs_realign_test;

	while ((m = *pm) != NULL) {
		if ((m->m_len & 0x3) || (mtod(m, intptr_t) & 0x3)) {
			n = m_getl(m->m_len, MB_WAIT, MT_DATA, 0, NULL);
			n->m_len = 0;
			break;
		}
		pm = &m->m_next;
	}

	/*
	 * If n is non-NULL, loop on m copying data, then replace the
	 * portion of the chain that had to be realigned.
	 */
	if (n != NULL) {
		++nfs_realign_count;
		while (m) {
			m_copyback(n, off, m->m_len, mtod(m, caddr_t));
			off += m->m_len;
			m = m->m_next;
		}
		m_freem(*pm);
		*pm = n;
	}
}

#ifndef NFS_NOSERVER

/*
 * Parse an RPC request
 * - verify it
 * - fill in the cred struct.
 */
int
nfs_getreq(struct nfsrv_descript *nd, struct nfsd *nfsd, int has_header)
{
	int len, i;
	u_int32_t *tl;
	int32_t t1;
	struct uio uio;
	struct iovec iov;
	caddr_t dpos, cp2, cp;
	u_int32_t nfsvers, auth_type;
	uid_t nickuid;
	int error = 0, nqnfs = 0, ticklen;
	struct mbuf *mrep, *md;
	struct nfsuid *nuidp;
	struct timeval tvin, tvout;
#if 0				/* until encrypted keys are implemented */
	NFSKERBKEYSCHED_T keys;	/* stores key schedule */
#endif

	mrep = nd->nd_mrep;
	md = nd->nd_md;
	dpos = nd->nd_dpos;
	if (has_header) {
		nfsm_dissect(tl, u_int32_t *, 10 * NFSX_UNSIGNED);
		nd->nd_retxid = fxdr_unsigned(u_int32_t, *tl++);
		if (*tl++ != rpc_call) {
			m_freem(mrep);
			return (EBADRPC);
		}
	} else
		nfsm_dissect(tl, u_int32_t *, 8 * NFSX_UNSIGNED);
	nd->nd_repstat = 0;
	nd->nd_flag = 0;
	if (*tl++ != rpc_vers) {
		nd->nd_repstat = ERPCMISMATCH;
		nd->nd_procnum = NFSPROC_NOOP;
		return (0);
	}
	if (*tl != nfs_prog) {
		if (*tl == nqnfs_prog)
			nqnfs++;
		else {
			nd->nd_repstat = EPROGUNAVAIL;
			nd->nd_procnum = NFSPROC_NOOP;
			return (0);
		}
	}
	tl++;
	nfsvers = fxdr_unsigned(u_int32_t, *tl++);
	if (((nfsvers < NFS_VER2 || nfsvers > NFS_VER3) && !nqnfs) ||
		(nfsvers != NQNFS_VER3 && nqnfs)) {
		nd->nd_repstat = EPROGMISMATCH;
		nd->nd_procnum = NFSPROC_NOOP;
		return (0);
	}
	if (nqnfs)
		nd->nd_flag = (ND_NFSV3 | ND_NQNFS);
	else if (nfsvers == NFS_VER3)
		nd->nd_flag = ND_NFSV3;
	nd->nd_procnum = fxdr_unsigned(u_int32_t, *tl++);
	if (nd->nd_procnum == NFSPROC_NULL)
		return (0);
	if (nd->nd_procnum >= NFS_NPROCS ||
		(!nqnfs && nd->nd_procnum >= NQNFSPROC_GETLEASE) ||
		(!nd->nd_flag && nd->nd_procnum > NFSV2PROC_STATFS)) {
		nd->nd_repstat = EPROCUNAVAIL;
		nd->nd_procnum = NFSPROC_NOOP;
		return (0);
	}
	if ((nd->nd_flag & ND_NFSV3) == 0)
		nd->nd_procnum = nfsv3_procid[nd->nd_procnum];
	auth_type = *tl++;
	len = fxdr_unsigned(int, *tl++);
	if (len < 0 || len > RPCAUTH_MAXSIZ) {
		m_freem(mrep);
		return (EBADRPC);
	}

	nd->nd_flag &= ~ND_KERBAUTH;
	/*
	 * Handle auth_unix or auth_kerb.
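	 *
	 * (Sketch of the AUTH_UNIX credential body parsed below: stamp,
	 * machine name as an XDR string, uid, gid, and up to
	 * RPCAUTH_UNIXGIDS supplementary gids, followed by a verifier that
	 * is simply skipped.)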
	 */
	if (auth_type == rpc_auth_unix) {
		len = fxdr_unsigned(int, *++tl);
		if (len < 0 || len > NFS_MAXNAMLEN) {
			m_freem(mrep);
			return (EBADRPC);
		}
		nfsm_adv(nfsm_rndup(len));
		nfsm_dissect(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
		bzero((caddr_t)&nd->nd_cr, sizeof (struct ucred));
		nd->nd_cr.cr_ref = 1;
		nd->nd_cr.cr_uid = fxdr_unsigned(uid_t, *tl++);
		nd->nd_cr.cr_gid = fxdr_unsigned(gid_t, *tl++);
		len = fxdr_unsigned(int, *tl);
		if (len < 0 || len > RPCAUTH_UNIXGIDS) {
			m_freem(mrep);
			return (EBADRPC);
		}
		nfsm_dissect(tl, u_int32_t *, (len + 2) * NFSX_UNSIGNED);
		for (i = 1; i <= len; i++)
			if (i < NGROUPS)
				nd->nd_cr.cr_groups[i] = fxdr_unsigned(gid_t, *tl++);
			else
				tl++;
		nd->nd_cr.cr_ngroups = (len >= NGROUPS) ? NGROUPS : (len + 1);
		if (nd->nd_cr.cr_ngroups > 1)
			nfsrvw_sort(nd->nd_cr.cr_groups, nd->nd_cr.cr_ngroups);
		len = fxdr_unsigned(int, *++tl);
		if (len < 0 || len > RPCAUTH_MAXSIZ) {
			m_freem(mrep);
			return (EBADRPC);
		}
		if (len > 0)
			nfsm_adv(nfsm_rndup(len));
	} else if (auth_type == rpc_auth_kerb) {
		switch (fxdr_unsigned(int, *tl++)) {
		case RPCAKN_FULLNAME:
			ticklen = fxdr_unsigned(int, *tl);
			*((u_int32_t *)nfsd->nfsd_authstr) = *tl;
			uio.uio_resid = nfsm_rndup(ticklen) + NFSX_UNSIGNED;
			nfsd->nfsd_authlen = uio.uio_resid + NFSX_UNSIGNED;
			if (uio.uio_resid > (len - 2 * NFSX_UNSIGNED)) {
				m_freem(mrep);
				return (EBADRPC);
			}
			uio.uio_offset = 0;
			uio.uio_iov = &iov;
			uio.uio_iovcnt = 1;
			uio.uio_segflg = UIO_SYSSPACE;
			iov.iov_base = (caddr_t)&nfsd->nfsd_authstr[4];
			iov.iov_len = RPCAUTH_MAXSIZ - 4;
			nfsm_mtouio(&uio, uio.uio_resid);
			nfsm_dissect(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
			if (*tl++ != rpc_auth_kerb ||
				fxdr_unsigned(int, *tl) != 4 * NFSX_UNSIGNED) {
				printf("Bad kerb verifier\n");
				nd->nd_repstat = (NFSERR_AUTHERR|AUTH_BADVERF);
				nd->nd_procnum = NFSPROC_NOOP;
				return (0);
			}
			nfsm_dissect(cp, caddr_t, 4 * NFSX_UNSIGNED);
			tl = (u_int32_t *)cp;
			if (fxdr_unsigned(int, *tl) != RPCAKN_FULLNAME) {
				printf("Not fullname kerb verifier\n");
				nd->nd_repstat = (NFSERR_AUTHERR|AUTH_BADVERF);
				nd->nd_procnum = NFSPROC_NOOP;
				return (0);
			}
			cp += NFSX_UNSIGNED;
			bcopy(cp, nfsd->nfsd_verfstr, 3 * NFSX_UNSIGNED);
			nfsd->nfsd_verflen = 3 * NFSX_UNSIGNED;
			nd->nd_flag |= ND_KERBFULL;
			nfsd->nfsd_flag |= NFSD_NEEDAUTH;
			break;
		case RPCAKN_NICKNAME:
			if (len != 2 * NFSX_UNSIGNED) {
				printf("Kerb nickname short\n");
				nd->nd_repstat = (NFSERR_AUTHERR|AUTH_BADCRED);
				nd->nd_procnum = NFSPROC_NOOP;
				return (0);
			}
			nickuid = fxdr_unsigned(uid_t, *tl);
			nfsm_dissect(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
			if (*tl++ != rpc_auth_kerb ||
				fxdr_unsigned(int, *tl) != 3 * NFSX_UNSIGNED) {
				printf("Kerb nick verifier bad\n");
				nd->nd_repstat = (NFSERR_AUTHERR|AUTH_BADVERF);
				nd->nd_procnum = NFSPROC_NOOP;
				return (0);
			}
			nfsm_dissect(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
			tvin.tv_sec = *tl++;
			tvin.tv_usec = *tl;

			for (nuidp = NUIDHASH(nfsd->nfsd_slp,nickuid)->lh_first;
			    nuidp != 0; nuidp = nuidp->nu_hash.le_next) {
				if (nuidp->nu_cr.cr_uid == nickuid &&
				    (!nd->nd_nam2 ||
				     netaddr_match(NU_NETFAM(nuidp),
				      &nuidp->nu_haddr, nd->nd_nam2)))
					break;
			}
			if (!nuidp) {
				nd->nd_repstat =
					(NFSERR_AUTHERR|AUTH_REJECTCRED);
				nd->nd_procnum = NFSPROC_NOOP;
				return (0);
			}

			/*
			 * Now, decrypt the timestamp using the session key
			 * and validate it.
			 */
#ifdef NFSKERB
			XXX
#endif

			tvout.tv_sec = fxdr_unsigned(long, tvout.tv_sec);
			tvout.tv_usec = fxdr_unsigned(long, tvout.tv_usec);
			if (nuidp->nu_expire < time_second ||
			    nuidp->nu_timestamp.tv_sec > tvout.tv_sec ||
			    (nuidp->nu_timestamp.tv_sec == tvout.tv_sec &&
			     nuidp->nu_timestamp.tv_usec > tvout.tv_usec)) {
				nuidp->nu_expire = 0;
				nd->nd_repstat =
				    (NFSERR_AUTHERR|AUTH_REJECTVERF);
				nd->nd_procnum = NFSPROC_NOOP;
				return (0);
			}
			nfsrv_setcred(&nuidp->nu_cr, &nd->nd_cr);
			nd->nd_flag |= ND_KERBNICK;
		};
	} else {
		nd->nd_repstat = (NFSERR_AUTHERR | AUTH_REJECTCRED);
		nd->nd_procnum = NFSPROC_NOOP;
		return (0);
	}

	/*
	 * For nqnfs, get piggybacked lease request.
	 */
	if (nqnfs && nd->nd_procnum != NQNFSPROC_EVICTED) {
		nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
		nd->nd_flag |= fxdr_unsigned(int, *tl);
		if (nd->nd_flag & ND_LEASE) {
			nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
			nd->nd_duration = fxdr_unsigned(int32_t, *tl);
		} else
			nd->nd_duration = NQ_MINLEASE;
	} else
		nd->nd_duration = NQ_MINLEASE;
	nd->nd_md = md;
	nd->nd_dpos = dpos;
	return (0);
nfsmout:
	return (error);
}

#endif

/*
 * Send a message to the originating process's terminal. The thread and/or
 * process may be NULL. YYY the thread should not be NULL but there may
 * still be some uio_td's that are still being passed as NULL through to
 * nfsm_request().
 */
static int
nfs_msg(struct thread *td, char *server, char *msg)
{
	tpr_t tpr;

	if (td && td->td_proc)
		tpr = tprintf_open(td->td_proc);
	else
		tpr = NULL;
	tprintf(tpr, "nfs server %s: %s\n", server, msg);
	tprintf_close(tpr);
	return (0);
}

#ifndef NFS_NOSERVER
/*
 * Socket upcall routine for the nfsd sockets.
 * The caddr_t arg is a pointer to the "struct nfssvc_sock".
 * Essentially do as much as possible non-blocking, else punt and it will
 * be called with MB_WAIT from an nfsd.
 */
void
nfsrv_rcv(struct socket *so, void *arg, int waitflag)
{
	struct nfssvc_sock *slp = (struct nfssvc_sock *)arg;
	struct mbuf *m;
	struct mbuf *mp;
	struct sockaddr *nam;
	struct uio auio;
	int flags, error;
	int nparallel_wakeup = 0;

	if ((slp->ns_flag & SLP_VALID) == 0)
		return;

	/*
	 * Do not allow an infinite number of completed RPC records to build
	 * up before we stop reading data from the socket. Otherwise we could
	 * end up holding onto an unreasonable number of mbufs for requests
	 * waiting for service.
	 *
	 * This should give pretty good feedback to the TCP
	 * layer and prevents a memory crunch for other protocols.
	 *
	 * Note that the same service socket can be dispatched to several
	 * nfs servers simultaneously.
	 *
	 * the tcp protocol callback calls us with MB_DONTWAIT.
	 * nfsd calls us with MB_WAIT (typically).
	 */
	if (waitflag == MB_DONTWAIT && slp->ns_numrec >= nfsd_waiting / 2 + 1) {
		slp->ns_flag |= SLP_NEEDQ;
		goto dorecs;
	}

	/*
	 * Handle protocol specifics to parse an RPC request.  We always
/*
 * Socket upcall routine for the nfsd sockets.
 * The caddr_t arg is a pointer to the "struct nfssvc_sock".
 * Essentially do as much as possible without blocking, else punt and this
 * routine will be called again with MB_WAIT from an nfsd.
 */
void
nfsrv_rcv(struct socket *so, void *arg, int waitflag)
{
	struct nfssvc_sock *slp = (struct nfssvc_sock *)arg;
	struct mbuf *m;
	struct mbuf *mp;
	struct sockaddr *nam;
	struct uio auio;
	int flags, error;
	int nparallel_wakeup = 0;

	if ((slp->ns_flag & SLP_VALID) == 0)
		return;

	/*
	 * Do not allow an unbounded number of completed RPC records to
	 * build up before we stop reading data from the socket.  Otherwise
	 * we could end up holding onto an unreasonable number of mbufs for
	 * requests waiting for service.
	 *
	 * This should give pretty good feedback to the TCP layer and
	 * prevent a memory crunch for other protocols.
	 *
	 * Note that the same service socket can be dispatched to several
	 * nfsd threads simultaneously.
	 *
	 * The TCP protocol callback calls us with MB_DONTWAIT.
	 * nfsd calls us with MB_WAIT (typically).
	 */
	if (waitflag == MB_DONTWAIT && slp->ns_numrec >= nfsd_waiting / 2 + 1) {
		slp->ns_flag |= SLP_NEEDQ;
		goto dorecs;
	}

	/*
	 * Handle protocol specifics to parse an RPC request.  We always
	 * pull from the socket using non-blocking I/O.
	 */
	auio.uio_td = NULL;
	if (so->so_type == SOCK_STREAM) {
		/*
		 * The data has to be read in an orderly fashion from a TCP
		 * stream, unlike a UDP socket.  It is possible for soreceive
		 * and/or nfsrv_getstream() to block, so make sure only one
		 * entity is messing around with the TCP stream at any given
		 * moment.  The receive sockbuf's lock in soreceive is not
		 * sufficient.
		 *
		 * Note that this procedure can be called from any number of
		 * NFS server threads *OR* can be upcalled directly from a
		 * TCP protocol thread.
		 */
		if (slp->ns_flag & SLP_GETSTREAM) {
			slp->ns_flag |= SLP_NEEDQ;
			goto dorecs;
		}
		slp->ns_flag |= SLP_GETSTREAM;

		/*
		 * Do soreceive().
		 */
		auio.uio_resid = 1000000000;
		flags = MSG_DONTWAIT;
		error = so_pru_soreceive(so, &nam, &auio, &mp, NULL, &flags);
		if (error || mp == (struct mbuf *)0) {
			if (error == EWOULDBLOCK)
				slp->ns_flag |= SLP_NEEDQ;
			else
				slp->ns_flag |= SLP_DISCONN;
			slp->ns_flag &= ~SLP_GETSTREAM;
			goto dorecs;
		}
		m = mp;
		if (slp->ns_rawend) {
			slp->ns_rawend->m_next = m;
			slp->ns_cc += 1000000000 - auio.uio_resid;
		} else {
			slp->ns_raw = m;
			slp->ns_cc = 1000000000 - auio.uio_resid;
		}
		while (m->m_next)
			m = m->m_next;
		slp->ns_rawend = m;

		/*
		 * Now try to parse as many records as we can out of the
		 * raw stream data.
		 */
		error = nfsrv_getstream(slp, waitflag, &nparallel_wakeup);
		if (error) {
			if (error == EPERM)
				slp->ns_flag |= SLP_DISCONN;
			else
				slp->ns_flag |= SLP_NEEDQ;
		}
		slp->ns_flag &= ~SLP_GETSTREAM;
	} else {
		/*
		 * For UDP, soreceive() typically pulls just one packet, so
		 * loop to get the whole batch.
		 */
		do {
			auio.uio_resid = 1000000000;
			flags = MSG_DONTWAIT;
			error = so_pru_soreceive(so, &nam, &auio, &mp, NULL,
			    &flags);
			if (mp) {
				struct nfsrv_rec *rec;
				int mf = (waitflag & MB_DONTWAIT) ?
					 M_NOWAIT : M_WAITOK;
				rec = malloc(sizeof(struct nfsrv_rec),
					     M_NFSRVDESC, mf);
				if (!rec) {
					if (nam)
						FREE(nam, M_SONAME);
					m_freem(mp);
					continue;
				}
				nfs_realign(&mp, 10 * NFSX_UNSIGNED);
				rec->nr_address = nam;
				rec->nr_packet = mp;
				STAILQ_INSERT_TAIL(&slp->ns_rec, rec, nr_link);
				++slp->ns_numrec;
				++nparallel_wakeup;
			}
			if (error) {
				if ((so->so_proto->pr_flags & PR_CONNREQUIRED)
				    && error != EWOULDBLOCK) {
					slp->ns_flag |= SLP_DISCONN;
					goto dorecs;
				}
			}
		} while (mp);
	}

	/*
	 * If we were upcalled from the tcp protocol layer and we have
	 * fully parsed records ready to go, or there is new data pending,
	 * or something went wrong, try to wake up an nfsd thread to deal
	 * with it.
	 */
dorecs:
	if (waitflag == MB_DONTWAIT && (slp->ns_numrec > 0
	    || (slp->ns_flag & (SLP_NEEDQ | SLP_DISCONN)))) {
		nfsrv_wakenfsd(slp, nparallel_wakeup);
	}
}
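
/*
 * For reference, the record mark that nfsrv_getstream() decodes below
 * follows the standard RPC record marking used on stream transports
 * (RFC 1831): each fragment is preceded by a 4-byte word in network
 * byte order whose low 31 bits give the fragment length and whose high
 * bit is set on the last fragment of a record.  For example:
 *
 *	0x80000064	last (or only) fragment, 0x64 bytes follow
 *	0x00000400	intermediate fragment, 0x400 bytes follow
 */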

/*
 * Try to extract an RPC request from the mbuf data list received on a
 * stream socket.  The "waitflag" argument indicates whether or not it
 * can sleep.
 */
static int
nfsrv_getstream(struct nfssvc_sock *slp, int waitflag, int *countp)
{
	struct mbuf *m, **mpp;
	char *cp1, *cp2;
	int len;
	struct mbuf *om, *m2, *recm;
	u_int32_t recmark;

	for (;;) {
		if (slp->ns_reclen == 0) {
			if (slp->ns_cc < NFSX_UNSIGNED)
				return (0);
			m = slp->ns_raw;
			if (m->m_len >= NFSX_UNSIGNED) {
				bcopy(mtod(m, caddr_t), (caddr_t)&recmark,
				    NFSX_UNSIGNED);
				m->m_data += NFSX_UNSIGNED;
				m->m_len -= NFSX_UNSIGNED;
			} else {
				cp1 = (caddr_t)&recmark;
				cp2 = mtod(m, caddr_t);
				while (cp1 < ((caddr_t)&recmark) + NFSX_UNSIGNED) {
					while (m->m_len == 0) {
						m = m->m_next;
						cp2 = mtod(m, caddr_t);
					}
					*cp1++ = *cp2++;
					m->m_data++;
					m->m_len--;
				}
			}
			slp->ns_cc -= NFSX_UNSIGNED;
			recmark = ntohl(recmark);
			slp->ns_reclen = recmark & ~0x80000000;
			if (recmark & 0x80000000)
				slp->ns_flag |= SLP_LASTFRAG;
			else
				slp->ns_flag &= ~SLP_LASTFRAG;
			if (slp->ns_reclen > NFS_MAXPACKET) {
				log(LOG_ERR, "%s (%d) from nfs client\n",
				    "impossible packet length",
				    slp->ns_reclen);
				return (EPERM);
			}
		}

		/*
		 * Now get the record part.
		 *
		 * Note that slp->ns_reclen may be 0.  Linux sometimes
		 * generates 0-length RPC records.
		 */
		recm = NULL;
		if (slp->ns_cc == slp->ns_reclen) {
			recm = slp->ns_raw;
			slp->ns_raw = slp->ns_rawend = (struct mbuf *)0;
			slp->ns_cc = slp->ns_reclen = 0;
		} else if (slp->ns_cc > slp->ns_reclen) {
			len = 0;
			m = slp->ns_raw;
			om = (struct mbuf *)0;

			while (len < slp->ns_reclen) {
				if ((len + m->m_len) > slp->ns_reclen) {
					m2 = m_copym(m, 0, slp->ns_reclen - len,
					    waitflag);
					if (m2) {
						if (om) {
							om->m_next = m2;
							recm = slp->ns_raw;
						} else
							recm = m2;
						m->m_data += slp->ns_reclen - len;
						m->m_len -= slp->ns_reclen - len;
						len = slp->ns_reclen;
					} else {
						return (EWOULDBLOCK);
					}
				} else if ((len + m->m_len) == slp->ns_reclen) {
					om = m;
					len += m->m_len;
					m = m->m_next;
					recm = slp->ns_raw;
					om->m_next = (struct mbuf *)0;
				} else {
					om = m;
					len += m->m_len;
					m = m->m_next;
				}
			}
			slp->ns_raw = m;
			slp->ns_cc -= len;
			slp->ns_reclen = 0;
		} else {
			return (0);
		}

		/*
		 * Accumulate the fragments into a record.
		 */
		mpp = &slp->ns_frag;
		while (*mpp)
			mpp = &((*mpp)->m_next);
		*mpp = recm;
		if (slp->ns_flag & SLP_LASTFRAG) {
			struct nfsrv_rec *rec;
			int mf = (waitflag & MB_DONTWAIT) ? M_NOWAIT : M_WAITOK;
			rec = malloc(sizeof(struct nfsrv_rec), M_NFSRVDESC, mf);
			if (!rec) {
				m_freem(slp->ns_frag);
			} else {
				nfs_realign(&slp->ns_frag, 10 * NFSX_UNSIGNED);
				rec->nr_address = (struct sockaddr *)0;
				rec->nr_packet = slp->ns_frag;
				STAILQ_INSERT_TAIL(&slp->ns_rec, rec, nr_link);
				++slp->ns_numrec;
				++*countp;
			}
			slp->ns_frag = (struct mbuf *)0;
		}
	}
}
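
/*
 * A rough sketch of how the records queued above are consumed.  The
 * real loop lives in the nfsd service code (nfssvc_nfsd() in
 * nfs_syscalls.c) and also handles replies, caching and errors; this
 * is for orientation only:
 *
 *	while (nfsrv_dorec(slp, nfsd, &nd) == 0) {
 *		... dispatch nd->nd_procnum, build and send the reply ...
 *	}
 */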

/*
 * Dequeue the next completed record from a service socket and parse
 * its RPC header.
 */
int
nfsrv_dorec(struct nfssvc_sock *slp, struct nfsd *nfsd,
	    struct nfsrv_descript **ndp)
{
	struct nfsrv_rec *rec;
	struct mbuf *m;
	struct sockaddr *nam;
	struct nfsrv_descript *nd;
	int error;

	*ndp = NULL;
	if ((slp->ns_flag & SLP_VALID) == 0 || !STAILQ_FIRST(&slp->ns_rec))
		return (ENOBUFS);
	rec = STAILQ_FIRST(&slp->ns_rec);
	STAILQ_REMOVE_HEAD(&slp->ns_rec, nr_link);
	KKASSERT(slp->ns_numrec > 0);
	--slp->ns_numrec;
	nam = rec->nr_address;
	m = rec->nr_packet;
	free(rec, M_NFSRVDESC);
	MALLOC(nd, struct nfsrv_descript *, sizeof (struct nfsrv_descript),
	    M_NFSRVDESC, M_WAITOK);
	nd->nd_md = nd->nd_mrep = m;
	nd->nd_nam2 = nam;
	nd->nd_dpos = mtod(m, caddr_t);
	error = nfs_getreq(nd, nfsd, TRUE);
	if (error) {
		if (nam) {
			FREE(nam, M_SONAME);
		}
		free((caddr_t)nd, M_NFSRVDESC);
		return (error);
	}
	*ndp = nd;
	nfsd->nfsd_nd = nd;
	return (0);
}

/*
 * Try to assign service sockets to nfsd threads based on the number
 * of new rpc requests that have been queued on the service socket.
 *
 * If no nfsd's are available or additional requests are pending, set the
 * NFSD_CHECKSLP flag so that one of the running nfsds will go look for
 * the work in the nfssvc_sock list when it is finished processing its
 * current work.  This flag is cleared only when an nfsd cannot find
 * any new work to perform.
 */
void
nfsrv_wakenfsd(struct nfssvc_sock *slp, int nparallel)
{
	struct nfsd *nd;

	if ((slp->ns_flag & SLP_VALID) == 0)
		return;
	if (nparallel <= 1)
		nparallel = 1;
	TAILQ_FOREACH(nd, &nfsd_head, nfsd_chain) {
		if (nd->nfsd_flag & NFSD_WAITING) {
			nd->nfsd_flag &= ~NFSD_WAITING;
			if (nd->nfsd_slp)
				panic("nfsd wakeup");
			slp->ns_sref++;
			nd->nfsd_slp = slp;
			wakeup((caddr_t)nd);
			if (--nparallel == 0)
				break;
		}
	}
	if (nparallel) {
		slp->ns_flag |= SLP_DOREC;
		nfsd_head_flag |= NFSD_CHECKSLP;
	}
}
#endif /* NFS_NOSERVER */