1 /* 2 * Copyright (c) 1989, 1991, 1993, 1995 3 * The Regents of the University of California. All rights reserved. 4 * 5 * This code is derived from software contributed to Berkeley by 6 * Rick Macklem at The University of Guelph. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 3. All advertising materials mentioning features or use of this software 17 * must display the following acknowledgement: 18 * This product includes software developed by the University of 19 * California, Berkeley and its contributors. 20 * 4. Neither the name of the University nor the names of its contributors 21 * may be used to endorse or promote products derived from this software 22 * without specific prior written permission. 23 * 24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 34 * SUCH DAMAGE. 35 * 36 * @(#)nfs_socket.c 8.5 (Berkeley) 3/30/95 37 * $FreeBSD: src/sys/nfs/nfs_socket.c,v 1.60.2.6 2003/03/26 01:44:46 alfred Exp $ 38 * $DragonFly: src/sys/vfs/nfs/nfs_socket.c,v 1.10 2003/09/03 14:30:57 hmp Exp $ 39 */ 40 41 /* 42 * Socket operations for use by nfs 43 */ 44 45 #include <sys/param.h> 46 #include <sys/systm.h> 47 #include <sys/proc.h> 48 #include <sys/malloc.h> 49 #include <sys/mount.h> 50 #include <sys/kernel.h> 51 #include <sys/mbuf.h> 52 #include <sys/vnode.h> 53 #include <sys/protosw.h> 54 #include <sys/socket.h> 55 #include <sys/socketvar.h> 56 #include <sys/syslog.h> 57 #include <sys/tprintf.h> 58 #include <sys/sysctl.h> 59 #include <sys/signalvar.h> 60 61 #include <netinet/in.h> 62 #include <netinet/tcp.h> 63 64 #include "rpcv2.h" 65 #include "nfsproto.h" 66 #include "nfs.h" 67 #include "xdr_subs.h" 68 #include "nfsm_subs.h" 69 #include "nfsmount.h" 70 #include "nfsnode.h" 71 #include "nfsrtt.h" 72 #include "nqnfs.h" 73 74 #define TRUE 1 75 #define FALSE 0 76 77 /* 78 * Estimate rto for an nfs rpc sent via. an unreliable datagram. 79 * Use the mean and mean deviation of rtt for the appropriate type of rpc 80 * for the frequent rpcs and a default for the others. 81 * The justification for doing "other" this way is that these rpcs 82 * happen so infrequently that timer est. would probably be stale. 83 * Also, since many of these rpcs are 84 * non-idempotent, a conservative timeout is desired. 85 * getattr, lookup - A+2D 86 * read, write - A+4D 87 * other - nm_timeo 88 */ 89 #define NFS_RTO(n, t) \ 90 ((t) == 0 ? (n)->nm_timeo : \ 91 ((t) < 3 ? \ 92 (((((n)->nm_srtt[t-1] + 3) >> 2) + (n)->nm_sdrtt[t-1] + 1) >> 1) : \ 93 ((((n)->nm_srtt[t-1] + 7) >> 3) + (n)->nm_sdrtt[t-1] + 1))) 94 #define NFS_SRTT(r) (r)->r_nmp->nm_srtt[proct[(r)->r_procnum] - 1] 95 #define NFS_SDRTT(r) (r)->r_nmp->nm_sdrtt[proct[(r)->r_procnum] - 1] 96 /* 97 * External data, mostly RPC constants in XDR form 98 */ 99 extern u_int32_t rpc_reply, rpc_msgdenied, rpc_mismatch, rpc_vers, 100 rpc_auth_unix, rpc_msgaccepted, rpc_call, rpc_autherr, 101 rpc_auth_kerb; 102 extern u_int32_t nfs_prog, nqnfs_prog; 103 extern time_t nqnfsstarttime; 104 extern struct nfsstats nfsstats; 105 extern int nfsv3_procid[NFS_NPROCS]; 106 extern int nfs_ticks; 107 108 /* 109 * Defines which timer to use for the procnum. 110 * 0 - default 111 * 1 - getattr 112 * 2 - lookup 113 * 3 - read 114 * 4 - write 115 */ 116 static int proct[NFS_NPROCS] = { 117 0, 1, 0, 2, 1, 3, 3, 4, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 0, 0, 0, 0, 0, 118 0, 0, 0, 119 }; 120 121 static int nfs_realign_test; 122 static int nfs_realign_count; 123 static int nfs_bufpackets = 4; 124 125 SYSCTL_DECL(_vfs_nfs); 126 127 SYSCTL_INT(_vfs_nfs, OID_AUTO, realign_test, CTLFLAG_RW, &nfs_realign_test, 0, ""); 128 SYSCTL_INT(_vfs_nfs, OID_AUTO, realign_count, CTLFLAG_RW, &nfs_realign_count, 0, ""); 129 SYSCTL_INT(_vfs_nfs, OID_AUTO, bufpackets, CTLFLAG_RW, &nfs_bufpackets, 0, ""); 130 131 132 /* 133 * There is a congestion window for outstanding rpcs maintained per mount 134 * point. The cwnd size is adjusted in roughly the way that: 135 * Van Jacobson, Congestion avoidance and Control, In "Proceedings of 136 * SIGCOMM '88". ACM, August 1988. 137 * describes for TCP. The cwnd size is chopped in half on a retransmit timeout 138 * and incremented by 1/cwnd when each rpc reply is received and a full cwnd 139 * of rpcs is in progress. 140 * (The sent count and cwnd are scaled for integer arith.) 141 * Variants of "slow start" were tried and were found to be too much of a 142 * performance hit (ave. rtt 3 times larger), 143 * I suspect due to the large rtt that nfs rpcs have. 144 */ 145 #define NFS_CWNDSCALE 256 146 #define NFS_MAXCWND (NFS_CWNDSCALE * 32) 147 static int nfs_backoff[8] = { 2, 4, 8, 16, 32, 64, 128, 256, }; 148 int nfsrtton = 0; 149 struct nfsrtt nfsrtt; 150 struct callout_handle nfs_timer_handle; 151 152 static int nfs_msg (struct thread *,char *,char *); 153 static int nfs_rcvlock (struct nfsreq *); 154 static void nfs_rcvunlock (struct nfsreq *); 155 static void nfs_realign (struct mbuf **pm, int hsiz); 156 static int nfs_receive (struct nfsreq *rep, struct sockaddr **aname, 157 struct mbuf **mp); 158 static void nfs_softterm (struct nfsreq *rep); 159 static int nfs_reconnect (struct nfsreq *rep); 160 #ifndef NFS_NOSERVER 161 static int nfsrv_getstream (struct nfssvc_sock *,int); 162 163 int (*nfsrv3_procs[NFS_NPROCS]) (struct nfsrv_descript *nd, 164 struct nfssvc_sock *slp, 165 struct thread *td, 166 struct mbuf **mreqp) = { 167 nfsrv_null, 168 nfsrv_getattr, 169 nfsrv_setattr, 170 nfsrv_lookup, 171 nfsrv3_access, 172 nfsrv_readlink, 173 nfsrv_read, 174 nfsrv_write, 175 nfsrv_create, 176 nfsrv_mkdir, 177 nfsrv_symlink, 178 nfsrv_mknod, 179 nfsrv_remove, 180 nfsrv_rmdir, 181 nfsrv_rename, 182 nfsrv_link, 183 nfsrv_readdir, 184 nfsrv_readdirplus, 185 nfsrv_statfs, 186 nfsrv_fsinfo, 187 nfsrv_pathconf, 188 nfsrv_commit, 189 nqnfsrv_getlease, 190 nqnfsrv_vacated, 191 nfsrv_noop, 192 nfsrv_noop 193 }; 194 #endif /* NFS_NOSERVER */ 195 196 /* 197 * Initialize sockets and congestion for a new NFS connection. 198 * We do not free the sockaddr if error. 199 */ 200 int 201 nfs_connect(struct nfsmount *nmp, struct nfsreq *rep) 202 { 203 struct socket *so; 204 int s, error, rcvreserve, sndreserve; 205 int pktscale; 206 struct sockaddr *saddr; 207 struct sockaddr_in *sin; 208 struct thread *td = &thread0; /* only used for socreate and sobind */ 209 210 nmp->nm_so = (struct socket *)0; 211 saddr = nmp->nm_nam; 212 error = socreate(saddr->sa_family, &nmp->nm_so, nmp->nm_sotype, 213 nmp->nm_soproto, td); 214 if (error) 215 goto bad; 216 so = nmp->nm_so; 217 nmp->nm_soflags = so->so_proto->pr_flags; 218 219 /* 220 * Some servers require that the client port be a reserved port number. 221 */ 222 if (saddr->sa_family == AF_INET && (nmp->nm_flag & NFSMNT_RESVPORT)) { 223 struct sockopt sopt; 224 int ip; 225 struct sockaddr_in ssin; 226 227 bzero(&sopt, sizeof sopt); 228 ip = IP_PORTRANGE_LOW; 229 sopt.sopt_dir = SOPT_SET; 230 sopt.sopt_level = IPPROTO_IP; 231 sopt.sopt_name = IP_PORTRANGE; 232 sopt.sopt_val = (void *)&ip; 233 sopt.sopt_valsize = sizeof(ip); 234 sopt.sopt_td = NULL; 235 error = sosetopt(so, &sopt); 236 if (error) 237 goto bad; 238 bzero(&ssin, sizeof ssin); 239 sin = &ssin; 240 sin->sin_len = sizeof (struct sockaddr_in); 241 sin->sin_family = AF_INET; 242 sin->sin_addr.s_addr = INADDR_ANY; 243 sin->sin_port = htons(0); 244 error = sobind(so, (struct sockaddr *)sin, td); 245 if (error) 246 goto bad; 247 bzero(&sopt, sizeof sopt); 248 ip = IP_PORTRANGE_DEFAULT; 249 sopt.sopt_dir = SOPT_SET; 250 sopt.sopt_level = IPPROTO_IP; 251 sopt.sopt_name = IP_PORTRANGE; 252 sopt.sopt_val = (void *)&ip; 253 sopt.sopt_valsize = sizeof(ip); 254 sopt.sopt_td = NULL; 255 error = sosetopt(so, &sopt); 256 if (error) 257 goto bad; 258 } 259 260 /* 261 * Protocols that do not require connections may be optionally left 262 * unconnected for servers that reply from a port other than NFS_PORT. 263 */ 264 if (nmp->nm_flag & NFSMNT_NOCONN) { 265 if (nmp->nm_soflags & PR_CONNREQUIRED) { 266 error = ENOTCONN; 267 goto bad; 268 } 269 } else { 270 error = soconnect(so, nmp->nm_nam, td); 271 if (error) 272 goto bad; 273 274 /* 275 * Wait for the connection to complete. Cribbed from the 276 * connect system call but with the wait timing out so 277 * that interruptible mounts don't hang here for a long time. 278 */ 279 s = splnet(); 280 while ((so->so_state & SS_ISCONNECTING) && so->so_error == 0) { 281 (void) tsleep((caddr_t)&so->so_timeo, 0, 282 "nfscon", 2 * hz); 283 if ((so->so_state & SS_ISCONNECTING) && 284 so->so_error == 0 && rep && 285 (error = nfs_sigintr(nmp, rep, rep->r_td)) != 0){ 286 so->so_state &= ~SS_ISCONNECTING; 287 splx(s); 288 goto bad; 289 } 290 } 291 if (so->so_error) { 292 error = so->so_error; 293 so->so_error = 0; 294 splx(s); 295 goto bad; 296 } 297 splx(s); 298 } 299 so->so_rcv.sb_timeo = (5 * hz); 300 so->so_snd.sb_timeo = (5 * hz); 301 302 /* 303 * Get buffer reservation size from sysctl, but impose reasonable 304 * limits. 305 */ 306 pktscale = nfs_bufpackets; 307 if (pktscale < 2) 308 pktscale = 2; 309 if (pktscale > 64) 310 pktscale = 64; 311 312 if (nmp->nm_sotype == SOCK_DGRAM) { 313 sndreserve = (nmp->nm_wsize + NFS_MAXPKTHDR) * pktscale; 314 rcvreserve = (max(nmp->nm_rsize, nmp->nm_readdirsize) + 315 NFS_MAXPKTHDR) * pktscale; 316 } else if (nmp->nm_sotype == SOCK_SEQPACKET) { 317 sndreserve = (nmp->nm_wsize + NFS_MAXPKTHDR) * pktscale; 318 rcvreserve = (max(nmp->nm_rsize, nmp->nm_readdirsize) + 319 NFS_MAXPKTHDR) * pktscale; 320 } else { 321 if (nmp->nm_sotype != SOCK_STREAM) 322 panic("nfscon sotype"); 323 if (so->so_proto->pr_flags & PR_CONNREQUIRED) { 324 struct sockopt sopt; 325 int val; 326 327 bzero(&sopt, sizeof sopt); 328 sopt.sopt_level = SOL_SOCKET; 329 sopt.sopt_name = SO_KEEPALIVE; 330 sopt.sopt_val = &val; 331 sopt.sopt_valsize = sizeof val; 332 val = 1; 333 sosetopt(so, &sopt); 334 } 335 if (so->so_proto->pr_protocol == IPPROTO_TCP) { 336 struct sockopt sopt; 337 int val; 338 339 bzero(&sopt, sizeof sopt); 340 sopt.sopt_level = IPPROTO_TCP; 341 sopt.sopt_name = TCP_NODELAY; 342 sopt.sopt_val = &val; 343 sopt.sopt_valsize = sizeof val; 344 val = 1; 345 sosetopt(so, &sopt); 346 } 347 sndreserve = (nmp->nm_wsize + NFS_MAXPKTHDR + 348 sizeof (u_int32_t)) * pktscale; 349 rcvreserve = (nmp->nm_rsize + NFS_MAXPKTHDR + 350 sizeof (u_int32_t)) * pktscale; 351 } 352 error = soreserve(so, sndreserve, rcvreserve); 353 if (error) 354 goto bad; 355 so->so_rcv.sb_flags |= SB_NOINTR; 356 so->so_snd.sb_flags |= SB_NOINTR; 357 358 /* Initialize other non-zero congestion variables */ 359 nmp->nm_srtt[0] = nmp->nm_srtt[1] = nmp->nm_srtt[2] = 360 nmp->nm_srtt[3] = (NFS_TIMEO << 3); 361 nmp->nm_sdrtt[0] = nmp->nm_sdrtt[1] = nmp->nm_sdrtt[2] = 362 nmp->nm_sdrtt[3] = 0; 363 nmp->nm_cwnd = NFS_MAXCWND / 2; /* Initial send window */ 364 nmp->nm_sent = 0; 365 nmp->nm_timeouts = 0; 366 return (0); 367 368 bad: 369 nfs_disconnect(nmp); 370 return (error); 371 } 372 373 /* 374 * Reconnect routine: 375 * Called when a connection is broken on a reliable protocol. 376 * - clean up the old socket 377 * - nfs_connect() again 378 * - set R_MUSTRESEND for all outstanding requests on mount point 379 * If this fails the mount point is DEAD! 380 * nb: Must be called with the nfs_sndlock() set on the mount point. 381 */ 382 static int 383 nfs_reconnect(rep) 384 struct nfsreq *rep; 385 { 386 struct nfsreq *rp; 387 struct nfsmount *nmp = rep->r_nmp; 388 int error; 389 390 nfs_disconnect(nmp); 391 while ((error = nfs_connect(nmp, rep)) != 0) { 392 if (error == EINTR || error == ERESTART) 393 return (EINTR); 394 (void) tsleep((caddr_t)&lbolt, 0, "nfscon", 0); 395 } 396 397 /* 398 * Loop through outstanding request list and fix up all requests 399 * on old socket. 400 */ 401 for (rp = nfs_reqq.tqh_first; rp != 0; rp = rp->r_chain.tqe_next) { 402 if (rp->r_nmp == nmp) 403 rp->r_flags |= R_MUSTRESEND; 404 } 405 return (0); 406 } 407 408 /* 409 * NFS disconnect. Clean up and unlink. 410 */ 411 void 412 nfs_disconnect(nmp) 413 struct nfsmount *nmp; 414 { 415 struct socket *so; 416 417 if (nmp->nm_so) { 418 so = nmp->nm_so; 419 nmp->nm_so = (struct socket *)0; 420 soshutdown(so, 2); 421 soclose(so); 422 } 423 } 424 425 void 426 nfs_safedisconnect(nmp) 427 struct nfsmount *nmp; 428 { 429 struct nfsreq dummyreq; 430 431 bzero(&dummyreq, sizeof(dummyreq)); 432 dummyreq.r_nmp = nmp; 433 dummyreq.r_td = NULL; 434 nfs_rcvlock(&dummyreq); 435 nfs_disconnect(nmp); 436 nfs_rcvunlock(&dummyreq); 437 } 438 439 /* 440 * This is the nfs send routine. For connection based socket types, it 441 * must be called with an nfs_sndlock() on the socket. 442 * "rep == NULL" indicates that it has been called from a server. 443 * For the client side: 444 * - return EINTR if the RPC is terminated, 0 otherwise 445 * - set R_MUSTRESEND if the send fails for any reason 446 * - do any cleanup required by recoverable socket errors (?) 447 * For the server side: 448 * - return EINTR or ERESTART if interrupted by a signal 449 * - return EPIPE if a connection is lost for connection based sockets (TCP...) 450 * - do any cleanup required by recoverable socket errors (?) 451 */ 452 int 453 nfs_send(so, nam, top, rep) 454 struct socket *so; 455 struct sockaddr *nam; 456 struct mbuf *top; 457 struct nfsreq *rep; 458 { 459 struct sockaddr *sendnam; 460 int error, soflags, flags; 461 462 if (rep) { 463 if (rep->r_flags & R_SOFTTERM) { 464 m_freem(top); 465 return (EINTR); 466 } 467 if ((so = rep->r_nmp->nm_so) == NULL) { 468 rep->r_flags |= R_MUSTRESEND; 469 m_freem(top); 470 return (0); 471 } 472 rep->r_flags &= ~R_MUSTRESEND; 473 soflags = rep->r_nmp->nm_soflags; 474 } else 475 soflags = so->so_proto->pr_flags; 476 if ((soflags & PR_CONNREQUIRED) || (so->so_state & SS_ISCONNECTED)) 477 sendnam = (struct sockaddr *)0; 478 else 479 sendnam = nam; 480 if (so->so_type == SOCK_SEQPACKET) 481 flags = MSG_EOR; 482 else 483 flags = 0; 484 485 error = so->so_proto->pr_usrreqs->pru_sosend 486 (so, sendnam, 0, top, 0, flags, curthread /*XXX*/); 487 /* 488 * ENOBUFS for dgram sockets is transient and non fatal. 489 * No need to log, and no need to break a soft mount. 490 */ 491 if (error == ENOBUFS && so->so_type == SOCK_DGRAM) { 492 error = 0; 493 if (rep) /* do backoff retransmit on client */ 494 rep->r_flags |= R_MUSTRESEND; 495 } 496 497 if (error) { 498 if (rep) { 499 log(LOG_INFO, "nfs send error %d for server %s\n",error, 500 rep->r_nmp->nm_mountp->mnt_stat.f_mntfromname); 501 /* 502 * Deal with errors for the client side. 503 */ 504 if (rep->r_flags & R_SOFTTERM) 505 error = EINTR; 506 else 507 rep->r_flags |= R_MUSTRESEND; 508 } else 509 log(LOG_INFO, "nfsd send error %d\n", error); 510 511 /* 512 * Handle any recoverable (soft) socket errors here. (?) 513 */ 514 if (error != EINTR && error != ERESTART && 515 error != EWOULDBLOCK && error != EPIPE) 516 error = 0; 517 } 518 return (error); 519 } 520 521 /* 522 * Receive a Sun RPC Request/Reply. For SOCK_DGRAM, the work is all 523 * done by soreceive(), but for SOCK_STREAM we must deal with the Record 524 * Mark and consolidate the data into a new mbuf list. 525 * nb: Sometimes TCP passes the data up to soreceive() in long lists of 526 * small mbufs. 527 * For SOCK_STREAM we must be very careful to read an entire record once 528 * we have read any of it, even if the system call has been interrupted. 529 */ 530 static int 531 nfs_receive(struct nfsreq *rep, struct sockaddr **aname, struct mbuf **mp) 532 { 533 struct socket *so; 534 struct uio auio; 535 struct iovec aio; 536 struct mbuf *m; 537 struct mbuf *control; 538 u_int32_t len; 539 struct sockaddr **getnam; 540 int error, sotype, rcvflg; 541 struct thread *td = curthread; /* XXX */ 542 543 /* 544 * Set up arguments for soreceive() 545 */ 546 *mp = (struct mbuf *)0; 547 *aname = (struct sockaddr *)0; 548 sotype = rep->r_nmp->nm_sotype; 549 550 /* 551 * For reliable protocols, lock against other senders/receivers 552 * in case a reconnect is necessary. 553 * For SOCK_STREAM, first get the Record Mark to find out how much 554 * more there is to get. 555 * We must lock the socket against other receivers 556 * until we have an entire rpc request/reply. 557 */ 558 if (sotype != SOCK_DGRAM) { 559 error = nfs_sndlock(rep); 560 if (error) 561 return (error); 562 tryagain: 563 /* 564 * Check for fatal errors and resending request. 565 */ 566 /* 567 * Ugh: If a reconnect attempt just happened, nm_so 568 * would have changed. NULL indicates a failed 569 * attempt that has essentially shut down this 570 * mount point. 571 */ 572 if (rep->r_mrep || (rep->r_flags & R_SOFTTERM)) { 573 nfs_sndunlock(rep); 574 return (EINTR); 575 } 576 so = rep->r_nmp->nm_so; 577 if (!so) { 578 error = nfs_reconnect(rep); 579 if (error) { 580 nfs_sndunlock(rep); 581 return (error); 582 } 583 goto tryagain; 584 } 585 while (rep->r_flags & R_MUSTRESEND) { 586 m = m_copym(rep->r_mreq, 0, M_COPYALL, M_WAIT); 587 nfsstats.rpcretries++; 588 error = nfs_send(so, rep->r_nmp->nm_nam, m, rep); 589 if (error) { 590 if (error == EINTR || error == ERESTART || 591 (error = nfs_reconnect(rep)) != 0) { 592 nfs_sndunlock(rep); 593 return (error); 594 } 595 goto tryagain; 596 } 597 } 598 nfs_sndunlock(rep); 599 if (sotype == SOCK_STREAM) { 600 aio.iov_base = (caddr_t) &len; 601 aio.iov_len = sizeof(u_int32_t); 602 auio.uio_iov = &aio; 603 auio.uio_iovcnt = 1; 604 auio.uio_segflg = UIO_SYSSPACE; 605 auio.uio_rw = UIO_READ; 606 auio.uio_offset = 0; 607 auio.uio_resid = sizeof(u_int32_t); 608 auio.uio_td = td; 609 do { 610 rcvflg = MSG_WAITALL; 611 error = so->so_proto->pr_usrreqs->pru_soreceive 612 (so, (struct sockaddr **)0, &auio, 613 (struct mbuf **)0, (struct mbuf **)0, 614 &rcvflg); 615 if (error == EWOULDBLOCK && rep) { 616 if (rep->r_flags & R_SOFTTERM) 617 return (EINTR); 618 } 619 } while (error == EWOULDBLOCK); 620 if (!error && auio.uio_resid > 0) { 621 /* 622 * Don't log a 0 byte receive; it means 623 * that the socket has been closed, and 624 * can happen during normal operation 625 * (forcible unmount or Solaris server). 626 */ 627 if (auio.uio_resid != sizeof (u_int32_t)) 628 log(LOG_INFO, 629 "short receive (%d/%d) from nfs server %s\n", 630 (int)(sizeof(u_int32_t) - auio.uio_resid), 631 (int)sizeof(u_int32_t), 632 rep->r_nmp->nm_mountp->mnt_stat.f_mntfromname); 633 error = EPIPE; 634 } 635 if (error) 636 goto errout; 637 len = ntohl(len) & ~0x80000000; 638 /* 639 * This is SERIOUS! We are out of sync with the sender 640 * and forcing a disconnect/reconnect is all I can do. 641 */ 642 if (len > NFS_MAXPACKET) { 643 log(LOG_ERR, "%s (%d) from nfs server %s\n", 644 "impossible packet length", 645 len, 646 rep->r_nmp->nm_mountp->mnt_stat.f_mntfromname); 647 error = EFBIG; 648 goto errout; 649 } 650 auio.uio_resid = len; 651 do { 652 rcvflg = MSG_WAITALL; 653 error = so->so_proto->pr_usrreqs->pru_soreceive 654 (so, (struct sockaddr **)0, 655 &auio, mp, (struct mbuf **)0, &rcvflg); 656 } while (error == EWOULDBLOCK || error == EINTR || 657 error == ERESTART); 658 if (!error && auio.uio_resid > 0) { 659 if (len != auio.uio_resid) 660 log(LOG_INFO, 661 "short receive (%d/%d) from nfs server %s\n", 662 len - auio.uio_resid, len, 663 rep->r_nmp->nm_mountp->mnt_stat.f_mntfromname); 664 error = EPIPE; 665 } 666 } else { 667 /* 668 * NB: Since uio_resid is big, MSG_WAITALL is ignored 669 * and soreceive() will return when it has either a 670 * control msg or a data msg. 671 * We have no use for control msg., but must grab them 672 * and then throw them away so we know what is going 673 * on. 674 */ 675 auio.uio_resid = len = 100000000; /* Anything Big */ 676 auio.uio_td = td; 677 do { 678 rcvflg = 0; 679 error = so->so_proto->pr_usrreqs->pru_soreceive 680 (so, (struct sockaddr **)0, 681 &auio, mp, &control, &rcvflg); 682 if (control) 683 m_freem(control); 684 if (error == EWOULDBLOCK && rep) { 685 if (rep->r_flags & R_SOFTTERM) 686 return (EINTR); 687 } 688 } while (error == EWOULDBLOCK || 689 (!error && *mp == NULL && control)); 690 if ((rcvflg & MSG_EOR) == 0) 691 printf("Egad!!\n"); 692 if (!error && *mp == NULL) 693 error = EPIPE; 694 len -= auio.uio_resid; 695 } 696 errout: 697 if (error && error != EINTR && error != ERESTART) { 698 m_freem(*mp); 699 *mp = (struct mbuf *)0; 700 if (error != EPIPE) 701 log(LOG_INFO, 702 "receive error %d from nfs server %s\n", 703 error, 704 rep->r_nmp->nm_mountp->mnt_stat.f_mntfromname); 705 error = nfs_sndlock(rep); 706 if (!error) { 707 error = nfs_reconnect(rep); 708 if (!error) 709 goto tryagain; 710 else 711 nfs_sndunlock(rep); 712 } 713 } 714 } else { 715 if ((so = rep->r_nmp->nm_so) == NULL) 716 return (EACCES); 717 if (so->so_state & SS_ISCONNECTED) 718 getnam = (struct sockaddr **)0; 719 else 720 getnam = aname; 721 auio.uio_resid = len = 1000000; 722 auio.uio_td = td; 723 do { 724 rcvflg = 0; 725 error = so->so_proto->pr_usrreqs->pru_soreceive 726 (so, getnam, &auio, mp, 727 (struct mbuf **)0, &rcvflg); 728 if (error == EWOULDBLOCK && 729 (rep->r_flags & R_SOFTTERM)) 730 return (EINTR); 731 } while (error == EWOULDBLOCK); 732 len -= auio.uio_resid; 733 } 734 if (error) { 735 m_freem(*mp); 736 *mp = (struct mbuf *)0; 737 } 738 /* 739 * Search for any mbufs that are not a multiple of 4 bytes long 740 * or with m_data not longword aligned. 741 * These could cause pointer alignment problems, so copy them to 742 * well aligned mbufs. 743 */ 744 nfs_realign(mp, 5 * NFSX_UNSIGNED); 745 return (error); 746 } 747 748 /* 749 * Implement receipt of reply on a socket. 750 * We must search through the list of received datagrams matching them 751 * with outstanding requests using the xid, until ours is found. 752 */ 753 /* ARGSUSED */ 754 int 755 nfs_reply(myrep) 756 struct nfsreq *myrep; 757 { 758 struct nfsreq *rep; 759 struct nfsmount *nmp = myrep->r_nmp; 760 int32_t t1; 761 struct mbuf *mrep, *md; 762 struct sockaddr *nam; 763 u_int32_t rxid, *tl; 764 caddr_t dpos, cp2; 765 int error; 766 767 /* 768 * Loop around until we get our own reply 769 */ 770 for (;;) { 771 /* 772 * Lock against other receivers so that I don't get stuck in 773 * sbwait() after someone else has received my reply for me. 774 * Also necessary for connection based protocols to avoid 775 * race conditions during a reconnect. 776 * If nfs_rcvlock() returns EALREADY, that means that 777 * the reply has already been recieved by another 778 * process and we can return immediately. In this 779 * case, the lock is not taken to avoid races with 780 * other processes. 781 */ 782 error = nfs_rcvlock(myrep); 783 if (error == EALREADY) 784 return (0); 785 if (error) 786 return (error); 787 /* 788 * Get the next Rpc reply off the socket 789 */ 790 error = nfs_receive(myrep, &nam, &mrep); 791 nfs_rcvunlock(myrep); 792 if (error) { 793 794 /* 795 * Ignore routing errors on connectionless protocols?? 796 */ 797 if (NFSIGNORE_SOERROR(nmp->nm_soflags, error)) { 798 nmp->nm_so->so_error = 0; 799 if (myrep->r_flags & R_GETONEREP) 800 return (0); 801 continue; 802 } 803 return (error); 804 } 805 if (nam) 806 FREE(nam, M_SONAME); 807 808 /* 809 * Get the xid and check that it is an rpc reply 810 */ 811 md = mrep; 812 dpos = mtod(md, caddr_t); 813 nfsm_dissect(tl, u_int32_t *, 2*NFSX_UNSIGNED); 814 rxid = *tl++; 815 if (*tl != rpc_reply) { 816 #ifndef NFS_NOSERVER 817 if (nmp->nm_flag & NFSMNT_NQNFS) { 818 if (nqnfs_callback(nmp, mrep, md, dpos)) 819 nfsstats.rpcinvalid++; 820 } else { 821 nfsstats.rpcinvalid++; 822 m_freem(mrep); 823 } 824 #else 825 nfsstats.rpcinvalid++; 826 m_freem(mrep); 827 #endif 828 nfsmout: 829 if (myrep->r_flags & R_GETONEREP) 830 return (0); 831 continue; 832 } 833 834 /* 835 * Loop through the request list to match up the reply 836 * Iff no match, just drop the datagram 837 */ 838 for (rep = nfs_reqq.tqh_first; rep != 0; 839 rep = rep->r_chain.tqe_next) { 840 if (rep->r_mrep == NULL && rxid == rep->r_xid) { 841 /* Found it.. */ 842 rep->r_mrep = mrep; 843 rep->r_md = md; 844 rep->r_dpos = dpos; 845 if (nfsrtton) { 846 struct rttl *rt; 847 848 rt = &nfsrtt.rttl[nfsrtt.pos]; 849 rt->proc = rep->r_procnum; 850 rt->rto = NFS_RTO(nmp, proct[rep->r_procnum]); 851 rt->sent = nmp->nm_sent; 852 rt->cwnd = nmp->nm_cwnd; 853 rt->srtt = nmp->nm_srtt[proct[rep->r_procnum] - 1]; 854 rt->sdrtt = nmp->nm_sdrtt[proct[rep->r_procnum] - 1]; 855 rt->fsid = nmp->nm_mountp->mnt_stat.f_fsid; 856 getmicrotime(&rt->tstamp); 857 if (rep->r_flags & R_TIMING) 858 rt->rtt = rep->r_rtt; 859 else 860 rt->rtt = 1000000; 861 nfsrtt.pos = (nfsrtt.pos + 1) % NFSRTTLOGSIZ; 862 } 863 /* 864 * Update congestion window. 865 * Do the additive increase of 866 * one rpc/rtt. 867 */ 868 if (nmp->nm_cwnd <= nmp->nm_sent) { 869 nmp->nm_cwnd += 870 (NFS_CWNDSCALE * NFS_CWNDSCALE + 871 (nmp->nm_cwnd >> 1)) / nmp->nm_cwnd; 872 if (nmp->nm_cwnd > NFS_MAXCWND) 873 nmp->nm_cwnd = NFS_MAXCWND; 874 } 875 if (rep->r_flags & R_SENT) { 876 rep->r_flags &= ~R_SENT; 877 nmp->nm_sent -= NFS_CWNDSCALE; 878 } 879 /* 880 * Update rtt using a gain of 0.125 on the mean 881 * and a gain of 0.25 on the deviation. 882 */ 883 if (rep->r_flags & R_TIMING) { 884 /* 885 * Since the timer resolution of 886 * NFS_HZ is so course, it can often 887 * result in r_rtt == 0. Since 888 * r_rtt == N means that the actual 889 * rtt is between N+dt and N+2-dt ticks, 890 * add 1. 891 */ 892 t1 = rep->r_rtt + 1; 893 t1 -= (NFS_SRTT(rep) >> 3); 894 NFS_SRTT(rep) += t1; 895 if (t1 < 0) 896 t1 = -t1; 897 t1 -= (NFS_SDRTT(rep) >> 2); 898 NFS_SDRTT(rep) += t1; 899 } 900 nmp->nm_timeouts = 0; 901 break; 902 } 903 } 904 /* 905 * If not matched to a request, drop it. 906 * If it's mine, get out. 907 */ 908 if (rep == 0) { 909 nfsstats.rpcunexpected++; 910 m_freem(mrep); 911 } else if (rep == myrep) { 912 if (rep->r_mrep == NULL) 913 panic("nfsreply nil"); 914 return (0); 915 } 916 if (myrep->r_flags & R_GETONEREP) 917 return (0); 918 } 919 } 920 921 /* 922 * nfs_request - goes something like this 923 * - fill in request struct 924 * - links it into list 925 * - calls nfs_send() for first transmit 926 * - calls nfs_receive() to get reply 927 * - break down rpc header and return with nfs reply pointed to 928 * by mrep or error 929 * nb: always frees up mreq mbuf list 930 */ 931 int 932 nfs_request(vp, mrest, procnum, td, cred, mrp, mdp, dposp) 933 struct vnode *vp; 934 struct mbuf *mrest; 935 int procnum; 936 struct thread *td; 937 struct ucred *cred; 938 struct mbuf **mrp; 939 struct mbuf **mdp; 940 caddr_t *dposp; 941 { 942 struct mbuf *mrep, *m2; 943 struct nfsreq *rep; 944 u_int32_t *tl; 945 int i; 946 struct nfsmount *nmp; 947 struct mbuf *m, *md, *mheadend; 948 struct nfsnode *np; 949 char nickv[RPCX_NICKVERF]; 950 time_t reqtime, waituntil; 951 caddr_t dpos, cp2; 952 int t1, nqlflag, cachable, s, error = 0, mrest_len, auth_len, auth_type; 953 int trylater_delay = NQ_TRYLATERDEL, trylater_cnt = 0, failed_auth = 0; 954 int verf_len, verf_type; 955 u_int32_t xid; 956 u_quad_t frev; 957 char *auth_str, *verf_str; 958 NFSKERBKEY_T key; /* save session key */ 959 960 /* Reject requests while attempting a forced unmount. */ 961 if (vp->v_mount->mnt_kern_flag & MNTK_UNMOUNTF) { 962 m_freem(mrest); 963 return (ESTALE); 964 } 965 nmp = VFSTONFS(vp->v_mount); 966 MALLOC(rep, struct nfsreq *, sizeof(struct nfsreq), M_NFSREQ, M_WAITOK); 967 rep->r_nmp = nmp; 968 rep->r_vp = vp; 969 rep->r_td = td; 970 rep->r_procnum = procnum; 971 i = 0; 972 m = mrest; 973 while (m) { 974 i += m->m_len; 975 m = m->m_next; 976 } 977 mrest_len = i; 978 979 /* 980 * Get the RPC header with authorization. 981 */ 982 kerbauth: 983 verf_str = auth_str = (char *)0; 984 if (nmp->nm_flag & NFSMNT_KERB) { 985 verf_str = nickv; 986 verf_len = sizeof (nickv); 987 auth_type = RPCAUTH_KERB4; 988 bzero((caddr_t)key, sizeof (key)); 989 if (failed_auth || nfs_getnickauth(nmp, cred, &auth_str, 990 &auth_len, verf_str, verf_len)) { 991 error = nfs_getauth(nmp, rep, cred, &auth_str, 992 &auth_len, verf_str, &verf_len, key); 993 if (error) { 994 free((caddr_t)rep, M_NFSREQ); 995 m_freem(mrest); 996 return (error); 997 } 998 } 999 } else { 1000 auth_type = RPCAUTH_UNIX; 1001 if (cred->cr_ngroups < 1) 1002 panic("nfsreq nogrps"); 1003 auth_len = ((((cred->cr_ngroups - 1) > nmp->nm_numgrps) ? 1004 nmp->nm_numgrps : (cred->cr_ngroups - 1)) << 2) + 1005 5 * NFSX_UNSIGNED; 1006 } 1007 m = nfsm_rpchead(cred, nmp->nm_flag, procnum, auth_type, auth_len, 1008 auth_str, verf_len, verf_str, mrest, mrest_len, &mheadend, &xid); 1009 if (auth_str) 1010 free(auth_str, M_TEMP); 1011 1012 /* 1013 * For stream protocols, insert a Sun RPC Record Mark. 1014 */ 1015 if (nmp->nm_sotype == SOCK_STREAM) { 1016 M_PREPEND(m, NFSX_UNSIGNED, M_WAIT); 1017 if (m == NULL) 1018 return (ENOBUFS); 1019 *mtod(m, u_int32_t *) = htonl(0x80000000 | 1020 (m->m_pkthdr.len - NFSX_UNSIGNED)); 1021 } 1022 rep->r_mreq = m; 1023 rep->r_xid = xid; 1024 tryagain: 1025 if (nmp->nm_flag & NFSMNT_SOFT) 1026 rep->r_retry = nmp->nm_retry; 1027 else 1028 rep->r_retry = NFS_MAXREXMIT + 1; /* past clip limit */ 1029 rep->r_rtt = rep->r_rexmit = 0; 1030 if (proct[procnum] > 0) 1031 rep->r_flags = R_TIMING; 1032 else 1033 rep->r_flags = 0; 1034 rep->r_mrep = NULL; 1035 1036 /* 1037 * Do the client side RPC. 1038 */ 1039 nfsstats.rpcrequests++; 1040 /* 1041 * Chain request into list of outstanding requests. Be sure 1042 * to put it LAST so timer finds oldest requests first. 1043 */ 1044 s = splsoftclock(); 1045 TAILQ_INSERT_TAIL(&nfs_reqq, rep, r_chain); 1046 1047 /* Get send time for nqnfs */ 1048 reqtime = time_second; 1049 1050 /* 1051 * If backing off another request or avoiding congestion, don't 1052 * send this one now but let timer do it. If not timing a request, 1053 * do it now. 1054 */ 1055 if (nmp->nm_so && (nmp->nm_sotype != SOCK_DGRAM || 1056 (nmp->nm_flag & NFSMNT_DUMBTIMR) || 1057 nmp->nm_sent < nmp->nm_cwnd)) { 1058 splx(s); 1059 if (nmp->nm_soflags & PR_CONNREQUIRED) 1060 error = nfs_sndlock(rep); 1061 if (!error) { 1062 m2 = m_copym(m, 0, M_COPYALL, M_WAIT); 1063 error = nfs_send(nmp->nm_so, nmp->nm_nam, m2, rep); 1064 if (nmp->nm_soflags & PR_CONNREQUIRED) 1065 nfs_sndunlock(rep); 1066 } 1067 if (!error && (rep->r_flags & R_MUSTRESEND) == 0) { 1068 nmp->nm_sent += NFS_CWNDSCALE; 1069 rep->r_flags |= R_SENT; 1070 } 1071 } else { 1072 splx(s); 1073 rep->r_rtt = -1; 1074 } 1075 1076 /* 1077 * Wait for the reply from our send or the timer's. 1078 */ 1079 if (!error || error == EPIPE) 1080 error = nfs_reply(rep); 1081 1082 /* 1083 * RPC done, unlink the request. 1084 */ 1085 s = splsoftclock(); 1086 TAILQ_REMOVE(&nfs_reqq, rep, r_chain); 1087 splx(s); 1088 1089 /* 1090 * Decrement the outstanding request count. 1091 */ 1092 if (rep->r_flags & R_SENT) { 1093 rep->r_flags &= ~R_SENT; /* paranoia */ 1094 nmp->nm_sent -= NFS_CWNDSCALE; 1095 } 1096 1097 /* 1098 * If there was a successful reply and a tprintf msg. 1099 * tprintf a response. 1100 */ 1101 if (!error && (rep->r_flags & R_TPRINTFMSG)) 1102 nfs_msg(rep->r_td, nmp->nm_mountp->mnt_stat.f_mntfromname, 1103 "is alive again"); 1104 mrep = rep->r_mrep; 1105 md = rep->r_md; 1106 dpos = rep->r_dpos; 1107 if (error) { 1108 m_freem(rep->r_mreq); 1109 free((caddr_t)rep, M_NFSREQ); 1110 return (error); 1111 } 1112 1113 /* 1114 * break down the rpc header and check if ok 1115 */ 1116 nfsm_dissect(tl, u_int32_t *, 3 * NFSX_UNSIGNED); 1117 if (*tl++ == rpc_msgdenied) { 1118 if (*tl == rpc_mismatch) 1119 error = EOPNOTSUPP; 1120 else if ((nmp->nm_flag & NFSMNT_KERB) && *tl++ == rpc_autherr) { 1121 if (!failed_auth) { 1122 failed_auth++; 1123 mheadend->m_next = (struct mbuf *)0; 1124 m_freem(mrep); 1125 m_freem(rep->r_mreq); 1126 goto kerbauth; 1127 } else 1128 error = EAUTH; 1129 } else 1130 error = EACCES; 1131 m_freem(mrep); 1132 m_freem(rep->r_mreq); 1133 free((caddr_t)rep, M_NFSREQ); 1134 return (error); 1135 } 1136 1137 /* 1138 * Grab any Kerberos verifier, otherwise just throw it away. 1139 */ 1140 verf_type = fxdr_unsigned(int, *tl++); 1141 i = fxdr_unsigned(int32_t, *tl); 1142 if ((nmp->nm_flag & NFSMNT_KERB) && verf_type == RPCAUTH_KERB4) { 1143 error = nfs_savenickauth(nmp, cred, i, key, &md, &dpos, mrep); 1144 if (error) 1145 goto nfsmout; 1146 } else if (i > 0) 1147 nfsm_adv(nfsm_rndup(i)); 1148 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED); 1149 /* 0 == ok */ 1150 if (*tl == 0) { 1151 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED); 1152 if (*tl != 0) { 1153 error = fxdr_unsigned(int, *tl); 1154 if ((nmp->nm_flag & NFSMNT_NFSV3) && 1155 error == NFSERR_TRYLATER) { 1156 m_freem(mrep); 1157 error = 0; 1158 waituntil = time_second + trylater_delay; 1159 while (time_second < waituntil) 1160 (void) tsleep((caddr_t)&lbolt, 1161 0, "nqnfstry", 0); 1162 trylater_delay *= nfs_backoff[trylater_cnt]; 1163 if (trylater_cnt < 7) 1164 trylater_cnt++; 1165 goto tryagain; 1166 } 1167 1168 /* 1169 * If the File Handle was stale, invalidate the 1170 * lookup cache, just in case. 1171 */ 1172 if (error == ESTALE) 1173 cache_purge(vp); 1174 if (nmp->nm_flag & NFSMNT_NFSV3) { 1175 *mrp = mrep; 1176 *mdp = md; 1177 *dposp = dpos; 1178 error |= NFSERR_RETERR; 1179 } else 1180 m_freem(mrep); 1181 m_freem(rep->r_mreq); 1182 free((caddr_t)rep, M_NFSREQ); 1183 return (error); 1184 } 1185 1186 /* 1187 * For nqnfs, get any lease in reply 1188 */ 1189 if (nmp->nm_flag & NFSMNT_NQNFS) { 1190 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED); 1191 if (*tl) { 1192 np = VTONFS(vp); 1193 nqlflag = fxdr_unsigned(int, *tl); 1194 nfsm_dissect(tl, u_int32_t *, 4*NFSX_UNSIGNED); 1195 cachable = fxdr_unsigned(int, *tl++); 1196 reqtime += fxdr_unsigned(int, *tl++); 1197 if (reqtime > time_second) { 1198 frev = fxdr_hyper(tl); 1199 nqnfs_clientlease(nmp, np, nqlflag, 1200 cachable, reqtime, frev); 1201 } 1202 } 1203 } 1204 *mrp = mrep; 1205 *mdp = md; 1206 *dposp = dpos; 1207 m_freem(rep->r_mreq); 1208 FREE((caddr_t)rep, M_NFSREQ); 1209 return (0); 1210 } 1211 m_freem(mrep); 1212 error = EPROTONOSUPPORT; 1213 nfsmout: 1214 m_freem(rep->r_mreq); 1215 free((caddr_t)rep, M_NFSREQ); 1216 return (error); 1217 } 1218 1219 #ifndef NFS_NOSERVER 1220 /* 1221 * Generate the rpc reply header 1222 * siz arg. is used to decide if adding a cluster is worthwhile 1223 */ 1224 int 1225 nfs_rephead(siz, nd, slp, err, cache, frev, mrq, mbp, bposp) 1226 int siz; 1227 struct nfsrv_descript *nd; 1228 struct nfssvc_sock *slp; 1229 int err; 1230 int cache; 1231 u_quad_t *frev; 1232 struct mbuf **mrq; 1233 struct mbuf **mbp; 1234 caddr_t *bposp; 1235 { 1236 u_int32_t *tl; 1237 struct mbuf *mreq; 1238 caddr_t bpos; 1239 struct mbuf *mb, *mb2; 1240 1241 MGETHDR(mreq, M_WAIT, MT_DATA); 1242 mb = mreq; 1243 /* 1244 * If this is a big reply, use a cluster else 1245 * try and leave leading space for the lower level headers. 1246 */ 1247 siz += RPC_REPLYSIZ; 1248 if ((max_hdr + siz) >= MINCLSIZE) { 1249 MCLGET(mreq, M_WAIT); 1250 } else 1251 mreq->m_data += max_hdr; 1252 tl = mtod(mreq, u_int32_t *); 1253 mreq->m_len = 6 * NFSX_UNSIGNED; 1254 bpos = ((caddr_t)tl) + mreq->m_len; 1255 *tl++ = txdr_unsigned(nd->nd_retxid); 1256 *tl++ = rpc_reply; 1257 if (err == ERPCMISMATCH || (err & NFSERR_AUTHERR)) { 1258 *tl++ = rpc_msgdenied; 1259 if (err & NFSERR_AUTHERR) { 1260 *tl++ = rpc_autherr; 1261 *tl = txdr_unsigned(err & ~NFSERR_AUTHERR); 1262 mreq->m_len -= NFSX_UNSIGNED; 1263 bpos -= NFSX_UNSIGNED; 1264 } else { 1265 *tl++ = rpc_mismatch; 1266 *tl++ = txdr_unsigned(RPC_VER2); 1267 *tl = txdr_unsigned(RPC_VER2); 1268 } 1269 } else { 1270 *tl++ = rpc_msgaccepted; 1271 1272 /* 1273 * For Kerberos authentication, we must send the nickname 1274 * verifier back, otherwise just RPCAUTH_NULL. 1275 */ 1276 if (nd->nd_flag & ND_KERBFULL) { 1277 struct nfsuid *nuidp; 1278 struct timeval ktvin, ktvout; 1279 1280 for (nuidp = NUIDHASH(slp, nd->nd_cr.cr_uid)->lh_first; 1281 nuidp != 0; nuidp = nuidp->nu_hash.le_next) { 1282 if (nuidp->nu_cr.cr_uid == nd->nd_cr.cr_uid && 1283 (!nd->nd_nam2 || netaddr_match(NU_NETFAM(nuidp), 1284 &nuidp->nu_haddr, nd->nd_nam2))) 1285 break; 1286 } 1287 if (nuidp) { 1288 ktvin.tv_sec = 1289 txdr_unsigned(nuidp->nu_timestamp.tv_sec - 1); 1290 ktvin.tv_usec = 1291 txdr_unsigned(nuidp->nu_timestamp.tv_usec); 1292 1293 /* 1294 * Encrypt the timestamp in ecb mode using the 1295 * session key. 1296 */ 1297 #ifdef NFSKERB 1298 XXX 1299 #endif 1300 1301 *tl++ = rpc_auth_kerb; 1302 *tl++ = txdr_unsigned(3 * NFSX_UNSIGNED); 1303 *tl = ktvout.tv_sec; 1304 nfsm_build(tl, u_int32_t *, 3 * NFSX_UNSIGNED); 1305 *tl++ = ktvout.tv_usec; 1306 *tl++ = txdr_unsigned(nuidp->nu_cr.cr_uid); 1307 } else { 1308 *tl++ = 0; 1309 *tl++ = 0; 1310 } 1311 } else { 1312 *tl++ = 0; 1313 *tl++ = 0; 1314 } 1315 switch (err) { 1316 case EPROGUNAVAIL: 1317 *tl = txdr_unsigned(RPC_PROGUNAVAIL); 1318 break; 1319 case EPROGMISMATCH: 1320 *tl = txdr_unsigned(RPC_PROGMISMATCH); 1321 nfsm_build(tl, u_int32_t *, 2 * NFSX_UNSIGNED); 1322 if (nd->nd_flag & ND_NQNFS) { 1323 *tl++ = txdr_unsigned(3); 1324 *tl = txdr_unsigned(3); 1325 } else { 1326 *tl++ = txdr_unsigned(2); 1327 *tl = txdr_unsigned(3); 1328 } 1329 break; 1330 case EPROCUNAVAIL: 1331 *tl = txdr_unsigned(RPC_PROCUNAVAIL); 1332 break; 1333 case EBADRPC: 1334 *tl = txdr_unsigned(RPC_GARBAGE); 1335 break; 1336 default: 1337 *tl = 0; 1338 if (err != NFSERR_RETVOID) { 1339 nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED); 1340 if (err) 1341 *tl = txdr_unsigned(nfsrv_errmap(nd, err)); 1342 else 1343 *tl = 0; 1344 } 1345 break; 1346 }; 1347 } 1348 1349 /* 1350 * For nqnfs, piggyback lease as requested. 1351 */ 1352 if ((nd->nd_flag & ND_NQNFS) && err == 0) { 1353 if (nd->nd_flag & ND_LEASE) { 1354 nfsm_build(tl, u_int32_t *, 5 * NFSX_UNSIGNED); 1355 *tl++ = txdr_unsigned(nd->nd_flag & ND_LEASE); 1356 *tl++ = txdr_unsigned(cache); 1357 *tl++ = txdr_unsigned(nd->nd_duration); 1358 txdr_hyper(*frev, tl); 1359 } else { 1360 nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED); 1361 *tl = 0; 1362 } 1363 } 1364 if (mrq != NULL) 1365 *mrq = mreq; 1366 *mbp = mb; 1367 *bposp = bpos; 1368 if (err != 0 && err != NFSERR_RETVOID) 1369 nfsstats.srvrpc_errs++; 1370 return (0); 1371 } 1372 1373 1374 #endif /* NFS_NOSERVER */ 1375 /* 1376 * Nfs timer routine 1377 * Scan the nfsreq list and retranmit any requests that have timed out 1378 * To avoid retransmission attempts on STREAM sockets (in the future) make 1379 * sure to set the r_retry field to 0 (implies nm_retry == 0). 1380 */ 1381 void 1382 nfs_timer(arg) 1383 void *arg; /* never used */ 1384 { 1385 struct nfsreq *rep; 1386 struct mbuf *m; 1387 struct socket *so; 1388 struct nfsmount *nmp; 1389 int timeo; 1390 int s, error; 1391 #ifndef NFS_NOSERVER 1392 static long lasttime = 0; 1393 struct nfssvc_sock *slp; 1394 u_quad_t cur_usec; 1395 #endif /* NFS_NOSERVER */ 1396 struct thread *td = &thread0; /* XXX for credentials, will break if sleep */ 1397 1398 s = splnet(); 1399 for (rep = nfs_reqq.tqh_first; rep != 0; rep = rep->r_chain.tqe_next) { 1400 nmp = rep->r_nmp; 1401 if (rep->r_mrep || (rep->r_flags & R_SOFTTERM)) 1402 continue; 1403 if (nfs_sigintr(nmp, rep, rep->r_td)) { 1404 nfs_softterm(rep); 1405 continue; 1406 } 1407 if (rep->r_rtt >= 0) { 1408 rep->r_rtt++; 1409 if (nmp->nm_flag & NFSMNT_DUMBTIMR) 1410 timeo = nmp->nm_timeo; 1411 else 1412 timeo = NFS_RTO(nmp, proct[rep->r_procnum]); 1413 if (nmp->nm_timeouts > 0) 1414 timeo *= nfs_backoff[nmp->nm_timeouts - 1]; 1415 if (rep->r_rtt <= timeo) 1416 continue; 1417 if (nmp->nm_timeouts < 8) 1418 nmp->nm_timeouts++; 1419 } 1420 /* 1421 * Check for server not responding 1422 */ 1423 if ((rep->r_flags & R_TPRINTFMSG) == 0 && 1424 rep->r_rexmit > nmp->nm_deadthresh) { 1425 nfs_msg(rep->r_td, 1426 nmp->nm_mountp->mnt_stat.f_mntfromname, 1427 "not responding"); 1428 rep->r_flags |= R_TPRINTFMSG; 1429 } 1430 if (rep->r_rexmit >= rep->r_retry) { /* too many */ 1431 nfsstats.rpctimeouts++; 1432 nfs_softterm(rep); 1433 continue; 1434 } 1435 if (nmp->nm_sotype != SOCK_DGRAM) { 1436 if (++rep->r_rexmit > NFS_MAXREXMIT) 1437 rep->r_rexmit = NFS_MAXREXMIT; 1438 continue; 1439 } 1440 if ((so = nmp->nm_so) == NULL) 1441 continue; 1442 1443 /* 1444 * If there is enough space and the window allows.. 1445 * Resend it 1446 * Set r_rtt to -1 in case we fail to send it now. 1447 */ 1448 rep->r_rtt = -1; 1449 if (sbspace(&so->so_snd) >= rep->r_mreq->m_pkthdr.len && 1450 ((nmp->nm_flag & NFSMNT_DUMBTIMR) || 1451 (rep->r_flags & R_SENT) || 1452 nmp->nm_sent < nmp->nm_cwnd) && 1453 (m = m_copym(rep->r_mreq, 0, M_COPYALL, M_DONTWAIT))){ 1454 if ((nmp->nm_flag & NFSMNT_NOCONN) == 0) 1455 error = (*so->so_proto->pr_usrreqs->pru_send) 1456 (so, 0, m, (struct sockaddr *)0, 1457 (struct mbuf *)0, td); 1458 else 1459 error = (*so->so_proto->pr_usrreqs->pru_send) 1460 (so, 0, m, nmp->nm_nam, (struct mbuf *)0, 1461 td); 1462 if (error) { 1463 if (NFSIGNORE_SOERROR(nmp->nm_soflags, error)) 1464 so->so_error = 0; 1465 } else { 1466 /* 1467 * Iff first send, start timing 1468 * else turn timing off, backoff timer 1469 * and divide congestion window by 2. 1470 */ 1471 if (rep->r_flags & R_SENT) { 1472 rep->r_flags &= ~R_TIMING; 1473 if (++rep->r_rexmit > NFS_MAXREXMIT) 1474 rep->r_rexmit = NFS_MAXREXMIT; 1475 nmp->nm_cwnd >>= 1; 1476 if (nmp->nm_cwnd < NFS_CWNDSCALE) 1477 nmp->nm_cwnd = NFS_CWNDSCALE; 1478 nfsstats.rpcretries++; 1479 } else { 1480 rep->r_flags |= R_SENT; 1481 nmp->nm_sent += NFS_CWNDSCALE; 1482 } 1483 rep->r_rtt = 0; 1484 } 1485 } 1486 } 1487 #ifndef NFS_NOSERVER 1488 /* 1489 * Call the nqnfs server timer once a second to handle leases. 1490 */ 1491 if (lasttime != time_second) { 1492 lasttime = time_second; 1493 nqnfs_serverd(); 1494 } 1495 1496 /* 1497 * Scan the write gathering queues for writes that need to be 1498 * completed now. 1499 */ 1500 cur_usec = nfs_curusec(); 1501 for (slp = nfssvc_sockhead.tqh_first; slp != 0; 1502 slp = slp->ns_chain.tqe_next) { 1503 if (slp->ns_tq.lh_first && slp->ns_tq.lh_first->nd_time<=cur_usec) 1504 nfsrv_wakenfsd(slp); 1505 } 1506 #endif /* NFS_NOSERVER */ 1507 splx(s); 1508 nfs_timer_handle = timeout(nfs_timer, (void *)0, nfs_ticks); 1509 } 1510 1511 /* 1512 * Mark all of an nfs mount's outstanding requests with R_SOFTTERM and 1513 * wait for all requests to complete. This is used by forced unmounts 1514 * to terminate any outstanding RPCs. 1515 */ 1516 int 1517 nfs_nmcancelreqs(nmp) 1518 struct nfsmount *nmp; 1519 { 1520 struct nfsreq *req; 1521 int i, s; 1522 1523 s = splnet(); 1524 TAILQ_FOREACH(req, &nfs_reqq, r_chain) { 1525 if (nmp != req->r_nmp || req->r_mrep != NULL || 1526 (req->r_flags & R_SOFTTERM)) 1527 continue; 1528 nfs_softterm(req); 1529 } 1530 splx(s); 1531 1532 for (i = 0; i < 30; i++) { 1533 s = splnet(); 1534 TAILQ_FOREACH(req, &nfs_reqq, r_chain) { 1535 if (nmp == req->r_nmp) 1536 break; 1537 } 1538 splx(s); 1539 if (req == NULL) 1540 return (0); 1541 tsleep(&lbolt, 0, "nfscancel", 0); 1542 } 1543 return (EBUSY); 1544 } 1545 1546 /* 1547 * Flag a request as being about to terminate (due to NFSMNT_INT/NFSMNT_SOFT). 1548 * The nm_send count is decremented now to avoid deadlocks when the process in 1549 * soreceive() hasn't yet managed to send its own request. 1550 */ 1551 1552 static void 1553 nfs_softterm(rep) 1554 struct nfsreq *rep; 1555 { 1556 rep->r_flags |= R_SOFTTERM; 1557 1558 if (rep->r_flags & R_SENT) { 1559 rep->r_nmp->nm_sent -= NFS_CWNDSCALE; 1560 rep->r_flags &= ~R_SENT; 1561 } 1562 } 1563 1564 /* 1565 * Test for a termination condition pending on the process. 1566 * This is used for NFSMNT_INT mounts. 1567 */ 1568 int 1569 nfs_sigintr(struct nfsmount *nmp, struct nfsreq *rep, struct thread *td) 1570 { 1571 sigset_t tmpset; 1572 struct proc *p; 1573 1574 if (rep && (rep->r_flags & R_SOFTTERM)) 1575 return (EINTR); 1576 /* Terminate all requests while attempting a forced unmount. */ 1577 if (nmp->nm_mountp->mnt_kern_flag & MNTK_UNMOUNTF) 1578 return (EINTR); 1579 if (!(nmp->nm_flag & NFSMNT_INT)) 1580 return (0); 1581 /* td might be NULL YYY */ 1582 if (td == NULL || (p = td->td_proc) == NULL) 1583 return (0); 1584 1585 tmpset = p->p_siglist; 1586 SIGSETNAND(tmpset, p->p_sigmask); 1587 SIGSETNAND(tmpset, p->p_sigignore); 1588 if (SIGNOTEMPTY(p->p_siglist) && NFSINT_SIGMASK(tmpset)) 1589 return (EINTR); 1590 1591 return (0); 1592 } 1593 1594 /* 1595 * Lock a socket against others. 1596 * Necessary for STREAM sockets to ensure you get an entire rpc request/reply 1597 * and also to avoid race conditions between the processes with nfs requests 1598 * in progress when a reconnect is necessary. 1599 */ 1600 int 1601 nfs_sndlock(struct nfsreq *rep) 1602 { 1603 int *statep = &rep->r_nmp->nm_state; 1604 struct thread *td; 1605 int slpflag = 0, slptimeo = 0; 1606 1607 td = rep->r_td; 1608 if (rep->r_nmp->nm_flag & NFSMNT_INT) 1609 slpflag = PCATCH; 1610 while (*statep & NFSSTA_SNDLOCK) { 1611 if (nfs_sigintr(rep->r_nmp, rep, td)) 1612 return (EINTR); 1613 *statep |= NFSSTA_WANTSND; 1614 (void) tsleep((caddr_t)statep, slpflag, 1615 "nfsndlck", slptimeo); 1616 if (slpflag == PCATCH) { 1617 slpflag = 0; 1618 slptimeo = 2 * hz; 1619 } 1620 } 1621 /* Always fail if our request has been cancelled. */ 1622 if ((rep->r_flags & R_SOFTTERM)) 1623 return (EINTR); 1624 *statep |= NFSSTA_SNDLOCK; 1625 return (0); 1626 } 1627 1628 /* 1629 * Unlock the stream socket for others. 1630 */ 1631 void 1632 nfs_sndunlock(rep) 1633 struct nfsreq *rep; 1634 { 1635 int *statep = &rep->r_nmp->nm_state; 1636 1637 if ((*statep & NFSSTA_SNDLOCK) == 0) 1638 panic("nfs sndunlock"); 1639 *statep &= ~NFSSTA_SNDLOCK; 1640 if (*statep & NFSSTA_WANTSND) { 1641 *statep &= ~NFSSTA_WANTSND; 1642 wakeup((caddr_t)statep); 1643 } 1644 } 1645 1646 static int 1647 nfs_rcvlock(rep) 1648 struct nfsreq *rep; 1649 { 1650 int *statep = &rep->r_nmp->nm_state; 1651 int slpflag, slptimeo = 0; 1652 1653 if (rep->r_nmp->nm_flag & NFSMNT_INT) 1654 slpflag = PCATCH; 1655 else 1656 slpflag = 0; 1657 while (*statep & NFSSTA_RCVLOCK) { 1658 if (nfs_sigintr(rep->r_nmp, rep, rep->r_td)) 1659 return (EINTR); 1660 *statep |= NFSSTA_WANTRCV; 1661 (void) tsleep((caddr_t)statep, slpflag, "nfsrcvlk", slptimeo); 1662 /* 1663 * If our reply was recieved while we were sleeping, 1664 * then just return without taking the lock to avoid a 1665 * situation where a single iod could 'capture' the 1666 * recieve lock. 1667 */ 1668 if (rep->r_mrep != NULL) 1669 return (EALREADY); 1670 if (slpflag == PCATCH) { 1671 slpflag = 0; 1672 slptimeo = 2 * hz; 1673 } 1674 } 1675 *statep |= NFSSTA_RCVLOCK; 1676 return (0); 1677 } 1678 1679 /* 1680 * Unlock the stream socket for others. 1681 */ 1682 static void 1683 nfs_rcvunlock(rep) 1684 struct nfsreq *rep; 1685 { 1686 int *statep = &rep->r_nmp->nm_state; 1687 1688 if ((*statep & NFSSTA_RCVLOCK) == 0) 1689 panic("nfs rcvunlock"); 1690 *statep &= ~NFSSTA_RCVLOCK; 1691 if (*statep & NFSSTA_WANTRCV) { 1692 *statep &= ~NFSSTA_WANTRCV; 1693 wakeup((caddr_t)statep); 1694 } 1695 } 1696 1697 /* 1698 * nfs_realign: 1699 * 1700 * Check for badly aligned mbuf data and realign by copying the unaligned 1701 * portion of the data into a new mbuf chain and freeing the portions 1702 * of the old chain that were replaced. 1703 * 1704 * We cannot simply realign the data within the existing mbuf chain 1705 * because the underlying buffers may contain other rpc commands and 1706 * we cannot afford to overwrite them. 1707 * 1708 * We would prefer to avoid this situation entirely. The situation does 1709 * not occur with NFS/UDP and is supposed to only occassionally occur 1710 * with TCP. Use vfs.nfs.realign_count and realign_test to check this. 1711 */ 1712 static void 1713 nfs_realign(pm, hsiz) 1714 struct mbuf **pm; 1715 int hsiz; 1716 { 1717 struct mbuf *m; 1718 struct mbuf *n = NULL; 1719 int off = 0; 1720 1721 ++nfs_realign_test; 1722 1723 while ((m = *pm) != NULL) { 1724 if ((m->m_len & 0x3) || (mtod(m, intptr_t) & 0x3)) { 1725 MGET(n, M_WAIT, MT_DATA); 1726 if (m->m_len >= MINCLSIZE) { 1727 MCLGET(n, M_WAIT); 1728 } 1729 n->m_len = 0; 1730 break; 1731 } 1732 pm = &m->m_next; 1733 } 1734 1735 /* 1736 * If n is non-NULL, loop on m copying data, then replace the 1737 * portion of the chain that had to be realigned. 1738 */ 1739 if (n != NULL) { 1740 ++nfs_realign_count; 1741 while (m) { 1742 m_copyback(n, off, m->m_len, mtod(m, caddr_t)); 1743 off += m->m_len; 1744 m = m->m_next; 1745 } 1746 m_freem(*pm); 1747 *pm = n; 1748 } 1749 } 1750 1751 #ifndef NFS_NOSERVER 1752 1753 /* 1754 * Parse an RPC request 1755 * - verify it 1756 * - fill in the cred struct. 1757 */ 1758 int 1759 nfs_getreq(nd, nfsd, has_header) 1760 struct nfsrv_descript *nd; 1761 struct nfsd *nfsd; 1762 int has_header; 1763 { 1764 int len, i; 1765 u_int32_t *tl; 1766 int32_t t1; 1767 struct uio uio; 1768 struct iovec iov; 1769 caddr_t dpos, cp2, cp; 1770 u_int32_t nfsvers, auth_type; 1771 uid_t nickuid; 1772 int error = 0, nqnfs = 0, ticklen; 1773 struct mbuf *mrep, *md; 1774 struct nfsuid *nuidp; 1775 struct timeval tvin, tvout; 1776 #if 0 /* until encrypted keys are implemented */ 1777 NFSKERBKEYSCHED_T keys; /* stores key schedule */ 1778 #endif 1779 1780 mrep = nd->nd_mrep; 1781 md = nd->nd_md; 1782 dpos = nd->nd_dpos; 1783 if (has_header) { 1784 nfsm_dissect(tl, u_int32_t *, 10 * NFSX_UNSIGNED); 1785 nd->nd_retxid = fxdr_unsigned(u_int32_t, *tl++); 1786 if (*tl++ != rpc_call) { 1787 m_freem(mrep); 1788 return (EBADRPC); 1789 } 1790 } else 1791 nfsm_dissect(tl, u_int32_t *, 8 * NFSX_UNSIGNED); 1792 nd->nd_repstat = 0; 1793 nd->nd_flag = 0; 1794 if (*tl++ != rpc_vers) { 1795 nd->nd_repstat = ERPCMISMATCH; 1796 nd->nd_procnum = NFSPROC_NOOP; 1797 return (0); 1798 } 1799 if (*tl != nfs_prog) { 1800 if (*tl == nqnfs_prog) 1801 nqnfs++; 1802 else { 1803 nd->nd_repstat = EPROGUNAVAIL; 1804 nd->nd_procnum = NFSPROC_NOOP; 1805 return (0); 1806 } 1807 } 1808 tl++; 1809 nfsvers = fxdr_unsigned(u_int32_t, *tl++); 1810 if (((nfsvers < NFS_VER2 || nfsvers > NFS_VER3) && !nqnfs) || 1811 (nfsvers != NQNFS_VER3 && nqnfs)) { 1812 nd->nd_repstat = EPROGMISMATCH; 1813 nd->nd_procnum = NFSPROC_NOOP; 1814 return (0); 1815 } 1816 if (nqnfs) 1817 nd->nd_flag = (ND_NFSV3 | ND_NQNFS); 1818 else if (nfsvers == NFS_VER3) 1819 nd->nd_flag = ND_NFSV3; 1820 nd->nd_procnum = fxdr_unsigned(u_int32_t, *tl++); 1821 if (nd->nd_procnum == NFSPROC_NULL) 1822 return (0); 1823 if (nd->nd_procnum >= NFS_NPROCS || 1824 (!nqnfs && nd->nd_procnum >= NQNFSPROC_GETLEASE) || 1825 (!nd->nd_flag && nd->nd_procnum > NFSV2PROC_STATFS)) { 1826 nd->nd_repstat = EPROCUNAVAIL; 1827 nd->nd_procnum = NFSPROC_NOOP; 1828 return (0); 1829 } 1830 if ((nd->nd_flag & ND_NFSV3) == 0) 1831 nd->nd_procnum = nfsv3_procid[nd->nd_procnum]; 1832 auth_type = *tl++; 1833 len = fxdr_unsigned(int, *tl++); 1834 if (len < 0 || len > RPCAUTH_MAXSIZ) { 1835 m_freem(mrep); 1836 return (EBADRPC); 1837 } 1838 1839 nd->nd_flag &= ~ND_KERBAUTH; 1840 /* 1841 * Handle auth_unix or auth_kerb. 1842 */ 1843 if (auth_type == rpc_auth_unix) { 1844 len = fxdr_unsigned(int, *++tl); 1845 if (len < 0 || len > NFS_MAXNAMLEN) { 1846 m_freem(mrep); 1847 return (EBADRPC); 1848 } 1849 nfsm_adv(nfsm_rndup(len)); 1850 nfsm_dissect(tl, u_int32_t *, 3 * NFSX_UNSIGNED); 1851 bzero((caddr_t)&nd->nd_cr, sizeof (struct ucred)); 1852 nd->nd_cr.cr_ref = 1; 1853 nd->nd_cr.cr_uid = fxdr_unsigned(uid_t, *tl++); 1854 nd->nd_cr.cr_gid = fxdr_unsigned(gid_t, *tl++); 1855 len = fxdr_unsigned(int, *tl); 1856 if (len < 0 || len > RPCAUTH_UNIXGIDS) { 1857 m_freem(mrep); 1858 return (EBADRPC); 1859 } 1860 nfsm_dissect(tl, u_int32_t *, (len + 2) * NFSX_UNSIGNED); 1861 for (i = 1; i <= len; i++) 1862 if (i < NGROUPS) 1863 nd->nd_cr.cr_groups[i] = fxdr_unsigned(gid_t, *tl++); 1864 else 1865 tl++; 1866 nd->nd_cr.cr_ngroups = (len >= NGROUPS) ? NGROUPS : (len + 1); 1867 if (nd->nd_cr.cr_ngroups > 1) 1868 nfsrvw_sort(nd->nd_cr.cr_groups, nd->nd_cr.cr_ngroups); 1869 len = fxdr_unsigned(int, *++tl); 1870 if (len < 0 || len > RPCAUTH_MAXSIZ) { 1871 m_freem(mrep); 1872 return (EBADRPC); 1873 } 1874 if (len > 0) 1875 nfsm_adv(nfsm_rndup(len)); 1876 } else if (auth_type == rpc_auth_kerb) { 1877 switch (fxdr_unsigned(int, *tl++)) { 1878 case RPCAKN_FULLNAME: 1879 ticklen = fxdr_unsigned(int, *tl); 1880 *((u_int32_t *)nfsd->nfsd_authstr) = *tl; 1881 uio.uio_resid = nfsm_rndup(ticklen) + NFSX_UNSIGNED; 1882 nfsd->nfsd_authlen = uio.uio_resid + NFSX_UNSIGNED; 1883 if (uio.uio_resid > (len - 2 * NFSX_UNSIGNED)) { 1884 m_freem(mrep); 1885 return (EBADRPC); 1886 } 1887 uio.uio_offset = 0; 1888 uio.uio_iov = &iov; 1889 uio.uio_iovcnt = 1; 1890 uio.uio_segflg = UIO_SYSSPACE; 1891 iov.iov_base = (caddr_t)&nfsd->nfsd_authstr[4]; 1892 iov.iov_len = RPCAUTH_MAXSIZ - 4; 1893 nfsm_mtouio(&uio, uio.uio_resid); 1894 nfsm_dissect(tl, u_int32_t *, 2 * NFSX_UNSIGNED); 1895 if (*tl++ != rpc_auth_kerb || 1896 fxdr_unsigned(int, *tl) != 4 * NFSX_UNSIGNED) { 1897 printf("Bad kerb verifier\n"); 1898 nd->nd_repstat = (NFSERR_AUTHERR|AUTH_BADVERF); 1899 nd->nd_procnum = NFSPROC_NOOP; 1900 return (0); 1901 } 1902 nfsm_dissect(cp, caddr_t, 4 * NFSX_UNSIGNED); 1903 tl = (u_int32_t *)cp; 1904 if (fxdr_unsigned(int, *tl) != RPCAKN_FULLNAME) { 1905 printf("Not fullname kerb verifier\n"); 1906 nd->nd_repstat = (NFSERR_AUTHERR|AUTH_BADVERF); 1907 nd->nd_procnum = NFSPROC_NOOP; 1908 return (0); 1909 } 1910 cp += NFSX_UNSIGNED; 1911 bcopy(cp, nfsd->nfsd_verfstr, 3 * NFSX_UNSIGNED); 1912 nfsd->nfsd_verflen = 3 * NFSX_UNSIGNED; 1913 nd->nd_flag |= ND_KERBFULL; 1914 nfsd->nfsd_flag |= NFSD_NEEDAUTH; 1915 break; 1916 case RPCAKN_NICKNAME: 1917 if (len != 2 * NFSX_UNSIGNED) { 1918 printf("Kerb nickname short\n"); 1919 nd->nd_repstat = (NFSERR_AUTHERR|AUTH_BADCRED); 1920 nd->nd_procnum = NFSPROC_NOOP; 1921 return (0); 1922 } 1923 nickuid = fxdr_unsigned(uid_t, *tl); 1924 nfsm_dissect(tl, u_int32_t *, 2 * NFSX_UNSIGNED); 1925 if (*tl++ != rpc_auth_kerb || 1926 fxdr_unsigned(int, *tl) != 3 * NFSX_UNSIGNED) { 1927 printf("Kerb nick verifier bad\n"); 1928 nd->nd_repstat = (NFSERR_AUTHERR|AUTH_BADVERF); 1929 nd->nd_procnum = NFSPROC_NOOP; 1930 return (0); 1931 } 1932 nfsm_dissect(tl, u_int32_t *, 3 * NFSX_UNSIGNED); 1933 tvin.tv_sec = *tl++; 1934 tvin.tv_usec = *tl; 1935 1936 for (nuidp = NUIDHASH(nfsd->nfsd_slp,nickuid)->lh_first; 1937 nuidp != 0; nuidp = nuidp->nu_hash.le_next) { 1938 if (nuidp->nu_cr.cr_uid == nickuid && 1939 (!nd->nd_nam2 || 1940 netaddr_match(NU_NETFAM(nuidp), 1941 &nuidp->nu_haddr, nd->nd_nam2))) 1942 break; 1943 } 1944 if (!nuidp) { 1945 nd->nd_repstat = 1946 (NFSERR_AUTHERR|AUTH_REJECTCRED); 1947 nd->nd_procnum = NFSPROC_NOOP; 1948 return (0); 1949 } 1950 1951 /* 1952 * Now, decrypt the timestamp using the session key 1953 * and validate it. 1954 */ 1955 #ifdef NFSKERB 1956 XXX 1957 #endif 1958 1959 tvout.tv_sec = fxdr_unsigned(long, tvout.tv_sec); 1960 tvout.tv_usec = fxdr_unsigned(long, tvout.tv_usec); 1961 if (nuidp->nu_expire < time_second || 1962 nuidp->nu_timestamp.tv_sec > tvout.tv_sec || 1963 (nuidp->nu_timestamp.tv_sec == tvout.tv_sec && 1964 nuidp->nu_timestamp.tv_usec > tvout.tv_usec)) { 1965 nuidp->nu_expire = 0; 1966 nd->nd_repstat = 1967 (NFSERR_AUTHERR|AUTH_REJECTVERF); 1968 nd->nd_procnum = NFSPROC_NOOP; 1969 return (0); 1970 } 1971 nfsrv_setcred(&nuidp->nu_cr, &nd->nd_cr); 1972 nd->nd_flag |= ND_KERBNICK; 1973 }; 1974 } else { 1975 nd->nd_repstat = (NFSERR_AUTHERR | AUTH_REJECTCRED); 1976 nd->nd_procnum = NFSPROC_NOOP; 1977 return (0); 1978 } 1979 1980 /* 1981 * For nqnfs, get piggybacked lease request. 1982 */ 1983 if (nqnfs && nd->nd_procnum != NQNFSPROC_EVICTED) { 1984 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED); 1985 nd->nd_flag |= fxdr_unsigned(int, *tl); 1986 if (nd->nd_flag & ND_LEASE) { 1987 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED); 1988 nd->nd_duration = fxdr_unsigned(int32_t, *tl); 1989 } else 1990 nd->nd_duration = NQ_MINLEASE; 1991 } else 1992 nd->nd_duration = NQ_MINLEASE; 1993 nd->nd_md = md; 1994 nd->nd_dpos = dpos; 1995 return (0); 1996 nfsmout: 1997 return (error); 1998 } 1999 2000 #endif 2001 2002 /* 2003 * Send a message to the originating process's terminal. The thread and/or 2004 * process may be NULL. YYY the thread should not be NULL but there may 2005 * still be some uio_td's that are still being passed as NULL through to 2006 * nfsm_request(). 2007 */ 2008 static int 2009 nfs_msg(struct thread *td, char *server, char *msg) 2010 { 2011 tpr_t tpr; 2012 2013 if (td && td->td_proc) 2014 tpr = tprintf_open(td->td_proc); 2015 else 2016 tpr = NULL; 2017 tprintf(tpr, "nfs server %s: %s\n", server, msg); 2018 tprintf_close(tpr); 2019 return (0); 2020 } 2021 2022 #ifndef NFS_NOSERVER 2023 /* 2024 * Socket upcall routine for the nfsd sockets. 2025 * The caddr_t arg is a pointer to the "struct nfssvc_sock". 2026 * Essentially do as much as possible non-blocking, else punt and it will 2027 * be called with M_WAIT from an nfsd. 2028 */ 2029 void 2030 nfsrv_rcv(so, arg, waitflag) 2031 struct socket *so; 2032 void *arg; 2033 int waitflag; 2034 { 2035 struct nfssvc_sock *slp = (struct nfssvc_sock *)arg; 2036 struct mbuf *m; 2037 struct mbuf *mp; 2038 struct sockaddr *nam; 2039 struct uio auio; 2040 int flags, error; 2041 2042 if ((slp->ns_flag & SLP_VALID) == 0) 2043 return; 2044 #ifdef notdef 2045 /* 2046 * Define this to test for nfsds handling this under heavy load. 2047 */ 2048 if (waitflag == M_DONTWAIT) { 2049 slp->ns_flag |= SLP_NEEDQ; goto dorecs; 2050 } 2051 #endif 2052 auio.uio_td = NULL; 2053 if (so->so_type == SOCK_STREAM) { 2054 /* 2055 * If there are already records on the queue, defer soreceive() 2056 * to an nfsd so that there is feedback to the TCP layer that 2057 * the nfs servers are heavily loaded. 2058 */ 2059 if (STAILQ_FIRST(&slp->ns_rec) && waitflag == M_DONTWAIT) { 2060 slp->ns_flag |= SLP_NEEDQ; 2061 goto dorecs; 2062 } 2063 2064 /* 2065 * Do soreceive(). 2066 */ 2067 auio.uio_resid = 1000000000; 2068 flags = MSG_DONTWAIT; 2069 error = so->so_proto->pr_usrreqs->pru_soreceive 2070 (so, &nam, &auio, &mp, (struct mbuf **)0, &flags); 2071 if (error || mp == (struct mbuf *)0) { 2072 if (error == EWOULDBLOCK) 2073 slp->ns_flag |= SLP_NEEDQ; 2074 else 2075 slp->ns_flag |= SLP_DISCONN; 2076 goto dorecs; 2077 } 2078 m = mp; 2079 if (slp->ns_rawend) { 2080 slp->ns_rawend->m_next = m; 2081 slp->ns_cc += 1000000000 - auio.uio_resid; 2082 } else { 2083 slp->ns_raw = m; 2084 slp->ns_cc = 1000000000 - auio.uio_resid; 2085 } 2086 while (m->m_next) 2087 m = m->m_next; 2088 slp->ns_rawend = m; 2089 2090 /* 2091 * Now try and parse record(s) out of the raw stream data. 2092 */ 2093 error = nfsrv_getstream(slp, waitflag); 2094 if (error) { 2095 if (error == EPERM) 2096 slp->ns_flag |= SLP_DISCONN; 2097 else 2098 slp->ns_flag |= SLP_NEEDQ; 2099 } 2100 } else { 2101 do { 2102 auio.uio_resid = 1000000000; 2103 flags = MSG_DONTWAIT; 2104 error = so->so_proto->pr_usrreqs->pru_soreceive 2105 (so, &nam, &auio, &mp, 2106 (struct mbuf **)0, &flags); 2107 if (mp) { 2108 struct nfsrv_rec *rec; 2109 rec = malloc(sizeof(struct nfsrv_rec), 2110 M_NFSRVDESC, waitflag); 2111 if (!rec) { 2112 if (nam) 2113 FREE(nam, M_SONAME); 2114 m_freem(mp); 2115 continue; 2116 } 2117 nfs_realign(&mp, 10 * NFSX_UNSIGNED); 2118 rec->nr_address = nam; 2119 rec->nr_packet = mp; 2120 STAILQ_INSERT_TAIL(&slp->ns_rec, rec, nr_link); 2121 } 2122 if (error) { 2123 if ((so->so_proto->pr_flags & PR_CONNREQUIRED) 2124 && error != EWOULDBLOCK) { 2125 slp->ns_flag |= SLP_DISCONN; 2126 goto dorecs; 2127 } 2128 } 2129 } while (mp); 2130 } 2131 2132 /* 2133 * Now try and process the request records, non-blocking. 2134 */ 2135 dorecs: 2136 if (waitflag == M_DONTWAIT && 2137 (STAILQ_FIRST(&slp->ns_rec) 2138 || (slp->ns_flag & (SLP_NEEDQ | SLP_DISCONN)))) 2139 nfsrv_wakenfsd(slp); 2140 } 2141 2142 /* 2143 * Try and extract an RPC request from the mbuf data list received on a 2144 * stream socket. The "waitflag" argument indicates whether or not it 2145 * can sleep. 2146 */ 2147 static int 2148 nfsrv_getstream(slp, waitflag) 2149 struct nfssvc_sock *slp; 2150 int waitflag; 2151 { 2152 struct mbuf *m, **mpp; 2153 char *cp1, *cp2; 2154 int len; 2155 struct mbuf *om, *m2, *recm; 2156 u_int32_t recmark; 2157 2158 if (slp->ns_flag & SLP_GETSTREAM) 2159 panic("nfs getstream"); 2160 slp->ns_flag |= SLP_GETSTREAM; 2161 for (;;) { 2162 if (slp->ns_reclen == 0) { 2163 if (slp->ns_cc < NFSX_UNSIGNED) { 2164 slp->ns_flag &= ~SLP_GETSTREAM; 2165 return (0); 2166 } 2167 m = slp->ns_raw; 2168 if (m->m_len >= NFSX_UNSIGNED) { 2169 bcopy(mtod(m, caddr_t), (caddr_t)&recmark, NFSX_UNSIGNED); 2170 m->m_data += NFSX_UNSIGNED; 2171 m->m_len -= NFSX_UNSIGNED; 2172 } else { 2173 cp1 = (caddr_t)&recmark; 2174 cp2 = mtod(m, caddr_t); 2175 while (cp1 < ((caddr_t)&recmark) + NFSX_UNSIGNED) { 2176 while (m->m_len == 0) { 2177 m = m->m_next; 2178 cp2 = mtod(m, caddr_t); 2179 } 2180 *cp1++ = *cp2++; 2181 m->m_data++; 2182 m->m_len--; 2183 } 2184 } 2185 slp->ns_cc -= NFSX_UNSIGNED; 2186 recmark = ntohl(recmark); 2187 slp->ns_reclen = recmark & ~0x80000000; 2188 if (recmark & 0x80000000) 2189 slp->ns_flag |= SLP_LASTFRAG; 2190 else 2191 slp->ns_flag &= ~SLP_LASTFRAG; 2192 if (slp->ns_reclen > NFS_MAXPACKET) { 2193 slp->ns_flag &= ~SLP_GETSTREAM; 2194 return (EPERM); 2195 } 2196 } 2197 2198 /* 2199 * Now get the record part. 2200 * 2201 * Note that slp->ns_reclen may be 0. Linux sometimes 2202 * generates 0-length RPCs 2203 */ 2204 recm = NULL; 2205 if (slp->ns_cc == slp->ns_reclen) { 2206 recm = slp->ns_raw; 2207 slp->ns_raw = slp->ns_rawend = (struct mbuf *)0; 2208 slp->ns_cc = slp->ns_reclen = 0; 2209 } else if (slp->ns_cc > slp->ns_reclen) { 2210 len = 0; 2211 m = slp->ns_raw; 2212 om = (struct mbuf *)0; 2213 2214 while (len < slp->ns_reclen) { 2215 if ((len + m->m_len) > slp->ns_reclen) { 2216 m2 = m_copym(m, 0, slp->ns_reclen - len, 2217 waitflag); 2218 if (m2) { 2219 if (om) { 2220 om->m_next = m2; 2221 recm = slp->ns_raw; 2222 } else 2223 recm = m2; 2224 m->m_data += slp->ns_reclen - len; 2225 m->m_len -= slp->ns_reclen - len; 2226 len = slp->ns_reclen; 2227 } else { 2228 slp->ns_flag &= ~SLP_GETSTREAM; 2229 return (EWOULDBLOCK); 2230 } 2231 } else if ((len + m->m_len) == slp->ns_reclen) { 2232 om = m; 2233 len += m->m_len; 2234 m = m->m_next; 2235 recm = slp->ns_raw; 2236 om->m_next = (struct mbuf *)0; 2237 } else { 2238 om = m; 2239 len += m->m_len; 2240 m = m->m_next; 2241 } 2242 } 2243 slp->ns_raw = m; 2244 slp->ns_cc -= len; 2245 slp->ns_reclen = 0; 2246 } else { 2247 slp->ns_flag &= ~SLP_GETSTREAM; 2248 return (0); 2249 } 2250 2251 /* 2252 * Accumulate the fragments into a record. 2253 */ 2254 mpp = &slp->ns_frag; 2255 while (*mpp) 2256 mpp = &((*mpp)->m_next); 2257 *mpp = recm; 2258 if (slp->ns_flag & SLP_LASTFRAG) { 2259 struct nfsrv_rec *rec; 2260 rec = malloc(sizeof(struct nfsrv_rec), M_NFSRVDESC, waitflag); 2261 if (!rec) { 2262 m_freem(slp->ns_frag); 2263 } else { 2264 nfs_realign(&slp->ns_frag, 10 * NFSX_UNSIGNED); 2265 rec->nr_address = (struct sockaddr *)0; 2266 rec->nr_packet = slp->ns_frag; 2267 STAILQ_INSERT_TAIL(&slp->ns_rec, rec, nr_link); 2268 } 2269 slp->ns_frag = (struct mbuf *)0; 2270 } 2271 } 2272 } 2273 2274 /* 2275 * Parse an RPC header. 2276 */ 2277 int 2278 nfsrv_dorec(slp, nfsd, ndp) 2279 struct nfssvc_sock *slp; 2280 struct nfsd *nfsd; 2281 struct nfsrv_descript **ndp; 2282 { 2283 struct nfsrv_rec *rec; 2284 struct mbuf *m; 2285 struct sockaddr *nam; 2286 struct nfsrv_descript *nd; 2287 int error; 2288 2289 *ndp = NULL; 2290 if ((slp->ns_flag & SLP_VALID) == 0 || !STAILQ_FIRST(&slp->ns_rec)) 2291 return (ENOBUFS); 2292 rec = STAILQ_FIRST(&slp->ns_rec); 2293 STAILQ_REMOVE_HEAD(&slp->ns_rec, nr_link); 2294 nam = rec->nr_address; 2295 m = rec->nr_packet; 2296 free(rec, M_NFSRVDESC); 2297 MALLOC(nd, struct nfsrv_descript *, sizeof (struct nfsrv_descript), 2298 M_NFSRVDESC, M_WAITOK); 2299 nd->nd_md = nd->nd_mrep = m; 2300 nd->nd_nam2 = nam; 2301 nd->nd_dpos = mtod(m, caddr_t); 2302 error = nfs_getreq(nd, nfsd, TRUE); 2303 if (error) { 2304 if (nam) { 2305 FREE(nam, M_SONAME); 2306 } 2307 free((caddr_t)nd, M_NFSRVDESC); 2308 return (error); 2309 } 2310 *ndp = nd; 2311 nfsd->nfsd_nd = nd; 2312 return (0); 2313 } 2314 2315 /* 2316 * Search for a sleeping nfsd and wake it up. 2317 * SIDE EFFECT: If none found, set NFSD_CHECKSLP flag, so that one of the 2318 * running nfsds will go look for the work in the nfssvc_sock list. 2319 */ 2320 void 2321 nfsrv_wakenfsd(slp) 2322 struct nfssvc_sock *slp; 2323 { 2324 struct nfsd *nd; 2325 2326 if ((slp->ns_flag & SLP_VALID) == 0) 2327 return; 2328 for (nd = nfsd_head.tqh_first; nd != 0; nd = nd->nfsd_chain.tqe_next) { 2329 if (nd->nfsd_flag & NFSD_WAITING) { 2330 nd->nfsd_flag &= ~NFSD_WAITING; 2331 if (nd->nfsd_slp) 2332 panic("nfsd wakeup"); 2333 slp->ns_sref++; 2334 nd->nfsd_slp = slp; 2335 wakeup((caddr_t)nd); 2336 return; 2337 } 2338 } 2339 slp->ns_flag |= SLP_DOREC; 2340 nfsd_head_flag |= NFSD_CHECKSLP; 2341 } 2342 #endif /* NFS_NOSERVER */ 2343