/*
 * Copyright (c) 1989, 1991 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Rick Macklem at The University of Guelph.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)nfs_socket.c	7.36 (Berkeley) 07/12/92
 */

/*
 * Socket operations for use by nfs
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/mount.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/vnode.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/syslog.h>
#include <sys/tprintf.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <nfs/rpcv2.h>
#include <nfs/nfsv2.h>
#include <nfs/nfs.h>
#include <nfs/xdr_subs.h>
#include <nfs/nfsm_subs.h>
#include <nfs/nfsmount.h>
#include <nfs/nfsnode.h>
#include <nfs/nfsrtt.h>
#include <nfs/nqnfs.h>

#define	TRUE	1
#define	FALSE	0

/*
 * Estimate rto for an nfs rpc sent via an unreliable datagram.
 * Use the mean and mean deviation of rtt for the appropriate type of rpc
 * for the frequent rpcs and a default for the others.
 * The justification for doing "other" this way is that these rpcs
 * happen so infrequently that timer est. would probably be stale.
 * Also, since many of these rpcs are
 * non-idempotent, a conservative timeout is desired.
 * getattr, lookup - A+2D
 * read, write - A+4D
 * other - nm_timeo
 */
#define	NFS_RTO(n, t) \
	((t) == 0 ? (n)->nm_timeo : \
	 ((t) < 3 ? \
	  (((((n)->nm_srtt[t-1] + 3) >> 2) + (n)->nm_sdrtt[t-1] + 1) >> 1) : \
	  ((((n)->nm_srtt[t-1] + 7) >> 3) + (n)->nm_sdrtt[t-1] + 1)))
#define	NFS_SRTT(r)	(r)->r_nmp->nm_srtt[proct[(r)->r_procnum] - 1]
#define	NFS_SDRTT(r)	(r)->r_nmp->nm_sdrtt[proct[(r)->r_procnum] - 1]
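/*
 * Explanatory note (not part of the original source): nm_srtt[] holds the
 * smoothed rtt estimate A scaled by 8 and nm_sdrtt[] the mean deviation D
 * scaled by 4 (see the update in nfs_reply()).  So for getattr/lookup the
 * expression ((srtt >> 2) + sdrtt + 1) >> 1 is roughly (2A + 4D)/2 = A + 2D,
 * and for read/write (srtt >> 3) + sdrtt + 1 is roughly A + 4D, matching the
 * table in the comment above.
 */
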
/*
 * External data, mostly RPC constants in XDR form
 */
extern u_long rpc_reply, rpc_msgdenied, rpc_mismatch, rpc_vers, rpc_auth_unix,
	rpc_msgaccepted, rpc_call, rpc_autherr, rpc_rejectedcred,
	rpc_auth_kerb;
extern u_long nfs_prog, nfs_vers, nqnfs_prog, nqnfs_vers;
extern time_t nqnfsstarttime;
extern int nonidempotent[NFS_NPROCS];

/*
 * Maps errno values to nfs error numbers.
 * Use NFSERR_IO as the catch all for ones not specifically defined in
 * RFC 1094.
 */
static int nfsrv_errmap[ELAST] = {
	NFSERR_PERM,	NFSERR_NOENT,	NFSERR_IO,	NFSERR_IO,	NFSERR_IO,
	NFSERR_NXIO,	NFSERR_IO,	NFSERR_IO,	NFSERR_IO,	NFSERR_IO,
	NFSERR_IO,	NFSERR_IO,	NFSERR_ACCES,	NFSERR_IO,	NFSERR_IO,
	NFSERR_IO,	NFSERR_EXIST,	NFSERR_IO,	NFSERR_NODEV,	NFSERR_NOTDIR,
	NFSERR_ISDIR,	NFSERR_IO,	NFSERR_IO,	NFSERR_IO,	NFSERR_IO,
	NFSERR_IO,	NFSERR_FBIG,	NFSERR_NOSPC,	NFSERR_IO,	NFSERR_ROFS,
	NFSERR_IO,	NFSERR_IO,	NFSERR_IO,	NFSERR_IO,	NFSERR_IO,
	NFSERR_IO,	NFSERR_IO,	NFSERR_IO,	NFSERR_IO,	NFSERR_IO,
	NFSERR_IO,	NFSERR_IO,	NFSERR_IO,	NFSERR_IO,	NFSERR_IO,
	NFSERR_IO,	NFSERR_IO,	NFSERR_IO,	NFSERR_IO,	NFSERR_IO,
	NFSERR_IO,	NFSERR_IO,	NFSERR_IO,	NFSERR_IO,	NFSERR_IO,
	NFSERR_IO,	NFSERR_IO,	NFSERR_IO,	NFSERR_IO,	NFSERR_IO,
	NFSERR_IO,	NFSERR_IO,	NFSERR_NAMETOL,	NFSERR_IO,	NFSERR_IO,
	NFSERR_NOTEMPTY, NFSERR_IO,	NFSERR_IO,	NFSERR_DQUOT,	NFSERR_STALE,
	NFSERR_IO,	NFSERR_IO,	NFSERR_IO,	NFSERR_IO,	NFSERR_IO,
	NFSERR_IO,	NFSERR_IO,	NFSERR_IO,	NFSERR_IO,	NFSERR_IO,
	NFSERR_IO,
};

/*
 * Defines which timer to use for the procnum.
 * 0 - default
 * 1 - getattr
 * 2 - lookup
 * 3 - read
 * 4 - write
 */
static int proct[NFS_NPROCS] = {
	0, 1, 0, 0, 2, 3, 3, 0, 4, 0, 0, 0, 0, 0, 0, 0, 3, 0, 3, 0, 0, 0,
};

/*
 * There is a congestion window for outstanding rpcs maintained per mount
 * point. The cwnd size is adjusted in roughly the way that:
 * Van Jacobson, Congestion Avoidance and Control, In "Proceedings of
 * SIGCOMM '88". ACM, August 1988.
 * describes for TCP. The cwnd size is chopped in half on a retransmit timeout
 * and incremented by 1/cwnd when each rpc reply is received and a full cwnd
 * of rpcs is in progress.
 * (The sent count and cwnd are scaled for integer arith.)
 * Variants of "slow start" were tried and were found to be too much of a
 * performance hit (ave. rtt 3 times larger),
 * I suspect due to the large rtt that nfs rpcs have.
 */
#define	NFS_CWNDSCALE	256
#define	NFS_MAXCWND	(NFS_CWNDSCALE * 32)
static int nfs_backoff[8] = { 2, 4, 8, 16, 32, 64, 128, 256, };
int	nfs_sbwait();
void	nfs_disconnect(), nfs_realign(), nfsrv_wakenfsd(), nfs_sndunlock();
void	nfs_rcvunlock(), nqnfs_serverd();
struct mbuf *nfsm_rpchead();
int nfsrtton = 0;
struct nfsrtt nfsrtt;
struct nfsd nfsd_head;

int	nfsrv_null(),
	nfsrv_getattr(),
	nfsrv_setattr(),
	nfsrv_lookup(),
	nfsrv_readlink(),
	nfsrv_read(),
	nfsrv_write(),
	nfsrv_create(),
	nfsrv_remove(),
	nfsrv_rename(),
	nfsrv_link(),
	nfsrv_symlink(),
	nfsrv_mkdir(),
	nfsrv_rmdir(),
	nfsrv_readdir(),
	nfsrv_statfs(),
	nfsrv_noop(),
	nqnfsrv_readdirlook(),
	nqnfsrv_getlease(),
	nqnfsrv_vacated();

int (*nfsrv_procs[NFS_NPROCS])() = {
	nfsrv_null,
	nfsrv_getattr,
	nfsrv_setattr,
	nfsrv_noop,
	nfsrv_lookup,
	nfsrv_readlink,
	nfsrv_read,
	nfsrv_noop,
	nfsrv_write,
	nfsrv_create,
	nfsrv_remove,
	nfsrv_rename,
	nfsrv_link,
	nfsrv_symlink,
	nfsrv_mkdir,
	nfsrv_rmdir,
	nfsrv_readdir,
	nfsrv_statfs,
	nqnfsrv_readdirlook,
	nqnfsrv_getlease,
	nqnfsrv_vacated,
};

struct nfsreq nfsreqh;

/*
 * Initialize sockets and congestion for a new NFS connection.
 * We do not free the sockaddr if error.
 */
nfs_connect(nmp, rep)
	register struct nfsmount *nmp;
	struct nfsreq *rep;
{
	register struct socket *so;
	int s, error, rcvreserve, sndreserve;
	struct sockaddr *saddr;
	struct sockaddr_in *sin;
	struct mbuf *m;
	u_short tport;

	nmp->nm_so = (struct socket *)0;
	saddr = mtod(nmp->nm_nam, struct sockaddr *);
	if (error = socreate(saddr->sa_family,
	    &nmp->nm_so, nmp->nm_sotype, nmp->nm_soproto))
		goto bad;
	so = nmp->nm_so;
	nmp->nm_soflags = so->so_proto->pr_flags;
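
	/*
	 * Explanatory note (not in the original): the loop below acquires a
	 * privileged local port by binding to INADDR_ANY and counting down
	 * from IPPORT_RESERVED - 1 while sobind() keeps failing with
	 * EADDRINUSE, since some servers will only accept requests from a
	 * client port below IPPORT_RESERVED.
	 */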
	/*
	 * Some servers require that the client port be a reserved port number.
	 */
	if (saddr->sa_family == AF_INET && (nmp->nm_flag & NFSMNT_RESVPORT)) {
		MGET(m, M_WAIT, MT_SONAME);
		sin = mtod(m, struct sockaddr_in *);
		sin->sin_len = m->m_len = sizeof (struct sockaddr_in);
		sin->sin_family = AF_INET;
		sin->sin_addr.s_addr = INADDR_ANY;
		tport = IPPORT_RESERVED - 1;
		sin->sin_port = htons(tport);
		while ((error = sobind(so, m)) == EADDRINUSE &&
		    --tport > IPPORT_RESERVED / 2)
			sin->sin_port = htons(tport);
		m_freem(m);
		if (error)
			goto bad;
	}

	/*
	 * Protocols that do not require connections may be optionally left
	 * unconnected for servers that reply from a port other than NFS_PORT.
	 */
	if (nmp->nm_flag & NFSMNT_NOCONN) {
		if (nmp->nm_soflags & PR_CONNREQUIRED) {
			error = ENOTCONN;
			goto bad;
		}
	} else {
		if (error = soconnect(so, nmp->nm_nam))
			goto bad;

		/*
		 * Wait for the connection to complete. Cribbed from the
		 * connect system call but with the wait timing out so
		 * that interruptible mounts don't hang here for a long time.
		 */
		s = splnet();
		while ((so->so_state & SS_ISCONNECTING) && so->so_error == 0) {
			(void) tsleep((caddr_t)&so->so_timeo, PSOCK,
			    "nfscon", 2 * hz);
			if ((so->so_state & SS_ISCONNECTING) &&
			    so->so_error == 0 && rep &&
			    (error = nfs_sigintr(nmp, rep, rep->r_procp))) {
				so->so_state &= ~SS_ISCONNECTING;
				splx(s);
				goto bad;
			}
		}
		if (so->so_error) {
			error = so->so_error;
			so->so_error = 0;
			splx(s);
			goto bad;
		}
		splx(s);
	}
	if (nmp->nm_flag & (NFSMNT_SOFT | NFSMNT_INT)) {
		so->so_rcv.sb_timeo = (5 * hz);
		so->so_snd.sb_timeo = (5 * hz);
	} else {
		so->so_rcv.sb_timeo = 0;
		so->so_snd.sb_timeo = 0;
	}
	if (nmp->nm_sotype == SOCK_DGRAM) {
		sndreserve = nmp->nm_wsize + NFS_MAXPKTHDR;
		rcvreserve = nmp->nm_rsize + NFS_MAXPKTHDR;
	} else if (nmp->nm_sotype == SOCK_SEQPACKET) {
		sndreserve = (nmp->nm_wsize + NFS_MAXPKTHDR) * 2;
		rcvreserve = (nmp->nm_rsize + NFS_MAXPKTHDR) * 2;
	} else {
		if (nmp->nm_sotype != SOCK_STREAM)
			panic("nfscon sotype");
		if (so->so_proto->pr_flags & PR_CONNREQUIRED) {
			MGET(m, M_WAIT, MT_SOOPTS);
			*mtod(m, int *) = 1;
			m->m_len = sizeof(int);
			sosetopt(so, SOL_SOCKET, SO_KEEPALIVE, m);
		}
		if (so->so_proto->pr_protocol == IPPROTO_TCP) {
			MGET(m, M_WAIT, MT_SOOPTS);
			*mtod(m, int *) = 1;
			m->m_len = sizeof(int);
			sosetopt(so, IPPROTO_TCP, TCP_NODELAY, m);
		}
		sndreserve = (nmp->nm_wsize + NFS_MAXPKTHDR + sizeof (u_long))
		    * 2;
		rcvreserve = (nmp->nm_rsize + NFS_MAXPKTHDR + sizeof (u_long))
		    * 2;
	}
	if (error = soreserve(so, sndreserve, rcvreserve))
		goto bad;
	so->so_rcv.sb_flags |= SB_NOINTR;
	so->so_snd.sb_flags |= SB_NOINTR;

	/* Initialize other non-zero congestion variables */
	nmp->nm_srtt[0] = nmp->nm_srtt[1] = nmp->nm_srtt[2] = nmp->nm_srtt[3] =
	    nmp->nm_srtt[4] = (NFS_TIMEO << 3);
	nmp->nm_sdrtt[0] = nmp->nm_sdrtt[1] = nmp->nm_sdrtt[2] =
	    nmp->nm_sdrtt[3] = nmp->nm_sdrtt[4] = 0;
	nmp->nm_cwnd = NFS_MAXCWND / 2;	    /* Initial send window */
	nmp->nm_sent = 0;
	nmp->nm_timeouts = 0;
	return (0);

bad:
	nfs_disconnect(nmp);
	return (error);
}

/*
 * Reconnect routine:
 * Called when a connection is broken on a reliable protocol.
 * - clean up the old socket
 * - nfs_connect() again
 * - set R_MUSTRESEND for all outstanding requests on mount point
 * If this fails the mount point is DEAD!
 * nb: Must be called with the nfs_sndlock() set on the mount point.
 */
nfs_reconnect(rep)
	register struct nfsreq *rep;
{
	register struct nfsreq *rp;
	register struct nfsmount *nmp = rep->r_nmp;
	int error;

	nfs_disconnect(nmp);
	while (error = nfs_connect(nmp, rep)) {
		if (error == EINTR || error == ERESTART)
			return (EINTR);
		(void) tsleep((caddr_t)&lbolt, PSOCK, "nfscon", 0);
	}

	/*
	 * Loop through outstanding request list and fix up all requests
	 * on old socket.
	 */
	rp = nfsreqh.r_next;
	while (rp != &nfsreqh) {
		if (rp->r_nmp == nmp)
			rp->r_flags |= R_MUSTRESEND;
		rp = rp->r_next;
	}
	return (0);
}

/*
 * NFS disconnect. Clean up and unlink.
 */
void
nfs_disconnect(nmp)
	register struct nfsmount *nmp;
{
	register struct socket *so;

	if (nmp->nm_so) {
		so = nmp->nm_so;
		nmp->nm_so = (struct socket *)0;
		soshutdown(so, 2);
		soclose(so);
	}
}

/*
 * This is the nfs send routine. For connection based socket types, it
 * must be called with an nfs_sndlock() on the socket.
 * "rep == NULL" indicates that it has been called from a server.
 * For the client side:
 * - return EINTR if the RPC is terminated, 0 otherwise
 * - set R_MUSTRESEND if the send fails for any reason
 * - do any cleanup required by recoverable socket errors (???)
 * For the server side:
 * - return EINTR or ERESTART if interrupted by a signal
 * - return EPIPE if a connection is lost for connection based sockets (TCP...)
 * - do any cleanup required by recoverable socket errors (???)
 */
nfs_send(so, nam, top, rep)
	register struct socket *so;
	struct mbuf *nam;
	register struct mbuf *top;
	struct nfsreq *rep;
{
	struct mbuf *sendnam;
	int error, soflags, flags;

	if (rep) {
		if (rep->r_flags & R_SOFTTERM) {
			m_freem(top);
			return (EINTR);
		}
		if ((so = rep->r_nmp->nm_so) == NULL) {
			rep->r_flags |= R_MUSTRESEND;
			m_freem(top);
			return (0);
		}
		rep->r_flags &= ~R_MUSTRESEND;
		soflags = rep->r_nmp->nm_soflags;
	} else
		soflags = so->so_proto->pr_flags;
	if ((soflags & PR_CONNREQUIRED) || (so->so_state & SS_ISCONNECTED))
		sendnam = (struct mbuf *)0;
	else
		sendnam = nam;
	if (so->so_type == SOCK_SEQPACKET)
		flags = MSG_EOR;
	else
		flags = 0;

	error = sosend(so, sendnam, (struct uio *)0, top,
	    (struct mbuf *)0, flags);
	if (error) {
		if (rep) {
			log(LOG_INFO, "nfs send error %d for server %s\n", error,
			    rep->r_nmp->nm_mountp->mnt_stat.f_mntfromname);
			/*
			 * Deal with errors for the client side.
			 */
			if (rep->r_flags & R_SOFTTERM)
				error = EINTR;
			else
				rep->r_flags |= R_MUSTRESEND;
		} else
			log(LOG_INFO, "nfsd send error %d\n", error);

		/*
		 * Handle any recoverable (soft) socket errors here. (???)
		 */
		if (error != EINTR && error != ERESTART &&
		    error != EWOULDBLOCK && error != EPIPE)
			error = 0;
	}
	return (error);
}

/*
 * Receive a Sun RPC Request/Reply. For SOCK_DGRAM, the work is all
 * done by soreceive(), but for SOCK_STREAM we must deal with the Record
 * Mark and consolidate the data into a new mbuf list.
 * nb: Sometimes TCP passes the data up to soreceive() in long lists of
 * small mbufs.
 * For SOCK_STREAM we must be very careful to read an entire record once
 * we have read any of it, even if the system call has been interrupted.
 */
nfs_receive(rep, aname, mp)
	register struct nfsreq *rep;
	struct mbuf **aname;
	struct mbuf **mp;
{
	register struct socket *so;
	struct uio auio;
	struct iovec aio;
	register struct mbuf *m;
	struct mbuf *control;
	u_long len;
	struct mbuf **getnam;
	int error, sotype, rcvflg;
	struct proc *p = curproc;	/* XXX */

	/*
	 * Set up arguments for soreceive()
	 */
	*mp = (struct mbuf *)0;
	*aname = (struct mbuf *)0;
	sotype = rep->r_nmp->nm_sotype;

	/*
	 * For reliable protocols, lock against other senders/receivers
	 * in case a reconnect is necessary.
	 * For SOCK_STREAM, first get the Record Mark to find out how much
	 * more there is to get.
	 * We must lock the socket against other receivers
	 * until we have an entire rpc request/reply.
	 */
	if (sotype != SOCK_DGRAM) {
		if (error = nfs_sndlock(&rep->r_nmp->nm_flag, rep))
			return (error);
tryagain:
		/*
		 * Check for fatal errors and resending request.
		 */
		/*
		 * Ugh: If a reconnect attempt just happened, nm_so
		 * would have changed. NULL indicates a failed
		 * attempt that has essentially shut down this
		 * mount point.
		 */
		if (rep->r_mrep || (rep->r_flags & R_SOFTTERM)) {
			nfs_sndunlock(&rep->r_nmp->nm_flag);
			return (EINTR);
		}
		if ((so = rep->r_nmp->nm_so) == NULL) {
			if (error = nfs_reconnect(rep)) {
				nfs_sndunlock(&rep->r_nmp->nm_flag);
				return (error);
			}
			goto tryagain;
		}
		while (rep->r_flags & R_MUSTRESEND) {
			m = m_copym(rep->r_mreq, 0, M_COPYALL, M_WAIT);
			nfsstats.rpcretries++;
			if (error = nfs_send(so, rep->r_nmp->nm_nam, m, rep)) {
				if (error == EINTR || error == ERESTART ||
				    (error = nfs_reconnect(rep))) {
					nfs_sndunlock(&rep->r_nmp->nm_flag);
					return (error);
				}
				goto tryagain;
			}
		}
		nfs_sndunlock(&rep->r_nmp->nm_flag);
		if (sotype == SOCK_STREAM) {
			aio.iov_base = (caddr_t) &len;
			aio.iov_len = sizeof(u_long);
			auio.uio_iov = &aio;
			auio.uio_iovcnt = 1;
			auio.uio_segflg = UIO_SYSSPACE;
			auio.uio_rw = UIO_READ;
			auio.uio_offset = 0;
			auio.uio_resid = sizeof(u_long);
			auio.uio_procp = p;
			do {
				rcvflg = MSG_WAITALL;
				error = soreceive(so, (struct mbuf **)0, &auio,
				    (struct mbuf **)0, (struct mbuf **)0, &rcvflg);
				if (error == EWOULDBLOCK && rep) {
					if (rep->r_flags & R_SOFTTERM)
						return (EINTR);
				}
			} while (error == EWOULDBLOCK);
			if (!error && auio.uio_resid > 0) {
				log(LOG_INFO,
				    "short receive (%d/%d) from nfs server %s\n",
				    sizeof(u_long) - auio.uio_resid,
				    sizeof(u_long),
				    rep->r_nmp->nm_mountp->mnt_stat.f_mntfromname);
				error = EPIPE;
			}
			if (error)
				goto errout;
			len = ntohl(len) & ~0x80000000;
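			/*
			 * Explanatory note (not in the original): the u_long
			 * just read is the Sun RPC record mark.  Its high
			 * order bit is the "last fragment" flag, masked off
			 * above, and the low 31 bits give the length of the
			 * fragment that follows.
			 */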
			/*
			 * This is SERIOUS! We are out of sync with the sender
			 * and forcing a disconnect/reconnect is all I can do.
			 */
			if (len > NFS_MAXPACKET) {
				log(LOG_ERR, "%s (%d) from nfs server %s\n",
				    "impossible packet length",
				    len,
				    rep->r_nmp->nm_mountp->mnt_stat.f_mntfromname);
				error = EFBIG;
				goto errout;
			}
			auio.uio_resid = len;
			do {
				rcvflg = MSG_WAITALL;
				error = soreceive(so, (struct mbuf **)0,
				    &auio, mp, (struct mbuf **)0, &rcvflg);
			} while (error == EWOULDBLOCK || error == EINTR ||
			    error == ERESTART);
			if (!error && auio.uio_resid > 0) {
				log(LOG_INFO,
				    "short receive (%d/%d) from nfs server %s\n",
				    len - auio.uio_resid, len,
				    rep->r_nmp->nm_mountp->mnt_stat.f_mntfromname);
				error = EPIPE;
			}
		} else {
			/*
			 * NB: Since uio_resid is big, MSG_WAITALL is ignored
			 * and soreceive() will return when it has either a
			 * control msg or a data msg.
			 * We have no use for control msg., but must grab them
			 * and then throw them away so we know what is going
			 * on.
			 */
			auio.uio_resid = len = 100000000;	/* Anything Big */
			auio.uio_procp = p;
			do {
				rcvflg = 0;
				error = soreceive(so, (struct mbuf **)0,
				    &auio, mp, &control, &rcvflg);
				if (control)
					m_freem(control);
				if (error == EWOULDBLOCK && rep) {
					if (rep->r_flags & R_SOFTTERM)
						return (EINTR);
				}
			} while (error == EWOULDBLOCK ||
			    (!error && *mp == NULL && control));
			if ((rcvflg & MSG_EOR) == 0)
				printf("Egad!!\n");
			if (!error && *mp == NULL)
				error = EPIPE;
			len -= auio.uio_resid;
		}
errout:
		if (error && error != EINTR && error != ERESTART) {
			m_freem(*mp);
			*mp = (struct mbuf *)0;
			if (error != EPIPE)
				log(LOG_INFO,
				    "receive error %d from nfs server %s\n",
				    error,
				    rep->r_nmp->nm_mountp->mnt_stat.f_mntfromname);
			error = nfs_sndlock(&rep->r_nmp->nm_flag, rep);
			if (!error)
				error = nfs_reconnect(rep);
			if (!error)
				goto tryagain;
		}
	} else {
		if ((so = rep->r_nmp->nm_so) == NULL)
			return (EACCES);
		if (so->so_state & SS_ISCONNECTED)
			getnam = (struct mbuf **)0;
		else
			getnam = aname;
		auio.uio_resid = len = 1000000;
		auio.uio_procp = p;
		do {
			rcvflg = 0;
			error = soreceive(so, getnam, &auio, mp,
			    (struct mbuf **)0, &rcvflg);
			if (error == EWOULDBLOCK &&
			    (rep->r_flags & R_SOFTTERM))
				return (EINTR);
		} while (error == EWOULDBLOCK);
		len -= auio.uio_resid;
	}
	if (error) {
		m_freem(*mp);
		*mp = (struct mbuf *)0;
	}
	/*
	 * Search for any mbufs that are not a multiple of 4 bytes long
	 * or with m_data not longword aligned.
	 * These could cause pointer alignment problems, so copy them to
	 * well aligned mbufs.
	 */
	nfs_realign(*mp, 5 * NFSX_UNSIGNED);
	return (error);
}

/*
 * Implement receipt of reply on a socket.
 * We must search through the list of received datagrams matching them
 * with outstanding requests using the xid, until ours is found.
 */
/* ARGSUSED */
nfs_reply(myrep)
	struct nfsreq *myrep;
{
	register struct nfsreq *rep;
	register struct nfsmount *nmp = myrep->r_nmp;
	register long t1;
	struct mbuf *mrep, *nam, *md;
	u_long rxid, *tl;
	caddr_t dpos, cp2;
	int error;

	/*
	 * Loop around until we get our own reply
	 */
	for (;;) {
		/*
		 * Lock against other receivers so that I don't get stuck in
		 * sbwait() after someone else has received my reply for me.
		 * Also necessary for connection based protocols to avoid
		 * race conditions during a reconnect.
		 */
		if (error = nfs_rcvlock(myrep))
			return (error);
		/* Already received, bye bye */
		if (myrep->r_mrep != NULL) {
			nfs_rcvunlock(&nmp->nm_flag);
			return (0);
		}
		/*
		 * Get the next Rpc reply off the socket
		 */
		error = nfs_receive(myrep, &nam, &mrep);
		nfs_rcvunlock(&nmp->nm_flag);
		if (error) printf("rcv err=%d\n", error);
		if (error) {

			/*
			 * Ignore routing errors on connectionless protocols??
			 */
			if (NFSIGNORE_SOERROR(nmp->nm_soflags, error)) {
				nmp->nm_so->so_error = 0;
				continue;
			}
			return (error);
		}
		if (nam)
			m_freem(nam);

		/*
		 * Get the xid and check that it is an rpc reply
		 */
		md = mrep;
		dpos = mtod(md, caddr_t);
		nfsm_dissect(tl, u_long *, 2*NFSX_UNSIGNED);
		rxid = *tl++;
		if (*tl != rpc_reply) {
			if (nmp->nm_flag & NFSMNT_NQNFS) {
				if (nqnfs_callback(nmp, mrep, md, dpos))
					nfsstats.rpcinvalid++;
			} else {
				nfsstats.rpcinvalid++;
				m_freem(mrep);
			}
nfsmout:
			continue;
		}

		/*
		 * Loop through the request list to match up the reply
		 * Iff no match, just drop the datagram
		 */
		rep = nfsreqh.r_next;
		while (rep != &nfsreqh) {
			if (rep->r_mrep == NULL && rxid == rep->r_xid) {
				/* Found it.. */
				rep->r_mrep = mrep;
				rep->r_md = md;
				rep->r_dpos = dpos;
				if (nfsrtton) {
					struct rttl *rt;

					rt = &nfsrtt.rttl[nfsrtt.pos];
					rt->proc = rep->r_procnum;
					rt->rto = NFS_RTO(nmp, proct[rep->r_procnum]);
					rt->sent = nmp->nm_sent;
					rt->cwnd = nmp->nm_cwnd;
					rt->srtt = nmp->nm_srtt[proct[rep->r_procnum] - 1];
					rt->sdrtt = nmp->nm_sdrtt[proct[rep->r_procnum] - 1];
					rt->fsid = nmp->nm_mountp->mnt_stat.f_fsid;
					rt->tstamp = time;
					if (rep->r_flags & R_TIMING)
						rt->rtt = rep->r_rtt;
					else
						rt->rtt = 1000000;
					nfsrtt.pos = (nfsrtt.pos + 1) % NFSRTTLOGSIZ;
				}
				/*
				 * Update congestion window.
				 * Do the additive increase of
				 * one rpc/rtt.
				 */
				if (nmp->nm_cwnd <= nmp->nm_sent) {
					nmp->nm_cwnd +=
					    (NFS_CWNDSCALE * NFS_CWNDSCALE +
					    (nmp->nm_cwnd >> 1)) / nmp->nm_cwnd;
					if (nmp->nm_cwnd > NFS_MAXCWND)
						nmp->nm_cwnd = NFS_MAXCWND;
				}
				nmp->nm_sent -= NFS_CWNDSCALE;
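				/*
				 * Explanatory note (not in the original):
				 * with nm_cwnd and nm_sent both scaled by
				 * NFS_CWNDSCALE (256), the increment above is
				 * roughly NFS_CWNDSCALE divided by the window
				 * in rpcs.  E.g. with a window of 8 rpcs
				 * (nm_cwnd == 2048) each reply adds about 32,
				 * i.e. 1/8 of an rpc, so a full window of
				 * replies opens the window by about one rpc:
				 * the additive increase mentioned above.
				 */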
				/*
				 * Update rtt using a gain of 0.125 on the mean
				 * and a gain of 0.25 on the deviation.
				 */
				if (rep->r_flags & R_TIMING) {
					/*
					 * Since the timer resolution of
					 * NFS_HZ is so coarse, it can often
					 * result in r_rtt == 0. Since
					 * r_rtt == N means that the actual
					 * rtt is between N+dt and N+2-dt ticks,
					 * add 1.
					 */
					t1 = rep->r_rtt + 1;
					t1 -= (NFS_SRTT(rep) >> 3);
					NFS_SRTT(rep) += t1;
					if (t1 < 0)
						t1 = -t1;
					t1 -= (NFS_SDRTT(rep) >> 2);
					NFS_SDRTT(rep) += t1;
				}
				nmp->nm_timeouts = 0;
				break;
			}
			rep = rep->r_next;
		}
		/*
		 * If not matched to a request, drop it.
		 * If it's mine, get out.
		 */
		if (rep == &nfsreqh) {
			nfsstats.rpcunexpected++;
			m_freem(mrep);
		} else if (rep == myrep) {
			if (rep->r_mrep == NULL)
				panic("nfsreply nil");
			return (0);
		}
	}
}

/*
 * nfs_request - goes something like this
 *	- fill in request struct
 *	- links it into list
 *	- calls nfs_send() for first transmit
 *	- calls nfs_receive() to get reply
 *	- break down rpc header and return with nfs reply pointed to
 *	  by mrep or error
 * nb: always frees up mreq mbuf list
 */
nfs_request(vp, mrest, procnum, procp, cred, mrp, mdp, dposp)
	struct vnode *vp;
	struct mbuf *mrest;
	int procnum;
	struct proc *procp;
	struct ucred *cred;
	struct mbuf **mrp;
	struct mbuf **mdp;
	caddr_t *dposp;
{
	register struct mbuf *m, *mrep;
	register struct nfsreq *rep;
	register u_long *tl;
	register int i;
	struct nfsmount *nmp;
	struct mbuf *md, *mheadend;
	struct nfsreq *reph;
	struct nfsnode *tp, *np;
	time_t reqtime, waituntil;
	caddr_t dpos, cp2;
	int t1, nqlflag, cachable, s, error = 0, mrest_len, auth_len, auth_type;
	int trylater_delay = NQ_TRYLATERDEL, trylater_cnt = 0, failed_auth = 0;
	u_long xid;
	char *auth_str;

	nmp = VFSTONFS(vp->v_mount);
	MALLOC(rep, struct nfsreq *, sizeof(struct nfsreq), M_NFSREQ, M_WAITOK);
	rep->r_nmp = nmp;
	rep->r_vp = vp;
	rep->r_procp = procp;
	rep->r_procnum = procnum;
	i = 0;
	m = mrest;
	while (m) {
		i += m->m_len;
		m = m->m_next;
	}
	mrest_len = i;

	/*
	 * Get the RPC header with authorization.
	 */
kerbauth:
	auth_str = (char *)0;
	if (nmp->nm_flag & NFSMNT_KERB) {
		if (failed_auth) {
			error = nfs_getauth(nmp, rep, cred, &auth_type,
			    &auth_str, &auth_len);
			if (error) {
				free((caddr_t)rep, M_NFSREQ);
				m_freem(mrest);
				return (error);
			}
		} else {
			auth_type = RPCAUTH_UNIX;
			auth_len = 5 * NFSX_UNSIGNED;
		}
	} else {
		auth_type = RPCAUTH_UNIX;
		if (cred->cr_ngroups < 1)
			panic("nfsreq nogrps");
		auth_len = ((((cred->cr_ngroups - 1) > nmp->nm_numgrps) ?
		    nmp->nm_numgrps : (cred->cr_ngroups - 1)) << 2) +
		    5 * NFSX_UNSIGNED;
	}
	m = nfsm_rpchead(cred, (nmp->nm_flag & NFSMNT_NQNFS), procnum,
	    auth_type, auth_len, auth_str, mrest, mrest_len, &mheadend, &xid);
	if (auth_str)
		free(auth_str, M_TEMP);

	/*
	 * For stream protocols, insert a Sun RPC Record Mark.
	 */
	if (nmp->nm_sotype == SOCK_STREAM) {
		M_PREPEND(m, NFSX_UNSIGNED, M_WAIT);
		*mtod(m, u_long *) = htonl(0x80000000 |
		    (m->m_pkthdr.len - NFSX_UNSIGNED));
	}
	rep->r_mreq = m;
	rep->r_xid = xid;
tryagain:
	if (nmp->nm_flag & NFSMNT_SOFT)
		rep->r_retry = nmp->nm_retry;
	else
		rep->r_retry = NFS_MAXREXMIT + 1;	/* past clip limit */
	rep->r_rtt = rep->r_rexmit = 0;
	if (proct[procnum] > 0)
		rep->r_flags = R_TIMING;
	else
		rep->r_flags = 0;
	rep->r_mrep = NULL;

	/*
	 * Do the client side RPC.
	 */
	nfsstats.rpcrequests++;
	/*
	 * Chain request into list of outstanding requests. Be sure
	 * to put it LAST so timer finds oldest requests first.
	 */
	s = splsoftclock();
	reph = &nfsreqh;
	reph->r_prev->r_next = rep;
	rep->r_prev = reph->r_prev;
	reph->r_prev = rep;
	rep->r_next = reph;

	/* Get send time for nqnfs */
	reqtime = time.tv_sec;

	/*
	 * If backing off another request or avoiding congestion, don't
	 * send this one now but let timer do it. If not timing a request,
	 * do it now.
	 */
	if (nmp->nm_so && (nmp->nm_sotype != SOCK_DGRAM ||
	    (nmp->nm_flag & NFSMNT_DUMBTIMR) ||
	    nmp->nm_sent < nmp->nm_cwnd)) {
		splx(s);
		if (nmp->nm_soflags & PR_CONNREQUIRED)
			error = nfs_sndlock(&nmp->nm_flag, rep);
		if (!error) {
			m = m_copym(m, 0, M_COPYALL, M_WAIT);
			error = nfs_send(nmp->nm_so, nmp->nm_nam, m, rep);
			if (nmp->nm_soflags & PR_CONNREQUIRED)
				nfs_sndunlock(&nmp->nm_flag);
		}
		if (!error && (rep->r_flags & R_MUSTRESEND) == 0) {
			nmp->nm_sent += NFS_CWNDSCALE;
			rep->r_flags |= R_SENT;
		}
	} else {
		splx(s);
		rep->r_rtt = -1;
	}

	/*
	 * Wait for the reply from our send or the timer's.
	 */
	if (!error || error == EPIPE)
		error = nfs_reply(rep);

	/*
	 * RPC done, unlink the request.
	 */
	s = splsoftclock();
	rep->r_prev->r_next = rep->r_next;
	rep->r_next->r_prev = rep->r_prev;
	splx(s);

	/*
	 * If there was a successful reply and a tprintf msg.
	 * tprintf a response.
	 */
	if (!error && (rep->r_flags & R_TPRINTFMSG))
		nfs_msg(rep->r_procp, nmp->nm_mountp->mnt_stat.f_mntfromname,
		    "is alive again");
	mrep = rep->r_mrep;
	md = rep->r_md;
	dpos = rep->r_dpos;
	if (error) {
		m_freem(rep->r_mreq);
		free((caddr_t)rep, M_NFSREQ);
		return (error);
	}

	/*
	 * break down the rpc header and check if ok
	 */
	nfsm_dissect(tl, u_long *, 3*NFSX_UNSIGNED);
	if (*tl++ == rpc_msgdenied) {
		if (*tl == rpc_mismatch)
			error = EOPNOTSUPP;
		else if ((nmp->nm_flag & NFSMNT_KERB) && *tl++ == rpc_autherr) {
			if (*tl == rpc_rejectedcred && failed_auth == 0) {
				failed_auth++;
				mheadend->m_next = (struct mbuf *)0;
				m_freem(mrep);
				m_freem(rep->r_mreq);
				goto kerbauth;
			} else
				error = EAUTH;
		} else
			error = EACCES;
		m_freem(mrep);
		m_freem(rep->r_mreq);
		free((caddr_t)rep, M_NFSREQ);
		return (error);
	}

	/*
	 * skip over the auth_verf, someday we may want to cache auth_short's
	 * for nfs_reqhead(), but for now just dump it
	 */
	if (*++tl != 0) {
		i = nfsm_rndup(fxdr_unsigned(long, *tl));
		nfsm_adv(i);
	}
	nfsm_dissect(tl, u_long *, NFSX_UNSIGNED);
	/* 0 == ok */
	if (*tl == 0) {
		nfsm_dissect(tl, u_long *, NFSX_UNSIGNED);
		if (*tl != 0) {
			error = fxdr_unsigned(int, *tl);
			m_freem(mrep);
			if ((nmp->nm_flag & NFSMNT_NQNFS) &&
			    error == NQNFS_TRYLATER) {
				error = 0;
				waituntil = time.tv_sec + trylater_delay;
				while (time.tv_sec < waituntil)
					(void) tsleep((caddr_t)&lbolt,
					    PSOCK, "nqnfstry", 0);
				trylater_delay *= nfs_backoff[trylater_cnt];
				if (trylater_cnt < 7)
					trylater_cnt++;
				goto tryagain;
			}
			m_freem(rep->r_mreq);
			free((caddr_t)rep, M_NFSREQ);
			return (error);
		}
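		/*
		 * Explanatory note (not in the original): each NQNFS_TRYLATER
		 * reply handled above multiplies trylater_delay by the next
		 * nfs_backoff[] entry (2, 4, 8, ...), so a busy nqnfs server
		 * is retried progressively less often.
		 */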

		/*
		 * For nqnfs, get any lease in reply
		 */
		if (nmp->nm_flag & NFSMNT_NQNFS) {
			nfsm_dissect(tl, u_long *, NFSX_UNSIGNED);
			if (*tl) {
				np = VTONFS(vp);
				nqlflag = fxdr_unsigned(int, *tl);
				nfsm_dissect(tl, u_long *, 4*NFSX_UNSIGNED);
				cachable = fxdr_unsigned(int, *tl++);
				reqtime += fxdr_unsigned(int, *tl++);
				if (reqtime > time.tv_sec) {
					if (np->n_tnext) {
						if (np->n_tnext == (struct nfsnode *)nmp)
							nmp->nm_tprev = np->n_tprev;
						else
							np->n_tnext->n_tprev = np->n_tprev;
						if (np->n_tprev == (struct nfsnode *)nmp)
							nmp->nm_tnext = np->n_tnext;
						else
							np->n_tprev->n_tnext = np->n_tnext;
						if (nqlflag == NQL_WRITE)
							np->n_flag |= NQNFSWRITE;
					} else if (nqlflag == NQL_READ)
						np->n_flag &= ~NQNFSWRITE;
					else
						np->n_flag |= NQNFSWRITE;
					if (cachable)
						np->n_flag &= ~NQNFSNONCACHE;
					else
						np->n_flag |= NQNFSNONCACHE;
					np->n_expiry = reqtime;
					fxdr_hyper(tl, &np->n_lrev);
					tp = nmp->nm_tprev;
					while (tp != (struct nfsnode *)nmp &&
					    tp->n_expiry > np->n_expiry)
						tp = tp->n_tprev;
					if (tp == (struct nfsnode *)nmp) {
						np->n_tnext = nmp->nm_tnext;
						nmp->nm_tnext = np;
					} else {
						np->n_tnext = tp->n_tnext;
						tp->n_tnext = np;
					}
					np->n_tprev = tp;
					if (np->n_tnext == (struct nfsnode *)nmp)
						nmp->nm_tprev = np;
					else
						np->n_tnext->n_tprev = np;
				}
			}
		}
		*mrp = mrep;
		*mdp = md;
		*dposp = dpos;
		m_freem(rep->r_mreq);
		FREE((caddr_t)rep, M_NFSREQ);
		return (0);
	}
	m_freem(mrep);
	m_freem(rep->r_mreq);
	free((caddr_t)rep, M_NFSREQ);
	error = EPROTONOSUPPORT;
nfsmout:
	return (error);
}

/*
 * Generate the rpc reply header
 * siz arg. is used to decide if adding a cluster is worthwhile
 */
nfs_rephead(siz, nd, err, cache, frev, mrq, mbp, bposp)
	int siz;
	struct nfsd *nd;
	int err;
	int cache;
	u_quad_t *frev;
	struct mbuf **mrq;
	struct mbuf **mbp;
	caddr_t *bposp;
{
	register u_long *tl;
	register struct mbuf *mreq;
	caddr_t bpos;
	struct mbuf *mb, *mb2;

	MGETHDR(mreq, M_WAIT, MT_DATA);
	mb = mreq;
	/*
	 * If this is a big reply, use a cluster else
	 * try and leave leading space for the lower level headers.
	 */
	siz += RPC_REPLYSIZ;
	if (siz >= MINCLSIZE) {
		MCLGET(mreq, M_WAIT);
	} else
		mreq->m_data += max_hdr;
	tl = mtod(mreq, u_long *);
	mreq->m_len = 6*NFSX_UNSIGNED;
	bpos = ((caddr_t)tl)+mreq->m_len;
	*tl++ = nd->nd_retxid;
	*tl++ = rpc_reply;
	if (err == ERPCMISMATCH || err == NQNFS_AUTHERR) {
		*tl++ = rpc_msgdenied;
		if (err == NQNFS_AUTHERR) {
			*tl++ = rpc_autherr;
			*tl = rpc_rejectedcred;
			mreq->m_len -= NFSX_UNSIGNED;
			bpos -= NFSX_UNSIGNED;
		} else {
			*tl++ = rpc_mismatch;
			*tl++ = txdr_unsigned(2);
			*tl = txdr_unsigned(2);
		}
	} else {
		*tl++ = rpc_msgaccepted;
		*tl++ = 0;
		*tl++ = 0;
		switch (err) {
		case EPROGUNAVAIL:
			*tl = txdr_unsigned(RPC_PROGUNAVAIL);
			break;
		case EPROGMISMATCH:
			*tl = txdr_unsigned(RPC_PROGMISMATCH);
			nfsm_build(tl, u_long *, 2*NFSX_UNSIGNED);
			*tl++ = txdr_unsigned(2);
			*tl = txdr_unsigned(2);	/* someday 3 */
			break;
		case EPROCUNAVAIL:
			*tl = txdr_unsigned(RPC_PROCUNAVAIL);
			break;
		default:
			*tl = 0;
			if (err != VNOVAL) {
				nfsm_build(tl, u_long *, NFSX_UNSIGNED);
				if (err)
					*tl = txdr_unsigned(nfsrv_errmap[err - 1]);
				else
					*tl = 0;
			}
			break;
		};
	}

	/*
	 * For nqnfs, piggyback lease as requested.
	 */
	if (nd->nd_nqlflag != NQL_NOVAL && err == 0) {
		if (nd->nd_nqlflag) {
			nfsm_build(tl, u_long *, 5*NFSX_UNSIGNED);
			*tl++ = txdr_unsigned(nd->nd_nqlflag);
			*tl++ = txdr_unsigned(cache);
			*tl++ = txdr_unsigned(nd->nd_duration);
			txdr_hyper(frev, tl);
		} else {
			if (nd->nd_nqlflag != 0)
				panic("nqreph");
			nfsm_build(tl, u_long *, NFSX_UNSIGNED);
			*tl = 0;
		}
	}
	*mrq = mreq;
	*mbp = mb;
	*bposp = bpos;
	if (err != 0 && err != VNOVAL)
		nfsstats.srvrpc_errs++;
	return (0);
}

/*
 * Nfs timer routine
 * Scan the nfsreq list and retransmit any requests that have timed out
 * To avoid retransmission attempts on STREAM sockets (in the future) make
 * sure to set the r_retry field to 0 (implies nm_retry == 0).
 */
void
nfs_timer(arg)
	void *arg;
{
	register struct nfsreq *rep;
	register struct mbuf *m;
	register struct socket *so;
	register struct nfsmount *nmp;
	register int timeo;
	static long lasttime = 0;
	int s, error;

	s = splnet();
	for (rep = nfsreqh.r_next; rep != &nfsreqh; rep = rep->r_next) {
		nmp = rep->r_nmp;
		if (rep->r_mrep || (rep->r_flags & R_SOFTTERM))
			continue;
		if (nfs_sigintr(nmp, rep, rep->r_procp)) {
			rep->r_flags |= R_SOFTTERM;
			continue;
		}
		if (rep->r_rtt >= 0) {
			rep->r_rtt++;
			if (nmp->nm_flag & NFSMNT_DUMBTIMR)
				timeo = nmp->nm_timeo;
			else
				timeo = NFS_RTO(nmp, proct[rep->r_procnum]);
			if (nmp->nm_timeouts > 0)
				timeo *= nfs_backoff[nmp->nm_timeouts - 1];
			if (rep->r_rtt <= timeo)
				continue;
			if (nmp->nm_timeouts < 8)
				nmp->nm_timeouts++;
		}
		/*
		 * Check for server not responding
		 */
		if ((rep->r_flags & R_TPRINTFMSG) == 0 &&
		    rep->r_rexmit > nmp->nm_deadthresh) {
			nfs_msg(rep->r_procp,
			    nmp->nm_mountp->mnt_stat.f_mntfromname,
			    "not responding");
			rep->r_flags |= R_TPRINTFMSG;
		}
		if (rep->r_rexmit >= rep->r_retry) {	/* too many */
			nfsstats.rpctimeouts++;
			rep->r_flags |= R_SOFTTERM;
			continue;
		}
		if (nmp->nm_sotype != SOCK_DGRAM) {
			if (++rep->r_rexmit > NFS_MAXREXMIT)
				rep->r_rexmit = NFS_MAXREXMIT;
			continue;
		}
		if ((so = nmp->nm_so) == NULL)
			continue;

		/*
		 * If there is enough space and the window allows..
		 *	Resend it
		 * Set r_rtt to -1 in case we fail to send it now.
		 */
		rep->r_rtt = -1;
		if (sbspace(&so->so_snd) >= rep->r_mreq->m_pkthdr.len &&
		    ((nmp->nm_flag & NFSMNT_DUMBTIMR) ||
		    (rep->r_flags & R_SENT) ||
		    nmp->nm_sent < nmp->nm_cwnd) &&
		    (m = m_copym(rep->r_mreq, 0, M_COPYALL, M_DONTWAIT))){
			if ((nmp->nm_flag & NFSMNT_NOCONN) == 0)
				error = (*so->so_proto->pr_usrreq)(so, PRU_SEND, m,
				    (struct mbuf *)0, (struct mbuf *)0);
			else
				error = (*so->so_proto->pr_usrreq)(so, PRU_SEND, m,
				    nmp->nm_nam, (struct mbuf *)0);
			if (error) {
				if (NFSIGNORE_SOERROR(nmp->nm_soflags, error))
					so->so_error = 0;
			} else {
				/*
				 * Iff first send, start timing
				 * else turn timing off, backoff timer
				 * and divide congestion window by 2.
				 */
				if (rep->r_flags & R_SENT) {
					rep->r_flags &= ~R_TIMING;
					if (++rep->r_rexmit > NFS_MAXREXMIT)
						rep->r_rexmit = NFS_MAXREXMIT;
					nmp->nm_cwnd >>= 1;
					if (nmp->nm_cwnd < NFS_CWNDSCALE)
						nmp->nm_cwnd = NFS_CWNDSCALE;
					nfsstats.rpcretries++;
				} else {
					rep->r_flags |= R_SENT;
					nmp->nm_sent += NFS_CWNDSCALE;
				}
				rep->r_rtt = 0;
			}
		}
	}

	/*
	 * Call the nqnfs server timer once a second to handle leases.
	 */
	if (lasttime != time.tv_sec) {
		lasttime = time.tv_sec;
		nqnfs_serverd();
	}
	splx(s);
	timeout(nfs_timer, (caddr_t)0, hz/NFS_HZ);
}

/*
 * Test for a termination condition pending on the process.
 * This is used for NFSMNT_INT mounts.
 */
nfs_sigintr(nmp, rep, p)
	struct nfsmount *nmp;
	struct nfsreq *rep;
	register struct proc *p;
{

	if (rep && (rep->r_flags & R_SOFTTERM))
		return (EINTR);
	if (!(nmp->nm_flag & NFSMNT_INT))
		return (0);
	if (p && p->p_sig && (((p->p_sig &~ p->p_sigmask) &~ p->p_sigignore) &
	    NFSINT_SIGMASK))
		return (EINTR);
	return (0);
}

/*
 * Lock a socket against others.
 * Necessary for STREAM sockets to ensure you get an entire rpc request/reply
 * and also to avoid race conditions between the processes with nfs requests
 * in progress when a reconnect is necessary.
 */
nfs_sndlock(flagp, rep)
	register int *flagp;
	struct nfsreq *rep;
{
	struct proc *p;

	if (rep)
		p = rep->r_procp;
	else
		p = (struct proc *)0;
	while (*flagp & NFSMNT_SNDLOCK) {
		if (nfs_sigintr(rep->r_nmp, rep, p))
			return (EINTR);
		*flagp |= NFSMNT_WANTSND;
		(void) tsleep((caddr_t)flagp, PZERO-1, "nfsndlck", 0);
	}
	*flagp |= NFSMNT_SNDLOCK;
	return (0);
}

/*
 * Unlock the stream socket for others.
 */
void
nfs_sndunlock(flagp)
	register int *flagp;
{

	if ((*flagp & NFSMNT_SNDLOCK) == 0)
		panic("nfs sndunlock");
	*flagp &= ~NFSMNT_SNDLOCK;
	if (*flagp & NFSMNT_WANTSND) {
		*flagp &= ~NFSMNT_WANTSND;
		wakeup((caddr_t)flagp);
	}
}

nfs_rcvlock(rep)
	register struct nfsreq *rep;
{
	register int *flagp = &rep->r_nmp->nm_flag;

	while (*flagp & NFSMNT_RCVLOCK) {
		if (nfs_sigintr(rep->r_nmp, rep, rep->r_procp))
			return (EINTR);
		*flagp |= NFSMNT_WANTRCV;
		(void) tsleep((caddr_t)flagp, PZERO-1, "nfsrcvlck", 0);
	}
	*flagp |= NFSMNT_RCVLOCK;
	return (0);
}

/*
 * Unlock the stream socket for others.
 */
void
nfs_rcvunlock(flagp)
	register int *flagp;
{

	if ((*flagp & NFSMNT_RCVLOCK) == 0)
		panic("nfs rcvunlock");
	*flagp &= ~NFSMNT_RCVLOCK;
	if (*flagp & NFSMNT_WANTRCV) {
		*flagp &= ~NFSMNT_WANTRCV;
		wakeup((caddr_t)flagp);
	}
}
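
/*
 * Explanatory note (not in the original): realignment matters because the
 * nfsm_dissect()/nfsm_build() macros read and write 32 bit XDR fields
 * through u_long pointers into mbuf data, which only works reliably if
 * m_data is longword aligned and each mbuf holds a multiple of 4 bytes.
 */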

/*
 * Check for badly aligned mbuf data areas and
 * realign data in an mbuf list by copying the data areas up, as required.
 */
void
nfs_realign(m, hsiz)
	register struct mbuf *m;
	int hsiz;
{
	register struct mbuf *m2;
	register int siz, mlen, olen;
	register caddr_t tcp, fcp;
	struct mbuf *mnew;

	while (m) {
		/*
		 * This never happens for UDP, rarely happens for TCP
		 * but frequently happens for iso transport.
		 */
		if ((m->m_len & 0x3) || (mtod(m, int) & 0x3)) {
			olen = m->m_len;
			fcp = mtod(m, caddr_t);
			m->m_flags &= ~M_PKTHDR;
			if (m->m_flags & M_EXT)
				m->m_data = m->m_ext.ext_buf;
			else
				m->m_data = m->m_dat;
			m->m_len = 0;
			tcp = mtod(m, caddr_t);
			mnew = m;
			m2 = m->m_next;

			/*
			 * If possible, only put the first invariant part
			 * of the RPC header in the first mbuf.
			 */
			if (olen <= hsiz)
				mlen = hsiz;
			else
				mlen = M_TRAILINGSPACE(m);

			/*
			 * Loop through the mbuf list consolidating data.
			 */
			while (m) {
				while (olen > 0) {
					if (mlen == 0) {
						m2->m_flags &= ~M_PKTHDR;
						if (m2->m_flags & M_EXT)
							m2->m_data = m2->m_ext.ext_buf;
						else
							m2->m_data = m2->m_dat;
						m2->m_len = 0;
						mlen = M_TRAILINGSPACE(m2);
						tcp = mtod(m2, caddr_t);
						mnew = m2;
						m2 = m2->m_next;
					}
					siz = min(mlen, olen);
					if (tcp != fcp)
						bcopy(fcp, tcp, siz);
					mnew->m_len += siz;
					mlen -= siz;
					olen -= siz;
					tcp += siz;
					fcp += siz;
				}
				m = m->m_next;
				if (m) {
					olen = m->m_len;
					fcp = mtod(m, caddr_t);
				}
			}

			/*
			 * Finally, set m_len == 0 for any trailing mbufs that have
			 * been copied out of.
			 */
			while (m2) {
				m2->m_len = 0;
				m2 = m2->m_next;
			}
			return;
		}
		m = m->m_next;
	}
}

/*
 * Socket upcall routine for the nfsd sockets.
 * The caddr_t arg is a pointer to the "struct nfssvc_sock".
 * Essentially do as much as possible non-blocking, else punt and it will
 * be called with M_WAIT from an nfsd.
 */
void
nfsrv_rcv(so, arg, waitflag)
	struct socket *so;
	caddr_t arg;
	int waitflag;
{
	register struct nfssvc_sock *slp = (struct nfssvc_sock *)arg;
	register struct mbuf *m;
	struct mbuf *mp, *nam;
	struct uio auio;
	int flags, error;

	if ((slp->ns_flag & SLP_VALID) == 0)
		return;
#ifdef notdef
	/*
	 * Define this to test for nfsds handling this under heavy load.
	 */
	if (waitflag == M_DONTWAIT) {
		slp->ns_flag |= SLP_NEEDQ; goto dorecs;
	}
#endif
	auio.uio_procp = NULL;
	if (so->so_type == SOCK_STREAM) {
		/*
		 * If there are already records on the queue, defer soreceive()
		 * to an nfsd so that there is feedback to the TCP layer that
		 * the nfs servers are heavily loaded.
		 */
		if (slp->ns_rec && waitflag == M_DONTWAIT) {
			slp->ns_flag |= SLP_NEEDQ;
			goto dorecs;
		}

		/*
		 * Do soreceive().
		 */
		auio.uio_resid = 1000000000;
		flags = MSG_DONTWAIT;
		error = soreceive(so, &nam, &auio, &mp, (struct mbuf **)0, &flags);
		if (error || mp == (struct mbuf *)0) {
			if (error == EWOULDBLOCK)
				slp->ns_flag |= SLP_NEEDQ;
			else
				slp->ns_flag |= SLP_DISCONN;
			goto dorecs;
		}
		m = mp;
		if (slp->ns_rawend) {
			slp->ns_rawend->m_next = m;
			slp->ns_cc += 1000000000 - auio.uio_resid;
		} else {
			slp->ns_raw = m;
			slp->ns_cc = 1000000000 - auio.uio_resid;
		}
		while (m->m_next)
			m = m->m_next;
		slp->ns_rawend = m;
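
		/*
		 * Explanatory note (not in the original): ns_raw/ns_rawend
		 * and ns_cc hold the unparsed stream bytes appended above,
		 * while ns_rec/ns_recend (filled in by nfsrv_getstream())
		 * hold complete RPC records chained through m_nextpkt for
		 * the nfsds to pick up via nfsrv_dorec().
		 */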

		/*
		 * Now try and parse record(s) out of the raw stream data.
		 */
		if (error = nfsrv_getstream(slp, waitflag)) {
			if (error == EPERM)
				slp->ns_flag |= SLP_DISCONN;
			else
				slp->ns_flag |= SLP_NEEDQ;
		}
	} else {
		do {
			auio.uio_resid = 1000000000;
			flags = MSG_DONTWAIT;
			error = soreceive(so, &nam, &auio, &mp,
			    (struct mbuf **)0, &flags);
			if (mp) {
				nfs_realign(mp, 10 * NFSX_UNSIGNED);
				if (nam) {
					m = nam;
					m->m_next = mp;
				} else
					m = mp;
				if (slp->ns_recend)
					slp->ns_recend->m_nextpkt = m;
				else
					slp->ns_rec = m;
				slp->ns_recend = m;
				m->m_nextpkt = (struct mbuf *)0;
			}
			if (error) {
				if ((so->so_proto->pr_flags & PR_CONNREQUIRED)
				    && error != EWOULDBLOCK) {
					slp->ns_flag |= SLP_DISCONN;
					goto dorecs;
				}
			}
		} while (mp);
	}

	/*
	 * Now try and process the request records, non-blocking.
	 */
dorecs:
	if (waitflag == M_DONTWAIT &&
	    (slp->ns_rec || (slp->ns_flag & (SLP_NEEDQ | SLP_DISCONN))))
		nfsrv_wakenfsd(slp);
}

/*
 * Try and extract an RPC request from the mbuf data list received on a
 * stream socket. The "waitflag" argument indicates whether or not it
 * can sleep.
 */
nfsrv_getstream(slp, waitflag)
	register struct nfssvc_sock *slp;
	int waitflag;
{
	register struct mbuf *m;
	register char *cp1, *cp2;
	register int len;
	struct mbuf *om, *m2, *recm;
	u_long recmark;

	if (slp->ns_flag & SLP_GETSTREAM)
		panic("nfs getstream");
	slp->ns_flag |= SLP_GETSTREAM;
	for (;;) {
		if (slp->ns_reclen == 0) {
			if (slp->ns_cc < NFSX_UNSIGNED) {
				slp->ns_flag &= ~SLP_GETSTREAM;
				return (0);
			}
			m = slp->ns_raw;
			if (m->m_len >= NFSX_UNSIGNED) {
				bcopy(mtod(m, caddr_t), (caddr_t)&recmark, NFSX_UNSIGNED);
				m->m_data += NFSX_UNSIGNED;
				m->m_len -= NFSX_UNSIGNED;
			} else {
				cp1 = (caddr_t)&recmark;
				cp2 = mtod(m, caddr_t);
				while (cp1 < ((caddr_t)&recmark) + NFSX_UNSIGNED) {
					while (m->m_len == 0) {
						m = m->m_next;
						cp2 = mtod(m, caddr_t);
					}
					*cp1++ = *cp2++;
					m->m_data++;
					m->m_len--;
				}
			}
			slp->ns_cc -= NFSX_UNSIGNED;
			slp->ns_reclen = ntohl(recmark) & ~0x80000000;
			if (slp->ns_reclen < NFS_MINPACKET || slp->ns_reclen > NFS_MAXPACKET) {
				slp->ns_flag &= ~SLP_GETSTREAM;
				return (EPERM);
			}
		}
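
		/*
		 * Explanatory note (not in the original): at this point
		 * ns_reclen holds the length taken from the record mark just
		 * consumed, and the record's data still sits at the front of
		 * the ns_raw chain, to be split off below.
		 */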

		/*
		 * Now get the record part.
		 */
		if (slp->ns_cc == slp->ns_reclen) {
			recm = slp->ns_raw;
			slp->ns_raw = slp->ns_rawend = (struct mbuf *)0;
			slp->ns_cc = slp->ns_reclen = 0;
		} else if (slp->ns_cc > slp->ns_reclen) {
			len = 0;
			m = slp->ns_raw;
			om = (struct mbuf *)0;
			while (len < slp->ns_reclen) {
				if ((len + m->m_len) > slp->ns_reclen) {
					m2 = m_copym(m, 0, slp->ns_reclen - len,
					    waitflag);
					if (m2) {
						if (om) {
							om->m_next = m2;
							recm = slp->ns_raw;
						} else
							recm = m2;
						m->m_data += slp->ns_reclen - len;
						m->m_len -= slp->ns_reclen - len;
						len = slp->ns_reclen;
					} else {
						slp->ns_flag &= ~SLP_GETSTREAM;
						return (EWOULDBLOCK);
					}
				} else if ((len + m->m_len) == slp->ns_reclen) {
					om = m;
					len += m->m_len;
					m = m->m_next;
					recm = slp->ns_raw;
					om->m_next = (struct mbuf *)0;
				} else {
					om = m;
					len += m->m_len;
					m = m->m_next;
				}
			}
			slp->ns_raw = m;
			slp->ns_cc -= len;
			slp->ns_reclen = 0;
		} else {
			slp->ns_flag &= ~SLP_GETSTREAM;
			return (0);
		}
		nfs_realign(recm, 10 * NFSX_UNSIGNED);
		if (slp->ns_recend)
			slp->ns_recend->m_nextpkt = recm;
		else
			slp->ns_rec = recm;
		slp->ns_recend = recm;
	}
}

/*
 * Parse an RPC header.
 */
nfsrv_dorec(slp, nd)
	register struct nfssvc_sock *slp;
	register struct nfsd *nd;
{
	register struct mbuf *m;
	int error;

	if ((slp->ns_flag & SLP_VALID) == 0 ||
	    (m = slp->ns_rec) == (struct mbuf *)0)
		return (ENOBUFS);
	if (slp->ns_rec = m->m_nextpkt)
		m->m_nextpkt = (struct mbuf *)0;
	else
		slp->ns_recend = (struct mbuf *)0;
	if (m->m_type == MT_SONAME) {
		nd->nd_nam = m;
		nd->nd_md = nd->nd_mrep = m->m_next;
		m->m_next = (struct mbuf *)0;
	} else {
		nd->nd_nam = (struct mbuf *)0;
		nd->nd_md = nd->nd_mrep = m;
	}
	nd->nd_dpos = mtod(nd->nd_md, caddr_t);
	if (error = nfs_getreq(nd, TRUE)) {
		m_freem(nd->nd_nam);
		return (error);
	}
	return (0);
}
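
/*
 * Explanatory note (not in the original): the call header dissected by
 * nfs_getreq() below is, in XDR order: xid, direction (CALL), RPC version,
 * program, program version, procedure number, then the credential flavor
 * and length followed by the credential body and the verifier.
 */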

/*
 * Parse an RPC request
 * - verify it
 * - fill in the cred struct.
 */
nfs_getreq(nd, has_header)
	register struct nfsd *nd;
	int has_header;
{
	register int len, i;
	register u_long *tl;
	register long t1;
	struct uio uio;
	struct iovec iov;
	caddr_t dpos, cp2;
	u_long nfsvers, auth_type;
	int error = 0, nqnfs = 0;
	struct mbuf *mrep, *md;

	mrep = nd->nd_mrep;
	md = nd->nd_md;
	dpos = nd->nd_dpos;
	if (has_header) {
		nfsm_dissect(tl, u_long *, 10*NFSX_UNSIGNED);
		nd->nd_retxid = *tl++;
		if (*tl++ != rpc_call) {
			m_freem(mrep);
			return (EBADRPC);
		}
	} else {
		nfsm_dissect(tl, u_long *, 8*NFSX_UNSIGNED);
	}
	nd->nd_repstat = 0;
	if (*tl++ != rpc_vers) {
		nd->nd_repstat = ERPCMISMATCH;
		nd->nd_procnum = NFSPROC_NOOP;
		return (0);
	}
	nfsvers = nfs_vers;
	if (*tl != nfs_prog) {
		if (*tl == nqnfs_prog) {
			nqnfs++;
			nfsvers = nqnfs_vers;
		} else {
			nd->nd_repstat = EPROGUNAVAIL;
			nd->nd_procnum = NFSPROC_NOOP;
			return (0);
		}
	}
	tl++;
	if (*tl++ != nfsvers) {
		nd->nd_repstat = EPROGMISMATCH;
		nd->nd_procnum = NFSPROC_NOOP;
		return (0);
	}
	nd->nd_procnum = fxdr_unsigned(u_long, *tl++);
	if (nd->nd_procnum == NFSPROC_NULL)
		return (0);
	if (nd->nd_procnum >= NFS_NPROCS ||
	    (!nqnfs && nd->nd_procnum > NFSPROC_STATFS) ||
	    (*tl != rpc_auth_unix && *tl != rpc_auth_kerb)) {
		nd->nd_repstat = EPROCUNAVAIL;
		nd->nd_procnum = NFSPROC_NOOP;
		return (0);
	}
	auth_type = *tl++;
	len = fxdr_unsigned(int, *tl++);
	if (len < 0 || len > RPCAUTH_MAXSIZ) {
		m_freem(mrep);
		return (EBADRPC);
	}

	/*
	 * Handle auth_unix or auth_kerb.
	 */
	if (auth_type == rpc_auth_unix) {
		len = fxdr_unsigned(int, *++tl);
		if (len < 0 || len > NFS_MAXNAMLEN) {
			m_freem(mrep);
			return (EBADRPC);
		}
		nfsm_adv(nfsm_rndup(len));
		nfsm_dissect(tl, u_long *, 3*NFSX_UNSIGNED);
		nd->nd_cr.cr_uid = fxdr_unsigned(uid_t, *tl++);
		nd->nd_cr.cr_gid = fxdr_unsigned(gid_t, *tl++);
		len = fxdr_unsigned(int, *tl);
		if (len < 0 || len > RPCAUTH_UNIXGIDS) {
			m_freem(mrep);
			return (EBADRPC);
		}
		nfsm_dissect(tl, u_long *, (len + 2)*NFSX_UNSIGNED);
		for (i = 1; i <= len; i++)
			if (i < NGROUPS)
				nd->nd_cr.cr_groups[i] = fxdr_unsigned(gid_t, *tl++);
			else
				tl++;
		nd->nd_cr.cr_ngroups = (len >= NGROUPS) ? NGROUPS : (len + 1);
	} else if (auth_type == rpc_auth_kerb) {
		nd->nd_cr.cr_uid = fxdr_unsigned(uid_t, *tl++);
		nd->nd_authlen = fxdr_unsigned(int, *tl);
		iov.iov_len = uio.uio_resid = nfsm_rndup(nd->nd_authlen);
		if (uio.uio_resid > (len - 2*NFSX_UNSIGNED)) {
			m_freem(mrep);
			return (EBADRPC);
		}
		uio.uio_offset = 0;
		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_segflg = UIO_SYSSPACE;
		iov.iov_base = (caddr_t)nd->nd_authstr;
		nfsm_mtouio(&uio, uio.uio_resid);
		nfsm_dissect(tl, u_long *, 2*NFSX_UNSIGNED);
		nd->nd_flag |= NFSD_NEEDAUTH;
	}

	/*
	 * Do we have any use for the verifier.
	 * According to the "Remote Procedure Call Protocol Spec." it
	 * should be AUTH_NULL, but some clients make it AUTH_UNIX?
	 * For now, just skip over it
	 */
	len = fxdr_unsigned(int, *++tl);
	if (len < 0 || len > RPCAUTH_MAXSIZ) {
		m_freem(mrep);
		return (EBADRPC);
	}
	if (len > 0) {
		nfsm_adv(nfsm_rndup(len));
	}

	/*
	 * For nqnfs, get piggybacked lease request.
	 */
	if (nqnfs && nd->nd_procnum != NQNFSPROC_EVICTED) {
		nfsm_dissect(tl, u_long *, NFSX_UNSIGNED);
		nd->nd_nqlflag = fxdr_unsigned(int, *tl);
		if (nd->nd_nqlflag) {
			nfsm_dissect(tl, u_long *, NFSX_UNSIGNED);
			nd->nd_duration = fxdr_unsigned(int, *tl);
		} else
			nd->nd_duration = NQ_MINLEASE;
	} else {
		nd->nd_nqlflag = NQL_NOVAL;
		nd->nd_duration = NQ_MINLEASE;
	}
	nd->nd_md = md;
	nd->nd_dpos = dpos;
	return (0);
nfsmout:
	return (error);
}

/*
 * Search for a sleeping nfsd and wake it up.
 * SIDE EFFECT: If none found, set NFSD_CHECKSLP flag, so that one of the
 * running nfsds will go look for the work in the nfssvc_sock list.
 */
void
nfsrv_wakenfsd(slp)
	struct nfssvc_sock *slp;
{
	register struct nfsd *nd = nfsd_head.nd_next;

	if ((slp->ns_flag & SLP_VALID) == 0)
		return;
	while (nd != (struct nfsd *)&nfsd_head) {
		if (nd->nd_flag & NFSD_WAITING) {
			nd->nd_flag &= ~NFSD_WAITING;
			if (nd->nd_slp)
				panic("nfsd wakeup");
			slp->ns_sref++;
			nd->nd_slp = slp;
			wakeup((caddr_t)nd);
			return;
		}
		nd = nd->nd_next;
	}
	slp->ns_flag |= SLP_DOREC;
	nfsd_head.nd_flag |= NFSD_CHECKSLP;
}

nfs_msg(p, server, msg)
	struct proc *p;
	char *server, *msg;
{
	tpr_t tpr;

	if (p)
		tpr = tprintf_open(p);
	else
		tpr = NULL;
	tprintf(tpr, "nfs server %s: %s\n", server, msg);
	tprintf_close(tpr);
}