/*
 * Copyright (c) 1989, 1991 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Rick Macklem at The University of Guelph.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)nfs_socket.c	7.40 (Berkeley) 10/22/92
 */

/*
 * Socket operations for use by nfs
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/mount.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/vnode.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/syslog.h>
#include <sys/tprintf.h>

#include <netinet/in.h>
#include <netinet/tcp.h>
#include <nfs/rpcv2.h>
#include <nfs/nfsv2.h>
#include <nfs/nfs.h>
#include <nfs/xdr_subs.h>
#include <nfs/nfsm_subs.h>
#include <nfs/nfsmount.h>
#include <nfs/nfsnode.h>
#include <nfs/nfsrtt.h>
#include <nfs/nqnfs.h>

#define	TRUE	1
#define	FALSE	0

/*
 * Estimate rto for an nfs rpc sent via an unreliable datagram.
 * Use the mean and mean deviation of rtt for the appropriate type of rpc
 * for the frequent rpcs and a default for the others.
 * The justification for doing "other" this way is that these rpcs
 * happen so infrequently that timer estimates would probably be stale.
 * Also, since many of these rpcs are
 * non-idempotent, a conservative timeout is desired.
 * getattr, lookup - A+2D
 * read, write - A+4D
 * other - nm_timeo
 */
#define	NFS_RTO(n, t) \
	((t) == 0 ? (n)->nm_timeo : \
	 ((t) < 3 ? \
	  (((((n)->nm_srtt[t-1] + 3) >> 2) + (n)->nm_sdrtt[t-1] + 1) >> 1) : \
	  ((((n)->nm_srtt[t-1] + 7) >> 3) + (n)->nm_sdrtt[t-1] + 1)))
#define	NFS_SRTT(r)	(r)->r_nmp->nm_srtt[proct[(r)->r_procnum] - 1]
#define	NFS_SDRTT(r)	(r)->r_nmp->nm_sdrtt[proct[(r)->r_procnum] - 1]
/*
 * External data, mostly RPC constants in XDR form
 */
extern u_long rpc_reply, rpc_msgdenied, rpc_mismatch, rpc_vers, rpc_auth_unix,
	rpc_msgaccepted, rpc_call, rpc_autherr, rpc_rejectedcred,
	rpc_auth_kerb;
extern u_long nfs_prog, nfs_vers, nqnfs_prog, nqnfs_vers;
extern time_t nqnfsstarttime;
extern int nonidempotent[NFS_NPROCS];

/*
 * Maps errno values to nfs error numbers.
 * Use NFSERR_IO as the catch all for ones not specifically defined in
 * RFC 1094.
 */
static int nfsrv_errmap[ELAST] = {
	NFSERR_PERM, NFSERR_NOENT, NFSERR_IO, NFSERR_IO, NFSERR_IO,
	NFSERR_NXIO, NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO,
	NFSERR_IO, NFSERR_IO, NFSERR_ACCES, NFSERR_IO, NFSERR_IO,
	NFSERR_IO, NFSERR_EXIST, NFSERR_IO, NFSERR_NODEV, NFSERR_NOTDIR,
	NFSERR_ISDIR, NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO,
	NFSERR_IO, NFSERR_FBIG, NFSERR_NOSPC, NFSERR_IO, NFSERR_ROFS,
	NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO,
	NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO,
	NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO,
	NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO,
	NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO,
	NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO,
	NFSERR_IO, NFSERR_IO, NFSERR_NAMETOL, NFSERR_IO, NFSERR_IO,
	NFSERR_NOTEMPTY, NFSERR_IO, NFSERR_IO, NFSERR_DQUOT, NFSERR_STALE,
	NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO,
	NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO,
	NFSERR_IO,
};

/*
 * Defines which timer to use for the procnum.
 * 0 - default
 * 1 - getattr
 * 2 - lookup
 * 3 - read
 * 4 - write
 */
static int proct[NFS_NPROCS] = {
	0, 1, 0, 0, 2, 3, 3, 0, 4, 0, 0, 0, 0, 0, 0, 0, 3, 0, 3, 0, 0, 0, 0,
};
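
/*
 * Note on the rto estimate above: nfs_reply() keeps nm_srtt[] scaled by a
 * factor of 8 and nm_sdrtt[] by a factor of 4 (gains of 1/8 and 1/4), so
 * for the timer classes in proct[] NFS_RTO() works out to roughly A + 2D
 * for getattr/lookup and A + 4D for read/write, where A is the smoothed
 * rtt and D the smoothed deviation, both in NFS_HZ ticks.
 */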

/*
 * There is a congestion window for outstanding rpcs maintained per mount
 * point. The cwnd size is adjusted in roughly the way that:
 * Van Jacobson, Congestion Avoidance and Control, In "Proceedings of
 * SIGCOMM '88". ACM, August 1988.
 * describes for TCP. The cwnd size is chopped in half on a retransmit timeout
 * and incremented by 1/cwnd when each rpc reply is received and a full cwnd
 * of rpcs is in progress.
 * (The sent count and cwnd are scaled for integer arith.)
 * Variants of "slow start" were tried and were found to be too much of a
 * performance hit (ave. rtt 3 times larger),
 * I suspect due to the large rtt that nfs rpcs have.
 */
#define	NFS_CWNDSCALE	256
#define	NFS_MAXCWND	(NFS_CWNDSCALE * 32)
static int nfs_backoff[8] = { 2, 4, 8, 16, 32, 64, 128, 256, };
int	nfs_sbwait();
void	nfs_disconnect(), nfs_realign(), nfsrv_wakenfsd(), nfs_sndunlock();
void	nfs_rcvunlock(), nqnfs_serverd(), nqnfs_clientlease();
struct mbuf *nfsm_rpchead();
int nfsrtton = 0;
struct nfsrtt nfsrtt;
struct nfsd nfsd_head;

int	nfsrv_null(),
	nfsrv_getattr(),
	nfsrv_setattr(),
	nfsrv_lookup(),
	nfsrv_readlink(),
	nfsrv_read(),
	nfsrv_write(),
	nfsrv_create(),
	nfsrv_remove(),
	nfsrv_rename(),
	nfsrv_link(),
	nfsrv_symlink(),
	nfsrv_mkdir(),
	nfsrv_rmdir(),
	nfsrv_readdir(),
	nfsrv_statfs(),
	nfsrv_noop(),
	nqnfsrv_readdirlook(),
	nqnfsrv_getlease(),
	nqnfsrv_vacated(),
	nqnfsrv_access();

int (*nfsrv_procs[NFS_NPROCS])() = {
	nfsrv_null,
	nfsrv_getattr,
	nfsrv_setattr,
	nfsrv_noop,
	nfsrv_lookup,
	nfsrv_readlink,
	nfsrv_read,
	nfsrv_noop,
	nfsrv_write,
	nfsrv_create,
	nfsrv_remove,
	nfsrv_rename,
	nfsrv_link,
	nfsrv_symlink,
	nfsrv_mkdir,
	nfsrv_rmdir,
	nfsrv_readdir,
	nfsrv_statfs,
	nqnfsrv_readdirlook,
	nqnfsrv_getlease,
	nqnfsrv_vacated,
	nfsrv_noop,
	nqnfsrv_access,
};

struct nfsreq nfsreqh;

/*
 * Initialize sockets and congestion for a new NFS connection.
 * We do not free the sockaddr if error.
 */
nfs_connect(nmp, rep)
	register struct nfsmount *nmp;
	struct nfsreq *rep;
{
	register struct socket *so;
	int s, error, rcvreserve, sndreserve;
	struct sockaddr *saddr;
	struct sockaddr_in *sin;
	struct mbuf *m;
	u_short tport;

	nmp->nm_so = (struct socket *)0;
	saddr = mtod(nmp->nm_nam, struct sockaddr *);
	if (error = socreate(saddr->sa_family,
	    &nmp->nm_so, nmp->nm_sotype, nmp->nm_soproto))
		goto bad;
	so = nmp->nm_so;
	nmp->nm_soflags = so->so_proto->pr_flags;

	/*
	 * Some servers require that the client port be a reserved port number.
	 */
	if (saddr->sa_family == AF_INET && (nmp->nm_flag & NFSMNT_RESVPORT)) {
		MGET(m, M_WAIT, MT_SONAME);
		sin = mtod(m, struct sockaddr_in *);
		sin->sin_len = m->m_len = sizeof (struct sockaddr_in);
		sin->sin_family = AF_INET;
		sin->sin_addr.s_addr = INADDR_ANY;
		tport = IPPORT_RESERVED - 1;
		sin->sin_port = htons(tport);
		while ((error = sobind(so, m)) == EADDRINUSE &&
		    --tport > IPPORT_RESERVED / 2)
			sin->sin_port = htons(tport);
		m_freem(m);
		if (error)
			goto bad;
	}

	/*
	 * Protocols that do not require connections may be optionally left
	 * unconnected for servers that reply from a port other than NFS_PORT.
	 */
	if (nmp->nm_flag & NFSMNT_NOCONN) {
		if (nmp->nm_soflags & PR_CONNREQUIRED) {
			error = ENOTCONN;
			goto bad;
		}
	} else {
		if (error = soconnect(so, nmp->nm_nam))
			goto bad;

		/*
		 * Wait for the connection to complete. Cribbed from the
		 * connect system call but with the wait timing out so
		 * that interruptible mounts don't hang here for a long time.
		 */
		s = splnet();
		while ((so->so_state & SS_ISCONNECTING) && so->so_error == 0) {
			(void) tsleep((caddr_t)&so->so_timeo, PSOCK,
			    "nfscon", 2 * hz);
			if ((so->so_state & SS_ISCONNECTING) &&
			    so->so_error == 0 && rep &&
			    (error = nfs_sigintr(nmp, rep, rep->r_procp))) {
				so->so_state &= ~SS_ISCONNECTING;
				splx(s);
				goto bad;
			}
		}
		if (so->so_error) {
			error = so->so_error;
			so->so_error = 0;
			splx(s);
			goto bad;
		}
		splx(s);
	}
	if (nmp->nm_flag & (NFSMNT_SOFT | NFSMNT_INT)) {
		so->so_rcv.sb_timeo = (5 * hz);
		so->so_snd.sb_timeo = (5 * hz);
	} else {
		so->so_rcv.sb_timeo = 0;
		so->so_snd.sb_timeo = 0;
	}
	if (nmp->nm_sotype == SOCK_DGRAM) {
		sndreserve = nmp->nm_wsize + NFS_MAXPKTHDR;
		rcvreserve = nmp->nm_rsize + NFS_MAXPKTHDR;
	} else if (nmp->nm_sotype == SOCK_SEQPACKET) {
		sndreserve = (nmp->nm_wsize + NFS_MAXPKTHDR) * 2;
		rcvreserve = (nmp->nm_rsize + NFS_MAXPKTHDR) * 2;
	} else {
		if (nmp->nm_sotype != SOCK_STREAM)
			panic("nfscon sotype");
		if (so->so_proto->pr_flags & PR_CONNREQUIRED) {
			MGET(m, M_WAIT, MT_SOOPTS);
			*mtod(m, int *) = 1;
			m->m_len = sizeof(int);
			sosetopt(so, SOL_SOCKET, SO_KEEPALIVE, m);
		}
		if (so->so_proto->pr_protocol == IPPROTO_TCP) {
			MGET(m, M_WAIT, MT_SOOPTS);
			*mtod(m, int *) = 1;
			m->m_len = sizeof(int);
			sosetopt(so, IPPROTO_TCP, TCP_NODELAY, m);
		}
		sndreserve = (nmp->nm_wsize + NFS_MAXPKTHDR + sizeof (u_long))
		    * 2;
		rcvreserve = (nmp->nm_rsize + NFS_MAXPKTHDR + sizeof (u_long))
		    * 2;
	}
	if (error = soreserve(so, sndreserve, rcvreserve))
		goto bad;
	so->so_rcv.sb_flags |= SB_NOINTR;
	so->so_snd.sb_flags |= SB_NOINTR;

	/* Initialize other non-zero congestion variables */
	nmp->nm_srtt[0] = nmp->nm_srtt[1] = nmp->nm_srtt[2] = nmp->nm_srtt[3] =
	    nmp->nm_srtt[4] = (NFS_TIMEO << 3);
	nmp->nm_sdrtt[0] = nmp->nm_sdrtt[1] = nmp->nm_sdrtt[2] =
	    nmp->nm_sdrtt[3] = nmp->nm_sdrtt[4] = 0;
	nmp->nm_cwnd = NFS_MAXCWND / 2;	    /* Initial send window */
	nmp->nm_sent = 0;
	nmp->nm_timeouts = 0;
	return (0);

bad:
	nfs_disconnect(nmp);
	return (error);
}

/*
 * Reconnect routine:
 * Called when a connection is broken on a reliable protocol.
 * - clean up the old socket
 * - nfs_connect() again
 * - set R_MUSTRESEND for all outstanding requests on mount point
 * If this fails the mount point is DEAD!
 * nb: Must be called with the nfs_sndlock() set on the mount point.
 */
nfs_reconnect(rep)
	register struct nfsreq *rep;
{
	register struct nfsreq *rp;
	register struct nfsmount *nmp = rep->r_nmp;
	int error;

	nfs_disconnect(nmp);
	while (error = nfs_connect(nmp, rep)) {
		if (error == EINTR || error == ERESTART)
			return (EINTR);
		(void) tsleep((caddr_t)&lbolt, PSOCK, "nfscon", 0);
	}

	/*
	 * Loop through outstanding request list and fix up all requests
	 * on old socket.
	 */
	rp = nfsreqh.r_next;
	while (rp != &nfsreqh) {
		if (rp->r_nmp == nmp)
			rp->r_flags |= R_MUSTRESEND;
		rp = rp->r_next;
	}
	return (0);
}

/*
 * NFS disconnect. Clean up and unlink.
 */
void
nfs_disconnect(nmp)
	register struct nfsmount *nmp;
{
	register struct socket *so;

	if (nmp->nm_so) {
		so = nmp->nm_so;
		nmp->nm_so = (struct socket *)0;
		soshutdown(so, 2);
		soclose(so);
	}
}

/*
 * This is the nfs send routine. For connection based socket types, it
 * must be called with an nfs_sndlock() on the socket.
 * "rep == NULL" indicates that it has been called from a server.
 * For the client side:
 * - return EINTR if the RPC is terminated, 0 otherwise
 * - set R_MUSTRESEND if the send fails for any reason
 * - do any cleanup required by recoverable socket errors (???)
 * For the server side:
 * - return EINTR or ERESTART if interrupted by a signal
 * - return EPIPE if a connection is lost for connection based sockets (TCP...)
 * - do any cleanup required by recoverable socket errors (???)
 */
nfs_send(so, nam, top, rep)
	register struct socket *so;
	struct mbuf *nam;
	register struct mbuf *top;
	struct nfsreq *rep;
{
	struct mbuf *sendnam;
	int error, soflags, flags;

	if (rep) {
		if (rep->r_flags & R_SOFTTERM) {
			m_freem(top);
			return (EINTR);
		}
		if ((so = rep->r_nmp->nm_so) == NULL) {
			rep->r_flags |= R_MUSTRESEND;
			m_freem(top);
			return (0);
		}
		rep->r_flags &= ~R_MUSTRESEND;
		soflags = rep->r_nmp->nm_soflags;
	} else
		soflags = so->so_proto->pr_flags;
	if ((soflags & PR_CONNREQUIRED) || (so->so_state & SS_ISCONNECTED))
		sendnam = (struct mbuf *)0;
	else
		sendnam = nam;
	if (so->so_type == SOCK_SEQPACKET)
		flags = MSG_EOR;
	else
		flags = 0;

	error = sosend(so, sendnam, (struct uio *)0, top,
	    (struct mbuf *)0, flags);
	if (error) {
		if (rep) {
			log(LOG_INFO, "nfs send error %d for server %s\n",error,
			    rep->r_nmp->nm_mountp->mnt_stat.f_mntfromname);
			/*
			 * Deal with errors for the client side.
			 */
			if (rep->r_flags & R_SOFTTERM)
				error = EINTR;
			else
				rep->r_flags |= R_MUSTRESEND;
		} else
			log(LOG_INFO, "nfsd send error %d\n", error);

		/*
		 * Handle any recoverable (soft) socket errors here. (???)
		 */
		if (error != EINTR && error != ERESTART &&
		    error != EWOULDBLOCK && error != EPIPE)
			error = 0;
	}
	return (error);
}
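
/*
 * A note on stream transports: RPC messages sent over SOCK_STREAM carry a
 * 4-byte record mark in front of each message (see RFC 1057); the high-order
 * bit flags the last fragment and the low 31 bits give the fragment length.
 * That is why nfs_request() prepends htonl(0x80000000 | len) and
 * nfs_receive() below strips the bit with ntohl(len) & ~0x80000000.
 */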

/*
 * Receive a Sun RPC Request/Reply. For SOCK_DGRAM, the work is all
 * done by soreceive(), but for SOCK_STREAM we must deal with the Record
 * Mark and consolidate the data into a new mbuf list.
 * nb: Sometimes TCP passes the data up to soreceive() in long lists of
 *     small mbufs.
 * For SOCK_STREAM we must be very careful to read an entire record once
 * we have read any of it, even if the system call has been interrupted.
 */
nfs_receive(rep, aname, mp)
	register struct nfsreq *rep;
	struct mbuf **aname;
	struct mbuf **mp;
{
	register struct socket *so;
	struct uio auio;
	struct iovec aio;
	register struct mbuf *m;
	struct mbuf *control;
	u_long len;
	struct mbuf **getnam;
	int error, sotype, rcvflg;
	struct proc *p = curproc;	/* XXX */

	/*
	 * Set up arguments for soreceive()
	 */
	*mp = (struct mbuf *)0;
	*aname = (struct mbuf *)0;
	sotype = rep->r_nmp->nm_sotype;

	/*
	 * For reliable protocols, lock against other senders/receivers
	 * in case a reconnect is necessary.
	 * For SOCK_STREAM, first get the Record Mark to find out how much
	 * more there is to get.
	 * We must lock the socket against other receivers
	 * until we have an entire rpc request/reply.
	 */
	if (sotype != SOCK_DGRAM) {
		if (error = nfs_sndlock(&rep->r_nmp->nm_flag, rep))
			return (error);
tryagain:
		/*
		 * Check for fatal errors and resending request.
		 */
		/*
		 * Ugh: If a reconnect attempt just happened, nm_so
		 * would have changed. NULL indicates a failed
		 * attempt that has essentially shut down this
		 * mount point.
		 */
		if (rep->r_mrep || (rep->r_flags & R_SOFTTERM)) {
			nfs_sndunlock(&rep->r_nmp->nm_flag);
			return (EINTR);
		}
		if ((so = rep->r_nmp->nm_so) == NULL) {
			if (error = nfs_reconnect(rep)) {
				nfs_sndunlock(&rep->r_nmp->nm_flag);
				return (error);
			}
			goto tryagain;
		}
		while (rep->r_flags & R_MUSTRESEND) {
			m = m_copym(rep->r_mreq, 0, M_COPYALL, M_WAIT);
			nfsstats.rpcretries++;
			if (error = nfs_send(so, rep->r_nmp->nm_nam, m, rep)) {
				if (error == EINTR || error == ERESTART ||
				    (error = nfs_reconnect(rep))) {
					nfs_sndunlock(&rep->r_nmp->nm_flag);
					return (error);
				}
				goto tryagain;
			}
		}
		nfs_sndunlock(&rep->r_nmp->nm_flag);
		if (sotype == SOCK_STREAM) {
			aio.iov_base = (caddr_t) &len;
			aio.iov_len = sizeof(u_long);
			auio.uio_iov = &aio;
			auio.uio_iovcnt = 1;
			auio.uio_segflg = UIO_SYSSPACE;
			auio.uio_rw = UIO_READ;
			auio.uio_offset = 0;
			auio.uio_resid = sizeof(u_long);
			auio.uio_procp = p;
			do {
				rcvflg = MSG_WAITALL;
				error = soreceive(so, (struct mbuf **)0, &auio,
				    (struct mbuf **)0, (struct mbuf **)0, &rcvflg);
				if (error == EWOULDBLOCK && rep) {
					if (rep->r_flags & R_SOFTTERM)
						return (EINTR);
				}
			} while (error == EWOULDBLOCK);
			if (!error && auio.uio_resid > 0) {
				log(LOG_INFO,
				    "short receive (%d/%d) from nfs server %s\n",
				    sizeof(u_long) - auio.uio_resid,
				    sizeof(u_long),
				    rep->r_nmp->nm_mountp->mnt_stat.f_mntfromname);
				error = EPIPE;
			}
			if (error)
				goto errout;
			len = ntohl(len) & ~0x80000000;
			/*
			 * This is SERIOUS! We are out of sync with the sender
			 * and forcing a disconnect/reconnect is all I can do.
			 */
			if (len > NFS_MAXPACKET) {
				log(LOG_ERR, "%s (%d) from nfs server %s\n",
				    "impossible packet length",
				    len,
				    rep->r_nmp->nm_mountp->mnt_stat.f_mntfromname);
				error = EFBIG;
				goto errout;
			}
			auio.uio_resid = len;
			do {
				rcvflg = MSG_WAITALL;
				error = soreceive(so, (struct mbuf **)0,
				    &auio, mp, (struct mbuf **)0, &rcvflg);
			} while (error == EWOULDBLOCK || error == EINTR ||
			    error == ERESTART);
			if (!error && auio.uio_resid > 0) {
				log(LOG_INFO,
				    "short receive (%d/%d) from nfs server %s\n",
				    len - auio.uio_resid, len,
				    rep->r_nmp->nm_mountp->mnt_stat.f_mntfromname);
				error = EPIPE;
			}
		} else {
			/*
			 * NB: Since uio_resid is big, MSG_WAITALL is ignored
			 * and soreceive() will return when it has either a
			 * control msg or a data msg.
			 * We have no use for control msg., but must grab them
			 * and then throw them away so we know what is going
			 * on.
			 */
			auio.uio_resid = len = 100000000; /* Anything Big */
			auio.uio_procp = p;
			do {
				rcvflg = 0;
				error = soreceive(so, (struct mbuf **)0,
				    &auio, mp, &control, &rcvflg);
				if (control)
					m_freem(control);
				if (error == EWOULDBLOCK && rep) {
					if (rep->r_flags & R_SOFTTERM)
						return (EINTR);
				}
			} while (error == EWOULDBLOCK ||
			    (!error && *mp == NULL && control));
			if ((rcvflg & MSG_EOR) == 0)
				printf("Egad!!\n");
			if (!error && *mp == NULL)
				error = EPIPE;
			len -= auio.uio_resid;
		}
errout:
		if (error && error != EINTR && error != ERESTART) {
			m_freem(*mp);
			*mp = (struct mbuf *)0;
			if (error != EPIPE)
				log(LOG_INFO,
				    "receive error %d from nfs server %s\n",
				    error,
				    rep->r_nmp->nm_mountp->mnt_stat.f_mntfromname);
			error = nfs_sndlock(&rep->r_nmp->nm_flag, rep);
			if (!error)
				error = nfs_reconnect(rep);
			if (!error)
				goto tryagain;
		}
	} else {
		if ((so = rep->r_nmp->nm_so) == NULL)
			return (EACCES);
		if (so->so_state & SS_ISCONNECTED)
			getnam = (struct mbuf **)0;
		else
			getnam = aname;
		auio.uio_resid = len = 1000000;
		auio.uio_procp = p;
		do {
			rcvflg = 0;
			error = soreceive(so, getnam, &auio, mp,
			    (struct mbuf **)0, &rcvflg);
			if (error == EWOULDBLOCK &&
			    (rep->r_flags & R_SOFTTERM))
				return (EINTR);
		} while (error == EWOULDBLOCK);
		len -= auio.uio_resid;
	}
	if (error) {
		m_freem(*mp);
		*mp = (struct mbuf *)0;
	}
	/*
	 * Search for any mbufs that are not a multiple of 4 bytes long
	 * or with m_data not longword aligned.
	 * These could cause pointer alignment problems, so copy them to
	 * well aligned mbufs.
	 */
	nfs_realign(*mp, 5 * NFSX_UNSIGNED);
	return (error);
}

/*
 * Implement receipt of reply on a socket.
 * We must search through the list of received datagrams matching them
 * with outstanding requests using the xid, until ours is found.
 */
/* ARGSUSED */
nfs_reply(myrep)
	struct nfsreq *myrep;
{
	register struct nfsreq *rep;
	register struct nfsmount *nmp = myrep->r_nmp;
	register long t1;
	struct mbuf *mrep, *nam, *md;
	u_long rxid, *tl;
	caddr_t dpos, cp2;
	int error;

	/*
	 * Loop around until we get our own reply
	 */
	for (;;) {
		/*
		 * Lock against other receivers so that I don't get stuck in
		 * sbwait() after someone else has received my reply for me.
		 * Also necessary for connection based protocols to avoid
		 * race conditions during a reconnect.
		 */
		if (error = nfs_rcvlock(myrep))
			return (error);
		/* Already received, bye bye */
		if (myrep->r_mrep != NULL) {
			nfs_rcvunlock(&nmp->nm_flag);
			return (0);
		}
		/*
		 * Get the next Rpc reply off the socket
		 */
		error = nfs_receive(myrep, &nam, &mrep);
		nfs_rcvunlock(&nmp->nm_flag);
		if (error) printf("rcv err=%d\n",error);
		if (error) {

			/*
			 * Ignore routing errors on connectionless protocols??
			 */
			if (NFSIGNORE_SOERROR(nmp->nm_soflags, error)) {
				nmp->nm_so->so_error = 0;
				continue;
			}
			return (error);
		}
		if (nam)
			m_freem(nam);

		/*
		 * Get the xid and check that it is an rpc reply
		 */
		md = mrep;
		dpos = mtod(md, caddr_t);
		nfsm_dissect(tl, u_long *, 2*NFSX_UNSIGNED);
		rxid = *tl++;
		if (*tl != rpc_reply) {
			if (nmp->nm_flag & NFSMNT_NQNFS) {
				if (nqnfs_callback(nmp, mrep, md, dpos))
					nfsstats.rpcinvalid++;
			} else {
				nfsstats.rpcinvalid++;
				m_freem(mrep);
			}
nfsmout:
			continue;
		}

		/*
		 * Loop through the request list to match up the reply
		 * Iff no match, just drop the datagram
		 */
		rep = nfsreqh.r_next;
		while (rep != &nfsreqh) {
			if (rep->r_mrep == NULL && rxid == rep->r_xid) {
				/* Found it.. */
				rep->r_mrep = mrep;
				rep->r_md = md;
				rep->r_dpos = dpos;
				if (nfsrtton) {
					struct rttl *rt;

					rt = &nfsrtt.rttl[nfsrtt.pos];
					rt->proc = rep->r_procnum;
					rt->rto = NFS_RTO(nmp, proct[rep->r_procnum]);
					rt->sent = nmp->nm_sent;
					rt->cwnd = nmp->nm_cwnd;
					rt->srtt = nmp->nm_srtt[proct[rep->r_procnum] - 1];
					rt->sdrtt = nmp->nm_sdrtt[proct[rep->r_procnum] - 1];
					rt->fsid = nmp->nm_mountp->mnt_stat.f_fsid;
					rt->tstamp = time;
					if (rep->r_flags & R_TIMING)
						rt->rtt = rep->r_rtt;
					else
						rt->rtt = 1000000;
					nfsrtt.pos = (nfsrtt.pos + 1) % NFSRTTLOGSIZ;
				}
				/*
				 * Update congestion window.
				 * Do the additive increase of
				 * one rpc/rtt.
				 */
				if (nmp->nm_cwnd <= nmp->nm_sent) {
					nmp->nm_cwnd +=
					    (NFS_CWNDSCALE * NFS_CWNDSCALE +
					    (nmp->nm_cwnd >> 1)) / nmp->nm_cwnd;
					if (nmp->nm_cwnd > NFS_MAXCWND)
						nmp->nm_cwnd = NFS_MAXCWND;
				}
				/*
				 * Update rtt using a gain of 0.125 on the mean
				 * and a gain of 0.25 on the deviation.
				 */
				if (rep->r_flags & R_TIMING) {
					/*
					 * Since the timer resolution of
					 * NFS_HZ is so coarse, it can often
					 * result in r_rtt == 0. Since
					 * r_rtt == N means that the actual
					 * rtt is between N+dt and N+2-dt ticks,
					 * add 1.
					 */
					t1 = rep->r_rtt + 1;
					t1 -= (NFS_SRTT(rep) >> 3);
					NFS_SRTT(rep) += t1;
					if (t1 < 0)
						t1 = -t1;
					t1 -= (NFS_SDRTT(rep) >> 2);
					NFS_SDRTT(rep) += t1;
				}
				nmp->nm_timeouts = 0;
				break;
			}
			rep = rep->r_next;
		}
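
		/*
		 * (The additive increase above adds roughly
		 * NFS_CWNDSCALE*NFS_CWNDSCALE/cwnd, rounded, per reply, so a
		 * full window of replies grows cwnd by about one
		 * NFS_CWNDSCALE, i.e. one rpc per round trip; the halving on
		 * a retransmit timeout is done in nfs_timer().)
		 */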
		/*
		 * If not matched to a request, drop it.
		 * If it's mine, get out.
		 */
		if (rep == &nfsreqh) {
			nfsstats.rpcunexpected++;
			m_freem(mrep);
		} else if (rep == myrep) {
			if (rep->r_mrep == NULL)
				panic("nfsreply nil");
			return (0);
		}
	}
}

/*
 * nfs_request - goes something like this
 *	- fill in request struct
 *	- links it into list
 *	- calls nfs_send() for first transmit
 *	- calls nfs_receive() to get reply
 *	- break down rpc header and return with nfs reply pointed to
 *	  by mrep or error
 * nb: always frees up mreq mbuf list
 */
nfs_request(vp, mrest, procnum, procp, cred, mrp, mdp, dposp)
	struct vnode *vp;
	struct mbuf *mrest;
	int procnum;
	struct proc *procp;
	struct ucred *cred;
	struct mbuf **mrp;
	struct mbuf **mdp;
	caddr_t *dposp;
{
	register struct mbuf *m, *mrep;
	register struct nfsreq *rep;
	register u_long *tl;
	register int i;
	struct nfsmount *nmp;
	struct mbuf *md, *mheadend;
	struct nfsreq *reph;
	struct nfsnode *tp, *np;
	time_t reqtime, waituntil;
	caddr_t dpos, cp2;
	int t1, nqlflag, cachable, s, error = 0, mrest_len, auth_len, auth_type;
	int trylater_delay = NQ_TRYLATERDEL, trylater_cnt = 0, failed_auth = 0;
	u_long xid;
	u_quad_t frev;
	char *auth_str;

	nmp = VFSTONFS(vp->v_mount);
	MALLOC(rep, struct nfsreq *, sizeof(struct nfsreq), M_NFSREQ, M_WAITOK);
	rep->r_nmp = nmp;
	rep->r_vp = vp;
	rep->r_procp = procp;
	rep->r_procnum = procnum;
	i = 0;
	m = mrest;
	while (m) {
		i += m->m_len;
		m = m->m_next;
	}
	mrest_len = i;

	/*
	 * Get the RPC header with authorization.
	 */
kerbauth:
	auth_str = (char *)0;
	if (nmp->nm_flag & NFSMNT_KERB) {
		if (failed_auth) {
			error = nfs_getauth(nmp, rep, cred, &auth_type,
			    &auth_str, &auth_len);
			if (error) {
				free((caddr_t)rep, M_NFSREQ);
				m_freem(mrest);
				return (error);
			}
		} else {
			auth_type = RPCAUTH_UNIX;
			auth_len = 5 * NFSX_UNSIGNED;
		}
	} else {
		auth_type = RPCAUTH_UNIX;
		if (cred->cr_ngroups < 1)
			panic("nfsreq nogrps");
		auth_len = ((((cred->cr_ngroups - 1) > nmp->nm_numgrps) ?
		    nmp->nm_numgrps : (cred->cr_ngroups - 1)) << 2) +
		    5 * NFSX_UNSIGNED;
	}
	m = nfsm_rpchead(cred, (nmp->nm_flag & NFSMNT_NQNFS), procnum,
	    auth_type, auth_len, auth_str, mrest, mrest_len, &mheadend, &xid);
	if (auth_str)
		free(auth_str, M_TEMP);

	/*
	 * For stream protocols, insert a Sun RPC Record Mark.
	 */
	if (nmp->nm_sotype == SOCK_STREAM) {
		M_PREPEND(m, NFSX_UNSIGNED, M_WAIT);
		*mtod(m, u_long *) = htonl(0x80000000 |
		    (m->m_pkthdr.len - NFSX_UNSIGNED));
	}
	rep->r_mreq = m;
	rep->r_xid = xid;
tryagain:
	if (nmp->nm_flag & NFSMNT_SOFT)
		rep->r_retry = nmp->nm_retry;
	else
		rep->r_retry = NFS_MAXREXMIT + 1;	/* past clip limit */
	rep->r_rtt = rep->r_rexmit = 0;
	if (proct[procnum] > 0)
		rep->r_flags = R_TIMING;
	else
		rep->r_flags = 0;
	rep->r_mrep = NULL;

	/*
	 * Do the client side RPC.
	 */
	nfsstats.rpcrequests++;
	/*
	 * Chain request into list of outstanding requests. Be sure
	 * to put it LAST so timer finds oldest requests first.
	 */
	s = splsoftclock();
	reph = &nfsreqh;
	reph->r_prev->r_next = rep;
	rep->r_prev = reph->r_prev;
	reph->r_prev = rep;
	rep->r_next = reph;

	/* Get send time for nqnfs */
	reqtime = time.tv_sec;

	/*
	 * If backing off another request or avoiding congestion, don't
	 * send this one now but let timer do it. If not timing a request,
	 * do it now.
	 */
	if (nmp->nm_so && (nmp->nm_sotype != SOCK_DGRAM ||
	    (nmp->nm_flag & NFSMNT_DUMBTIMR) ||
	    nmp->nm_sent < nmp->nm_cwnd)) {
		splx(s);
		if (nmp->nm_soflags & PR_CONNREQUIRED)
			error = nfs_sndlock(&nmp->nm_flag, rep);
		if (!error) {
			m = m_copym(m, 0, M_COPYALL, M_WAIT);
			error = nfs_send(nmp->nm_so, nmp->nm_nam, m, rep);
			if (nmp->nm_soflags & PR_CONNREQUIRED)
				nfs_sndunlock(&nmp->nm_flag);
		}
		if (!error && (rep->r_flags & R_MUSTRESEND) == 0) {
			nmp->nm_sent += NFS_CWNDSCALE;
			rep->r_flags |= R_SENT;
		}
	} else {
		splx(s);
		rep->r_rtt = -1;
	}

	/*
	 * Wait for the reply from our send or the timer's.
	 */
	if (!error || error == EPIPE)
		error = nfs_reply(rep);

	/*
	 * RPC done, unlink the request.
	 */
	s = splsoftclock();
	rep->r_prev->r_next = rep->r_next;
	rep->r_next->r_prev = rep->r_prev;
	splx(s);

	/*
	 * Decrement the outstanding request count.
	 */
	if (rep->r_flags & R_SENT)
		nmp->nm_sent -= NFS_CWNDSCALE;

	/*
	 * If there was a successful reply and a tprintf msg.
	 * tprintf a response.
	 */
	if (!error && (rep->r_flags & R_TPRINTFMSG))
		nfs_msg(rep->r_procp, nmp->nm_mountp->mnt_stat.f_mntfromname,
		    "is alive again");
	mrep = rep->r_mrep;
	md = rep->r_md;
	dpos = rep->r_dpos;
	if (error) {
		m_freem(rep->r_mreq);
		free((caddr_t)rep, M_NFSREQ);
		return (error);
	}

	/*
	 * break down the rpc header and check if ok
	 */
	nfsm_dissect(tl, u_long *, 3*NFSX_UNSIGNED);
	if (*tl++ == rpc_msgdenied) {
		if (*tl == rpc_mismatch)
			error = EOPNOTSUPP;
		else if ((nmp->nm_flag & NFSMNT_KERB) && *tl++ == rpc_autherr) {
			if (*tl == rpc_rejectedcred && failed_auth == 0) {
				failed_auth++;
				mheadend->m_next = (struct mbuf *)0;
				m_freem(mrep);
				m_freem(rep->r_mreq);
				goto kerbauth;
			} else
				error = EAUTH;
		} else
			error = EACCES;
		m_freem(mrep);
		m_freem(rep->r_mreq);
		free((caddr_t)rep, M_NFSREQ);
		return (error);
	}

	/*
	 * skip over the auth_verf, someday we may want to cache auth_short's
	 * for nfs_reqhead(), but for now just dump it
	 */
	if (*++tl != 0) {
		i = nfsm_rndup(fxdr_unsigned(long, *tl));
		nfsm_adv(i);
	}
	nfsm_dissect(tl, u_long *, NFSX_UNSIGNED);
	/* 0 == ok */
	if (*tl == 0) {
		nfsm_dissect(tl, u_long *, NFSX_UNSIGNED);
		if (*tl != 0) {
			error = fxdr_unsigned(int, *tl);
			m_freem(mrep);
			if ((nmp->nm_flag & NFSMNT_NQNFS) &&
			    error == NQNFS_TRYLATER) {
				error = 0;
				waituntil = time.tv_sec + trylater_delay;
				while (time.tv_sec < waituntil)
					(void) tsleep((caddr_t)&lbolt,
					    PSOCK, "nqnfstry", 0);
				trylater_delay *= nfs_backoff[trylater_cnt];
				if (trylater_cnt < 7)
					trylater_cnt++;
				goto tryagain;
			}

			/*
			 * If the File Handle was stale, invalidate the
			 * lookup cache, just in case.
			 */
			if (error == ESTALE)
				cache_purge(vp);
			m_freem(rep->r_mreq);
			free((caddr_t)rep, M_NFSREQ);
			return (error);
		}

		/*
		 * For nqnfs, get any lease in reply
		 */
		if (nmp->nm_flag & NFSMNT_NQNFS) {
			nfsm_dissect(tl, u_long *, NFSX_UNSIGNED);
			if (*tl) {
				np = VTONFS(vp);
				nqlflag = fxdr_unsigned(int, *tl);
				nfsm_dissect(tl, u_long *, 4*NFSX_UNSIGNED);
				cachable = fxdr_unsigned(int, *tl++);
				reqtime += fxdr_unsigned(int, *tl++);
				if (reqtime > time.tv_sec) {
					fxdr_hyper(tl, &frev);
					nqnfs_clientlease(nmp, np, nqlflag,
					    cachable, reqtime, frev);
				}
			}
		}
		*mrp = mrep;
		*mdp = md;
		*dposp = dpos;
		m_freem(rep->r_mreq);
		FREE((caddr_t)rep, M_NFSREQ);
		return (0);
	}
	m_freem(mrep);
	m_freem(rep->r_mreq);
	free((caddr_t)rep, M_NFSREQ);
	error = EPROTONOSUPPORT;
nfsmout:
	return (error);
}

/*
 * Generate the rpc reply header
 * siz arg. is used to decide if adding a cluster is worthwhile
 */
nfs_rephead(siz, nd, err, cache, frev, mrq, mbp, bposp)
	int siz;
	struct nfsd *nd;
	int err;
	int cache;
	u_quad_t *frev;
	struct mbuf **mrq;
	struct mbuf **mbp;
	caddr_t *bposp;
{
	register u_long *tl;
	register struct mbuf *mreq;
	caddr_t bpos;
	struct mbuf *mb, *mb2;

	MGETHDR(mreq, M_WAIT, MT_DATA);
	mb = mreq;
	/*
	 * If this is a big reply, use a cluster else
	 * try and leave leading space for the lower level headers.
	 */
	siz += RPC_REPLYSIZ;
	if (siz >= MINCLSIZE) {
		MCLGET(mreq, M_WAIT);
	} else
		mreq->m_data += max_hdr;
	tl = mtod(mreq, u_long *);
	mreq->m_len = 6*NFSX_UNSIGNED;
	bpos = ((caddr_t)tl)+mreq->m_len;
	*tl++ = nd->nd_retxid;
	*tl++ = rpc_reply;
	if (err == ERPCMISMATCH || err == NQNFS_AUTHERR) {
		*tl++ = rpc_msgdenied;
		if (err == NQNFS_AUTHERR) {
			*tl++ = rpc_autherr;
			*tl = rpc_rejectedcred;
			mreq->m_len -= NFSX_UNSIGNED;
			bpos -= NFSX_UNSIGNED;
		} else {
			*tl++ = rpc_mismatch;
			*tl++ = txdr_unsigned(2);
			*tl = txdr_unsigned(2);
		}
	} else {
		*tl++ = rpc_msgaccepted;
		*tl++ = 0;
		*tl++ = 0;
		switch (err) {
		case EPROGUNAVAIL:
			*tl = txdr_unsigned(RPC_PROGUNAVAIL);
			break;
		case EPROGMISMATCH:
			*tl = txdr_unsigned(RPC_PROGMISMATCH);
			nfsm_build(tl, u_long *, 2*NFSX_UNSIGNED);
			*tl++ = txdr_unsigned(2);
			*tl = txdr_unsigned(2);	/* someday 3 */
			break;
		case EPROCUNAVAIL:
			*tl = txdr_unsigned(RPC_PROCUNAVAIL);
			break;
		default:
			*tl = 0;
			if (err != VNOVAL) {
				nfsm_build(tl, u_long *, NFSX_UNSIGNED);
				if (err)
					*tl = txdr_unsigned(nfsrv_errmap[err - 1]);
				else
					*tl = 0;
			}
			break;
		};
	}

	/*
	 * For nqnfs, piggyback lease as requested.
	 */
	if (nd->nd_nqlflag != NQL_NOVAL && err == 0) {
		if (nd->nd_nqlflag) {
			nfsm_build(tl, u_long *, 5*NFSX_UNSIGNED);
			*tl++ = txdr_unsigned(nd->nd_nqlflag);
			*tl++ = txdr_unsigned(cache);
			*tl++ = txdr_unsigned(nd->nd_duration);
			txdr_hyper(frev, tl);
		} else {
			if (nd->nd_nqlflag != 0)
				panic("nqreph");
			nfsm_build(tl, u_long *, NFSX_UNSIGNED);
			*tl = 0;
		}
	}
	*mrq = mreq;
	*mbp = mb;
	*bposp = bpos;
	if (err != 0 && err != VNOVAL)
		nfsstats.srvrpc_errs++;
	return (0);
}

/*
 * Nfs timer routine
 * Scan the nfsreq list and retransmit any requests that have timed out
 * To avoid retransmission attempts on STREAM sockets (in the future) make
 * sure to set the r_retry field to 0 (implies nm_retry == 0).
 */
void
nfs_timer(arg)
	void *arg;
{
	register struct nfsreq *rep;
	register struct mbuf *m;
	register struct socket *so;
	register struct nfsmount *nmp;
	register int timeo;
	static long lasttime = 0;
	int s, error;

	s = splnet();
	for (rep = nfsreqh.r_next; rep != &nfsreqh; rep = rep->r_next) {
		nmp = rep->r_nmp;
		if (rep->r_mrep || (rep->r_flags & R_SOFTTERM))
			continue;
		if (nfs_sigintr(nmp, rep, rep->r_procp)) {
			rep->r_flags |= R_SOFTTERM;
			continue;
		}
		if (rep->r_rtt >= 0) {
			rep->r_rtt++;
			if (nmp->nm_flag & NFSMNT_DUMBTIMR)
				timeo = nmp->nm_timeo;
			else
				timeo = NFS_RTO(nmp, proct[rep->r_procnum]);
			if (nmp->nm_timeouts > 0)
				timeo *= nfs_backoff[nmp->nm_timeouts - 1];
			if (rep->r_rtt <= timeo)
				continue;
			if (nmp->nm_timeouts < 8)
				nmp->nm_timeouts++;
		}
		/*
		 * Check for server not responding
		 */
		if ((rep->r_flags & R_TPRINTFMSG) == 0 &&
		    rep->r_rexmit > nmp->nm_deadthresh) {
			nfs_msg(rep->r_procp,
			    nmp->nm_mountp->mnt_stat.f_mntfromname,
			    "not responding");
			rep->r_flags |= R_TPRINTFMSG;
		}
		if (rep->r_rexmit >= rep->r_retry) {	/* too many */
			nfsstats.rpctimeouts++;
			rep->r_flags |= R_SOFTTERM;
			continue;
		}
		if (nmp->nm_sotype != SOCK_DGRAM) {
			if (++rep->r_rexmit > NFS_MAXREXMIT)
				rep->r_rexmit = NFS_MAXREXMIT;
			continue;
		}
		if ((so = nmp->nm_so) == NULL)
			continue;

		/*
		 * If there is enough space and the window allows..
		 *	Resend it
		 * Set r_rtt to -1 in case we fail to send it now.
		 */
		rep->r_rtt = -1;
		if (sbspace(&so->so_snd) >= rep->r_mreq->m_pkthdr.len &&
		    ((nmp->nm_flag & NFSMNT_DUMBTIMR) ||
		     (rep->r_flags & R_SENT) ||
		     nmp->nm_sent < nmp->nm_cwnd) &&
		    (m = m_copym(rep->r_mreq, 0, M_COPYALL, M_DONTWAIT))){
			if ((nmp->nm_flag & NFSMNT_NOCONN) == 0)
				error = (*so->so_proto->pr_usrreq)(so, PRU_SEND, m,
				    (struct mbuf *)0, (struct mbuf *)0);
			else
				error = (*so->so_proto->pr_usrreq)(so, PRU_SEND, m,
				    nmp->nm_nam, (struct mbuf *)0);
			if (error) {
				if (NFSIGNORE_SOERROR(nmp->nm_soflags, error))
					so->so_error = 0;
			} else {
				/*
				 * Iff first send, start timing
				 * else turn timing off, backoff timer
				 * and divide congestion window by 2.
				 */
				if (rep->r_flags & R_SENT) {
					rep->r_flags &= ~R_TIMING;
					if (++rep->r_rexmit > NFS_MAXREXMIT)
						rep->r_rexmit = NFS_MAXREXMIT;
					nmp->nm_cwnd >>= 1;
					if (nmp->nm_cwnd < NFS_CWNDSCALE)
						nmp->nm_cwnd = NFS_CWNDSCALE;
					nfsstats.rpcretries++;
				} else {
					rep->r_flags |= R_SENT;
					nmp->nm_sent += NFS_CWNDSCALE;
				}
				rep->r_rtt = 0;
			}
		}
	}

	/*
	 * Call the nqnfs server timer once a second to handle leases.
	 */
	if (lasttime != time.tv_sec) {
		lasttime = time.tv_sec;
		nqnfs_serverd();
	}
	splx(s);
	timeout(nfs_timer, (caddr_t)0, hz/NFS_HZ);
}

/*
 * Test for a termination condition pending on the process.
 * This is used for NFSMNT_INT mounts.
 */
nfs_sigintr(nmp, rep, p)
	struct nfsmount *nmp;
	struct nfsreq *rep;
	register struct proc *p;
{

	if (rep && (rep->r_flags & R_SOFTTERM))
		return (EINTR);
	if (!(nmp->nm_flag & NFSMNT_INT))
		return (0);
	if (p && p->p_sig && (((p->p_sig &~ p->p_sigmask) &~ p->p_sigignore) &
	    NFSINT_SIGMASK))
		return (EINTR);
	return (0);
}

/*
 * Lock a socket against others.
 * Necessary for STREAM sockets to ensure you get an entire rpc request/reply
 * and also to avoid race conditions between the processes with nfs requests
 * in progress when a reconnect is necessary.
 */
nfs_sndlock(flagp, rep)
	register int *flagp;
	struct nfsreq *rep;
{
	struct proc *p;

	if (rep)
		p = rep->r_procp;
	else
		p = (struct proc *)0;
	while (*flagp & NFSMNT_SNDLOCK) {
		if (nfs_sigintr(rep->r_nmp, rep, p))
			return (EINTR);
		*flagp |= NFSMNT_WANTSND;
		(void) tsleep((caddr_t)flagp, PZERO-1, "nfsndlck", 0);
	}
	*flagp |= NFSMNT_SNDLOCK;
	return (0);
}

/*
 * Unlock the stream socket for others.
 */
void
nfs_sndunlock(flagp)
	register int *flagp;
{

	if ((*flagp & NFSMNT_SNDLOCK) == 0)
		panic("nfs sndunlock");
	*flagp &= ~NFSMNT_SNDLOCK;
	if (*flagp & NFSMNT_WANTSND) {
		*flagp &= ~NFSMNT_WANTSND;
		wakeup((caddr_t)flagp);
	}
}

nfs_rcvlock(rep)
	register struct nfsreq *rep;
{
	register int *flagp = &rep->r_nmp->nm_flag;

	while (*flagp & NFSMNT_RCVLOCK) {
		if (nfs_sigintr(rep->r_nmp, rep, rep->r_procp))
			return (EINTR);
		*flagp |= NFSMNT_WANTRCV;
		(void) tsleep((caddr_t)flagp, PZERO-1, "nfsrcvlck", 0);
	}
	*flagp |= NFSMNT_RCVLOCK;
	return (0);
}

/*
 * Unlock the stream socket for others.
 */
void
nfs_rcvunlock(flagp)
	register int *flagp;
{

	if ((*flagp & NFSMNT_RCVLOCK) == 0)
		panic("nfs rcvunlock");
	*flagp &= ~NFSMNT_RCVLOCK;
	if (*flagp & NFSMNT_WANTRCV) {
		*flagp &= ~NFSMNT_WANTRCV;
		wakeup((caddr_t)flagp);
	}
}

/*
 * Check for badly aligned mbuf data areas and
 * realign data in an mbuf list by copying the data areas up, as required.
 */
void
nfs_realign(m, hsiz)
	register struct mbuf *m;
	int hsiz;
{
	register struct mbuf *m2;
	register int siz, mlen, olen;
	register caddr_t tcp, fcp;
	struct mbuf *mnew;

	while (m) {
		/*
		 * This never happens for UDP, rarely happens for TCP
		 * but frequently happens for iso transport.
		 */
		if ((m->m_len & 0x3) || (mtod(m, int) & 0x3)) {
			olen = m->m_len;
			fcp = mtod(m, caddr_t);
			m->m_flags &= ~M_PKTHDR;
			if (m->m_flags & M_EXT)
				m->m_data = m->m_ext.ext_buf;
			else
				m->m_data = m->m_dat;
			m->m_len = 0;
			tcp = mtod(m, caddr_t);
			mnew = m;
			m2 = m->m_next;

			/*
			 * If possible, only put the first invariant part
			 * of the RPC header in the first mbuf.
			 */
			if (olen <= hsiz)
				mlen = hsiz;
			else
				mlen = M_TRAILINGSPACE(m);

			/*
			 * Loop through the mbuf list consolidating data.
			 */
			while (m) {
				while (olen > 0) {
					if (mlen == 0) {
						m2->m_flags &= ~M_PKTHDR;
						if (m2->m_flags & M_EXT)
							m2->m_data = m2->m_ext.ext_buf;
						else
							m2->m_data = m2->m_dat;
						m2->m_len = 0;
						mlen = M_TRAILINGSPACE(m2);
						tcp = mtod(m2, caddr_t);
						mnew = m2;
						m2 = m2->m_next;
					}
					siz = min(mlen, olen);
					if (tcp != fcp)
						bcopy(fcp, tcp, siz);
					mnew->m_len += siz;
					mlen -= siz;
					olen -= siz;
					tcp += siz;
					fcp += siz;
				}
				m = m->m_next;
				if (m) {
					olen = m->m_len;
					fcp = mtod(m, caddr_t);
				}
			}

			/*
			 * Finally, set m_len == 0 for any trailing mbufs that have
			 * been copied out of.
			 */
			while (m2) {
				m2->m_len = 0;
				m2 = m2->m_next;
			}
			return;
		}
		m = m->m_next;
	}
}

/*
 * Socket upcall routine for the nfsd sockets.
 * The caddr_t arg is a pointer to the "struct nfssvc_sock".
 * Essentially do as much as possible non-blocking, else punt and it will
 * be called with M_WAIT from an nfsd.
 */
void
nfsrv_rcv(so, arg, waitflag)
	struct socket *so;
	caddr_t arg;
	int waitflag;
{
	register struct nfssvc_sock *slp = (struct nfssvc_sock *)arg;
	register struct mbuf *m;
	struct mbuf *mp, *nam;
	struct uio auio;
	int flags, error;

	if ((slp->ns_flag & SLP_VALID) == 0)
		return;
#ifdef notdef
	/*
	 * Define this to test for nfsds handling this under heavy load.
	 */
	if (waitflag == M_DONTWAIT) {
		slp->ns_flag |= SLP_NEEDQ; goto dorecs;
	}
#endif
	auio.uio_procp = NULL;
	if (so->so_type == SOCK_STREAM) {
		/*
		 * If there are already records on the queue, defer soreceive()
		 * to an nfsd so that there is feedback to the TCP layer that
		 * the nfs servers are heavily loaded.
		 */
		if (slp->ns_rec && waitflag == M_DONTWAIT) {
			slp->ns_flag |= SLP_NEEDQ;
			goto dorecs;
		}

		/*
		 * Do soreceive().
		 */
		auio.uio_resid = 1000000000;
		flags = MSG_DONTWAIT;
		error = soreceive(so, &nam, &auio, &mp, (struct mbuf **)0, &flags);
		if (error || mp == (struct mbuf *)0) {
			if (error == EWOULDBLOCK)
				slp->ns_flag |= SLP_NEEDQ;
			else
				slp->ns_flag |= SLP_DISCONN;
			goto dorecs;
		}
		m = mp;
		if (slp->ns_rawend) {
			slp->ns_rawend->m_next = m;
			slp->ns_cc += 1000000000 - auio.uio_resid;
		} else {
			slp->ns_raw = m;
			slp->ns_cc = 1000000000 - auio.uio_resid;
		}
		while (m->m_next)
			m = m->m_next;
		slp->ns_rawend = m;

		/*
		 * Now try and parse record(s) out of the raw stream data.
		 */
		if (error = nfsrv_getstream(slp, waitflag)) {
			if (error == EPERM)
				slp->ns_flag |= SLP_DISCONN;
			else
				slp->ns_flag |= SLP_NEEDQ;
		}
	} else {
		do {
			auio.uio_resid = 1000000000;
			flags = MSG_DONTWAIT;
			error = soreceive(so, &nam, &auio, &mp,
			    (struct mbuf **)0, &flags);
			if (mp) {
				nfs_realign(mp, 10 * NFSX_UNSIGNED);
				if (nam) {
					m = nam;
					m->m_next = mp;
				} else
					m = mp;
				if (slp->ns_recend)
					slp->ns_recend->m_nextpkt = m;
				else
					slp->ns_rec = m;
				slp->ns_recend = m;
				m->m_nextpkt = (struct mbuf *)0;
			}
			if (error) {
				if ((so->so_proto->pr_flags & PR_CONNREQUIRED)
				    && error != EWOULDBLOCK) {
					slp->ns_flag |= SLP_DISCONN;
					goto dorecs;
				}
			}
		} while (mp);
	}

	/*
	 * Now try and process the request records, non-blocking.
	 */
dorecs:
	if (waitflag == M_DONTWAIT &&
	    (slp->ns_rec || (slp->ns_flag & (SLP_NEEDQ | SLP_DISCONN))))
		nfsrv_wakenfsd(slp);
}

/*
 * Try and extract an RPC request from the mbuf data list received on a
 * stream socket. The "waitflag" argument indicates whether or not it
 * can sleep.
 */
nfsrv_getstream(slp, waitflag)
	register struct nfssvc_sock *slp;
	int waitflag;
{
	register struct mbuf *m;
	register char *cp1, *cp2;
	register int len;
	struct mbuf *om, *m2, *recm;
	u_long recmark;

	if (slp->ns_flag & SLP_GETSTREAM)
		panic("nfs getstream");
	slp->ns_flag |= SLP_GETSTREAM;
	for (;;) {
		if (slp->ns_reclen == 0) {
			if (slp->ns_cc < NFSX_UNSIGNED) {
				slp->ns_flag &= ~SLP_GETSTREAM;
				return (0);
			}
			m = slp->ns_raw;
			if (m->m_len >= NFSX_UNSIGNED) {
				bcopy(mtod(m, caddr_t), (caddr_t)&recmark, NFSX_UNSIGNED);
				m->m_data += NFSX_UNSIGNED;
				m->m_len -= NFSX_UNSIGNED;
			} else {
				cp1 = (caddr_t)&recmark;
				cp2 = mtod(m, caddr_t);
				while (cp1 < ((caddr_t)&recmark) + NFSX_UNSIGNED) {
					while (m->m_len == 0) {
						m = m->m_next;
						cp2 = mtod(m, caddr_t);
					}
					*cp1++ = *cp2++;
					m->m_data++;
					m->m_len--;
				}
			}
			slp->ns_cc -= NFSX_UNSIGNED;
			slp->ns_reclen = ntohl(recmark) & ~0x80000000;
			if (slp->ns_reclen < NFS_MINPACKET || slp->ns_reclen > NFS_MAXPACKET) {
				slp->ns_flag &= ~SLP_GETSTREAM;
				return (EPERM);
			}
		}

		/*
		 * Now get the record part.
		 */
		if (slp->ns_cc == slp->ns_reclen) {
			recm = slp->ns_raw;
			slp->ns_raw = slp->ns_rawend = (struct mbuf *)0;
			slp->ns_cc = slp->ns_reclen = 0;
		} else if (slp->ns_cc > slp->ns_reclen) {
			len = 0;
			m = slp->ns_raw;
			om = (struct mbuf *)0;
			while (len < slp->ns_reclen) {
				if ((len + m->m_len) > slp->ns_reclen) {
					m2 = m_copym(m, 0, slp->ns_reclen - len,
					    waitflag);
					if (m2) {
						if (om) {
							om->m_next = m2;
							recm = slp->ns_raw;
						} else
							recm = m2;
						m->m_data += slp->ns_reclen - len;
						m->m_len -= slp->ns_reclen - len;
						len = slp->ns_reclen;
					} else {
						slp->ns_flag &= ~SLP_GETSTREAM;
						return (EWOULDBLOCK);
					}
				} else if ((len + m->m_len) == slp->ns_reclen) {
					om = m;
					len += m->m_len;
					m = m->m_next;
					recm = slp->ns_raw;
					om->m_next = (struct mbuf *)0;
				} else {
					om = m;
					len += m->m_len;
					m = m->m_next;
				}
			}
			slp->ns_raw = m;
			slp->ns_cc -= len;
			slp->ns_reclen = 0;
		} else {
			slp->ns_flag &= ~SLP_GETSTREAM;
			return (0);
		}
		nfs_realign(recm, 10 * NFSX_UNSIGNED);
		if (slp->ns_recend)
			slp->ns_recend->m_nextpkt = recm;
		else
			slp->ns_rec = recm;
		slp->ns_recend = recm;
	}
}

/*
 * Parse an RPC header.
 */
nfsrv_dorec(slp, nd)
	register struct nfssvc_sock *slp;
	register struct nfsd *nd;
{
	register struct mbuf *m;
	int error;

	if ((slp->ns_flag & SLP_VALID) == 0 ||
	    (m = slp->ns_rec) == (struct mbuf *)0)
		return (ENOBUFS);
	if (slp->ns_rec = m->m_nextpkt)
		m->m_nextpkt = (struct mbuf *)0;
	else
		slp->ns_recend = (struct mbuf *)0;
	if (m->m_type == MT_SONAME) {
		nd->nd_nam = m;
		nd->nd_md = nd->nd_mrep = m->m_next;
		m->m_next = (struct mbuf *)0;
	} else {
		nd->nd_nam = (struct mbuf *)0;
		nd->nd_md = nd->nd_mrep = m;
	}
	nd->nd_dpos = mtod(nd->nd_md, caddr_t);
	if (error = nfs_getreq(nd, TRUE)) {
		m_freem(nd->nd_nam);
		return (error);
	}
	return (0);
}
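
/*
 * For reference, the RPC call header that nfs_getreq() below walks through
 * is laid out (RFC 1057) as: xid, message type (CALL), rpc version (2),
 * program, program version, procedure number, then the credential
 * (flavor, length, body) and the verifier (flavor, length, body), all as
 * 32-bit XDR words.
 */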

/*
 * Parse an RPC request
 * - verify it
 * - fill in the cred struct.
 */
nfs_getreq(nd, has_header)
	register struct nfsd *nd;
	int has_header;
{
	register int len, i;
	register u_long *tl;
	register long t1;
	struct uio uio;
	struct iovec iov;
	caddr_t dpos, cp2;
	u_long nfsvers, auth_type;
	int error = 0, nqnfs = 0;
	struct mbuf *mrep, *md;

	mrep = nd->nd_mrep;
	md = nd->nd_md;
	dpos = nd->nd_dpos;
	if (has_header) {
		nfsm_dissect(tl, u_long *, 10*NFSX_UNSIGNED);
		nd->nd_retxid = *tl++;
		if (*tl++ != rpc_call) {
			m_freem(mrep);
			return (EBADRPC);
		}
	} else {
		nfsm_dissect(tl, u_long *, 8*NFSX_UNSIGNED);
	}
	nd->nd_repstat = 0;
	if (*tl++ != rpc_vers) {
		nd->nd_repstat = ERPCMISMATCH;
		nd->nd_procnum = NFSPROC_NOOP;
		return (0);
	}
	nfsvers = nfs_vers;
	if (*tl != nfs_prog) {
		if (*tl == nqnfs_prog) {
			nqnfs++;
			nfsvers = nqnfs_vers;
		} else {
			nd->nd_repstat = EPROGUNAVAIL;
			nd->nd_procnum = NFSPROC_NOOP;
			return (0);
		}
	}
	tl++;
	if (*tl++ != nfsvers) {
		nd->nd_repstat = EPROGMISMATCH;
		nd->nd_procnum = NFSPROC_NOOP;
		return (0);
	}
	nd->nd_procnum = fxdr_unsigned(u_long, *tl++);
	if (nd->nd_procnum == NFSPROC_NULL)
		return (0);
	if (nd->nd_procnum >= NFS_NPROCS ||
	    (!nqnfs && nd->nd_procnum > NFSPROC_STATFS) ||
	    (*tl != rpc_auth_unix && *tl != rpc_auth_kerb)) {
		nd->nd_repstat = EPROCUNAVAIL;
		nd->nd_procnum = NFSPROC_NOOP;
		return (0);
	}
	auth_type = *tl++;
	len = fxdr_unsigned(int, *tl++);
	if (len < 0 || len > RPCAUTH_MAXSIZ) {
		m_freem(mrep);
		return (EBADRPC);
	}

	/*
	 * Handle auth_unix or auth_kerb.
	 */
	if (auth_type == rpc_auth_unix) {
		len = fxdr_unsigned(int, *++tl);
		if (len < 0 || len > NFS_MAXNAMLEN) {
			m_freem(mrep);
			return (EBADRPC);
		}
		nfsm_adv(nfsm_rndup(len));
		nfsm_dissect(tl, u_long *, 3*NFSX_UNSIGNED);
		nd->nd_cr.cr_uid = fxdr_unsigned(uid_t, *tl++);
		nd->nd_cr.cr_gid = fxdr_unsigned(gid_t, *tl++);
		len = fxdr_unsigned(int, *tl);
		if (len < 0 || len > RPCAUTH_UNIXGIDS) {
			m_freem(mrep);
			return (EBADRPC);
		}
		nfsm_dissect(tl, u_long *, (len + 2)*NFSX_UNSIGNED);
		for (i = 1; i <= len; i++)
			if (i < NGROUPS)
				nd->nd_cr.cr_groups[i] = fxdr_unsigned(gid_t, *tl++);
			else
				tl++;
		nd->nd_cr.cr_ngroups = (len >= NGROUPS) ? NGROUPS : (len + 1);
	} else if (auth_type == rpc_auth_kerb) {
		nd->nd_cr.cr_uid = fxdr_unsigned(uid_t, *tl++);
		nd->nd_authlen = fxdr_unsigned(int, *tl);
		iov.iov_len = uio.uio_resid = nfsm_rndup(nd->nd_authlen);
		if (uio.uio_resid > (len - 2*NFSX_UNSIGNED)) {
			m_freem(mrep);
			return (EBADRPC);
		}
		uio.uio_offset = 0;
		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_segflg = UIO_SYSSPACE;
		iov.iov_base = (caddr_t)nd->nd_authstr;
		nfsm_mtouio(&uio, uio.uio_resid);
		nfsm_dissect(tl, u_long *, 2*NFSX_UNSIGNED);
		nd->nd_flag |= NFSD_NEEDAUTH;
	}

	/*
	 * Do we have any use for the verifier.
	 * According to the "Remote Procedure Call Protocol Spec." it
	 * should be AUTH_NULL, but some clients make it AUTH_UNIX?
	 * For now, just skip over it
	 */
	len = fxdr_unsigned(int, *++tl);
	if (len < 0 || len > RPCAUTH_MAXSIZ) {
		m_freem(mrep);
		return (EBADRPC);
	}
	if (len > 0) {
		nfsm_adv(nfsm_rndup(len));
	}

	/*
	 * For nqnfs, get piggybacked lease request.
	 */
	if (nqnfs && nd->nd_procnum != NQNFSPROC_EVICTED) {
		nfsm_dissect(tl, u_long *, NFSX_UNSIGNED);
		nd->nd_nqlflag = fxdr_unsigned(int, *tl);
		if (nd->nd_nqlflag) {
			nfsm_dissect(tl, u_long *, NFSX_UNSIGNED);
			nd->nd_duration = fxdr_unsigned(int, *tl);
		} else
			nd->nd_duration = NQ_MINLEASE;
	} else {
		nd->nd_nqlflag = NQL_NOVAL;
		nd->nd_duration = NQ_MINLEASE;
	}
	nd->nd_md = md;
	nd->nd_dpos = dpos;
	return (0);
nfsmout:
	return (error);
}

/*
 * Search for a sleeping nfsd and wake it up.
 * SIDE EFFECT: If none found, set NFSD_CHECKSLP flag, so that one of the
 * running nfsds will go look for the work in the nfssvc_sock list.
 */
void
nfsrv_wakenfsd(slp)
	struct nfssvc_sock *slp;
{
	register struct nfsd *nd = nfsd_head.nd_next;

	if ((slp->ns_flag & SLP_VALID) == 0)
		return;
	while (nd != (struct nfsd *)&nfsd_head) {
		if (nd->nd_flag & NFSD_WAITING) {
			nd->nd_flag &= ~NFSD_WAITING;
			if (nd->nd_slp)
				panic("nfsd wakeup");
			slp->ns_sref++;
			nd->nd_slp = slp;
			wakeup((caddr_t)nd);
			return;
		}
		nd = nd->nd_next;
	}
	slp->ns_flag |= SLP_DOREC;
	nfsd_head.nd_flag |= NFSD_CHECKSLP;
}

nfs_msg(p, server, msg)
	struct proc *p;
	char *server, *msg;
{
	tpr_t tpr;

	if (p)
		tpr = tprintf_open(p);
	else
		tpr = NULL;
	tprintf(tpr, "nfs server %s: %s\n", server, msg);
	tprintf_close(tpr);
}