/*
 * Copyright (c) 1989, 1991 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Rick Macklem at The University of Guelph.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)nfs_socket.c	7.29 (Berkeley) 03/17/92
 */

/*
 * Socket operations for use by nfs
 */

#include "types.h"
#include "param.h"
#include "uio.h"
#include "proc.h"
#include "signal.h"
#include "mount.h"
#include "kernel.h"
#include "malloc.h"
#include "mbuf.h"
#include "vnode.h"
#include "domain.h"
#include "protosw.h"
#include "socket.h"
#include "socketvar.h"
#include "syslog.h"
#include "tprintf.h"
#include "machine/endian.h"
#include "netinet/in.h"
#include "netinet/tcp.h"
#ifdef ISO
#include "netiso/iso.h"
#endif
#include "ufs/ufs/quota.h"
#include "ufs/ufs/ufsmount.h"
#include "rpcv2.h"
#include "nfsv2.h"
#include "nfs.h"
#include "xdr_subs.h"
#include "nfsm_subs.h"
#include "nfsmount.h"
#include "nfsnode.h"
#include "nfsrtt.h"
#include "nqnfs.h"

#define	TRUE	1
#define	FALSE	0

int netnetnet = sizeof (struct netaddrhash);
/*
 * Estimate rto for an nfs rpc sent via an unreliable datagram.
 * Use the mean and mean deviation of rtt for the appropriate type of rpc
 * for the frequent rpcs and a default for the others.
 * The justification for doing "other" this way is that these rpcs
 * happen so infrequently that timer est. would probably be stale.
 * Also, since many of these rpcs are
 * non-idempotent, a conservative timeout is desired.
 * getattr, lookup - A+2D
 * read, write     - A+4D
 * other           - nm_timeo
 */
#define	NFS_RTO(n, t) \
	((t) == 0 ? (n)->nm_timeo : \
	 ((t) < 3 ? \
	  (((((n)->nm_srtt[t-1] + 3) >> 2) + (n)->nm_sdrtt[t-1] + 1) >> 1) : \
	  ((((n)->nm_srtt[t-1] + 7) >> 3) + (n)->nm_sdrtt[t-1] + 1)))
#define	NFS_SRTT(r)	(r)->r_nmp->nm_srtt[proct[(r)->r_procnum] - 1]
#define	NFS_SDRTT(r)	(r)->r_nmp->nm_sdrtt[proct[(r)->r_procnum] - 1]
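/*
 * nb: worked example of NFS_RTO.  nm_srtt[] is kept scaled by 8 and
 * nm_sdrtt[] by 4 (see the smoothing code in nfs_reply()), so with
 * A = srtt/8 and D = sdrtt/4:
 *	t == 1, 2 (getattr, lookup):  (srtt/4 + sdrtt)/2 ~= A + 2D
 *	t == 3, 4 (read, write):       srtt/8 + sdrtt    ~= A + 4D
 * which matches the table above.  For instance, a smoothed rtt of 2 ticks
 * (srtt == 16) with a deviation of 1 tick (sdrtt == 4) gives a getattr
 * an rto of 4 ticks and a read an rto of 7 (A + 4D plus rounding).
 */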
/*
 * External data, mostly RPC constants in XDR form
 */
extern u_long rpc_reply, rpc_msgdenied, rpc_mismatch, rpc_vers, rpc_auth_unix,
	rpc_msgaccepted, rpc_call, rpc_autherr, rpc_rejectedcred,
	rpc_auth_kerb;
extern u_long nfs_prog, nfs_vers, nqnfs_prog, nqnfs_vers;
extern time_t nqnfsstarttime;
extern int nonidempotent[NFS_NPROCS];

/*
 * Maps errno values to nfs error numbers.
 * Use NFSERR_IO as the catch all for ones not specifically defined in
 * RFC 1094.
 */
static int nfsrv_errmap[ELAST] = {
	NFSERR_PERM, NFSERR_NOENT, NFSERR_IO, NFSERR_IO, NFSERR_IO,
	NFSERR_NXIO, NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO,
	NFSERR_IO, NFSERR_IO, NFSERR_ACCES, NFSERR_IO, NFSERR_IO,
	NFSERR_IO, NFSERR_EXIST, NFSERR_IO, NFSERR_NODEV, NFSERR_NOTDIR,
	NFSERR_ISDIR, NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO,
	NFSERR_IO, NFSERR_FBIG, NFSERR_NOSPC, NFSERR_IO, NFSERR_ROFS,
	NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO,
	NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO,
	NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO,
	NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO,
	NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO,
	NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO,
	NFSERR_IO, NFSERR_IO, NFSERR_NAMETOL, NFSERR_IO, NFSERR_IO,
	NFSERR_NOTEMPTY, NFSERR_IO, NFSERR_IO, NFSERR_DQUOT, NFSERR_STALE,
	NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO,
	NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO,
	NFSERR_IO,
};

/*
 * Defines which timer to use for the procnum.
 * 0 - default
 * 1 - getattr
 * 2 - lookup
 * 3 - read
 * 4 - write
 */
static int proct[NFS_NPROCS] = {
	0, 1, 0, 0, 2, 3, 3, 0, 4, 0, 0, 0, 0, 0, 0, 0, 3, 0, 3, 0, 0, 0,
};

/*
 * There is a congestion window for outstanding rpcs maintained per mount
 * point. The cwnd size is adjusted in roughly the way that:
 * Van Jacobson, Congestion avoidance and Control, In "Proceedings of
 * SIGCOMM '88". ACM, August 1988.
 * describes for TCP. The cwnd size is chopped in half on a retransmit timeout
 * and incremented by 1/cwnd when each rpc reply is received and a full cwnd
 * of rpcs is in progress.
 * (The sent count and cwnd are scaled for integer arith.)
 * Variants of "slow start" were tried and were found to be too much of a
 * performance hit (ave. rtt 3 times larger),
 * I suspect due to the large rtt that nfs rpcs have.
 */
#define	NFS_CWNDSCALE	256
#define	NFS_MAXCWND	(NFS_CWNDSCALE * 32)
static int nfs_backoff[8] = { 2, 4, 8, 16, 32, 64, 128, 256, };
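/*
 * nb: nm_sent and nm_cwnd are kept in units of NFS_CWNDSCALE, i.e. 256
 * per outstanding rpc, so NFS_MAXCWND (256 * 32) allows at most 32 rpcs
 * in flight per mount point.  nfs_backoff[] supplies the multiplier
 * applied to the rto after successive timeouts (doubling from 2 up to
 * 256); nm_timeouts indexes it and is cleared when a reply comes in.
 */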
int nfs_sbwait();
void nfs_disconnect(), nfs_realign(), nfsrv_wakenfsd(), nfs_sndunlock();
void nfs_rcvunlock(), nqnfs_serverd();
struct mbuf *nfsm_rpchead();
int nfsrtton = 0;
struct nfsrtt nfsrtt;
struct nfsd nfsd_head;

int	nfsrv_null(),
	nfsrv_getattr(),
	nfsrv_setattr(),
	nfsrv_lookup(),
	nfsrv_readlink(),
	nfsrv_read(),
	nfsrv_write(),
	nfsrv_create(),
	nfsrv_remove(),
	nfsrv_rename(),
	nfsrv_link(),
	nfsrv_symlink(),
	nfsrv_mkdir(),
	nfsrv_rmdir(),
	nfsrv_readdir(),
	nfsrv_statfs(),
	nfsrv_noop(),
	nqnfsrv_readdirlook(),
	nqnfsrv_getlease(),
	nqnfsrv_vacated();

int (*nfsrv_procs[NFS_NPROCS])() = {
	nfsrv_null,
	nfsrv_getattr,
	nfsrv_setattr,
	nfsrv_noop,
	nfsrv_lookup,
	nfsrv_readlink,
	nfsrv_read,
	nfsrv_noop,
	nfsrv_write,
	nfsrv_create,
	nfsrv_remove,
	nfsrv_rename,
	nfsrv_link,
	nfsrv_symlink,
	nfsrv_mkdir,
	nfsrv_rmdir,
	nfsrv_readdir,
	nfsrv_statfs,
	nqnfsrv_readdirlook,
	nqnfsrv_getlease,
	nqnfsrv_vacated,
};

struct nfsreq nfsreqh;

/*
 * Initialize sockets and congestion for a new NFS connection.
 * We do not free the sockaddr if error.
 */
nfs_connect(nmp, rep)
	register struct nfsmount *nmp;
	struct nfsreq *rep;
{
	register struct socket *so;
	int s, error, rcvreserve, sndreserve;
	struct sockaddr *saddr;
	struct sockaddr_in *sin;
	struct mbuf *m;
	u_short tport;

	nmp->nm_so = (struct socket *)0;
	saddr = mtod(nmp->nm_nam, struct sockaddr *);
	if (error = socreate(saddr->sa_family,
	    &nmp->nm_so, nmp->nm_sotype, nmp->nm_soproto))
		goto bad;
	so = nmp->nm_so;
	nmp->nm_soflags = so->so_proto->pr_flags;

	/*
	 * Some servers require that the client port be a reserved port number.
	 */
	if (saddr->sa_family == AF_INET && (nmp->nm_flag & NFSMNT_RESVPORT)) {
		MGET(m, M_WAIT, MT_SONAME);
		sin = mtod(m, struct sockaddr_in *);
		sin->sin_len = m->m_len = sizeof (struct sockaddr_in);
		sin->sin_family = AF_INET;
		sin->sin_addr.s_addr = INADDR_ANY;
		tport = IPPORT_RESERVED - 1;
		sin->sin_port = htons(tport);
		while ((error = sobind(so, m)) == EADDRINUSE &&
		       --tport > IPPORT_RESERVED / 2)
			sin->sin_port = htons(tport);
		m_freem(m);
		if (error)
			goto bad;
	}

	/*
	 * Protocols that do not require connections may be optionally left
	 * unconnected for servers that reply from a port other than NFS_PORT.
	 */
	if (nmp->nm_flag & NFSMNT_NOCONN) {
		if (nmp->nm_soflags & PR_CONNREQUIRED) {
			error = ENOTCONN;
			goto bad;
		}
	} else {
		if (error = soconnect(so, nmp->nm_nam))
			goto bad;

		/*
		 * Wait for the connection to complete. Cribbed from the
		 * connect system call but with the wait timing out so
		 * that interruptible mounts don't hang here for a long time.
		 */
		s = splnet();
		while ((so->so_state & SS_ISCONNECTING) && so->so_error == 0) {
			(void) tsleep((caddr_t)&so->so_timeo, PSOCK,
				"nfscon", 2 * hz);
			if ((so->so_state & SS_ISCONNECTING) &&
			    so->so_error == 0 && rep &&
			    (error = nfs_sigintr(nmp, rep, rep->r_procp))) {
				so->so_state &= ~SS_ISCONNECTING;
				splx(s);
				goto bad;
			}
		}
		if (so->so_error) {
			error = so->so_error;
			so->so_error = 0;
			splx(s);
			goto bad;
		}
		splx(s);
	}
	if (nmp->nm_flag & (NFSMNT_SOFT | NFSMNT_INT)) {
		so->so_rcv.sb_timeo = (5 * hz);
		so->so_snd.sb_timeo = (5 * hz);
	} else {
		so->so_rcv.sb_timeo = 0;
		so->so_snd.sb_timeo = 0;
	}
	if (nmp->nm_sotype == SOCK_DGRAM) {
		sndreserve = nmp->nm_wsize + NFS_MAXPKTHDR;
		rcvreserve = nmp->nm_rsize + NFS_MAXPKTHDR;
	} else if (nmp->nm_sotype == SOCK_SEQPACKET) {
		sndreserve = (nmp->nm_wsize + NFS_MAXPKTHDR) * 2;
		rcvreserve = (nmp->nm_rsize + NFS_MAXPKTHDR) * 2;
	} else {
		if (nmp->nm_sotype != SOCK_STREAM)
			panic("nfscon sotype");
		if (so->so_proto->pr_flags & PR_CONNREQUIRED) {
			MGET(m, M_WAIT, MT_SOOPTS);
			*mtod(m, int *) = 1;
			m->m_len = sizeof(int);
			sosetopt(so, SOL_SOCKET, SO_KEEPALIVE, m);
		}
		if (so->so_proto->pr_protocol == IPPROTO_TCP) {
			MGET(m, M_WAIT, MT_SOOPTS);
			*mtod(m, int *) = 1;
			m->m_len = sizeof(int);
			sosetopt(so, IPPROTO_TCP, TCP_NODELAY, m);
		}
		sndreserve = (nmp->nm_wsize + NFS_MAXPKTHDR + sizeof (u_long))
			* 2;
		rcvreserve = (nmp->nm_rsize + NFS_MAXPKTHDR + sizeof (u_long))
			* 2;
	}
	if (error = soreserve(so, sndreserve, rcvreserve))
		goto bad;
	so->so_rcv.sb_flags |= SB_NOINTR;
	so->so_snd.sb_flags |= SB_NOINTR;

	/* Initialize other non-zero congestion variables */
	nmp->nm_srtt[0] = nmp->nm_srtt[1] = nmp->nm_srtt[2] =
		nmp->nm_srtt[3] = nmp->nm_srtt[4] = (NFS_TIMEO << 3);
	nmp->nm_sdrtt[0] = nmp->nm_sdrtt[1] = nmp->nm_sdrtt[2] =
		nmp->nm_sdrtt[3] = nmp->nm_sdrtt[4] = 0;
	nmp->nm_cwnd = NFS_MAXCWND / 2;		/* Initial send window */
	nmp->nm_sent = 0;
	nmp->nm_timeouts = 0;
	return (0);

bad:
	nfs_disconnect(nmp);
	return (error);
}
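
/*
 * nb: the soreserve() sizes above allow roughly one maximal request or
 * reply (plus packet header) for datagram sockets and two for seqpacket
 * and stream sockets; the extra sizeof (u_long) on stream sockets covers
 * the Sun RPC record mark that nfs_request() prepends to each request.
 */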

/*
 * Reconnect routine:
 * Called when a connection is broken on a reliable protocol.
 * - clean up the old socket
 * - nfs_connect() again
 * - set R_MUSTRESEND for all outstanding requests on mount point
 * If this fails the mount point is DEAD!
 * nb: Must be called with the nfs_sndlock() set on the mount point.
 */
nfs_reconnect(rep)
	register struct nfsreq *rep;
{
	register struct nfsreq *rp;
	register struct nfsmount *nmp = rep->r_nmp;
	int error;

	nfs_disconnect(nmp);
	while (error = nfs_connect(nmp, rep)) {
		if (error == EINTR || error == ERESTART)
			return (EINTR);
		(void) tsleep((caddr_t)&lbolt, PSOCK, "nfscon", 0);
	}

	/*
	 * Loop through outstanding request list and fix up all requests
	 * on old socket.
	 */
	rp = nfsreqh.r_next;
	while (rp != &nfsreqh) {
		if (rp->r_nmp == nmp)
			rp->r_flags |= R_MUSTRESEND;
		rp = rp->r_next;
	}
	return (0);
}

/*
 * NFS disconnect. Clean up and unlink.
 */
void
nfs_disconnect(nmp)
	register struct nfsmount *nmp;
{
	register struct socket *so;

	if (nmp->nm_so) {
		so = nmp->nm_so;
		nmp->nm_so = (struct socket *)0;
		soshutdown(so, 2);
		soclose(so);
	}
}

/*
 * This is the nfs send routine. For connection based socket types, it
 * must be called with an nfs_sndlock() on the socket.
 * "rep == NULL" indicates that it has been called from a server.
 * For the client side:
 * - return EINTR if the RPC is terminated, 0 otherwise
 * - set R_MUSTRESEND if the send fails for any reason
 * - do any cleanup required by recoverable socket errors (???)
 * For the server side:
 * - return EINTR or ERESTART if interrupted by a signal
 * - return EPIPE if a connection is lost for connection based sockets (TCP...)
 * - do any cleanup required by recoverable socket errors (???)
 */
nfs_send(so, nam, top, rep)
	register struct socket *so;
	struct mbuf *nam;
	register struct mbuf *top;
	struct nfsreq *rep;
{
	struct mbuf *sendnam;
	int error, soflags, flags;

	if (rep) {
		if (rep->r_flags & R_SOFTTERM) {
			m_freem(top);
			return (EINTR);
		}
		if ((so = rep->r_nmp->nm_so) == NULL) {
			rep->r_flags |= R_MUSTRESEND;
			m_freem(top);
			return (0);
		}
		rep->r_flags &= ~R_MUSTRESEND;
		soflags = rep->r_nmp->nm_soflags;
	} else
		soflags = so->so_proto->pr_flags;
	if ((soflags & PR_CONNREQUIRED) || (so->so_state & SS_ISCONNECTED))
		sendnam = (struct mbuf *)0;
	else
		sendnam = nam;
	if (so->so_type == SOCK_SEQPACKET)
		flags = MSG_EOR;
	else
		flags = 0;

	error = sosend(so, sendnam, (struct uio *)0, top,
		(struct mbuf *)0, flags);
	if (error) {
		if (rep) {
			log(LOG_INFO, "nfs send error %d for server %s\n", error,
			    rep->r_nmp->nm_mountp->mnt_stat.f_mntfromname);
			/*
			 * Deal with errors for the client side.
			 */
			if (rep->r_flags & R_SOFTTERM)
				error = EINTR;
			else
				rep->r_flags |= R_MUSTRESEND;
		} else
			log(LOG_INFO, "nfsd send error %d\n", error);

		/*
		 * Handle any recoverable (soft) socket errors here. (???)
		 */
		if (error != EINTR && error != ERESTART &&
		    error != EWOULDBLOCK && error != EPIPE)
			error = 0;
	}
	return (error);
}
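
/*
 * nb: on stream sockets each RPC is preceded by a 4 byte Sun RPC record
 * mark, sent in network byte order: the high bit means "last fragment"
 * (always set here, see nfs_request()) and the low 31 bits give the
 * fragment length.  nfs_receive() below strips the mark with
 * ntohl(len) & ~0x80000000 and then reads exactly that many bytes.
 */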

/*
 * Receive a Sun RPC Request/Reply. For SOCK_DGRAM, the work is all
 * done by soreceive(), but for SOCK_STREAM we must deal with the Record
 * Mark and consolidate the data into a new mbuf list.
 * nb: Sometimes TCP passes the data up to soreceive() in long lists of
 *     small mbufs.
 * For SOCK_STREAM we must be very careful to read an entire record once
 * we have read any of it, even if the system call has been interrupted.
 */
nfs_receive(rep, aname, mp)
	register struct nfsreq *rep;
	struct mbuf **aname;
	struct mbuf **mp;
{
	register struct socket *so;
	struct uio auio;
	struct iovec aio;
	register struct mbuf *m;
	struct mbuf *control;
	u_long len;
	struct mbuf **getnam;
	int error, sotype, rcvflg;
	struct proc *p = curproc;	/* XXX */

	/*
	 * Set up arguments for soreceive()
	 */
	*mp = (struct mbuf *)0;
	*aname = (struct mbuf *)0;
	sotype = rep->r_nmp->nm_sotype;

	/*
	 * For reliable protocols, lock against other senders/receivers
	 * in case a reconnect is necessary.
	 * For SOCK_STREAM, first get the Record Mark to find out how much
	 * more there is to get.
	 * We must lock the socket against other receivers
	 * until we have an entire rpc request/reply.
	 */
	if (sotype != SOCK_DGRAM) {
		if (error = nfs_sndlock(&rep->r_nmp->nm_flag, rep))
			return (error);
tryagain:
		/*
		 * Check for fatal errors and resending request.
		 */
		/*
		 * Ugh: If a reconnect attempt just happened, nm_so
		 * would have changed. NULL indicates a failed
		 * attempt that has essentially shut down this
		 * mount point.
		 */
		if (rep->r_mrep || (rep->r_flags & R_SOFTTERM)) {
			nfs_sndunlock(&rep->r_nmp->nm_flag);
			return (EINTR);
		}
		if ((so = rep->r_nmp->nm_so) == NULL) {
			if (error = nfs_reconnect(rep)) {
				nfs_sndunlock(&rep->r_nmp->nm_flag);
				return (error);
			}
			goto tryagain;
		}
		while (rep->r_flags & R_MUSTRESEND) {
			m = m_copym(rep->r_mreq, 0, M_COPYALL, M_WAIT);
			nfsstats.rpcretries++;
			if (error = nfs_send(so, rep->r_nmp->nm_nam, m, rep)) {
				if (error == EINTR || error == ERESTART ||
				    (error = nfs_reconnect(rep))) {
					nfs_sndunlock(&rep->r_nmp->nm_flag);
					return (error);
				}
				goto tryagain;
			}
		}
		nfs_sndunlock(&rep->r_nmp->nm_flag);
		if (sotype == SOCK_STREAM) {
			aio.iov_base = (caddr_t) &len;
			aio.iov_len = sizeof(u_long);
			auio.uio_iov = &aio;
			auio.uio_iovcnt = 1;
			auio.uio_segflg = UIO_SYSSPACE;
			auio.uio_rw = UIO_READ;
			auio.uio_offset = 0;
			auio.uio_resid = sizeof(u_long);
			auio.uio_procp = p;
			do {
				rcvflg = MSG_WAITALL;
				error = soreceive(so, (struct mbuf **)0, &auio,
				    (struct mbuf **)0, (struct mbuf **)0,
				    &rcvflg);
				if (error == EWOULDBLOCK && rep) {
					if (rep->r_flags & R_SOFTTERM)
						return (EINTR);
				}
			} while (error == EWOULDBLOCK);
			if (!error && auio.uio_resid > 0) {
				log(LOG_INFO,
				    "short receive (%d/%d) from nfs server %s\n",
				    sizeof(u_long) - auio.uio_resid,
				    sizeof(u_long),
				    rep->r_nmp->nm_mountp->mnt_stat.f_mntfromname);
				error = EPIPE;
			}
			if (error)
				goto errout;
			len = ntohl(len) & ~0x80000000;
			/*
			 * This is SERIOUS! We are out of sync with the sender
			 * and forcing a disconnect/reconnect is all I can do.
			 */
			if (len > NFS_MAXPACKET) {
				log(LOG_ERR, "%s (%d) from nfs server %s\n",
				    "impossible packet length",
				    len,
				    rep->r_nmp->nm_mountp->mnt_stat.f_mntfromname);
				error = EFBIG;
				goto errout;
			}
			auio.uio_resid = len;
			do {
				rcvflg = MSG_WAITALL;
				error = soreceive(so, (struct mbuf **)0,
				    &auio, mp, (struct mbuf **)0, &rcvflg);
			} while (error == EWOULDBLOCK || error == EINTR ||
				 error == ERESTART);
			if (!error && auio.uio_resid > 0) {
				log(LOG_INFO,
				    "short receive (%d/%d) from nfs server %s\n",
				    len - auio.uio_resid, len,
				    rep->r_nmp->nm_mountp->mnt_stat.f_mntfromname);
				error = EPIPE;
			}
		} else {
			/*
			 * NB: Since uio_resid is big, MSG_WAITALL is ignored
			 * and soreceive() will return when it has either a
			 * control msg or a data msg.
			 * We have no use for control msg., but must grab them
			 * and then throw them away so we know what is going
			 * on.
			 */
			auio.uio_resid = len = 100000000;	/* Anything Big */
			auio.uio_procp = p;
			do {
				rcvflg = 0;
				error = soreceive(so, (struct mbuf **)0,
				    &auio, mp, &control, &rcvflg);
				if (control)
					m_freem(control);
				if (error == EWOULDBLOCK && rep) {
					if (rep->r_flags & R_SOFTTERM)
						return (EINTR);
				}
			} while (error == EWOULDBLOCK ||
				 (!error && *mp == NULL && control));
			if ((rcvflg & MSG_EOR) == 0)
				printf("Egad!!\n");
			if (!error && *mp == NULL)
				error = EPIPE;
			len -= auio.uio_resid;
		}
errout:
		if (error && error != EINTR && error != ERESTART) {
			m_freem(*mp);
			*mp = (struct mbuf *)0;
			if (error != EPIPE)
				log(LOG_INFO,
				    "receive error %d from nfs server %s\n",
				    error,
				    rep->r_nmp->nm_mountp->mnt_stat.f_mntfromname);
			error = nfs_sndlock(&rep->r_nmp->nm_flag, rep);
			if (!error)
				error = nfs_reconnect(rep);
			if (!error)
				goto tryagain;
		}
	} else {
		if ((so = rep->r_nmp->nm_so) == NULL)
			return (EACCES);
		if (so->so_state & SS_ISCONNECTED)
			getnam = (struct mbuf **)0;
		else
			getnam = aname;
		auio.uio_resid = len = 1000000;
		auio.uio_procp = p;
		do {
			rcvflg = 0;
			error = soreceive(so, getnam, &auio, mp,
				(struct mbuf **)0, &rcvflg);
			if (error == EWOULDBLOCK &&
			    (rep->r_flags & R_SOFTTERM))
				return (EINTR);
		} while (error == EWOULDBLOCK);
		len -= auio.uio_resid;
	}
	if (error) {
		m_freem(*mp);
		*mp = (struct mbuf *)0;
	}
	/*
	 * Search for any mbufs that are not a multiple of 4 bytes long
	 * or with m_data not longword aligned.
	 * These could cause pointer alignment problems, so copy them to
	 * well aligned mbufs.
	 */
	nfs_realign(*mp, 5 * NFSX_UNSIGNED);
	return (error);
}

/*
 * Implement receipt of reply on a socket.
 * We must search through the list of received datagrams matching them
 * with outstanding requests using the xid, until ours is found.
 */
/* ARGSUSED */
nfs_reply(myrep)
	struct nfsreq *myrep;
{
	register struct nfsreq *rep;
	register struct nfsmount *nmp = myrep->r_nmp;
	register long t1;
	struct mbuf *mrep, *nam, *md;
	u_long rxid, *tl;
	caddr_t dpos, cp2;
	int error;

	/*
	 * Loop around until we get our own reply
	 */
	for (;;) {
		/*
		 * Lock against other receivers so that I don't get stuck in
		 * sbwait() after someone else has received my reply for me.
		 * Also necessary for connection based protocols to avoid
		 * race conditions during a reconnect.
		 */
		if (error = nfs_rcvlock(myrep))
			return (error);
		/* Already received, bye bye */
		if (myrep->r_mrep != NULL) {
			nfs_rcvunlock(&nmp->nm_flag);
			return (0);
		}
		/*
		 * Get the next Rpc reply off the socket
		 */
		error = nfs_receive(myrep, &nam, &mrep);
		nfs_rcvunlock(&nmp->nm_flag);
		if (error) printf("rcv err=%d\n", error);
		if (error) {

			/*
			 * Ignore routing errors on connectionless protocols??
			 */
			if (NFSIGNORE_SOERROR(nmp->nm_soflags, error)) {
				nmp->nm_so->so_error = 0;
				continue;
			}
			return (error);
		}
		if (nam)
			m_freem(nam);

		/*
		 * Get the xid and check that it is an rpc reply
		 */
		md = mrep;
		dpos = mtod(md, caddr_t);
		nfsm_dissect(tl, u_long *, 2*NFSX_UNSIGNED);
		rxid = *tl++;
		if (*tl != rpc_reply) {
			if (nmp->nm_flag & NFSMNT_NQNFS) {
				if (nqnfs_callback(nmp, mrep, md, dpos))
					nfsstats.rpcinvalid++;
			} else {
				nfsstats.rpcinvalid++;
				m_freem(mrep);
			}
nfsmout:
			continue;
		}

		/*
		 * Loop through the request list to match up the reply
		 * Iff no match, just drop the datagram
		 */
		rep = nfsreqh.r_next;
		while (rep != &nfsreqh) {
			if (rep->r_mrep == NULL && rxid == rep->r_xid) {
				/* Found it.. */
				rep->r_mrep = mrep;
				rep->r_md = md;
				rep->r_dpos = dpos;
				if (nfsrtton) {
					struct rttl *rt;

					rt = &nfsrtt.rttl[nfsrtt.pos];
					rt->proc = rep->r_procnum;
					rt->rto = NFS_RTO(nmp, proct[rep->r_procnum]);
					rt->sent = nmp->nm_sent;
					rt->cwnd = nmp->nm_cwnd;
					rt->srtt = nmp->nm_srtt[proct[rep->r_procnum] - 1];
					rt->sdrtt = nmp->nm_sdrtt[proct[rep->r_procnum] - 1];
					rt->fsid = nmp->nm_mountp->mnt_stat.f_fsid;
					rt->tstamp = time;
					if (rep->r_flags & R_TIMING)
						rt->rtt = rep->r_rtt;
					else
						rt->rtt = 1000000;
					nfsrtt.pos = (nfsrtt.pos + 1) % NFSRTTLOGSIZ;
				}
				/*
				 * Update congestion window.
				 * Do the additive increase of
				 * one rpc/rtt.
				 */
				if (nmp->nm_cwnd <= nmp->nm_sent) {
					nmp->nm_cwnd +=
					   (NFS_CWNDSCALE * NFS_CWNDSCALE +
					   (nmp->nm_cwnd >> 1)) / nmp->nm_cwnd;
					if (nmp->nm_cwnd > NFS_MAXCWND)
						nmp->nm_cwnd = NFS_MAXCWND;
				}
				nmp->nm_sent -= NFS_CWNDSCALE;
				/*
				 * Update rtt using a gain of 0.125 on the mean
				 * and a gain of 0.25 on the deviation.
				 */
				if (rep->r_flags & R_TIMING) {
					/*
					 * Since the timer resolution of
					 * NFS_HZ is so coarse, it can often
					 * result in r_rtt == 0. Since
					 * r_rtt == N means that the actual
					 * rtt is between N+dt and N+2-dt ticks,
					 * add 1.
					 */
					t1 = rep->r_rtt + 1;
					t1 -= (NFS_SRTT(rep) >> 3);
					NFS_SRTT(rep) += t1;
					if (t1 < 0)
						t1 = -t1;
					t1 -= (NFS_SDRTT(rep) >> 2);
					NFS_SDRTT(rep) += t1;
				}
				nmp->nm_timeouts = 0;
				break;
			}
			rep = rep->r_next;
		}
		/*
		 * If not matched to a request, drop it.
		 * If it's mine, get out.
		 */
		if (rep == &nfsreqh) {
			nfsstats.rpcunexpected++;
			m_freem(mrep);
		} else if (rep == myrep)
			return (0);
	}
}
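
/*
 * nb: the additive increase above works out to roughly one rpc per
 * window of replies: since nm_cwnd is kept in NFS_CWNDSCALE units,
 * adding NFS_CWNDSCALE * NFS_CWNDSCALE / nm_cwnd on each reply
 * accumulates to about NFS_CWNDSCALE (one rpc) after
 * nm_cwnd / NFS_CWNDSCALE replies, i.e. one full window.  The
 * (nm_cwnd >> 1) term just rounds the integer divide.
 */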

/*
 * nfs_request - goes something like this
 *	- fill in request struct
 *	- links it into list
 *	- calls nfs_send() for first transmit
 *	- calls nfs_receive() to get reply
 *	- break down rpc header and return with nfs reply pointed to
 *	  by mrep or error
 * nb: always frees up mreq mbuf list
 */
nfs_request(vp, mrest, procnum, procp, cred, mrp, mdp, dposp)
	struct vnode *vp;
	struct mbuf *mrest;
	int procnum;
	struct proc *procp;
	struct ucred *cred;
	struct mbuf **mrp;
	struct mbuf **mdp;
	caddr_t *dposp;
{
	register struct mbuf *m, *mrep;
	register struct nfsreq *rep;
	register u_long *tl;
	register int i;
	struct nfsmount *nmp;
	struct mbuf *md, *mheadend;
	struct nfsreq *reph;
	struct nfsnode *tp, *np;
	time_t reqtime, waituntil;
	caddr_t dpos, cp2;
	int t1, nqlflag, cachable, s, error = 0, mrest_len, auth_len, auth_type;
	int trylater_delay = NQ_TRYLATERDEL, trylater_cnt = 0, failed_auth = 0;
	u_long xid;
	char *auth_str;

	nmp = VFSTONFS(vp->v_mount);
	MALLOC(rep, struct nfsreq *, sizeof(struct nfsreq), M_NFSREQ, M_WAITOK);
	rep->r_nmp = nmp;
	rep->r_vp = vp;
	rep->r_procp = procp;
	rep->r_procnum = procnum;
	i = 0;
	m = mrest;
	while (m) {
		i += m->m_len;
		m = m->m_next;
	}
	mrest_len = i;

	/*
	 * Get the RPC header with authorization.
	 */
kerbauth:
	auth_str = (char *)0;
	if (nmp->nm_flag & NFSMNT_KERB) {
		if (failed_auth) {
			error = nfs_getauth(nmp, rep, cred, &auth_type,
				&auth_str, &auth_len);
			if (error) {
				free((caddr_t)rep, M_NFSREQ);
				m_freem(mrest);
				return (error);
			}
		} else {
			auth_type = RPCAUTH_UNIX;
			auth_len = 5 * NFSX_UNSIGNED;
		}
	} else {
		auth_type = RPCAUTH_UNIX;
		auth_len = ((((cred->cr_ngroups - 1) > nmp->nm_numgrps) ?
			nmp->nm_numgrps : (cred->cr_ngroups - 1)) << 2) +
			5 * NFSX_UNSIGNED;
	}
	m = nfsm_rpchead(cred, (nmp->nm_flag & NFSMNT_NQNFS), procnum,
	     auth_type, auth_len, auth_str, mrest, mrest_len, &mheadend, &xid);
	if (auth_str)
		free(auth_str, M_TEMP);

	/*
	 * For stream protocols, insert a Sun RPC Record Mark.
	 */
	if (nmp->nm_sotype == SOCK_STREAM) {
		M_PREPEND(m, NFSX_UNSIGNED, M_WAIT);
		*mtod(m, u_long *) = htonl(0x80000000 |
			 (m->m_pkthdr.len - NFSX_UNSIGNED));
	}
	rep->r_mreq = m;
	rep->r_xid = xid;
tryagain:
	if (nmp->nm_flag & NFSMNT_SOFT)
		rep->r_retry = nmp->nm_retry;
	else
		rep->r_retry = NFS_MAXREXMIT + 1;	/* past clip limit */
	rep->r_rtt = rep->r_rexmit = 0;
	if (proct[procnum] > 0)
		rep->r_flags = R_TIMING;
	else
		rep->r_flags = 0;
	rep->r_mrep = NULL;

	/*
	 * Do the client side RPC.
	 */
	nfsstats.rpcrequests++;
	/*
	 * Chain request into list of outstanding requests. Be sure
	 * to put it LAST so timer finds oldest requests first.
	 */
	s = splsoftclock();
	reph = &nfsreqh;
	reph->r_prev->r_next = rep;
	rep->r_prev = reph->r_prev;
	reph->r_prev = rep;
	rep->r_next = reph;

	/* Get send time for nqnfs */
	reqtime = time.tv_sec;

	/*
	 * If backing off another request or avoiding congestion, don't
	 * send this one now but let timer do it. If not timing a request,
	 * do it now.
	 */
	if (nmp->nm_so && (nmp->nm_sotype != SOCK_DGRAM ||
		(nmp->nm_flag & NFSMNT_DUMBTIMR) ||
		nmp->nm_sent < nmp->nm_cwnd)) {
		splx(s);
		if (nmp->nm_soflags & PR_CONNREQUIRED)
			error = nfs_sndlock(&nmp->nm_flag, rep);
		if (!error) {
			m = m_copym(m, 0, M_COPYALL, M_WAIT);
			error = nfs_send(nmp->nm_so, nmp->nm_nam, m, rep);
			if (nmp->nm_soflags & PR_CONNREQUIRED)
				nfs_sndunlock(&nmp->nm_flag);
		}
		if (!error && (rep->r_flags & R_MUSTRESEND) == 0) {
			nmp->nm_sent += NFS_CWNDSCALE;
			rep->r_flags |= R_SENT;
		}
	} else {
		splx(s);
		rep->r_rtt = -1;
	}

	/*
	 * Wait for the reply from our send or the timer's.
	 */
	if (!error)
		error = nfs_reply(rep);

	/*
	 * RPC done, unlink the request.
	 */
	s = splsoftclock();
	rep->r_prev->r_next = rep->r_next;
	rep->r_next->r_prev = rep->r_prev;
	splx(s);

	/*
	 * If there was a successful reply and a tprintf msg.
	 * tprintf a response.
	 */
	if (!error && (rep->r_flags & R_TPRINTFMSG))
		nfs_msg(rep->r_procp, nmp->nm_mountp->mnt_stat.f_mntfromname,
		    "is alive again");
	mrep = rep->r_mrep;
	md = rep->r_md;
	dpos = rep->r_dpos;
	if (error) {
		m_freem(rep->r_mreq);
		free((caddr_t)rep, M_NFSREQ);
		return (error);
	}

	/*
	 * break down the rpc header and check if ok
	 */
	nfsm_dissect(tl, u_long *, 3*NFSX_UNSIGNED);
	if (*tl++ == rpc_msgdenied) {
		if (*tl == rpc_mismatch)
			error = EOPNOTSUPP;
		else if ((nmp->nm_flag & NFSMNT_KERB) && *tl++ == rpc_autherr) {
			if (*tl == rpc_rejectedcred && failed_auth == 0) {
				failed_auth++;
				mheadend->m_next = (struct mbuf *)0;
				m_freem(mrep);
				m_freem(rep->r_mreq);
				goto kerbauth;
			} else
				error = EAUTH;
		} else
			error = EACCES;
		m_freem(mrep);
		m_freem(rep->r_mreq);
		free((caddr_t)rep, M_NFSREQ);
		return (error);
	}

	/*
	 * skip over the auth_verf, someday we may want to cache auth_short's
	 * for nfs_reqhead(), but for now just dump it
	 */
	if (*++tl != 0) {
		i = nfsm_rndup(fxdr_unsigned(long, *tl));
		nfsm_adv(i);
	}
	nfsm_dissect(tl, u_long *, NFSX_UNSIGNED);
	/* 0 == ok */
	if (*tl == 0) {
		nfsm_dissect(tl, u_long *, NFSX_UNSIGNED);
		if (*tl != 0) {
			error = fxdr_unsigned(int, *tl);
			m_freem(mrep);
			if ((nmp->nm_flag & NFSMNT_NQNFS) &&
			    error == NQNFS_TRYLATER) {
				error = 0;
				waituntil = time.tv_sec + trylater_delay;
				while (time.tv_sec < waituntil)
					(void) tsleep((caddr_t)&lbolt,
						PSOCK, "nqnfstry", 0);
				trylater_delay *= nfs_backoff[trylater_cnt];
				if (trylater_cnt < 7)
					trylater_cnt++;
				goto tryagain;
			}
			m_freem(rep->r_mreq);
			free((caddr_t)rep, M_NFSREQ);
			return (error);
		}

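		/*
		 * nb: in the lease code below, the nqnfs lease list is a
		 * doubly linked list ordered by expiry time, threaded through
		 * n_tnext/n_tprev and headed by nm_tnext/nm_tprev in the
		 * nfsmount; the mount structure itself, cast to
		 * (struct nfsnode *), serves as the list terminator, which is
		 * what the comparisons against (struct nfsnode *)nmp test.
		 */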
		/*
		 * For nqnfs, get any lease in reply
		 */
		if (nmp->nm_flag & NFSMNT_NQNFS) {
			nfsm_dissect(tl, u_long *, NFSX_UNSIGNED);
			if (*tl) {
				np = VTONFS(vp);
				nqlflag = fxdr_unsigned(int, *tl);
				nfsm_dissect(tl, u_long *, 4*NFSX_UNSIGNED);
				cachable = fxdr_unsigned(int, *tl++);
				reqtime += fxdr_unsigned(int, *tl++);
				if (reqtime > time.tv_sec) {
					if (np->n_tnext) {
						if (np->n_tnext == (struct nfsnode *)nmp)
							nmp->nm_tprev = np->n_tprev;
						else
							np->n_tnext->n_tprev = np->n_tprev;
						if (np->n_tprev == (struct nfsnode *)nmp)
							nmp->nm_tnext = np->n_tnext;
						else
							np->n_tprev->n_tnext = np->n_tnext;
						if (nqlflag == NQL_WRITE)
							np->n_flag |= NQNFSWRITE;
					} else if (nqlflag == NQL_READ)
						np->n_flag &= ~NQNFSWRITE;
					else
						np->n_flag |= NQNFSWRITE;
					if (cachable)
						np->n_flag &= ~NQNFSNONCACHE;
					else
						np->n_flag |= NQNFSNONCACHE;
					np->n_expiry = reqtime;
					fxdr_hyper(tl, &np->n_lrev);
					tp = nmp->nm_tprev;
					while (tp != (struct nfsnode *)nmp &&
					       tp->n_expiry > np->n_expiry)
						tp = tp->n_tprev;
					if (tp == (struct nfsnode *)nmp) {
						np->n_tnext = nmp->nm_tnext;
						nmp->nm_tnext = np;
					} else {
						np->n_tnext = tp->n_tnext;
						tp->n_tnext = np;
					}
					np->n_tprev = tp;
					if (np->n_tnext == (struct nfsnode *)nmp)
						nmp->nm_tprev = np;
					else
						np->n_tnext->n_tprev = np;
				}
			}
		}
		*mrp = mrep;
		*mdp = md;
		*dposp = dpos;
		m_freem(rep->r_mreq);
		FREE((caddr_t)rep, M_NFSREQ);
		return (0);
	}
	m_freem(mrep);
	m_freem(rep->r_mreq);
	free((caddr_t)rep, M_NFSREQ);
	error = EPROTONOSUPPORT;
nfsmout:
	return (error);
}

/*
 * Generate the rpc reply header
 * siz arg. is used to decide if adding a cluster is worthwhile
 */
nfs_rephead(siz, nd, err, cache, frev, mrq, mbp, bposp)
	int siz;
	struct nfsd *nd;
	int err;
	int cache;
	u_quad_t *frev;
	struct mbuf **mrq;
	struct mbuf **mbp;
	caddr_t *bposp;
{
	register u_long *tl;
	register struct mbuf *mreq;
	caddr_t bpos;
	struct mbuf *mb, *mb2;

	MGETHDR(mreq, M_WAIT, MT_DATA);
	mb = mreq;
	/*
	 * If this is a big reply, use a cluster else
	 * try and leave leading space for the lower level headers.
	 */
	siz += RPC_REPLYSIZ;
	if (siz >= MINCLSIZE) {
		MCLGET(mreq, M_WAIT);
	} else
		mreq->m_data += max_hdr;
	tl = mtod(mreq, u_long *);
	mreq->m_len = 6*NFSX_UNSIGNED;
	bpos = ((caddr_t)tl)+mreq->m_len;
	*tl++ = nd->nd_retxid;
	*tl++ = rpc_reply;
	if (err == ERPCMISMATCH || err == NQNFS_AUTHERR) {
		*tl++ = rpc_msgdenied;
		if (err == NQNFS_AUTHERR) {
			*tl++ = rpc_autherr;
			*tl = rpc_rejectedcred;
			mreq->m_len -= NFSX_UNSIGNED;
			bpos -= NFSX_UNSIGNED;
		} else {
			*tl++ = rpc_mismatch;
			*tl++ = txdr_unsigned(2);
			*tl = txdr_unsigned(2);
		}
	} else {
		*tl++ = rpc_msgaccepted;
		*tl++ = 0;
		*tl++ = 0;
		switch (err) {
		case EPROGUNAVAIL:
			*tl = txdr_unsigned(RPC_PROGUNAVAIL);
			break;
		case EPROGMISMATCH:
			*tl = txdr_unsigned(RPC_PROGMISMATCH);
			nfsm_build(tl, u_long *, 2*NFSX_UNSIGNED);
			*tl++ = txdr_unsigned(2);
			*tl = txdr_unsigned(2);		/* someday 3 */
			break;
		case EPROCUNAVAIL:
			*tl = txdr_unsigned(RPC_PROCUNAVAIL);
			break;
		default:
			*tl = 0;
			if (err != VNOVAL) {
				nfsm_build(tl, u_long *, NFSX_UNSIGNED);
				if (err)
					*tl = txdr_unsigned(nfsrv_errmap[err - 1]);
				else
					*tl = 0;
			}
			break;
		};
	}

	/*
	 * For nqnfs, piggyback lease as requested.
	 */
	if (nd->nd_nqlflag != NQL_NOVAL && err == 0) {
		if (nd->nd_nqlflag) {
			nfsm_build(tl, u_long *, 5*NFSX_UNSIGNED);
			*tl++ = txdr_unsigned(nd->nd_nqlflag);
			*tl++ = txdr_unsigned(cache);
			*tl++ = txdr_unsigned(nd->nd_duration);
			txdr_hyper(frev, tl);
		} else {
			if (nd->nd_nqlflag != 0)
				panic("nqreph");
			nfsm_build(tl, u_long *, NFSX_UNSIGNED);
			*tl = 0;
		}
	}
	*mrq = mreq;
	*mbp = mb;
	*bposp = bpos;
	if (err != 0 && err != VNOVAL)
		nfsstats.srvrpc_errs++;
	return (0);
}
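
/*
 * nb: nfs_timer() below computes its retransmit threshold as the current
 * rto for the request type (NFS_RTO, or nm_timeo for dumb timer mounts),
 * scaled by nfs_backoff[nm_timeouts - 1] once timeouts have occurred, and
 * gives up on a request (R_SOFTTERM) after r_retry retransmits; soft
 * mounts set r_retry from nm_retry, others effectively retry forever.
 */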

/*
 * Nfs timer routine
 * Scan the nfsreq list and retransmit any requests that have timed out
 * To avoid retransmission attempts on STREAM sockets (in the future) make
 * sure to set the r_retry field to 0 (implies nm_retry == 0).
 */
nfs_timer()
{
	register struct nfsreq *rep;
	register struct mbuf *m;
	register struct socket *so;
	register struct nfsmount *nmp;
	register int timeo;
	static long lasttime = 0;
	int s, error;

	s = splnet();
	for (rep = nfsreqh.r_next; rep != &nfsreqh; rep = rep->r_next) {
		nmp = rep->r_nmp;
		if (rep->r_mrep || (rep->r_flags & R_SOFTTERM))
			continue;
		if (nfs_sigintr(nmp, rep, rep->r_procp)) {
			rep->r_flags |= R_SOFTTERM;
			continue;
		}
		if (rep->r_rtt >= 0) {
			rep->r_rtt++;
			if (nmp->nm_flag & NFSMNT_DUMBTIMR)
				timeo = nmp->nm_timeo;
			else
				timeo = NFS_RTO(nmp, proct[rep->r_procnum]);
			if (nmp->nm_timeouts > 0)
				timeo *= nfs_backoff[nmp->nm_timeouts - 1];
			if (rep->r_rtt <= timeo)
				continue;
			if (nmp->nm_timeouts < 8)
				nmp->nm_timeouts++;
		}
		/*
		 * Check for server not responding
		 */
		if ((rep->r_flags & R_TPRINTFMSG) == 0 &&
		     rep->r_rexmit > nmp->nm_deadthresh) {
			nfs_msg(rep->r_procp,
			    nmp->nm_mountp->mnt_stat.f_mntfromname,
			    "not responding");
			rep->r_flags |= R_TPRINTFMSG;
		}
		if (rep->r_rexmit >= rep->r_retry) {	/* too many */
			nfsstats.rpctimeouts++;
			rep->r_flags |= R_SOFTTERM;
			continue;
		}
		if (nmp->nm_sotype != SOCK_DGRAM) {
			if (++rep->r_rexmit > NFS_MAXREXMIT)
				rep->r_rexmit = NFS_MAXREXMIT;
			continue;
		}
		if ((so = nmp->nm_so) == NULL)
			continue;

		/*
		 * If there is enough space and the window allows..
		 *	Resend it
		 * Set r_rtt to -1 in case we fail to send it now.
		 */
		rep->r_rtt = -1;
		if (sbspace(&so->so_snd) >= rep->r_mreq->m_pkthdr.len &&
		   ((nmp->nm_flag & NFSMNT_DUMBTIMR) ||
		    (rep->r_flags & R_SENT) ||
		    nmp->nm_sent < nmp->nm_cwnd) &&
		   (m = m_copym(rep->r_mreq, 0, M_COPYALL, M_DONTWAIT))){
			if ((nmp->nm_flag & NFSMNT_NOCONN) == 0)
				error = (*so->so_proto->pr_usrreq)(so, PRU_SEND, m,
				    (struct mbuf *)0, (struct mbuf *)0);
			else
				error = (*so->so_proto->pr_usrreq)(so, PRU_SEND, m,
				    nmp->nm_nam, (struct mbuf *)0);
			if (error) {
				if (NFSIGNORE_SOERROR(nmp->nm_soflags, error))
					so->so_error = 0;
			} else {
				/*
				 * Iff first send, start timing
				 * else turn timing off, backoff timer
				 * and divide congestion window by 2.
				 */
				if (rep->r_flags & R_SENT) {
					rep->r_flags &= ~R_TIMING;
					if (++rep->r_rexmit > NFS_MAXREXMIT)
						rep->r_rexmit = NFS_MAXREXMIT;
					nmp->nm_cwnd >>= 1;
					if (nmp->nm_cwnd < NFS_CWNDSCALE)
						nmp->nm_cwnd = NFS_CWNDSCALE;
					nfsstats.rpcretries++;
				} else {
					rep->r_flags |= R_SENT;
					nmp->nm_sent += NFS_CWNDSCALE;
				}
				rep->r_rtt = 0;
			}
		}
	}

	/*
	 * Call the nqnfs server timer once a second to handle leases.
	 */
	if (lasttime != time.tv_sec) {
		lasttime = time.tv_sec;
		nqnfs_serverd();
	}
	splx(s);
	timeout(nfs_timer, (caddr_t)0, hz/NFS_HZ);
}

/*
 * Test for a termination condition pending on the process.
 * This is used for NFSMNT_INT mounts.
 */
nfs_sigintr(nmp, rep, p)
	struct nfsmount *nmp;
	struct nfsreq *rep;
	register struct proc *p;
{

	if (rep && (rep->r_flags & R_SOFTTERM))
		return (EINTR);
	if (!(nmp->nm_flag & NFSMNT_INT))
		return (0);
	if (p && p->p_sig && (((p->p_sig &~ p->p_sigmask) &~ p->p_sigignore) &
	    NFSINT_SIGMASK))
		return (EINTR);
	return (0);
}

/*
 * Lock a socket against others.
 * Necessary for STREAM sockets to ensure you get an entire rpc request/reply
 * and also to avoid race conditions between the processes with nfs requests
 * in progress when a reconnect is necessary.
 */
nfs_sndlock(flagp, rep)
	register int *flagp;
	struct nfsreq *rep;
{
	struct proc *p;

	if (rep)
		p = rep->r_procp;
	else
		p = (struct proc *)0;
	while (*flagp & NFSMNT_SNDLOCK) {
		if (nfs_sigintr(rep->r_nmp, rep, p))
			return (EINTR);
		*flagp |= NFSMNT_WANTSND;
		(void) tsleep((caddr_t)flagp, PZERO-1, "nfsndlck", 0);
	}
	*flagp |= NFSMNT_SNDLOCK;
	return (0);
}

/*
 * Unlock the stream socket for others.
 */
void
nfs_sndunlock(flagp)
	register int *flagp;
{

	if ((*flagp & NFSMNT_SNDLOCK) == 0)
		panic("nfs sndunlock");
	*flagp &= ~NFSMNT_SNDLOCK;
	if (*flagp & NFSMNT_WANTSND) {
		*flagp &= ~NFSMNT_WANTSND;
		wakeup((caddr_t)flagp);
	}
}

nfs_rcvlock(rep)
	register struct nfsreq *rep;
{
	register int *flagp = &rep->r_nmp->nm_flag;

	while (*flagp & NFSMNT_RCVLOCK) {
		if (nfs_sigintr(rep->r_nmp, rep, rep->r_procp))
			return (EINTR);
		*flagp |= NFSMNT_WANTRCV;
		(void) tsleep((caddr_t)flagp, PZERO-1, "nfsrcvlck", 0);
	}
	*flagp |= NFSMNT_RCVLOCK;
	return (0);
}

/*
 * Unlock the stream socket for others.
 */
void
nfs_rcvunlock(flagp)
	register int *flagp;
{

	if ((*flagp & NFSMNT_RCVLOCK) == 0)
		panic("nfs rcvunlock");
	*flagp &= ~NFSMNT_RCVLOCK;
	if (*flagp & NFSMNT_WANTRCV) {
		*flagp &= ~NFSMNT_WANTRCV;
		wakeup((caddr_t)flagp);
	}
}

/*
 * This function compares two net addresses by family and returns TRUE
 * if they are the same host.
 * If there is any doubt, return FALSE.
 * The AF_INET family is handled as a special case so that address mbufs
 * don't need to be saved to store "struct in_addr", which is only 4 bytes.
 */
nfs_netaddr_match(family, haddr, hmask, nam)
	int family;
	union nethostaddr *haddr;
	union nethostaddr *hmask;
	struct mbuf *nam;
{
	register struct sockaddr_in *inetaddr;
#ifdef ISO
	register struct sockaddr_iso *isoaddr1, *isoaddr2;
#endif

	switch (family) {
	case AF_INET:
		inetaddr = mtod(nam, struct sockaddr_in *);
		if (inetaddr->sin_family != AF_INET)
			return (0);
		if (hmask) {
			if ((inetaddr->sin_addr.s_addr & hmask->had_inetaddr) ==
			    (haddr->had_inetaddr & hmask->had_inetaddr))
				return (1);
		} else if (inetaddr->sin_addr.s_addr == haddr->had_inetaddr)
			return (1);
		break;
#ifdef ISO
	case AF_ISO:
		isoaddr1 = mtod(nam, struct sockaddr_iso *);
		if (isoaddr1->siso_family != AF_ISO)
			return (0);
		isoaddr2 = mtod(haddr->had_nam, struct sockaddr_iso *);
		if (isoaddr1->siso_nlen > 0 &&
		    isoaddr1->siso_nlen == isoaddr2->siso_nlen &&
		    SAME_ISOADDR(isoaddr1, isoaddr2))
			return (1);
		break;
#endif	/* ISO */
	default:
		break;
	};
	return (0);
}

/*
 * Build hash lists of net addresses and hang them off the mount point.
 * Called by ufs_mount() to set up the lists of export addresses.
 */
hang_addrlist(mp, argp)
	struct mount *mp;
	struct ufs_args *argp;
{
	register struct netaddrhash *np, **hnp;
	register int i;
	struct ufsmount *ump;
	struct sockaddr *saddr;
	struct mbuf *nam, *msk = (struct mbuf *)0;
	union nethostaddr netmsk;
	int error;

	if (error = sockargs(&nam, (caddr_t)argp->saddr, argp->slen,
	    MT_SONAME))
		return (error);
	saddr = mtod(nam, struct sockaddr *);
	ump = VFSTOUFS(mp);
	if (saddr->sa_family == AF_INET &&
	    ((struct sockaddr_in *)saddr)->sin_addr.s_addr == INADDR_ANY) {
		m_freem(nam);
		if (mp->mnt_flag & MNT_DEFEXPORTED)
			return (EPERM);
		np = &ump->um_defexported;
		np->neth_exflags = argp->exflags;
		np->neth_anon = argp->anon;
		np->neth_anon.cr_ref = 1;
		mp->mnt_flag |= MNT_DEFEXPORTED;
		return (0);
	}
	if (argp->msklen > 0) {
		if (error = sockargs(&msk, (caddr_t)argp->smask, argp->msklen,
		    MT_SONAME)) {
			m_freem(nam);
			return (error);
		}

		/*
		 * Scan all the hash lists to check against duplications.
		 * For the net list, try both masks to catch a subnet
		 * of another network.
		 */
		hnp = &ump->um_netaddr[NETMASK_HASH];
		np = *hnp;
		if (saddr->sa_family == AF_INET)
			netmsk.had_inetaddr =
			    mtod(msk, struct sockaddr_in *)->sin_addr.s_addr;
		else
			netmsk.had_nam = msk;
		while (np) {
			if (nfs_netaddr_match(np->neth_family, &np->neth_haddr,
			    &np->neth_hmask, nam) ||
			    nfs_netaddr_match(np->neth_family, &np->neth_haddr,
			    &netmsk, nam)) {
				m_freem(nam);
				m_freem(msk);
				return (EPERM);
			}
			np = np->neth_next;
		}
		for (i = 0; i < NETHASHSZ; i++) {
			np = ump->um_netaddr[i];
			while (np) {
				if (nfs_netaddr_match(np->neth_family,
				    &np->neth_haddr, &netmsk, nam)) {
					m_freem(nam);
					m_freem(msk);
					return (EPERM);
				}
				np = np->neth_next;
			}
		}
	} else {
		hnp = &ump->um_netaddr[NETADDRHASH(saddr)];
		np = ump->um_netaddr[NETMASK_HASH];
		while (np) {
			if (nfs_netaddr_match(np->neth_family, &np->neth_haddr,
			    &np->neth_hmask, nam)) {
				m_freem(nam);
				return (EPERM);
			}
			np = np->neth_next;
		}
		np = *hnp;
		while (np) {
			if (nfs_netaddr_match(np->neth_family, &np->neth_haddr,
			    (union nethostaddr *)0, nam)) {
				m_freem(nam);
				return (EPERM);
			}
			np = np->neth_next;
		}
	}
	np = (struct netaddrhash *) malloc(sizeof(struct netaddrhash),
	    M_NETADDR, M_WAITOK);
	np->neth_family = saddr->sa_family;
	if (saddr->sa_family == AF_INET) {
		np->neth_inetaddr = ((struct sockaddr_in *)saddr)->sin_addr.s_addr;
		m_freem(nam);
		if (msk) {
			np->neth_inetmask = netmsk.had_inetaddr;
			m_freem(msk);
			if (np->neth_inetaddr &~ np->neth_inetmask)
				return (EPERM);
		} else
			np->neth_inetmask = 0xffffffff;
	} else {
		np->neth_nam = nam;
		np->neth_msk = msk;
	}
	np->neth_exflags = argp->exflags;
	np->neth_anon = argp->anon;
	np->neth_anon.cr_ref = 1;
	np->neth_next = *hnp;
	*hnp = np;
	return (0);
}

/*
 * Free the net address hash lists that are hanging off the mount points.
 */
free_addrlist(ump)
	struct ufsmount *ump;
{
	register struct netaddrhash *np, *onp;
	register int i;

	for (i = 0; i <= NETHASHSZ; i++) {
		np = ump->um_netaddr[i];
		ump->um_netaddr[i] = (struct netaddrhash *)0;
		while (np) {
			onp = np;
			np = np->neth_next;
			if (onp->neth_family != AF_INET) {
				m_freem(onp->neth_nam);
				m_freem(onp->neth_msk);
			}
			free((caddr_t)onp, M_NETADDR);
		}
	}
}

/*
 * Generate a hash code for an iso host address. Used by NETADDRHASH() for
 * iso addresses.
 */
iso_addrhash(saddr)
	struct sockaddr *saddr;
{
#ifdef ISO
	register struct sockaddr_iso *siso;
	register int i, sum;

	siso = (struct sockaddr_iso *)saddr;	/* XXX: was used uninitialized */
	sum = 0;
	for (i = 0; i < siso->siso_nlen; i++)
		sum += siso->siso_data[i];
	return (sum & (NETHASHSZ - 1));
#else
	return (0);
#endif	/* ISO */
}
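
/*
 * nb: alignment matters because the xdr dissect/build macros cast mbuf
 * data pointers to u_long * and dereference them directly; nfs_realign()
 * below copies any mbuf whose length is not a multiple of 4, or whose
 * m_data is not longword aligned, into freshly reset mbuf storage so
 * that those casts stay safe.
 */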

/*
 * Check for badly aligned mbuf data areas and
 * realign data in an mbuf list by copying the data areas up, as required.
 */
void
nfs_realign(m, hsiz)
	register struct mbuf *m;
	int hsiz;
{
	register struct mbuf *m2;
	register int siz, mlen, olen;
	register caddr_t tcp, fcp;
	struct mbuf *mnew;

	while (m) {
	    /*
	     * This never happens for UDP, rarely happens for TCP
	     * but frequently happens for iso transport.
	     */
	    if ((m->m_len & 0x3) || (mtod(m, int) & 0x3)) {
		olen = m->m_len;
		fcp = mtod(m, caddr_t);
		m->m_flags &= ~M_PKTHDR;
		if (m->m_flags & M_EXT)
			m->m_data = m->m_ext.ext_buf;
		else
			m->m_data = m->m_dat;
		m->m_len = 0;
		tcp = mtod(m, caddr_t);
		mnew = m;
		m2 = m->m_next;

		/*
		 * If possible, only put the first invariant part
		 * of the RPC header in the first mbuf.
		 */
		if (olen <= hsiz)
			mlen = hsiz;
		else
			mlen = M_TRAILINGSPACE(m);

		/*
		 * Loop through the mbuf list consolidating data.
		 */
		while (m) {
			while (olen > 0) {
				if (mlen == 0) {
					m2->m_flags &= ~M_PKTHDR;
					if (m2->m_flags & M_EXT)
						m2->m_data = m2->m_ext.ext_buf;
					else
						m2->m_data = m2->m_dat;
					m2->m_len = 0;
					mlen = M_TRAILINGSPACE(m2);
					tcp = mtod(m2, caddr_t);
					mnew = m2;
					m2 = m2->m_next;
				}
				siz = MIN(mlen, olen);
				if (tcp != fcp)
					bcopy(fcp, tcp, siz);
				mnew->m_len += siz;
				mlen -= siz;
				olen -= siz;
				tcp += siz;
				fcp += siz;
			}
			m = m->m_next;
			if (m) {
				olen = m->m_len;
				fcp = mtod(m, caddr_t);
			}
		}

		/*
		 * Finally, set m_len == 0 for any trailing mbufs that have
		 * been copied out of.
		 */
		while (m2) {
			m2->m_len = 0;
			m2 = m2->m_next;
		}
		return;
	    }
	    m = m->m_next;
	}
}

/*
 * Socket upcall routine for the nfsd sockets.
 * The caddr_t arg is a pointer to the "struct nfssvc_sock".
 * Essentially do as much as possible non-blocking, else punt and it will
 * be called with M_WAIT from an nfsd.
 */
void
nfsrv_rcv(so, arg, waitflag)
	struct socket *so;
	caddr_t arg;
	int waitflag;
{
	register struct nfssvc_sock *slp = (struct nfssvc_sock *)arg;
	register struct mbuf *m;
	struct mbuf *mp, *nam;
	struct uio auio;
	int flags, error;

	if ((slp->ns_flag & SLP_VALID) == 0)
		return;
#ifdef notdef
	/*
	 * Define this to test for nfsds handling this under heavy load.
	 */
	if (waitflag == M_DONTWAIT) {
		slp->ns_flag |= SLP_NEEDQ; goto dorecs;
	}
#endif
	auio.uio_procp = NULL;
	if (so->so_type == SOCK_STREAM) {
		/*
		 * If there are already records on the queue, defer soreceive()
		 * to an nfsd so that there is feedback to the TCP layer that
		 * the nfs servers are heavily loaded.
		 */
		if (slp->ns_rec && waitflag == M_DONTWAIT) {
			slp->ns_flag |= SLP_NEEDQ;
			goto dorecs;
		}

		/*
		 * Do soreceive().
		 */
		auio.uio_resid = 1000000000;
		flags = MSG_DONTWAIT;
		error = soreceive(so, &nam, &auio, &mp, (struct mbuf **)0, &flags);
		if (error || mp == (struct mbuf *)0) {
			if (error == EWOULDBLOCK)
				slp->ns_flag |= SLP_NEEDQ;
			else
				slp->ns_flag |= SLP_DISCONN;
			goto dorecs;
		}
		m = mp;
		if (slp->ns_rawend) {
			slp->ns_rawend->m_next = m;
			slp->ns_cc += 1000000000 - auio.uio_resid;
		} else {
			slp->ns_raw = m;
			slp->ns_cc = 1000000000 - auio.uio_resid;
		}
		while (m->m_next)
			m = m->m_next;
		slp->ns_rawend = m;

		/*
		 * Now try and parse record(s) out of the raw stream data.
		 */
		if (error = nfsrv_getstream(slp, waitflag)) {
			if (error == EPERM)
				slp->ns_flag |= SLP_DISCONN;
			else
				slp->ns_flag |= SLP_NEEDQ;
		}
	} else {
		do {
			auio.uio_resid = 1000000000;
			flags = MSG_DONTWAIT;
			error = soreceive(so, &nam, &auio, &mp,
				(struct mbuf **)0, &flags);
			if (mp) {
				nfs_realign(mp, 10 * NFSX_UNSIGNED);
				if (nam) {
					m = nam;
					m->m_next = mp;
				} else
					m = mp;
				if (slp->ns_recend)
					slp->ns_recend->m_nextpkt = m;
				else
					slp->ns_rec = m;
				slp->ns_recend = m;
				m->m_nextpkt = (struct mbuf *)0;
			}
			if (error) {
				if ((so->so_proto->pr_flags & PR_CONNREQUIRED)
					&& error != EWOULDBLOCK) {
					slp->ns_flag |= SLP_DISCONN;
					goto dorecs;
				}
			}
		} while (mp);
	}

	/*
	 * Now try and process the request records, non-blocking.
	 */
dorecs:
	if (waitflag == M_DONTWAIT &&
		(slp->ns_rec || (slp->ns_flag & (SLP_NEEDQ | SLP_DISCONN))))
		nfsrv_wakenfsd(slp);
}
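
/*
 * nb: for stream sockets nfsrv_rcv() above only appends the raw data to
 * the ns_raw mbuf chain (ns_cc counts the bytes buffered there, ns_rawend
 * points at the tail); nfsrv_getstream() below consumes ns_raw, using
 * ns_reclen to hold the length of the record currently being assembled,
 * and queues each completed record on ns_rec/ns_recend (linked through
 * m_nextpkt) for the nfsds to pick up via nfsrv_dorec().
 */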

/*
 * Try and extract an RPC request from the mbuf data list received on a
 * stream socket. The "waitflag" argument indicates whether or not it
 * can sleep.
 */
nfsrv_getstream(slp, waitflag)
	register struct nfssvc_sock *slp;
	int waitflag;
{
	register struct mbuf *m;
	register char *cp1, *cp2;
	register int len;
	struct mbuf *om, *m2, *recm;
	u_long recmark;

	if (slp->ns_flag & SLP_GETSTREAM)
		panic("nfs getstream");
	slp->ns_flag |= SLP_GETSTREAM;
	for (;;) {
	    if (slp->ns_reclen == 0) {
		if (slp->ns_cc < NFSX_UNSIGNED) {
			slp->ns_flag &= ~SLP_GETSTREAM;
			return (0);
		}
		m = slp->ns_raw;
		if (m->m_len >= NFSX_UNSIGNED) {
			bcopy(mtod(m, caddr_t), (caddr_t)&recmark, NFSX_UNSIGNED);
			m->m_data += NFSX_UNSIGNED;
			m->m_len -= NFSX_UNSIGNED;
		} else {
			cp1 = (caddr_t)&recmark;
			cp2 = mtod(m, caddr_t);
			while (cp1 < ((caddr_t)&recmark) + NFSX_UNSIGNED) {
				while (m->m_len == 0) {
					m = m->m_next;
					cp2 = mtod(m, caddr_t);
				}
				*cp1++ = *cp2++;
				m->m_data++;
				m->m_len--;
			}
		}
		slp->ns_cc -= NFSX_UNSIGNED;
		slp->ns_reclen = ntohl(recmark) & ~0x80000000;
		if (slp->ns_reclen < NFS_MINPACKET || slp->ns_reclen > NFS_MAXPACKET) {
			slp->ns_flag &= ~SLP_GETSTREAM;
			return (EPERM);
		}
	    }

	    /*
	     * Now get the record part.
	     */
	    if (slp->ns_cc == slp->ns_reclen) {
		recm = slp->ns_raw;
		slp->ns_raw = slp->ns_rawend = (struct mbuf *)0;
		slp->ns_cc = slp->ns_reclen = 0;
	    } else if (slp->ns_cc > slp->ns_reclen) {
		len = 0;
		m = slp->ns_raw;
		om = (struct mbuf *)0;
		while (len < slp->ns_reclen) {
			if ((len + m->m_len) > slp->ns_reclen) {
				m2 = m_copym(m, 0, slp->ns_reclen - len,
					waitflag);
				if (m2) {
					if (om) {
						om->m_next = m2;
						recm = slp->ns_raw;
					} else
						recm = m2;
					m->m_data += slp->ns_reclen - len;
					m->m_len -= slp->ns_reclen - len;
					len = slp->ns_reclen;
				} else {
					slp->ns_flag &= ~SLP_GETSTREAM;
					return (EWOULDBLOCK);
				}
			} else if ((len + m->m_len) == slp->ns_reclen) {
				om = m;
				len += m->m_len;
				m = m->m_next;
				recm = slp->ns_raw;
				om->m_next = (struct mbuf *)0;
			} else {
				om = m;
				len += m->m_len;
				m = m->m_next;
			}
		}
		slp->ns_raw = m;
		slp->ns_cc -= len;
		slp->ns_reclen = 0;
	    } else {
		slp->ns_flag &= ~SLP_GETSTREAM;
		return (0);
	    }
	    nfs_realign(recm, 10 * NFSX_UNSIGNED);
	    if (slp->ns_recend)
		slp->ns_recend->m_nextpkt = recm;
	    else
		slp->ns_rec = recm;
	    slp->ns_recend = recm;
	}
}

/*
 * Parse an RPC header.
 */
nfsrv_dorec(slp, nd)
	register struct nfssvc_sock *slp;
	register struct nfsd *nd;
{
	register struct mbuf *m;
	int error;

	if ((slp->ns_flag & SLP_VALID) == 0 ||
	    (m = slp->ns_rec) == (struct mbuf *)0)
		return (ENOBUFS);
	if (slp->ns_rec = m->m_nextpkt)
		m->m_nextpkt = (struct mbuf *)0;
	else
		slp->ns_recend = (struct mbuf *)0;
	if (m->m_type == MT_SONAME) {
		nd->nd_nam = m;
		nd->nd_md = nd->nd_mrep = m->m_next;
		m->m_next = (struct mbuf *)0;
	} else {
		nd->nd_nam = (struct mbuf *)0;
		nd->nd_md = nd->nd_mrep = m;
	}
	nd->nd_dpos = mtod(nd->nd_md, caddr_t);
	if (error = nfs_getreq(nd, TRUE)) {
		m_freem(nd->nd_nam);
		return (error);
	}
	return (0);
}
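
/*
 * nb: the 10 words dissected at the start of nfs_getreq() below are the
 * fixed part of an RPC call header (RFC 1057): xid, CALL, rpc version (2),
 * program, version, procedure, credential flavor and length, and the
 * first two words of the credential body.  The has_header == FALSE case
 * starts two words later, at the rpc version.
 */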

/*
 * Parse an RPC request
 * - verify it
 * - fill in the cred struct.
 */
nfs_getreq(nd, has_header)
	register struct nfsd *nd;
	int has_header;
{
	register int len, i;
	register u_long *tl;
	register long t1;
	struct uio uio;
	struct iovec iov;
	caddr_t dpos, cp2;
	u_long nfsvers, auth_type;
	int error = 0, nqnfs = 0;
	struct mbuf *mrep, *md;

	mrep = nd->nd_mrep;
	md = nd->nd_md;
	dpos = nd->nd_dpos;
	if (has_header) {
		nfsm_dissect(tl, u_long *, 10*NFSX_UNSIGNED);
		nd->nd_retxid = *tl++;
		if (*tl++ != rpc_call) {
			m_freem(mrep);
			return (EBADRPC);
		}
	} else {
		nfsm_dissect(tl, u_long *, 8*NFSX_UNSIGNED);
	}
	nd->nd_repstat = 0;
	if (*tl++ != rpc_vers) {
		nd->nd_repstat = ERPCMISMATCH;
		nd->nd_procnum = NFSPROC_NOOP;
		return (0);
	}
	nfsvers = nfs_vers;
	if (*tl != nfs_prog) {
		if (*tl == nqnfs_prog) {
			nqnfs++;
			nfsvers = nqnfs_vers;
		} else {
			nd->nd_repstat = EPROGUNAVAIL;
			nd->nd_procnum = NFSPROC_NOOP;
			return (0);
		}
	}
	tl++;
	if (*tl++ != nfsvers) {
		nd->nd_repstat = EPROGMISMATCH;
		nd->nd_procnum = NFSPROC_NOOP;
		return (0);
	}
	nd->nd_procnum = fxdr_unsigned(u_long, *tl++);
	if (nd->nd_procnum == NFSPROC_NULL)
		return (0);
	if (nd->nd_procnum >= NFS_NPROCS ||
		(!nqnfs && nd->nd_procnum > NFSPROC_STATFS) ||
		(*tl != rpc_auth_unix && *tl != rpc_auth_kerb)) {
		nd->nd_repstat = EPROCUNAVAIL;
		nd->nd_procnum = NFSPROC_NOOP;
		return (0);
	}
	auth_type = *tl++;
	len = fxdr_unsigned(int, *tl++);
	if (len < 0 || len > RPCAUTH_MAXSIZ) {
		m_freem(mrep);
		return (EBADRPC);
	}

	/*
	 * Handle auth_unix or auth_kerb.
	 */
	if (auth_type == rpc_auth_unix) {
		len = fxdr_unsigned(int, *++tl);
		if (len < 0 || len > NFS_MAXNAMLEN) {
			m_freem(mrep);
			return (EBADRPC);
		}
		nfsm_adv(nfsm_rndup(len));
		nfsm_dissect(tl, u_long *, 3*NFSX_UNSIGNED);
		nd->nd_cr.cr_uid = fxdr_unsigned(uid_t, *tl++);
		nd->nd_cr.cr_gid = fxdr_unsigned(gid_t, *tl++);
		len = fxdr_unsigned(int, *tl);
		if (len < 0 || len > RPCAUTH_UNIXGIDS) {
			m_freem(mrep);
			return (EBADRPC);
		}
		nfsm_dissect(tl, u_long *, (len + 2)*NFSX_UNSIGNED);
		for (i = 1; i <= len; i++)
			if (i < NGROUPS)
				nd->nd_cr.cr_groups[i] = fxdr_unsigned(gid_t, *tl++);
			else
				tl++;
		nd->nd_cr.cr_ngroups = (len >= NGROUPS) ? NGROUPS : (len + 1);
	} else if (auth_type == rpc_auth_kerb) {
		nd->nd_cr.cr_uid = fxdr_unsigned(uid_t, *tl++);
		nd->nd_authlen = fxdr_unsigned(int, *tl);
		iov.iov_len = uio.uio_resid = nfsm_rndup(nd->nd_authlen);
		if (uio.uio_resid > (len - 2*NFSX_UNSIGNED)) {
			m_freem(mrep);
			return (EBADRPC);
		}
		uio.uio_offset = 0;
		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_segflg = UIO_SYSSPACE;
		iov.iov_base = (caddr_t)nd->nd_authstr;
		nfsm_mtouio(&uio, uio.uio_resid);
		nfsm_dissect(tl, u_long *, 2*NFSX_UNSIGNED);
		nd->nd_flag |= NFSD_NEEDAUTH;
	}

	/*
	 * Do we have any use for the verifier.
	 * According to the "Remote Procedure Call Protocol Spec." it
	 * should be AUTH_NULL, but some clients make it AUTH_UNIX?
	 * For now, just skip over it
	 */
	len = fxdr_unsigned(int, *++tl);
	if (len < 0 || len > RPCAUTH_MAXSIZ) {
		m_freem(mrep);
		return (EBADRPC);
	}
	if (len > 0) {
		nfsm_adv(nfsm_rndup(len));
	}

	/*
	 * For nqnfs, get piggybacked lease request.
	 */
	if (nqnfs && nd->nd_procnum != NQNFSPROC_EVICTED) {
		nfsm_dissect(tl, u_long *, NFSX_UNSIGNED);
		nd->nd_nqlflag = fxdr_unsigned(int, *tl);
		if (nd->nd_nqlflag) {
			nfsm_dissect(tl, u_long *, NFSX_UNSIGNED);
			nd->nd_duration = fxdr_unsigned(int, *tl);
		} else
			nd->nd_duration = NQ_MINLEASE;
	} else {
		nd->nd_nqlflag = NQL_NOVAL;
		nd->nd_duration = NQ_MINLEASE;
	}
	nd->nd_md = md;
	nd->nd_dpos = dpos;
	return (0);
nfsmout:
	return (error);
}

/*
 * Search for a sleeping nfsd and wake it up.
 * SIDE EFFECT: If none found, set NFSD_CHECKSLP flag, so that one of the
 * running nfsds will go look for the work in the nfssvc_sock list.
 */
void
nfsrv_wakenfsd(slp)
	struct nfssvc_sock *slp;
{
	register struct nfsd *nd = nfsd_head.nd_next;

	if ((slp->ns_flag & SLP_VALID) == 0)
		return;
	while (nd != (struct nfsd *)&nfsd_head) {
		if (nd->nd_flag & NFSD_WAITING) {
			nd->nd_flag &= ~NFSD_WAITING;
			if (nd->nd_slp)
				panic("nfsd wakeup");
			slp->ns_sref++;
			nd->nd_slp = slp;
			wakeup((caddr_t)nd);
			return;
		}
		nd = nd->nd_next;
	}
	slp->ns_flag |= SLP_DOREC;
	nfsd_head.nd_flag |= NFSD_CHECKSLP;
}

nfs_msg(p, server, msg)
	struct proc *p;
	char *server, *msg;
{
	tpr_t tpr;

	if (p)
		tpr = tprintf_open(p);
	else
		tpr = NULL;
	tprintf(tpr, "nfs server %s: %s\n", server, msg);
	tprintf_close(tpr);
}