/*-
 * Copyright (c) 2009, Sun Microsystems, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 * - Redistributions of source code must retain the above copyright notice,
 *   this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright notice,
 *   this list of conditions and the following disclaimer in the documentation
 *   and/or other materials provided with the distribution.
 * - Neither the name of Sun Microsystems, Inc. nor the names of its
 *   contributors may be used to endorse or promote products derived
 *   from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * @(#)clnt_tcp.c 1.37 87/10/05 Copyr 1984 Sun Micro; 2.2 88/08/01 4.0 RPCSRC
 * @(#)clnt_vc.c 1.19 89/03/16 Copyr 1988 Sun Micro
 * $NetBSD: clnt_vc.c,v 1.4 2000/07/14 08:40:42 fvdl Exp $
 * $FreeBSD: src/lib/libc/rpc/clnt_vc.c,v 1.20 2006/09/09 22:18:57 mbr Exp $
 */

/*
 * clnt_tcp.c, Implements a TCP/IP based, client side RPC.
 *
 * Copyright (C) 1984, Sun Microsystems, Inc.
 *
 * TCP based RPC supports 'batched calls'.
 * A sequence of calls may be batched up in a send buffer.  The rpc call
 * returns immediately to the client even though the call was not necessarily
 * sent.  Batching occurs if the results' xdr routine is NULL (0) AND
 * the rpc timeout value is zero (see clnt.h, rpc).
 *
 * Clients should NOT casually batch calls that in fact return results; that
 * is, the server side should be aware that a call is batched and not produce
 * any return message.  Batched calls that produce many result messages can
 * deadlock (netlock) the client and the server....
 *
 * Now go hang yourself.
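 *
 * Illustrative sketch of a batched call (editor's addition; LOGPROC,
 * xdr_log_entry and entry are hypothetical names): pass a NULL result
 * routine and a zero timeout to queue the call, then flush the pipeline
 * later with an ordinary, non-batched call:
 *
 *      static struct timeval ZERO = { 0, 0 };
 *      struct timeval wait = { 25, 0 };
 *
 *      (void) clnt_call(clnt, LOGPROC, (xdrproc_t)xdr_log_entry, &entry,
 *          (xdrproc_t)NULL, NULL, ZERO);         <- queued, not yet sent
 *      (void) clnt_call(clnt, NULLPROC, (xdrproc_t)xdr_void, NULL,
 *          (xdrproc_t)xdr_void, NULL, wait);     <- forces the batch out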
 */

#include "namespace.h"
#include "reentrant.h"
#include <sys/types.h>
#include <sys/poll.h>
#include <sys/syslog.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <sys/uio.h>

#include <arpa/inet.h>
#include <assert.h>
#include <err.h>
#include <errno.h>
#include <netdb.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <signal.h>

#include <rpc/rpc.h>
#include "un-namespace.h"
#include "rpc_com.h"
#include "mt_misc.h"

#define MCALL_MSG_SIZE 24

struct cmessage {
        struct cmsghdr cmsg;
        struct cmsgcred cmcred;
};

static void clnt_vc_abort(CLIENT *);
static enum clnt_stat clnt_vc_call(CLIENT *, rpcproc_t, xdrproc_t, void *,
    xdrproc_t, void *, struct timeval);
static bool_t clnt_vc_control(CLIENT *, u_int, void *);
static void clnt_vc_destroy(CLIENT *);
static bool_t clnt_vc_freeres(CLIENT *, xdrproc_t, void *);
static void clnt_vc_geterr(CLIENT *, struct rpc_err *);
static struct clnt_ops *clnt_vc_ops(void);
static int __msgread(int, void *, size_t);
static int __msgwrite(int, void *, size_t);
static int read_vc(void *, void *, int);
static bool_t time_not_ok(struct timeval *);
static int write_vc(void *, void *, int);

struct ct_data {
        int             ct_fd;          /* connection's fd */
        bool_t          ct_closeit;     /* close it on destroy */
        struct timeval  ct_wait;        /* wait interval in milliseconds */
        bool_t          ct_waitset;     /* wait set by clnt_control? */
        struct netbuf   ct_addr;        /* remote addr */
        struct rpc_err  ct_error;
        union {
                char    ct_mcallc[MCALL_MSG_SIZE]; /* marshalled callmsg */
                u_int32_t ct_mcalli;
        } ct_u;
        u_int           ct_mpos;        /* pos after marshal */
        XDR             ct_xdrs;        /* XDR stream */
};

/*
 * This machinery implements per-fd locks for MT-safety.  It is not
 * sufficient to do per-CLIENT handle locks for MT-safety because a
 * user may create more than one CLIENT handle with the same fd behind
 * it.  Therefore, we allocate an array of flags (vc_fd_locks), protected
 * by the clnt_fd_lock mutex, and an array (vc_cv) of condition variables
 * similarly protected.  vc_fd_locks[fd] == 1 => a call is active on some
 * CLIENT handle created for that fd.
 * The current implementation holds locks across the entire RPC and reply.
 * Yes, this is silly, and as soon as this code is proven to work, this
 * should be the first thing fixed.  One step at a time.
 */
static int *vc_fd_locks;
static cond_t *vc_cv;
#define release_fd_lock(fd, mask) {                     \
        mutex_lock(&clnt_fd_lock);                      \
        vc_fd_locks[fd] = 0;                            \
        mutex_unlock(&clnt_fd_lock);                    \
        thr_sigsetmask(SIG_SETMASK, &(mask), NULL);     \
        cond_signal(&vc_cv[fd]);                        \
}

static const char clnt_vc_errstr[] = "%s : %s";
static const char clnt_vc_str[] = "clnt_vc_create";
static const char __no_mem_str[] = "out of memory";

/*
 * Create a client handle for a connection.
 * Default options are set, which the user can change using clnt_control().
 * The rpc/vc package does buffering similar to stdio, so the client
 * must pick send and receive buffer sizes; 0 => use the default.
 * NB: fd is copied into a private area.
 * NB: The rpch->cl_auth is set to null authentication.  The caller may wish
 * to set it to something more useful.
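 *
 * Illustrative usage sketch (editor's addition; EXAMPLE_PROG and
 * EXAMPLE_VERS are hypothetical, error handling omitted):
 *
 *      struct sockaddr_in sin;        <- filled in with the server's address
 *      struct netbuf svcaddr;
 *      CLIENT *clnt;
 *      int fd;
 *
 *      fd = socket(AF_INET, SOCK_STREAM, 0);
 *      svcaddr.buf = &sin;
 *      svcaddr.len = svcaddr.maxlen = sizeof (sin);
 *      clnt = clnt_vc_create(fd, &svcaddr, EXAMPLE_PROG, EXAMPLE_VERS, 0, 0);
 *
 * An unconnected fd is connected to raddr by clnt_vc_create() itself;
 * buffer sizes of 0 select the transport defaults.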
 *
 * fd should be an open socket
 */
CLIENT *
clnt_vc_create(int fd,                  /* open file descriptor */
    const struct netbuf *raddr,         /* server's address */
    const rpcprog_t prog,               /* program number */
    const rpcvers_t vers,               /* version number */
    u_int sendsz,                       /* send buffer size */
    u_int recvsz)                       /* receive buffer size */
{
        CLIENT *cl;                     /* client handle */
        struct ct_data *ct = NULL;      /* handle's private data */
        struct timeval now;
        struct rpc_msg call_msg;
        static u_int32_t disrupt;
        sigset_t mask;
        sigset_t newmask;
        struct sockaddr_storage ss;
        socklen_t slen;
        struct __rpc_sockinfo si;

        if (disrupt == 0)
                disrupt = (u_int32_t)(long)raddr;

        cl = (CLIENT *)mem_alloc(sizeof (*cl));
        ct = (struct ct_data *)mem_alloc(sizeof (*ct));
        if ((cl == NULL) || (ct == NULL)) {
                syslog(LOG_ERR, clnt_vc_errstr, clnt_vc_str, __no_mem_str);
                rpc_createerr.cf_stat = RPC_SYSTEMERROR;
                rpc_createerr.cf_error.re_errno = errno;
                goto err;
        }
        ct->ct_addr.buf = NULL;
        sigfillset(&newmask);
        thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
        mutex_lock(&clnt_fd_lock);
        if (vc_fd_locks == NULL) {
                int cv_allocsz, fd_allocsz;
                int dtbsize = __rpc_dtbsize();

                fd_allocsz = dtbsize * sizeof (int);
                vc_fd_locks = (int *) mem_alloc(fd_allocsz);
                if (vc_fd_locks == NULL) {
                        mutex_unlock(&clnt_fd_lock);
                        thr_sigsetmask(SIG_SETMASK, &(mask), NULL);
                        goto err;
                } else
                        memset(vc_fd_locks, '\0', fd_allocsz);

                assert(vc_cv == NULL);
                cv_allocsz = dtbsize * sizeof (cond_t);
                vc_cv = (cond_t *) mem_alloc(cv_allocsz);
                if (vc_cv == NULL) {
                        mem_free(vc_fd_locks, fd_allocsz);
                        vc_fd_locks = NULL;
                        mutex_unlock(&clnt_fd_lock);
                        thr_sigsetmask(SIG_SETMASK, &(mask), NULL);
                        goto err;
                } else {
                        int i;

                        for (i = 0; i < dtbsize; i++)
                                cond_init(&vc_cv[i], 0, NULL);
                }
        } else
                assert(vc_cv != NULL);

        /*
         * XXX - fvdl connecting while holding a mutex?
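         * (If the fd is not connected yet, i.e. _getpeername() fails with
         * ENOTCONN, the code below connects it to raddr before proceeding.)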
         */
        slen = sizeof ss;
        if (_getpeername(fd, (struct sockaddr *)(void *)&ss, &slen) < 0) {
                if (errno != ENOTCONN) {
                        rpc_createerr.cf_stat = RPC_SYSTEMERROR;
                        rpc_createerr.cf_error.re_errno = errno;
                        mutex_unlock(&clnt_fd_lock);
                        thr_sigsetmask(SIG_SETMASK, &(mask), NULL);
                        goto err;
                }
                if (_connect(fd, (struct sockaddr *)raddr->buf,
                    raddr->len) < 0) {
                        rpc_createerr.cf_stat = RPC_SYSTEMERROR;
                        rpc_createerr.cf_error.re_errno = errno;
                        mutex_unlock(&clnt_fd_lock);
                        thr_sigsetmask(SIG_SETMASK, &(mask), NULL);
                        goto err;
                }
        }
        mutex_unlock(&clnt_fd_lock);
        thr_sigsetmask(SIG_SETMASK, &(mask), NULL);
        if (!__rpc_fd2sockinfo(fd, &si))
                goto err;

        ct->ct_closeit = FALSE;

        /*
         * Set up private data struct
         */
        ct->ct_fd = fd;
        ct->ct_wait.tv_usec = 0;
        ct->ct_waitset = FALSE;
        ct->ct_addr.buf = malloc(raddr->maxlen);
        if (ct->ct_addr.buf == NULL)
                goto err;
        memcpy(ct->ct_addr.buf, raddr->buf, raddr->len);
        ct->ct_addr.len = raddr->maxlen;
        ct->ct_addr.maxlen = raddr->maxlen;

        /*
         * Initialize call message
         */
        gettimeofday(&now, NULL);
        call_msg.rm_xid = ((u_int32_t)++disrupt) ^ __RPC_GETXID(&now);
        call_msg.rm_direction = CALL;
        call_msg.rm_call.cb_rpcvers = RPC_MSG_VERSION;
        call_msg.rm_call.cb_prog = (u_int32_t)prog;
        call_msg.rm_call.cb_vers = (u_int32_t)vers;

        /*
         * pre-serialize the static part of the call msg and stash it away
         */
        xdrmem_create(&(ct->ct_xdrs), ct->ct_u.ct_mcallc, MCALL_MSG_SIZE,
            XDR_ENCODE);
        if (! xdr_callhdr(&(ct->ct_xdrs), &call_msg)) {
                if (ct->ct_closeit) {
                        _close(fd);
                }
                goto err;
        }
        ct->ct_mpos = XDR_GETPOS(&(ct->ct_xdrs));
        XDR_DESTROY(&(ct->ct_xdrs));

        /*
         * Create a client handle which uses xdrrec for serialization
         * and authnone for authentication.
         */
        cl->cl_ops = clnt_vc_ops();
        cl->cl_private = ct;
        cl->cl_auth = authnone_create();
        sendsz = __rpc_get_t_size(si.si_af, si.si_proto, (int)sendsz);
        recvsz = __rpc_get_t_size(si.si_af, si.si_proto, (int)recvsz);
        xdrrec_create(&(ct->ct_xdrs), sendsz, recvsz,
            cl->cl_private, read_vc, write_vc);
        return (cl);

err:
        if (cl) {
                if (ct) {
                        if (ct->ct_addr.len)
                                mem_free(ct->ct_addr.buf, ct->ct_addr.len);
                        mem_free(ct, sizeof (struct ct_data));
                }
                if (cl)
                        mem_free(cl, sizeof (CLIENT));
        }
        return (NULL);
}

static enum clnt_stat
clnt_vc_call(CLIENT *cl, rpcproc_t proc, xdrproc_t xdr_args, void *args_ptr,
    xdrproc_t xdr_results, void *results_ptr, struct timeval timeout)
{
        struct ct_data *ct = (struct ct_data *) cl->cl_private;
        XDR *xdrs = &(ct->ct_xdrs);
        struct rpc_msg reply_msg;
        u_int32_t x_id;
        u_int32_t *msg_x_id = &ct->ct_u.ct_mcalli;      /* yuk */
        bool_t shipnow;
        int refreshes = 2;
        sigset_t mask, newmask;
        int rpc_lock_value;

        assert(cl != NULL);

        sigfillset(&newmask);
        thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
        mutex_lock(&clnt_fd_lock);
        while (vc_fd_locks[ct->ct_fd])
                cond_wait(&vc_cv[ct->ct_fd], &clnt_fd_lock);
        if (__isthreaded)
                rpc_lock_value = 1;
        else
                rpc_lock_value = 0;
        vc_fd_locks[ct->ct_fd] = rpc_lock_value;
        mutex_unlock(&clnt_fd_lock);
        if (!ct->ct_waitset) {
                /*
                 * If time is not within limits, we ignore it
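                 * and the handle keeps whatever ct_wait it already holds.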
                 */
                if (time_not_ok(&timeout) == FALSE)
                        ct->ct_wait = timeout;
        }

        shipnow =
            (xdr_results == NULL && timeout.tv_sec == 0
            && timeout.tv_usec == 0) ? FALSE : TRUE;

call_again:
        xdrs->x_op = XDR_ENCODE;
        ct->ct_error.re_status = RPC_SUCCESS;
        x_id = ntohl(--(*msg_x_id));

        if ((! XDR_PUTBYTES(xdrs, ct->ct_u.ct_mcallc, ct->ct_mpos)) ||
            (! XDR_PUTINT32(xdrs, &proc)) ||
            (! AUTH_MARSHALL(cl->cl_auth, xdrs)) ||
            (! (*xdr_args)(xdrs, args_ptr))) {
                if (ct->ct_error.re_status == RPC_SUCCESS)
                        ct->ct_error.re_status = RPC_CANTENCODEARGS;
                xdrrec_endofrecord(xdrs, TRUE);
                release_fd_lock(ct->ct_fd, mask);
                return (ct->ct_error.re_status);
        }
        if (! xdrrec_endofrecord(xdrs, shipnow)) {
                release_fd_lock(ct->ct_fd, mask);
                return (ct->ct_error.re_status = RPC_CANTSEND);
        }
        if (! shipnow) {
                release_fd_lock(ct->ct_fd, mask);
                return (RPC_SUCCESS);
        }
        /*
         * Hack to provide rpc-based message passing
         */
        if (timeout.tv_sec == 0 && timeout.tv_usec == 0) {
                release_fd_lock(ct->ct_fd, mask);
                return (ct->ct_error.re_status = RPC_TIMEDOUT);
        }

        /*
         * Keep receiving until we get a valid transaction id
         */
        xdrs->x_op = XDR_DECODE;
        while (TRUE) {
                reply_msg.acpted_rply.ar_verf = _null_auth;
                reply_msg.acpted_rply.ar_results.where = NULL;
                reply_msg.acpted_rply.ar_results.proc = (xdrproc_t)xdr_void;
                if (! xdrrec_skiprecord(xdrs)) {
                        release_fd_lock(ct->ct_fd, mask);
                        return (ct->ct_error.re_status);
                }
                /* now decode and validate the response header */
                if (! xdr_replymsg(xdrs, &reply_msg)) {
                        if (ct->ct_error.re_status == RPC_SUCCESS)
                                continue;
                        release_fd_lock(ct->ct_fd, mask);
                        return (ct->ct_error.re_status);
                }
                if (reply_msg.rm_xid == x_id)
                        break;
        }

        /*
         * process header
         */
        _seterr_reply(&reply_msg, &(ct->ct_error));
        if (ct->ct_error.re_status == RPC_SUCCESS) {
                if (! AUTH_VALIDATE(cl->cl_auth,
                    &reply_msg.acpted_rply.ar_verf)) {
                        ct->ct_error.re_status = RPC_AUTHERROR;
                        ct->ct_error.re_why = AUTH_INVALIDRESP;
                } else if (! (*xdr_results)(xdrs, results_ptr)) {
                        if (ct->ct_error.re_status == RPC_SUCCESS)
                                ct->ct_error.re_status = RPC_CANTDECODERES;
                }
                /* free verifier ... */
                if (reply_msg.acpted_rply.ar_verf.oa_base != NULL) {
                        xdrs->x_op = XDR_FREE;
                        xdr_opaque_auth(xdrs,
                            &(reply_msg.acpted_rply.ar_verf));
                }
        }       /* end successful completion */
        else {
                /*
                 * maybe our credentials need to be refreshed ...
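                 * if so, retry the call; at most two refresh attempts
                 * are made (refreshes starts at 2).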
                 */
                if (refreshes-- && AUTH_REFRESH(cl->cl_auth, &reply_msg))
                        goto call_again;
        }       /* end of unsuccessful completion */
        release_fd_lock(ct->ct_fd, mask);
        return (ct->ct_error.re_status);
}

static void
clnt_vc_geterr(CLIENT *cl, struct rpc_err *errp)
{
        struct ct_data *ct;

        assert(cl != NULL);
        assert(errp != NULL);

        ct = (struct ct_data *) cl->cl_private;
        *errp = ct->ct_error;
}

static bool_t
clnt_vc_freeres(CLIENT *cl, xdrproc_t xdr_res, void *res_ptr)
{
        struct ct_data *ct;
        XDR *xdrs;
        bool_t dummy;
        sigset_t mask;
        sigset_t newmask;

        assert(cl != NULL);

        ct = (struct ct_data *)cl->cl_private;
        xdrs = &(ct->ct_xdrs);

        sigfillset(&newmask);
        thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
        mutex_lock(&clnt_fd_lock);
        while (vc_fd_locks[ct->ct_fd])
                cond_wait(&vc_cv[ct->ct_fd], &clnt_fd_lock);
        xdrs->x_op = XDR_FREE;
        dummy = (*xdr_res)(xdrs, res_ptr);
        mutex_unlock(&clnt_fd_lock);
        thr_sigsetmask(SIG_SETMASK, &(mask), NULL);
        cond_signal(&vc_cv[ct->ct_fd]);

        return dummy;
}

/*ARGSUSED*/
static void
clnt_vc_abort(CLIENT *cl __unused)
{
}

static bool_t
clnt_vc_control(CLIENT *cl, u_int request, void *info)
{
        struct ct_data *ct;
        void *infop = info;
        sigset_t mask;
        sigset_t newmask;
        int rpc_lock_value;

        assert(cl != NULL);

        ct = (struct ct_data *)cl->cl_private;

        sigfillset(&newmask);
        thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
        mutex_lock(&clnt_fd_lock);
        while (vc_fd_locks[ct->ct_fd])
                cond_wait(&vc_cv[ct->ct_fd], &clnt_fd_lock);
        if (__isthreaded)
                rpc_lock_value = 1;
        else
                rpc_lock_value = 0;
        vc_fd_locks[ct->ct_fd] = rpc_lock_value;
        mutex_unlock(&clnt_fd_lock);

        switch (request) {
        case CLSET_FD_CLOSE:
                ct->ct_closeit = TRUE;
                release_fd_lock(ct->ct_fd, mask);
                return (TRUE);
        case CLSET_FD_NCLOSE:
                ct->ct_closeit = FALSE;
                release_fd_lock(ct->ct_fd, mask);
                return (TRUE);
        default:
                break;
        }

        /* for other requests which use info */
        if (info == NULL) {
                release_fd_lock(ct->ct_fd, mask);
                return (FALSE);
        }
        switch (request) {
        case CLSET_TIMEOUT:
                if (time_not_ok((struct timeval *)info)) {
                        release_fd_lock(ct->ct_fd, mask);
                        return (FALSE);
                }
                ct->ct_wait = *(struct timeval *)infop;
                ct->ct_waitset = TRUE;
                break;
        case CLGET_TIMEOUT:
                *(struct timeval *)infop = ct->ct_wait;
                break;
        case CLGET_SERVER_ADDR:
                memcpy(info, ct->ct_addr.buf, (size_t)ct->ct_addr.len);
                break;
        case CLGET_FD:
                *(int *)info = ct->ct_fd;
                break;
        case CLGET_SVC_ADDR:
                /* The caller should not free this memory area */
                *(struct netbuf *)info = ct->ct_addr;
                break;
        case CLSET_SVC_ADDR:            /* set to new address */
                release_fd_lock(ct->ct_fd, mask);
                return (FALSE);
        case CLGET_XID:
                /*
                 * use the knowledge that xid is the first element in the
                 * call structure.  This will get the xid of the PREVIOUS call
                 */
                *(u_int32_t *)info =
                    ntohl(*(u_int32_t *)(void *)&ct->ct_u.ct_mcalli);
                break;
        case CLSET_XID:
                /* This will set the xid of the NEXT call */
                *(u_int32_t *)(void *)&ct->ct_u.ct_mcalli =
                    htonl(*((u_int32_t *)info) + 1);
                /* increment by 1 as clnt_vc_call() decrements once */
                break;
        case CLGET_VERS:
                /*
                 * This RELIES on the information that, in the call
                 * body, the version number field is the fifth field from
                 * the beginning of the RPC header.  MUST be changed if the
                 * call_struct is changed
                 */
                *(u_int32_t *)info =
                    ntohl(*(u_int32_t *)(void *)(ct->ct_u.ct_mcallc +
                    4 * BYTES_PER_XDR_UNIT));
                break;

        case CLSET_VERS:
                *(u_int32_t *)(void *)(ct->ct_u.ct_mcallc +
                    4 * BYTES_PER_XDR_UNIT) =
                    htonl(*(u_int32_t *)info);
                break;

        case CLGET_PROG:
                /*
                 * This RELIES on the information that, in the call body,
                 * the program number field is the fourth field from the
                 * beginning of the RPC header.  MUST be changed if the
                 * call_struct is changed
                 */
                *(u_int32_t *)info =
                    ntohl(*(u_int32_t *)(void *)(ct->ct_u.ct_mcallc +
                    3 * BYTES_PER_XDR_UNIT));
                break;

        case CLSET_PROG:
                *(u_int32_t *)(void *)(ct->ct_u.ct_mcallc +
                    3 * BYTES_PER_XDR_UNIT) =
                    htonl(*(u_int32_t *)info);
                break;

        default:
                release_fd_lock(ct->ct_fd, mask);
                return (FALSE);
        }
        release_fd_lock(ct->ct_fd, mask);
        return (TRUE);
}


static void
clnt_vc_destroy(CLIENT *cl)
{
        struct ct_data *ct = (struct ct_data *) cl->cl_private;
        int ct_fd = ct->ct_fd;
        sigset_t mask;
        sigset_t newmask;

        assert(cl != NULL);

        ct = (struct ct_data *) cl->cl_private;

        sigfillset(&newmask);
        thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
        mutex_lock(&clnt_fd_lock);
        while (vc_fd_locks[ct_fd])
                cond_wait(&vc_cv[ct_fd], &clnt_fd_lock);
        if (ct->ct_closeit && ct->ct_fd != -1) {
                _close(ct->ct_fd);
        }
        XDR_DESTROY(&(ct->ct_xdrs));
        if (ct->ct_addr.buf)
                free(ct->ct_addr.buf);
        mem_free(ct, sizeof(struct ct_data));
        mem_free(cl, sizeof(CLIENT));
        mutex_unlock(&clnt_fd_lock);
        thr_sigsetmask(SIG_SETMASK, &(mask), NULL);
        cond_signal(&vc_cv[ct_fd]);
}

/*
 * Interface between xdr serializer and tcp connection.
 * Behaves like the system calls, read & write, but keeps some error state
 * around for the rpc level.
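 *
 * Both return the number of bytes transferred, or -1 on error with
 * ct->ct_error filled in; read_vc() also maps a premature EOF (a read of
 * 0 bytes) to -1 with re_errno set to ECONNRESET.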
 */
static int
read_vc(void *ctp, void *buf, int len)
{
        struct sockaddr sa;
        socklen_t sal;
        struct ct_data *ct = (struct ct_data *)ctp;
        struct pollfd fd;
        int milliseconds = (int)((ct->ct_wait.tv_sec * 1000) +
            (ct->ct_wait.tv_usec / 1000));

        if (len == 0)
                return (0);
        fd.fd = ct->ct_fd;
        fd.events = POLLIN;
        for (;;) {
                switch (_poll(&fd, 1, milliseconds)) {
                case 0:
                        ct->ct_error.re_status = RPC_TIMEDOUT;
                        return (-1);

                case -1:
                        if (errno == EINTR)
                                continue;
                        ct->ct_error.re_status = RPC_CANTRECV;
                        ct->ct_error.re_errno = errno;
                        return (-1);
                }
                break;
        }

        sal = sizeof(sa);
        if ((_getpeername(ct->ct_fd, &sa, &sal) == 0) &&
            (sa.sa_family == AF_LOCAL)) {
                len = __msgread(ct->ct_fd, buf, (size_t)len);
        } else {
                len = _read(ct->ct_fd, buf, (size_t)len);
        }

        switch (len) {
        case 0:
                /* premature eof */
                ct->ct_error.re_errno = ECONNRESET;
                ct->ct_error.re_status = RPC_CANTRECV;
                len = -1;       /* it's really an error */
                break;

        case -1:
                ct->ct_error.re_errno = errno;
                ct->ct_error.re_status = RPC_CANTRECV;
                break;
        }
        return (len);
}

static int
write_vc(void *ctp, void *buf, int len)
{
        struct sockaddr sa;
        socklen_t sal;
        struct ct_data *ct = (struct ct_data *)ctp;
        int i, cnt;

        sal = sizeof(sa);
        if ((_getpeername(ct->ct_fd, &sa, &sal) == 0) &&
            (sa.sa_family == AF_LOCAL)) {
                for (cnt = len; cnt > 0; cnt -= i, buf = (char *)buf + i) {
                        if ((i = __msgwrite(ct->ct_fd, buf,
                            (size_t)cnt)) == -1) {
                                ct->ct_error.re_errno = errno;
                                ct->ct_error.re_status = RPC_CANTSEND;
                                return (-1);
                        }
                }
        } else {
                for (cnt = len; cnt > 0; cnt -= i, buf = (char *)buf + i) {
                        if ((i = _write(ct->ct_fd, buf, (size_t)cnt)) == -1) {
                                ct->ct_error.re_errno = errno;
                                ct->ct_error.re_status = RPC_CANTSEND;
                                return (-1);
                        }
                }
        }
        return (len);
}

static struct clnt_ops *
clnt_vc_ops(void)
{
        static struct clnt_ops ops;
        sigset_t mask, newmask;

        /* VARIABLES PROTECTED BY ops_lock: ops */

        sigfillset(&newmask);
        thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
        mutex_lock(&ops_lock);
        if (ops.cl_call == NULL) {
                ops.cl_call = clnt_vc_call;
                ops.cl_abort = clnt_vc_abort;
                ops.cl_geterr = clnt_vc_geterr;
                ops.cl_freeres = clnt_vc_freeres;
                ops.cl_destroy = clnt_vc_destroy;
                ops.cl_control = clnt_vc_control;
        }
        mutex_unlock(&ops_lock);
        thr_sigsetmask(SIG_SETMASK, &(mask), NULL);
        return (&ops);
}

/*
 * Make sure that the time is not garbage.  -1 value is disallowed.
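 * Accepted range: 0 <= tv_sec <= 100000000 and 0 <= tv_usec <= 1000000.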
 * Note this is different from time_not_ok in clnt_dg.c
 */
static bool_t
time_not_ok(struct timeval *t)
{
        return (t->tv_sec <= -1 || t->tv_sec > 100000000 ||
            t->tv_usec <= -1 || t->tv_usec > 1000000);
}

static int
__msgread(int sock, void *buf, size_t cnt)
{
        struct iovec iov[1];
        struct msghdr msg;
        union {
                struct cmsghdr cmsg;
                char control[CMSG_SPACE(sizeof(struct cmsgcred))];
        } cm;

        bzero((char *)&cm, sizeof(cm));
        iov[0].iov_base = buf;
        iov[0].iov_len = cnt;

        msg.msg_iov = iov;
        msg.msg_iovlen = 1;
        msg.msg_name = NULL;
        msg.msg_namelen = 0;
        msg.msg_control = (caddr_t)&cm;
        msg.msg_controllen = CMSG_SPACE(sizeof(struct cmsgcred));
        msg.msg_flags = 0;

        return (_recvmsg(sock, &msg, 0));
}

static int
__msgwrite(int sock, void *buf, size_t cnt)
{
        struct iovec iov[1];
        struct msghdr msg;
        union {
                struct cmsghdr cmsg;
                char control[CMSG_SPACE(sizeof(struct cmsgcred))];
        } cm;

        bzero((char *)&cm, sizeof(cm));
        iov[0].iov_base = buf;
        iov[0].iov_len = cnt;

        cm.cmsg.cmsg_type = SCM_CREDS;
        cm.cmsg.cmsg_level = SOL_SOCKET;
        cm.cmsg.cmsg_len = CMSG_LEN(sizeof(struct cmsgcred));

        msg.msg_iov = iov;
        msg.msg_iovlen = 1;
        msg.msg_name = NULL;
        msg.msg_namelen = 0;
        msg.msg_control = (caddr_t)&cm;
        msg.msg_controllen = CMSG_SPACE(sizeof(struct cmsgcred));
        msg.msg_flags = 0;

        return (_sendmsg(sock, &msg, 0));
}