/*
 * Sun RPC is a product of Sun Microsystems, Inc. and is provided for
 * unrestricted use provided that this legend is included on all tape
 * media and as a part of the software program in whole or part.  Users
 * may copy or modify Sun RPC without charge, but are not authorized
 * to license or distribute it to anyone else except as part of a product or
 * program developed by the user.
 *
 * SUN RPC IS PROVIDED AS IS WITH NO WARRANTIES OF ANY KIND INCLUDING THE
 * WARRANTIES OF DESIGN, MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE, OR ARISING FROM A COURSE OF DEALING, USAGE OR TRADE PRACTICE.
 *
 * Sun RPC is provided with no support and without any obligation on the
 * part of Sun Microsystems, Inc. to assist in its use, correction,
 * modification or enhancement.
 *
 * SUN MICROSYSTEMS, INC. SHALL HAVE NO LIABILITY WITH RESPECT TO THE
 * INFRINGEMENT OF COPYRIGHTS, TRADE SECRETS OR ANY PATENTS BY SUN RPC
 * OR ANY PART THEREOF.
 *
 * In no event will Sun Microsystems, Inc. be liable for any lost revenue
 * or profits or other special, indirect and consequential damages, even if
 * Sun has been advised of the possibility of such damages.
 *
 * Sun Microsystems, Inc.
 * 2550 Garcia Avenue
 * Mountain View, California  94043
 *
 * @(#)clnt_dg.c	1.23	94/04/22 SMI; 1.19 89/03/16 Copyr 1988 Sun Micro
 * $NetBSD: clnt_dg.c,v 1.4 2000/07/14 08:40:41 fvdl Exp $
 * $FreeBSD: src/lib/libc/rpc/clnt_dg.c,v 1.18 2006/02/27 22:10:58 deischen Exp $
 * $DragonFly$
 */
/*
 * Copyright (c) 1986-1991 by Sun Microsystems Inc.
 */

/*
 * Implements a connectionless client side RPC.
 */

#include "namespace.h"
#include "reentrant.h"
#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <arpa/inet.h>
#include <rpc/rpc.h>
#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <signal.h>
#include <unistd.h>
#include <err.h>
#include "un-namespace.h"
#include "rpc_com.h"
#include "mt_misc.h"


#define	RPC_MAX_BACKOFF		30 /* seconds */


static void		 clnt_dg_abort(CLIENT *);
static enum clnt_stat	 clnt_dg_call(CLIENT *, rpcproc_t, xdrproc_t, void *,
				      xdrproc_t, void *, struct timeval);
static bool_t		 clnt_dg_control(CLIENT *, u_int, void *);
static void		 clnt_dg_destroy(CLIENT *);
static bool_t		 clnt_dg_freeres(CLIENT *, xdrproc_t, void *);
static void		 clnt_dg_geterr(CLIENT *, struct rpc_err *);
static struct clnt_ops	*clnt_dg_ops(void);
static bool_t		 time_not_ok(struct timeval *);

/*
 *	This machinery implements per-fd locks for MT-safety.  It is not
 *	sufficient to do per-CLIENT handle locks for MT-safety because a
 *	user may create more than one CLIENT handle with the same fd behind
 *	it.  Therefore, we allocate an array of flags (dg_fd_locks), protected
 *	by the clnt_fd_lock mutex, and an array (dg_cv) of condition variables
 *	similarly protected.  dg_fd_locks[fd] == 1 => a call is active on some
 *	CLIENT handle created for that fd.
 *	The current implementation holds locks across the entire RPC and reply,
 *	including retransmissions.  Yes, this is silly, and as soon as this
 *	code is proven to work, this should be the first thing fixed.  One step
 *	at a time.
 */
static int	*dg_fd_locks;
static cond_t	*dg_cv;
#define	release_fd_lock(fd, mask) {		\
	mutex_lock(&clnt_fd_lock);	\
	dg_fd_locks[fd] = 0;		\
	mutex_unlock(&clnt_fd_lock);	\
	thr_sigsetmask(SIG_SETMASK, &(mask), NULL); \
	cond_signal(&dg_cv[fd]);	\
}
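
/*
 * The matching acquire sequence, used by clnt_dg_call(), clnt_dg_control()
 * and clnt_dg_destroy() below, is (in sketch form):
 *
 *	sigfillset(&newmask);
 *	thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
 *	mutex_lock(&clnt_fd_lock);
 *	while (dg_fd_locks[fd])
 *		cond_wait(&dg_cv[fd], &clnt_fd_lock);
 *	dg_fd_locks[fd] = __isthreaded ? 1 : 0;
 *	mutex_unlock(&clnt_fd_lock);
 */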

static const char mem_err_clnt_dg[] = "clnt_dg_create: out of memory";

/* VARIABLES PROTECTED BY clnt_fd_lock: dg_fd_locks, dg_cv */

/*
 * Private data kept per client handle
 */
struct cu_data {
	int			cu_fd;		/* connection's fd */
	bool_t			cu_closeit;	/* opened by library */
	struct sockaddr_storage	cu_raddr;	/* remote address */
	int			cu_rlen;
	struct timeval		cu_wait;	/* retransmit interval */
	struct timeval		cu_total;	/* total time for the call */
	struct rpc_err		cu_error;
	XDR			cu_outxdrs;
	u_int			cu_xdrpos;
	u_int			cu_sendsz;	/* send size */
	char			*cu_outbuf;
	u_int			cu_recvsz;	/* recv size */
	int			cu_async;
	int			cu_connect;	/* Use connect(). */
	int			cu_connected;	/* Have done connect(). */
	struct kevent		cu_kin;
	int			cu_kq;
	char			cu_inbuf[1];
};
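
/*
 * Allocation sketch (see clnt_dg_create() below): a cu_data is allocated
 * as a single block of sizeof(struct cu_data) + sendsz + recvsz bytes.
 * cu_inbuf is the start of the recvsz-byte receive buffer and cu_outbuf
 * points just past it, at the start of the sendsz-byte send buffer:
 *
 *	cu = mem_alloc(sizeof (*cu) + sendsz + recvsz);
 *	cu->cu_outbuf = &cu->cu_inbuf[recvsz];
 */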

/*
 * Connectionless client creation returns with client handle parameters.
 * Default options are set, which the user can change using clnt_control().
 * fd should be open and bound.
 * NB: The rpch->cl_auth is initialized to null authentication.
 * 	Caller may wish to set this to something more useful.
 *
 * sendsz and recvsz are the maximum allowable packet sizes that can be
 * sent and received. Normally they are the same, but they can be
 * changed to improve the program efficiency and buffer allocation.
 * If they are 0, use the transport default.
 *
 * If svcaddr is NULL, returns NULL.
 */
CLIENT *
clnt_dg_create(int fd,			/* open file descriptor */
	const struct netbuf *svcaddr,	/* server's address */
	rpcprog_t program,		/* program number */
	rpcvers_t version,		/* version number */
	u_int sendsz,			/* buffer send size */
	u_int recvsz)			/* buffer recv size */
{
	CLIENT *cl = NULL;		/* client handle */
	struct cu_data *cu = NULL;	/* private data */
	struct timeval now;
	struct rpc_msg call_msg;
	sigset_t mask;
	sigset_t newmask;
	struct __rpc_sockinfo si;
	int one = 1;

	sigfillset(&newmask);
	thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
	mutex_lock(&clnt_fd_lock);
	if (dg_fd_locks == (int *) NULL) {
		size_t cv_allocsz;
		size_t fd_allocsz;
		int dtbsize = __rpc_dtbsize();

		fd_allocsz = dtbsize * sizeof (int);
		dg_fd_locks = (int *) mem_alloc(fd_allocsz);
		if (dg_fd_locks == (int *) NULL) {
			mutex_unlock(&clnt_fd_lock);
			thr_sigsetmask(SIG_SETMASK, &(mask), NULL);
			goto err1;
		} else
			memset(dg_fd_locks, '\0', fd_allocsz);

		cv_allocsz = dtbsize * sizeof (cond_t);
		dg_cv = (cond_t *) mem_alloc(cv_allocsz);
		if (dg_cv == (cond_t *) NULL) {
			mem_free(dg_fd_locks, fd_allocsz);
			dg_fd_locks = (int *) NULL;
			mutex_unlock(&clnt_fd_lock);
			thr_sigsetmask(SIG_SETMASK, &(mask), NULL);
			goto err1;
		} else {
			int i;

			for (i = 0; i < dtbsize; i++)
				cond_init(&dg_cv[i], 0, (void *) 0);
		}
	}

	mutex_unlock(&clnt_fd_lock);
	thr_sigsetmask(SIG_SETMASK, &(mask), NULL);

	if (svcaddr == NULL) {
		rpc_createerr.cf_stat = RPC_UNKNOWNADDR;
		return (NULL);
	}

	if (!__rpc_fd2sockinfo(fd, &si)) {
		rpc_createerr.cf_stat = RPC_TLIERROR;
		rpc_createerr.cf_error.re_errno = 0;
		return (NULL);
	}
	/*
	 * Find the receive and the send size
	 */
	sendsz = __rpc_get_t_size(si.si_af, si.si_proto, (int)sendsz);
	recvsz = __rpc_get_t_size(si.si_af, si.si_proto, (int)recvsz);
	if ((sendsz == 0) || (recvsz == 0)) {
		rpc_createerr.cf_stat = RPC_TLIERROR; /* XXX */
		rpc_createerr.cf_error.re_errno = 0;
		return (NULL);
	}

	if ((cl = mem_alloc(sizeof (CLIENT))) == NULL)
		goto err1;
	/*
	 * Should be multiple of 4 for XDR.
	 */
	sendsz = ((sendsz + 3) / 4) * 4;
	recvsz = ((recvsz + 3) / 4) * 4;
	cu = mem_alloc(sizeof (*cu) + sendsz + recvsz);
	if (cu == NULL)
		goto err1;
	memcpy(&cu->cu_raddr, svcaddr->buf, (size_t)svcaddr->len);
	cu->cu_rlen = svcaddr->len;
	cu->cu_outbuf = &cu->cu_inbuf[recvsz];
	/* Other values can also be set through clnt_control() */
	cu->cu_wait.tv_sec = 15;	/* heuristically chosen */
	cu->cu_wait.tv_usec = 0;
	cu->cu_total.tv_sec = -1;
	cu->cu_total.tv_usec = -1;
	cu->cu_sendsz = sendsz;
	cu->cu_recvsz = recvsz;
	cu->cu_async = FALSE;
	cu->cu_connect = FALSE;
	cu->cu_connected = FALSE;
	gettimeofday(&now, NULL);
	call_msg.rm_xid = __RPC_GETXID(&now);
	call_msg.rm_call.cb_prog = program;
	call_msg.rm_call.cb_vers = version;
	xdrmem_create(&(cu->cu_outxdrs), cu->cu_outbuf, sendsz, XDR_ENCODE);
	if (! xdr_callhdr(&(cu->cu_outxdrs), &call_msg)) {
		rpc_createerr.cf_stat = RPC_CANTENCODEARGS;  /* XXX */
		rpc_createerr.cf_error.re_errno = 0;
		goto err2;
	}
	cu->cu_xdrpos = XDR_GETPOS(&(cu->cu_outxdrs));

	/* XXX fvdl - do we still want this? */
#if 0
	bindresvport_sa(fd, (struct sockaddr *)svcaddr->buf);
#endif
	_ioctl(fd, FIONBIO, (char *)(void *)&one);

	/*
	 * By default, closeit is always FALSE.  It is the user's
	 * responsibility to close the fd, or the user may use clnt_control
	 * to let clnt_destroy close it.
	 */
	cu->cu_closeit = FALSE;
	cu->cu_fd = fd;
	cl->cl_ops = clnt_dg_ops();
	cl->cl_private = (caddr_t)(void *)cu;
	cl->cl_auth = authnone_create();
	cl->cl_tp = NULL;
	cl->cl_netid = NULL;
	cu->cu_kq = -1;
	EV_SET(&cu->cu_kin, cu->cu_fd, EVFILT_READ, EV_ADD, 0, 0, 0);
	return (cl);
err1:
	warnx(mem_err_clnt_dg);
	rpc_createerr.cf_stat = RPC_SYSTEMERROR;
	rpc_createerr.cf_error.re_errno = errno;
err2:
	if (cl) {
		mem_free(cl, sizeof (CLIENT));
		if (cu)
			mem_free(cu, sizeof (*cu) + sendsz + recvsz);
	}
	return (NULL);
}

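/*
 * Example (illustrative sketch only, not part of the library): building
 * a UDP client handle directly with clnt_dg_create() and letting
 * clnt_destroy() close the socket.  example_dg_client(), its parameters,
 * and the error handling are hypothetical; real applications normally
 * reach this code through the generic clnt_create() interface.  NB: the
 * comment above says the fd should be bound; an unbound UDP socket is
 * implicitly bound on first send.
 */
#if 0
static CLIENT *
example_dg_client(struct sockaddr_in *sin, rpcprog_t prog, rpcvers_t vers)
{
	struct netbuf svcaddr;
	CLIENT *cl;
	int fd;

	if ((fd = _socket(AF_INET, SOCK_DGRAM, 0)) < 0)
		return (NULL);
	svcaddr.buf = sin;
	svcaddr.len = svcaddr.maxlen = sizeof (*sin);
	/* Sizes of 0 request the transport default buffer sizes. */
	cl = clnt_dg_create(fd, &svcaddr, prog, vers, 0, 0);
	if (cl == NULL) {
		_close(fd);
		return (NULL);
	}
	/* Let clnt_destroy() close the fd for us (sets cu_closeit). */
	clnt_control(cl, CLSET_FD_CLOSE, NULL);
	return (cl);
}
#endif
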
static enum clnt_stat
clnt_dg_call(CLIENT	*cl,		/* client handle */
	rpcproc_t	proc,		/* procedure number */
	xdrproc_t	xargs,		/* xdr routine for args */
	void		*argsp,		/* pointer to args */
	xdrproc_t	xresults,	/* xdr routine for results */
	void		*resultsp,	/* pointer to results */
	struct timeval	utimeout)	/* seconds to wait before giving up */
{
	struct cu_data *cu = (struct cu_data *)cl->cl_private;
	XDR *xdrs;
	size_t outlen = 0;
	struct rpc_msg reply_msg;
	XDR reply_xdrs;
	bool_t ok;
	int nrefreshes = 2;		/* number of times to refresh cred */
	struct timeval timeout;
	struct timeval retransmit_time;
	struct timeval next_sendtime, starttime, time_waited, tv;
	struct timespec ts;
	struct kevent kv;
	struct sockaddr *sa;
	sigset_t mask;
	sigset_t newmask;
	socklen_t inlen, salen;
	ssize_t recvlen = 0;
	int kin_len, n, rpc_lock_value;
	u_int32_t xid;

	outlen = 0;
	sigfillset(&newmask);
	thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
	mutex_lock(&clnt_fd_lock);
	while (dg_fd_locks[cu->cu_fd])
		cond_wait(&dg_cv[cu->cu_fd], &clnt_fd_lock);
	if (__isthreaded)
		rpc_lock_value = 1;
	else
		rpc_lock_value = 0;
	dg_fd_locks[cu->cu_fd] = rpc_lock_value;
	mutex_unlock(&clnt_fd_lock);
	if (cu->cu_total.tv_usec == -1) {
		timeout = utimeout;	/* use supplied timeout */
	} else {
		timeout = cu->cu_total;	/* use default timeout */
	}

	if (cu->cu_connect && !cu->cu_connected) {
		if (_connect(cu->cu_fd, (struct sockaddr *)&cu->cu_raddr,
		    cu->cu_rlen) < 0) {
			cu->cu_error.re_errno = errno;
			cu->cu_error.re_status = RPC_CANTSEND;
			goto out;
		}
		cu->cu_connected = 1;
	}
	if (cu->cu_connected) {
		sa = NULL;
		salen = 0;
	} else {
		sa = (struct sockaddr *)&cu->cu_raddr;
		salen = cu->cu_rlen;
	}
	time_waited.tv_sec = 0;
	time_waited.tv_usec = 0;
	retransmit_time = next_sendtime = cu->cu_wait;
	gettimeofday(&starttime, NULL);

	/* Clean up in case the last call ended in a longjmp(3) call. */
	if (cu->cu_kq >= 0)
		_close(cu->cu_kq);
	if ((cu->cu_kq = kqueue()) < 0) {
		cu->cu_error.re_errno = errno;
		cu->cu_error.re_status = RPC_CANTSEND;
		goto out;
	}
	kin_len = 1;

call_again:
	xdrs = &(cu->cu_outxdrs);
	if (cu->cu_async == TRUE && xargs == NULL)
		goto get_reply;
	xdrs->x_op = XDR_ENCODE;
	XDR_SETPOS(xdrs, cu->cu_xdrpos);
	/*
	 * The transaction id (xid) is the first thing in the out buffer.
	 * It is kept in network byte order, so we must convert before and
	 * after incrementing it.
	 */
	xid = ntohl(*(u_int32_t *)(void *)(cu->cu_outbuf));
	xid++;
	*(u_int32_t *)(void *)(cu->cu_outbuf) = htonl(xid);

	if ((! XDR_PUTINT32(xdrs, &proc)) ||
	    (! AUTH_MARSHALL(cl->cl_auth, xdrs)) ||
	    (! (*xargs)(xdrs, argsp))) {
		cu->cu_error.re_status = RPC_CANTENCODEARGS;
		goto out;
	}
	outlen = (size_t)XDR_GETPOS(xdrs);

send_again:
	if (_sendto(cu->cu_fd, cu->cu_outbuf, outlen, 0, sa, salen) != outlen) {
		cu->cu_error.re_errno = errno;
		cu->cu_error.re_status = RPC_CANTSEND;
		goto out;
	}

	/*
	 * Hack to provide rpc-based message passing
	 */
	if (timeout.tv_sec == 0 && timeout.tv_usec == 0) {
		cu->cu_error.re_status = RPC_TIMEDOUT;
		goto out;
	}

get_reply:

	/*
	 * sub-optimal code appears here because we have
	 * some clock time to spare while the packets are in flight.
	 * (We assume that this is actually only executed once.)
	 */
	reply_msg.acpted_rply.ar_verf = _null_auth;
	reply_msg.acpted_rply.ar_results.where = resultsp;
	reply_msg.acpted_rply.ar_results.proc = xresults;

	for (;;) {
		/* Decide how long to wait. */
		if (timercmp(&next_sendtime, &timeout, <))
			timersub(&next_sendtime, &time_waited, &tv);
		else
			timersub(&timeout, &time_waited, &tv);
		if (tv.tv_sec < 0 || tv.tv_usec < 0)
			tv.tv_sec = tv.tv_usec = 0;
		TIMEVAL_TO_TIMESPEC(&tv, &ts);

		n = _kevent(cu->cu_kq, &cu->cu_kin, kin_len, &kv, 1, &ts);
		/* We don't need to register the event again. */
		kin_len = 0;

		if (n == 1) {
			if (kv.flags & EV_ERROR) {
				cu->cu_error.re_errno = kv.data;
				cu->cu_error.re_status = RPC_CANTRECV;
				goto out;
			}
			/* We have some data now */
			do {
				recvlen = _recvfrom(cu->cu_fd, cu->cu_inbuf,
				    cu->cu_recvsz, 0, NULL, NULL);
			} while (recvlen < 0 && errno == EINTR);
			if (recvlen < 0 && errno != EWOULDBLOCK) {
				cu->cu_error.re_errno = errno;
				cu->cu_error.re_status = RPC_CANTRECV;
				goto out;
			}
			if (recvlen >= sizeof(u_int32_t) &&
			    (cu->cu_async == TRUE ||
			    *((u_int32_t *)(void *)(cu->cu_inbuf)) ==
			    *((u_int32_t *)(void *)(cu->cu_outbuf)))) {
				/* We now assume we have the proper reply. */
				break;
			}
		}
		if (n == -1 && errno != EINTR) {
			cu->cu_error.re_errno = errno;
			cu->cu_error.re_status = RPC_CANTRECV;
			goto out;
		}
		gettimeofday(&tv, NULL);
		timersub(&tv, &starttime, &time_waited);

		/* Check for timeout. */
		if (timercmp(&time_waited, &timeout, >)) {
			cu->cu_error.re_status = RPC_TIMEDOUT;
			goto out;
		}

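		/*
		 * Illustrative schedule (a sketch, assuming the default
		 * 15 second cu_wait and RPC_MAX_BACKOFF of 30 seconds):
		 * retransmissions fire at roughly t = 15s, 45s, 75s, ...
		 * The interval doubles once from 15s to 30s and is then
		 * capped, because the doubling below is skipped once
		 * retransmit_time reaches RPC_MAX_BACKOFF.
		 */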
		/* Retransmit if necessary. */
		if (timercmp(&time_waited, &next_sendtime, >)) {
			/* update retransmit_time */
			if (retransmit_time.tv_sec < RPC_MAX_BACKOFF)
				timeradd(&retransmit_time, &retransmit_time,
				    &retransmit_time);
			timeradd(&next_sendtime, &retransmit_time,
			    &next_sendtime);
			goto send_again;
		}
	}
	inlen = (socklen_t)recvlen;

	/*
	 * now decode and validate the response
	 */

	xdrmem_create(&reply_xdrs, cu->cu_inbuf, (u_int)recvlen, XDR_DECODE);
	ok = xdr_replymsg(&reply_xdrs, &reply_msg);
	/* XDR_DESTROY(&reply_xdrs);	save a few cycles on noop destroy */
	if (ok) {
		if ((reply_msg.rm_reply.rp_stat == MSG_ACCEPTED) &&
			(reply_msg.acpted_rply.ar_stat == SUCCESS))
			cu->cu_error.re_status = RPC_SUCCESS;
		else
			_seterr_reply(&reply_msg, &(cu->cu_error));

		if (cu->cu_error.re_status == RPC_SUCCESS) {
			if (! AUTH_VALIDATE(cl->cl_auth,
					    &reply_msg.acpted_rply.ar_verf)) {
				cu->cu_error.re_status = RPC_AUTHERROR;
				cu->cu_error.re_why = AUTH_INVALIDRESP;
			}
			if (reply_msg.acpted_rply.ar_verf.oa_base != NULL) {
				xdrs->x_op = XDR_FREE;
				xdr_opaque_auth(xdrs,
					&(reply_msg.acpted_rply.ar_verf));
			}
		}		/* end successful completion */
		/*
		 * If unsuccessful AND error is an authentication error
		 * then refresh credentials and try again, else break
		 */
		else if (cu->cu_error.re_status == RPC_AUTHERROR)
			/* maybe our credentials need to be refreshed ... */
			if (nrefreshes > 0 &&
			    AUTH_REFRESH(cl->cl_auth, &reply_msg)) {
				nrefreshes--;
				goto call_again;
			}
		/* end of unsuccessful completion */
	}	/* end of valid reply message */
	else {
		cu->cu_error.re_status = RPC_CANTDECODERES;
	}
out:
	if (cu->cu_kq >= 0)
		_close(cu->cu_kq);
	cu->cu_kq = -1;
	release_fd_lock(cu->cu_fd, mask);
	return (cu->cu_error.re_status);
}

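/*
 * Example (illustrative sketch only): issuing a call through this path
 * with an explicit 10 second total timeout.  example_dg_ping() is
 * hypothetical; NULLPROC and xdr_void are the standard no-op procedure
 * and XDR routine.  Note that a total timeout set via
 * clnt_control(CLSET_TIMEOUT) overrides the value passed here (see the
 * cu_total handling above).
 */
#if 0
static enum clnt_stat
example_dg_ping(CLIENT *cl)
{
	struct timeval tmo = { 10, 0 };	/* 10 second total timeout */

	return (CLNT_CALL(cl, NULLPROC, (xdrproc_t)xdr_void, NULL,
	    (xdrproc_t)xdr_void, NULL, tmo));
}
#endif
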
static void
clnt_dg_geterr(CLIENT *cl, struct rpc_err *errp)
{
	struct cu_data *cu = (struct cu_data *)cl->cl_private;

	*errp = cu->cu_error;
}

static bool_t
clnt_dg_freeres(CLIENT *cl, xdrproc_t xdr_res, void *res_ptr)
{
	struct cu_data *cu = (struct cu_data *)cl->cl_private;
	XDR *xdrs = &(cu->cu_outxdrs);
	bool_t dummy;
	sigset_t mask;
	sigset_t newmask;

	sigfillset(&newmask);
	thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
	mutex_lock(&clnt_fd_lock);
	while (dg_fd_locks[cu->cu_fd])
		cond_wait(&dg_cv[cu->cu_fd], &clnt_fd_lock);
	xdrs->x_op = XDR_FREE;
	dummy = (*xdr_res)(xdrs, res_ptr);
	mutex_unlock(&clnt_fd_lock);
	thr_sigsetmask(SIG_SETMASK, &mask, NULL);
	cond_signal(&dg_cv[cu->cu_fd]);
	return (dummy);
}

/*ARGSUSED*/
static void
clnt_dg_abort(CLIENT *h)
{
}

static bool_t
clnt_dg_control(CLIENT *cl, u_int request, void *info)
{
	struct cu_data *cu = (struct cu_data *)cl->cl_private;
	struct netbuf *addr;
	sigset_t mask;
	sigset_t newmask;
	int rpc_lock_value;

	sigfillset(&newmask);
	thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
	mutex_lock(&clnt_fd_lock);
	while (dg_fd_locks[cu->cu_fd])
		cond_wait(&dg_cv[cu->cu_fd], &clnt_fd_lock);
	if (__isthreaded)
		rpc_lock_value = 1;
	else
		rpc_lock_value = 0;
	dg_fd_locks[cu->cu_fd] = rpc_lock_value;
	mutex_unlock(&clnt_fd_lock);
	switch (request) {
	case CLSET_FD_CLOSE:
		cu->cu_closeit = TRUE;
		release_fd_lock(cu->cu_fd, mask);
		return (TRUE);
	case CLSET_FD_NCLOSE:
		cu->cu_closeit = FALSE;
		release_fd_lock(cu->cu_fd, mask);
		return (TRUE);
	}

	/* for other requests which use info */
	if (info == NULL) {
		release_fd_lock(cu->cu_fd, mask);
		return (FALSE);
	}
	switch (request) {
	case CLSET_TIMEOUT:
		if (time_not_ok((struct timeval *)info)) {
			release_fd_lock(cu->cu_fd, mask);
			return (FALSE);
		}
		cu->cu_total = *(struct timeval *)info;
		break;
	case CLGET_TIMEOUT:
		*(struct timeval *)info = cu->cu_total;
		break;
	case CLGET_SERVER_ADDR:		/* Give him the fd address */
		/* Now obsolete. Only for backward compatibility */
		memcpy(info, &cu->cu_raddr, (size_t)cu->cu_rlen);
		break;
	case CLSET_RETRY_TIMEOUT:
		if (time_not_ok((struct timeval *)info)) {
			release_fd_lock(cu->cu_fd, mask);
			return (FALSE);
		}
		cu->cu_wait = *(struct timeval *)info;
		break;
	case CLGET_RETRY_TIMEOUT:
		*(struct timeval *)info = cu->cu_wait;
		break;
	case CLGET_FD:
		*(int *)info = cu->cu_fd;
		break;
	case CLGET_SVC_ADDR:
		addr = (struct netbuf *)info;
		addr->buf = &cu->cu_raddr;
		addr->len = cu->cu_rlen;
		addr->maxlen = sizeof cu->cu_raddr;
		break;
	case CLSET_SVC_ADDR:		/* set to new address */
		addr = (struct netbuf *)info;
		if (addr->len < sizeof cu->cu_raddr) {
			release_fd_lock(cu->cu_fd, mask);
			return (FALSE);
		}
		memcpy(&cu->cu_raddr, addr->buf, addr->len);
		cu->cu_rlen = addr->len;
		break;
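	/*
	 * The XID/VERS/PROG cases below peek and poke the pre-marshalled
	 * call header in cu_outbuf directly.  Sketch of that header layout
	 * (one BYTES_PER_XDR_UNIT word each, all in network byte order):
	 *
	 *	word 0: xid
	 *	word 1: direction (CALL)
	 *	word 2: RPC version (2)
	 *	word 3: program number
	 *	word 4: version number
	 */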
	case CLGET_XID:
		/*
		 * use the knowledge that xid is the first element in the
		 * call structure.
		 * This will get the xid of the PREVIOUS call
		 */
		*(u_int32_t *)info =
		    ntohl(*(u_int32_t *)(void *)cu->cu_outbuf);
		break;

	case CLSET_XID:
		/* This will set the xid of the NEXT call */
		*(u_int32_t *)(void *)cu->cu_outbuf =
		    htonl(*(u_int32_t *)info - 1);
		/* decrement by 1 as clnt_dg_call() increments once */
		break;

	case CLGET_VERS:
		/*
		 * This RELIES on the information that, in the call body,
		 * the version number field is the fifth field from the
		 * beginning of the RPC header. MUST be changed if the
		 * call_struct is changed
		 */
		*(u_int32_t *)info =
		    ntohl(*(u_int32_t *)(void *)(cu->cu_outbuf +
		    4 * BYTES_PER_XDR_UNIT));
		break;

	case CLSET_VERS:
		*(u_int32_t *)(void *)(cu->cu_outbuf + 4 * BYTES_PER_XDR_UNIT)
			= htonl(*(u_int32_t *)info);
		break;

	case CLGET_PROG:
		/*
		 * This RELIES on the information that, in the call body,
		 * the program number field is the fourth field from the
		 * beginning of the RPC header. MUST be changed if the
		 * call_struct is changed
		 */
		*(u_int32_t *)info =
		    ntohl(*(u_int32_t *)(void *)(cu->cu_outbuf +
		    3 * BYTES_PER_XDR_UNIT));
		break;

	case CLSET_PROG:
		*(u_int32_t *)(void *)(cu->cu_outbuf + 3 * BYTES_PER_XDR_UNIT)
			= htonl(*(u_int32_t *)info);
		break;
	case CLSET_ASYNC:
		cu->cu_async = *(int *)info;
		break;
	case CLSET_CONNECT:
		cu->cu_connect = *(int *)info;
		break;
	default:
		release_fd_lock(cu->cu_fd, mask);
		return (FALSE);
	}
	release_fd_lock(cu->cu_fd, mask);
	return (TRUE);
}

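/*
 * Example (illustrative sketch only): shortening the retransmit interval
 * to 2 seconds and reading back the xid of the previous call.
 * example_dg_tune() is hypothetical; as the cases above note, CLGET_XID
 * reports the PREVIOUS call's xid while CLSET_XID primes the NEXT call's.
 */
#if 0
static void
example_dg_tune(CLIENT *cl)
{
	struct timeval retry = { 2, 0 };
	u_int32_t xid;

	clnt_control(cl, CLSET_RETRY_TIMEOUT, (char *)&retry);
	clnt_control(cl, CLGET_XID, (char *)&xid);
}
#endif
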
static void
clnt_dg_destroy(CLIENT *cl)
{
	struct cu_data *cu = (struct cu_data *)cl->cl_private;
	int cu_fd = cu->cu_fd;
	sigset_t mask;
	sigset_t newmask;

	sigfillset(&newmask);
	thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
	mutex_lock(&clnt_fd_lock);
	while (dg_fd_locks[cu_fd])
		cond_wait(&dg_cv[cu_fd], &clnt_fd_lock);
	if (cu->cu_closeit)
		_close(cu_fd);
	if (cu->cu_kq >= 0)
		_close(cu->cu_kq);
	XDR_DESTROY(&(cu->cu_outxdrs));
	mem_free(cu, (sizeof (*cu) + cu->cu_sendsz + cu->cu_recvsz));
	if (cl->cl_netid && cl->cl_netid[0])
		mem_free(cl->cl_netid, strlen(cl->cl_netid) + 1);
	if (cl->cl_tp && cl->cl_tp[0])
		mem_free(cl->cl_tp, strlen(cl->cl_tp) + 1);
	mem_free(cl, sizeof (CLIENT));
	mutex_unlock(&clnt_fd_lock);
	thr_sigsetmask(SIG_SETMASK, &mask, NULL);
	cond_signal(&dg_cv[cu_fd]);
}

static struct clnt_ops *
clnt_dg_ops(void)
{
	static struct clnt_ops ops;
	sigset_t mask;
	sigset_t newmask;

/* VARIABLES PROTECTED BY ops_lock: ops */

	sigfillset(&newmask);
	thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
	mutex_lock(&ops_lock);
	if (ops.cl_call == NULL) {
		ops.cl_call = clnt_dg_call;
		ops.cl_abort = clnt_dg_abort;
		ops.cl_geterr = clnt_dg_geterr;
		ops.cl_freeres = clnt_dg_freeres;
		ops.cl_destroy = clnt_dg_destroy;
		ops.cl_control = clnt_dg_control;
	}
	mutex_unlock(&ops_lock);
	thr_sigsetmask(SIG_SETMASK, &mask, NULL);
	return (&ops);
}

/*
 * Make sure that the time is not garbage.  A value of -1 is allowed.
 */
static bool_t
time_not_ok(struct timeval *t)
{
	return (t->tv_sec < -1 || t->tv_sec > 100000000 ||
		t->tv_usec < -1 || t->tv_usec > 1000000);
}
769