/*	$NetBSD: svc_dg.c,v 1.17 2013/03/11 20:19:29 tron Exp $	*/

/*
 * Copyright (c) 2010, Oracle America, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above
 *       copyright notice, this list of conditions and the following
 *       disclaimer in the documentation and/or other materials
 *       provided with the distribution.
 *     * Neither the name of the "Oracle America, Inc." nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 *   FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 *   COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 *   INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 *   DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
 *   GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 *   WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 *   NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1986-1991 by Sun Microsystems Inc.
 */

/* #ident	"@(#)svc_dg.c	1.17	94/04/24 SMI" */


/*
 * svc_dg.c, Server side for connectionless RPC.
 *
 * Does some caching in the hopes of achieving execute-at-most-once semantics.
 */

#include <sys/cdefs.h>
#if defined(LIBC_SCCS) && !defined(lint)
__RCSID("$NetBSD: svc_dg.c,v 1.17 2013/03/11 20:19:29 tron Exp $");
#endif

#include "namespace.h"
#include "reentrant.h"
#include <sys/types.h>
#include <sys/socket.h>
#include <rpc/rpc.h>
#include <assert.h>
#include <errno.h>
#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#ifdef RPC_CACHE_DEBUG
#include <netconfig.h>
#include <netdir.h>
#endif
#include <err.h>

#include "svc_fdset.h"
#include "rpc_internal.h"
#include "svc_dg.h"

#define	su_data(xprt)	((struct svc_dg_data *)(xprt->xp_p2))
#define	rpc_buffer(xprt) ((xprt)->xp_p1)

#ifdef __weak_alias
__weak_alias(svc_dg_create,_svc_dg_create)
#endif

#ifndef MAX
#define	MAX(a, b)	(((a) > (b)) ? (a) : (b))
#endif

static void svc_dg_ops(SVCXPRT *);
static enum xprt_stat svc_dg_stat(SVCXPRT *);
static bool_t svc_dg_recv(SVCXPRT *, struct rpc_msg *);
static bool_t svc_dg_reply(SVCXPRT *, struct rpc_msg *);
static bool_t svc_dg_getargs(SVCXPRT *, xdrproc_t, caddr_t);
static bool_t svc_dg_freeargs(SVCXPRT *, xdrproc_t, caddr_t);
static void svc_dg_destroy(SVCXPRT *);
static bool_t svc_dg_control(SVCXPRT *, const u_int, void *);
static int cache_get(SVCXPRT *, struct rpc_msg *, char **, size_t *);
static void cache_set(SVCXPRT *, size_t);

/*
 * Usage:
 *	xprt = svc_dg_create(sock, sendsize, recvsize);
 * Does other connectionless-specific initializations.
 * Once *xprt is initialized, it is registered;
 * see (svc.h, xprt_register).  If recvsize or sendsize are 0, suitable
 * system defaults are chosen.
 * The routine returns NULL if a problem occurred.
 */
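/*
 * Example (illustrative only, not part of this file): a typical caller
 * creates its own bound datagram socket, wraps it with svc_dg_create(),
 * and then registers a dispatch function.  The PROGNUM, VERSNUM and
 * dispatch names below are placeholders assumed for the sketch.
 *
 *	#include <sys/socket.h>
 *	#include <rpc/rpc.h>
 *	#include <err.h>
 *
 *	void dispatch(struct svc_req *, SVCXPRT *);
 *
 *	int sock = socket(AF_INET, SOCK_DGRAM, 0);
 *	// ... bind(2) the socket to the desired local address ...
 *	SVCXPRT *xprt = svc_dg_create(sock, 0, 0);  // 0: system defaults
 *	if (xprt == NULL)
 *		errx(1, "svc_dg_create failed");
 *	if (!svc_reg(xprt, PROGNUM, VERSNUM, dispatch, NULL))
 *		errx(1, "svc_reg failed");
 *	svc_run();
 */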
static const char svc_dg_str[] = "svc_dg_create: %s";
static const char svc_dg_err1[] = "could not get transport information";
static const char svc_dg_err2[] = " transport does not support data transfer";
static const char __no_mem_str[] = "out of memory";

SVCXPRT *
svc_dg_create(int fd, u_int sendsize, u_int recvsize)
{
	SVCXPRT *xprt;
	struct svc_dg_data *su = NULL;
	struct __rpc_sockinfo si;
	struct sockaddr_storage ss;
	socklen_t slen;

	if (!__rpc_fd2sockinfo(fd, &si)) {
		warnx(svc_dg_str, svc_dg_err1);
		return (NULL);
	}
	/*
	 * Find the receive and the send size
	 */
	sendsize = __rpc_get_t_size(si.si_af, si.si_proto, (int)sendsize);
	recvsize = __rpc_get_t_size(si.si_af, si.si_proto, (int)recvsize);
	if ((sendsize == 0) || (recvsize == 0)) {
		warnx(svc_dg_str, svc_dg_err2);
		return (NULL);
	}

	xprt = mem_alloc(sizeof (SVCXPRT));
	if (xprt == NULL)
		goto outofmem;
	memset(xprt, 0, sizeof (SVCXPRT));

	su = mem_alloc(sizeof (*su));
	if (su == NULL)
		goto outofmem;
	su->su_iosz = ((MAX(sendsize, recvsize) + 3) / 4) * 4;
	if ((rpc_buffer(xprt) = malloc(su->su_iosz)) == NULL)
		goto outofmem;
	_DIAGASSERT(__type_fit(u_int, su->su_iosz));
	xdrmem_create(&(su->su_xdrs), rpc_buffer(xprt), (u_int)su->su_iosz,
		XDR_DECODE);
	su->su_cache = NULL;
	xprt->xp_fd = fd;
	xprt->xp_p2 = (caddr_t)(void *)su;
	xprt->xp_verf.oa_base = su->su_verfbody;
	svc_dg_ops(xprt);
	xprt->xp_rtaddr.maxlen = sizeof (struct sockaddr_storage);

	slen = sizeof ss;
	if (getsockname(fd, (struct sockaddr *)(void *)&ss, &slen) < 0)
		goto freedata;
	xprt->xp_ltaddr.buf = mem_alloc(sizeof (struct sockaddr_storage));
	xprt->xp_ltaddr.maxlen = sizeof (struct sockaddr_storage);
	xprt->xp_ltaddr.len = slen;
	memcpy(xprt->xp_ltaddr.buf, &ss, slen);

	if (!xprt_register(xprt))
		goto freedata;
	return (xprt);

outofmem:
	(void) warnx(svc_dg_str, __no_mem_str);
freedata:
	if (xprt) {
		/* Release anything allocated before the failure. */
		if (rpc_buffer(xprt))
			(void) free(rpc_buffer(xprt));
		if (xprt->xp_ltaddr.buf)
			(void) mem_free(xprt->xp_ltaddr.buf,
			    sizeof (struct sockaddr_storage));
		if (su)
			(void) mem_free(su, sizeof (*su));
		(void) mem_free(xprt, sizeof (SVCXPRT));
	}
	return (NULL);
}

/*ARGSUSED*/
static enum xprt_stat
svc_dg_stat(SVCXPRT *xprt)
{
	return (XPRT_IDLE);
}

static bool_t
svc_dg_recv(SVCXPRT *xprt, struct rpc_msg *msg)
{
	struct svc_dg_data *su;
	XDR *xdrs;
	char *reply;
	struct sockaddr_storage ss;
	socklen_t alen;
	size_t replylen;
	ssize_t rlen;

	_DIAGASSERT(xprt != NULL);
	_DIAGASSERT(msg != NULL);

	su = su_data(xprt);
	xdrs = &(su->su_xdrs);

again:
	alen = sizeof (struct sockaddr_storage);
	rlen = recvfrom(xprt->xp_fd, rpc_buffer(xprt), su->su_iosz, 0,
	    (struct sockaddr *)(void *)&ss, &alen);
	if (rlen == -1 && errno == EINTR)
		goto again;
	/* Ignore errors and datagrams too short to hold an RPC call header. */
	if (rlen == -1 || (rlen < (ssize_t)(4 * sizeof (u_int32_t))))
		return (FALSE);
	/* Remember the caller's address so the reply can be sent back. */
	if (xprt->xp_rtaddr.len < alen) {
		if (xprt->xp_rtaddr.len != 0)
			mem_free(xprt->xp_rtaddr.buf, xprt->xp_rtaddr.len);
		xprt->xp_rtaddr.buf = mem_alloc(alen);
		xprt->xp_rtaddr.len = alen;
	}
	memcpy(xprt->xp_rtaddr.buf, &ss, alen);
#ifdef PORTMAP
	if (ss.ss_family == AF_INET) {
		xprt->xp_raddr = *(struct sockaddr_in *)xprt->xp_rtaddr.buf;
		xprt->xp_addrlen = sizeof (struct sockaddr_in);
	}
#endif
	xdrs->x_op = XDR_DECODE;
	XDR_SETPOS(xdrs, 0);
	if (! xdr_callmsg(xdrs, msg)) {
		return (FALSE);
	}
	su->su_xid = msg->rm_xid;
	/*
	 * If the duplicate-request cache is enabled and this call was
	 * already answered, retransmit the saved reply and drop the request.
	 */
	if (su->su_cache != NULL) {
		if (cache_get(xprt, msg, &reply, &replylen)) {
			(void)sendto(xprt->xp_fd, reply, replylen, 0,
			    (struct sockaddr *)(void *)&ss, alen);
			return (FALSE);
		}
	}
	return (TRUE);
}

static bool_t
svc_dg_reply(SVCXPRT *xprt, struct rpc_msg *msg)
{
	struct svc_dg_data *su;
	XDR *xdrs;
	bool_t stat = FALSE;
	size_t slen;

	_DIAGASSERT(xprt != NULL);
	_DIAGASSERT(msg != NULL);

	su = su_data(xprt);
	xdrs = &(su->su_xdrs);

	xdrs->x_op = XDR_ENCODE;
	XDR_SETPOS(xdrs, 0);
	msg->rm_xid = su->su_xid;
	if (xdr_replymsg(xdrs, msg)) {
		slen = XDR_GETPOS(xdrs);
		if (sendto(xprt->xp_fd, rpc_buffer(xprt), slen, 0,
		    (struct sockaddr *)xprt->xp_rtaddr.buf,
		    (socklen_t)xprt->xp_rtaddr.len) == (ssize_t) slen) {
			stat = TRUE;
			if (su->su_cache)
				cache_set(xprt, slen);
		}
	}
	return (stat);
}

static bool_t
svc_dg_getargs(SVCXPRT *xprt, xdrproc_t xdr_args, caddr_t args_ptr)
{
	return (*xdr_args)(&(su_data(xprt)->su_xdrs), args_ptr);
}

static bool_t
svc_dg_freeargs(SVCXPRT *xprt, xdrproc_t xdr_args, caddr_t args_ptr)
{
	XDR *xdrs;

	_DIAGASSERT(xprt != NULL);

	xdrs = &(su_data(xprt)->su_xdrs);
	xdrs->x_op = XDR_FREE;
	return (*xdr_args)(xdrs, args_ptr);
}

static void
svc_dg_destroy(SVCXPRT *xprt)
{
	struct svc_dg_data *su;

	_DIAGASSERT(xprt != NULL);

	su = su_data(xprt);

	xprt_unregister(xprt);
	if (xprt->xp_fd != -1)
		(void)close(xprt->xp_fd);
	XDR_DESTROY(&(su->su_xdrs));
	(void) mem_free(rpc_buffer(xprt), su->su_iosz);
	(void) mem_free(su, sizeof (*su));
	if (xprt->xp_rtaddr.buf)
		(void) mem_free(xprt->xp_rtaddr.buf, xprt->xp_rtaddr.maxlen);
	if (xprt->xp_ltaddr.buf)
		(void) mem_free(xprt->xp_ltaddr.buf, xprt->xp_ltaddr.maxlen);
	if (xprt->xp_tp)
		(void) free(xprt->xp_tp);
	(void) mem_free(xprt, sizeof (SVCXPRT));
}

static bool_t
/*ARGSUSED*/
svc_dg_control(SVCXPRT *xprt, const u_int rq, void *in)
{
	return (FALSE);
}

static void
svc_dg_ops(SVCXPRT *xprt)
{
	static struct xp_ops ops;
	static struct xp_ops2 ops2;
#ifdef _REENTRANT
	extern mutex_t ops_lock;
#endif

	_DIAGASSERT(xprt != NULL);

/* VARIABLES PROTECTED BY ops_lock: ops */

	mutex_lock(&ops_lock);
	if (ops.xp_recv == NULL) {
		ops.xp_recv = svc_dg_recv;
		ops.xp_stat = svc_dg_stat;
		ops.xp_getargs = svc_dg_getargs;
		ops.xp_reply = svc_dg_reply;
		ops.xp_freeargs = svc_dg_freeargs;
		ops.xp_destroy = svc_dg_destroy;
		ops2.xp_control = svc_dg_control;
	}
	xprt->xp_ops = &ops;
	xprt->xp_ops2 = &ops2;
	mutex_unlock(&ops_lock);
}

/* The CACHING COMPONENT */

/*
 * Could have been a separate file, but some part of it depends upon the
 * private structure of the transport handle.
 *
 * FIFO cache for the connectionless (datagram) server.
 * Copies pointers to reply buffers into the FIFO cache.
 * Buffers are sent again if retransmissions are detected.
 */
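/*
 * Rough request flow when the cache is enabled (descriptive note):
 * svc_dg_recv() decodes a call and asks cache_get() whether the same
 * xid/proc/vers/prog/address tuple was answered before; on a hit the
 * saved reply is simply retransmitted and the request is dropped.  On
 * a miss the request is dispatched as usual, and svc_dg_reply() hands
 * the freshly encoded reply to cache_set(), which stores it, evicting
 * the oldest entry in FIFO order once the cache is full.
 */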

#define	SPARSENESS 4	/* 75% sparse */

#define	ALLOC(type, size)	\
	mem_alloc((sizeof (type) * (size)))

#define	MEMZERO(addr, type, size)	 \
	(void) memset((void *) (addr), 0, sizeof (type) * (int) (size))

#define	FREE(addr, type, size)	\
	mem_free((addr), (sizeof (type) * (size)))

/*
 * An entry in the cache
 */
typedef struct cache_node *cache_ptr;
struct cache_node {
	/*
	 * Index into cache is xid, proc, vers, prog and address
	 */
	u_int32_t cache_xid;
	rpcproc_t cache_proc;
	rpcvers_t cache_vers;
	rpcprog_t cache_prog;
	struct netbuf cache_addr;
	/*
	 * The cached reply and length
	 */
	char *cache_reply;
	size_t cache_replylen;
	/*
	 * Next node on the list, if there is a collision
	 */
	cache_ptr cache_next;
};

/*
 * The entire cache
 */
struct cl_cache {
	u_int uc_size;		/* size of cache */
	cache_ptr *uc_entries;	/* hash table of entries in cache */
	cache_ptr *uc_fifo;	/* fifo list of entries in cache */
	u_int uc_nextvictim;	/* points to next victim in fifo list */
	rpcprog_t uc_prog;	/* saved program number */
	rpcvers_t uc_vers;	/* saved version number */
	rpcproc_t uc_proc;	/* saved procedure number */
};


/*
 * the hashing function
 */
#define	CACHE_LOC(transp, xid)	\
	(xid % (SPARSENESS * ((struct cl_cache *) \
		su_data(transp)->su_cache)->uc_size))
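/*
 * Worked example (illustrative): after svc_dg_enablecache(xprt, 256)
 * the hash table has SPARSENESS * 256 = 1024 buckets, so an xid of
 * 0x12345678 hashes to bucket 0x12345678 % 1024 = 632.
 */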

#ifdef _REENTRANT
extern mutex_t	dupreq_lock;
#endif

/*
 * Enable use of the cache. Returns 1 on success, 0 on failure.
 * Note: there is no disable.
 */
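/*
 * Illustrative sketch (assumed caller code, not part of libc): a server
 * enables the duplicate-request cache once, right after creating the
 * transport.  The cache size of 64 entries is an arbitrary example.
 *
 *	SVCXPRT *xprt = svc_dg_create(sock, 0, 0);
 *	if (xprt != NULL && !svc_dg_enablecache(xprt, 64))
 *		warnx("duplicate request cache not enabled");
 */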
static const char cache_enable_str[] = "svc_enablecache: %s %s";
static const char alloc_err[] = "could not allocate cache ";
static const char enable_err[] = "cache already enabled";

int
svc_dg_enablecache(SVCXPRT *transp, u_int size)
{
	struct svc_dg_data *su;
	struct cl_cache *uc;

	_DIAGASSERT(transp != NULL);

	su = su_data(transp);

	mutex_lock(&dupreq_lock);
	if (su->su_cache != NULL) {
		(void) warnx(cache_enable_str, enable_err, " ");
		mutex_unlock(&dupreq_lock);
		return (0);
	}
	uc = ALLOC(struct cl_cache, 1);
	if (uc == NULL) {
		warnx(cache_enable_str, alloc_err, " ");
		mutex_unlock(&dupreq_lock);
		return (0);
	}
	uc->uc_size = size;
	uc->uc_nextvictim = 0;
	uc->uc_entries = ALLOC(cache_ptr, size * SPARSENESS);
	if (uc->uc_entries == NULL) {
		warnx(cache_enable_str, alloc_err, "data");
		FREE(uc, struct cl_cache, 1);
		mutex_unlock(&dupreq_lock);
		return (0);
	}
	MEMZERO(uc->uc_entries, cache_ptr, size * SPARSENESS);
	uc->uc_fifo = ALLOC(cache_ptr, size);
	if (uc->uc_fifo == NULL) {
		warnx(cache_enable_str, alloc_err, "fifo");
		FREE(uc->uc_entries, cache_ptr, size * SPARSENESS);
		FREE(uc, struct cl_cache, 1);
		mutex_unlock(&dupreq_lock);
		return (0);
	}
	MEMZERO(uc->uc_fifo, cache_ptr, size);
	su->su_cache = (char *)(void *)uc;
	mutex_unlock(&dupreq_lock);
	return (1);
}

/*
 * Set an entry in the cache.  It assumes that the uc entry is set from
 * the earlier call to cache_get() for the same procedure.  This will always
 * happen because cache_get() is called by svc_dg_recv() and cache_set() is
 * called by svc_dg_reply().  All this hoopla is needed because the right RPC
 * parameters are not available at svc_dg_reply() time.
 */
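/*
 * In other words, every cached request goes through the fixed sequence
 * cache_get() -> dispatch -> cache_set(); cache_get() stashes the
 * program, version and procedure numbers in the cl_cache so that
 * cache_set() can label the saved reply without re-parsing the call.
 */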

static const char cache_set_str[] = "cache_set: %s";
static const char cache_set_err1[] = "victim not found";
static const char cache_set_err2[] = "victim alloc failed";
static const char cache_set_err3[] = "could not allocate new rpc buffer";

static void
cache_set(SVCXPRT *xprt, size_t replylen)
{
	cache_ptr victim;
	cache_ptr *vicp;
	struct svc_dg_data *su;
	struct cl_cache *uc;
	u_int loc;
	char *newbuf;
#ifdef RPC_CACHE_DEBUG
	struct netconfig *nconf;
	char *uaddr;
#endif

	_DIAGASSERT(xprt != NULL);

	su = su_data(xprt);
	uc = (struct cl_cache *) su->su_cache;

	mutex_lock(&dupreq_lock);
	/*
	 * Find space for the new entry, either by
	 * reusing an old entry, or by mallocing a new one
	 */
	victim = uc->uc_fifo[uc->uc_nextvictim];
	if (victim != NULL) {
		loc = CACHE_LOC(xprt, victim->cache_xid);
		for (vicp = &uc->uc_entries[loc];
			*vicp != NULL && *vicp != victim;
			vicp = &(*vicp)->cache_next)
			;
		if (*vicp == NULL) {
			warnx(cache_set_str, cache_set_err1);
			mutex_unlock(&dupreq_lock);
			return;
		}
		*vicp = victim->cache_next;	/* remove from cache */
		newbuf = victim->cache_reply;
	} else {
		victim = ALLOC(struct cache_node, 1);
		if (victim == NULL) {
			warnx(cache_set_str, cache_set_err2);
			mutex_unlock(&dupreq_lock);
			return;
		}
		newbuf = mem_alloc(su->su_iosz);
		if (newbuf == NULL) {
			warnx(cache_set_str, cache_set_err3);
			FREE(victim, struct cache_node, 1);
			mutex_unlock(&dupreq_lock);
			return;
		}
	}

	/*
	 * Store it away
	 */
#ifdef RPC_CACHE_DEBUG
	if (nconf = getnetconfigent(xprt->xp_netid)) {
		uaddr = taddr2uaddr(nconf, &xprt->xp_rtaddr);
		freenetconfigent(nconf);
		printf(
	"cache set for xid= %x prog=%d vers=%d proc=%d for rmtaddr=%s\n",
			su->su_xid, uc->uc_prog, uc->uc_vers,
			uc->uc_proc, uaddr);
		free(uaddr);
	}
#endif
	victim->cache_replylen = replylen;
	victim->cache_reply = rpc_buffer(xprt);
	rpc_buffer(xprt) = newbuf;
	_DIAGASSERT(__type_fit(u_int, su->su_iosz));
	xdrmem_create(&(su->su_xdrs), rpc_buffer(xprt), (u_int)su->su_iosz,
	    XDR_ENCODE);
	victim->cache_xid = su->su_xid;
	victim->cache_proc = uc->uc_proc;
	victim->cache_vers = uc->uc_vers;
	victim->cache_prog = uc->uc_prog;
	victim->cache_addr = xprt->xp_rtaddr;
	victim->cache_addr.buf = ALLOC(char, xprt->xp_rtaddr.len);
	(void) memcpy(victim->cache_addr.buf, xprt->xp_rtaddr.buf,
	    (size_t)xprt->xp_rtaddr.len);
	loc = CACHE_LOC(xprt, victim->cache_xid);
	victim->cache_next = uc->uc_entries[loc];
	uc->uc_entries[loc] = victim;
	uc->uc_fifo[uc->uc_nextvictim++] = victim;
	uc->uc_nextvictim %= uc->uc_size;
	mutex_unlock(&dupreq_lock);
}

/*
 * Try to get an entry from the cache
 * return 1 if found, 0 if not found and set the stage for cache_set()
 */
static int
cache_get(SVCXPRT *xprt, struct rpc_msg *msg, char **replyp, size_t *replylenp)
{
	u_int loc;
	cache_ptr ent;
	struct svc_dg_data *su;
	struct cl_cache *uc;
#ifdef RPC_CACHE_DEBUG
	struct netconfig *nconf;
	char *uaddr;
#endif

	_DIAGASSERT(xprt != NULL);
	_DIAGASSERT(msg != NULL);
	_DIAGASSERT(replyp != NULL);
	_DIAGASSERT(replylenp != NULL);

	su = su_data(xprt);
	uc = (struct cl_cache *) su->su_cache;

	mutex_lock(&dupreq_lock);
	loc = CACHE_LOC(xprt, su->su_xid);
	for (ent = uc->uc_entries[loc]; ent != NULL; ent = ent->cache_next) {
		if (ent->cache_xid == su->su_xid &&
			ent->cache_proc == msg->rm_call.cb_proc &&
			ent->cache_vers == msg->rm_call.cb_vers &&
			ent->cache_prog == msg->rm_call.cb_prog &&
			ent->cache_addr.len == xprt->xp_rtaddr.len &&
			(memcmp(ent->cache_addr.buf, xprt->xp_rtaddr.buf,
				xprt->xp_rtaddr.len) == 0)) {
#ifdef RPC_CACHE_DEBUG
			if (nconf = getnetconfigent(xprt->xp_netid)) {
				uaddr = taddr2uaddr(nconf, &xprt->xp_rtaddr);
				freenetconfigent(nconf);
				printf(
	"cache entry found for xid=%x prog=%d vers=%d proc=%d for rmtaddr=%s\n",
					su->su_xid, msg->rm_call.cb_prog,
					msg->rm_call.cb_vers,
					msg->rm_call.cb_proc, uaddr);
				free(uaddr);
			}
#endif
			*replyp = ent->cache_reply;
			*replylenp = ent->cache_replylen;
			mutex_unlock(&dupreq_lock);
			return (1);
		}
	}
	/*
	 * Failed to find entry
	 * Remember a few things so we can do a set later
	 */
	uc->uc_proc = msg->rm_call.cb_proc;
	uc->uc_vers = msg->rm_call.cb_vers;
	uc->uc_prog = msg->rm_call.cb_prog;
	mutex_unlock(&dupreq_lock);
	return (0);
}
634