xref: /dragonfly/lib/libc/rpc/svc_dg.c (revision 2038fb68)
1 /*
2  * Sun RPC is a product of Sun Microsystems, Inc. and is provided for
3  * unrestricted use provided that this legend is included on all tape
4  * media and as a part of the software program in whole or part.  Users
5  * may copy or modify Sun RPC without charge, but are not authorized
6  * to license or distribute it to anyone else except as part of a product or
7  * program developed by the user.
8  *
9  * SUN RPC IS PROVIDED AS IS WITH NO WARRANTIES OF ANY KIND INCLUDING THE
10  * WARRANTIES OF DESIGN, MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
11  * PURPOSE, OR ARISING FROM A COURSE OF DEALING, USAGE OR TRADE PRACTICE.
12  *
13  * Sun RPC is provided with no support and without any obligation on the
14  * part of Sun Microsystems, Inc. to assist in its use, correction,
15  * modification or enhancement.
16  *
17  * SUN MICROSYSTEMS, INC. SHALL HAVE NO LIABILITY WITH RESPECT TO THE
18  * INFRINGEMENT OF COPYRIGHTS, TRADE SECRETS OR ANY PATENTS BY SUN RPC
19  * OR ANY PART THEREOF.
20  *
21  * In no event will Sun Microsystems, Inc. be liable for any lost revenue
22  * or profits or other special, indirect and consequential damages, even if
23  * Sun has been advised of the possibility of such damages.
24  *
25  * Sun Microsystems, Inc.
26  * 2550 Garcia Avenue
27  * Mountain View, California  94043
28  *
29  * @(#)svc_dg.c	1.17	94/04/24 SMI
30  * $NetBSD: svc_dg.c,v 1.4 2000/07/06 03:10:35 christos Exp $
31  * $FreeBSD: src/lib/libc/rpc/svc_dg.c,v 1.8 2006/02/27 22:10:59 deischen Exp $
32  */
33 
34 /*
35  * Copyright (c) 1986-1991 by Sun Microsystems Inc.
36  */
37 
38 /*
39  * svc_dg.c, Server side for connectionless RPC.
40  *
41  * Does some caching in the hopes of achieving execute-at-most-once semantics.
42  */
43 
44 #include "namespace.h"
45 #include "reentrant.h"
46 #include <sys/types.h>
47 #include <sys/socket.h>
48 #include <rpc/rpc.h>
49 #include <rpc/svc_dg.h>
50 #include <errno.h>
51 #include <unistd.h>
52 #include <stdio.h>
53 #include <stdlib.h>
54 #include <string.h>
55 #ifdef RPC_CACHE_DEBUG
56 #include <netconfig.h>
57 #include <netdir.h>
58 #endif
59 #include <err.h>
60 #include "un-namespace.h"
61 
62 #include "rpc_com.h"
63 #include "mt_misc.h"
64 
/*
 * Per-transport private data and scratch buffer accessors.
 * The macro argument is fully parenthesized so expressions such as
 * su_data(p + 1) expand correctly (the old definition expanded the
 * argument unparenthesized, unlike rpc_buffer below).
 */
#define	su_data(xprt)	((struct svc_dg_data *)((xprt)->xp_p2))
#define	rpc_buffer(xprt) ((xprt)->xp_p1)

#ifndef MAX
#define	MAX(a, b)	(((a) > (b)) ? (a) : (b))
#endif
71 
72 static void svc_dg_ops(SVCXPRT *);
73 static enum xprt_stat svc_dg_stat(SVCXPRT *);
74 static bool_t svc_dg_recv(SVCXPRT *, struct rpc_msg *);
75 static bool_t svc_dg_reply(SVCXPRT *, struct rpc_msg *);
76 static bool_t svc_dg_getargs(SVCXPRT *, xdrproc_t, void *);
77 static bool_t svc_dg_freeargs(SVCXPRT *, xdrproc_t, void *);
78 static void svc_dg_destroy(SVCXPRT *);
79 static bool_t svc_dg_control(SVCXPRT *, const u_int, void *);
80 static int cache_get(SVCXPRT *, struct rpc_msg *, char **, size_t *);
81 static void cache_set(SVCXPRT *, size_t);
82 int svc_dg_enablecache(SVCXPRT *, u_int);
83 
/*
 * Usage:
 *	xprt = svc_dg_create(sock, sendsize, recvsize);
 * Does other connectionless specific initializations.
 * Once *xprt is initialized, it is registered.
 * see (svc.h, xprt_register). If recvsize or sendsize are 0 suitable
 * system defaults are chosen.
 * The routine returns NULL if a problem occurred.
 */
/* Diagnostic format and message strings used by svc_dg_create(). */
static const char svc_dg_str[] = "svc_dg_create: %s";
static const char svc_dg_err1[] = "could not get transport information";
static const char svc_dg_err2[] = " transport does not support data transfer";
static const char __no_mem_str[] = "out of memory";
97 
98 SVCXPRT *
99 svc_dg_create(int fd, u_int sendsize, u_int recvsize)
100 {
101 	SVCXPRT *xprt;
102 	struct svc_dg_data *su = NULL;
103 	struct __rpc_sockinfo si;
104 	struct sockaddr_storage ss;
105 	socklen_t slen;
106 
107 	if (!__rpc_fd2sockinfo(fd, &si)) {
108 		warnx(svc_dg_str, svc_dg_err1);
109 		return (NULL);
110 	}
111 	/*
112 	 * Find the receive and the send size
113 	 */
114 	sendsize = __rpc_get_t_size(si.si_af, si.si_proto, (int)sendsize);
115 	recvsize = __rpc_get_t_size(si.si_af, si.si_proto, (int)recvsize);
116 	if ((sendsize == 0) || (recvsize == 0)) {
117 		warnx(svc_dg_str, svc_dg_err2);
118 		return (NULL);
119 	}
120 
121 	xprt = mem_alloc(sizeof (SVCXPRT));
122 	if (xprt == NULL)
123 		goto freedata;
124 	memset(xprt, 0, sizeof (SVCXPRT));
125 
126 	su = mem_alloc(sizeof (*su));
127 	if (su == NULL)
128 		goto freedata;
129 	su->su_iosz = ((MAX(sendsize, recvsize) + 3) / 4) * 4;
130 	if ((rpc_buffer(xprt) = mem_alloc(su->su_iosz)) == NULL)
131 		goto freedata;
132 	xdrmem_create(&(su->su_xdrs), rpc_buffer(xprt), su->su_iosz,
133 		XDR_DECODE);
134 	su->su_cache = NULL;
135 	xprt->xp_fd = fd;
136 	xprt->xp_p2 = su;
137 	xprt->xp_verf.oa_base = su->su_verfbody;
138 	svc_dg_ops(xprt);
139 	xprt->xp_rtaddr.maxlen = sizeof (struct sockaddr_storage);
140 
141 	slen = sizeof ss;
142 	if (_getsockname(fd, (struct sockaddr *)(void *)&ss, &slen) < 0)
143 		goto freedata;
144 	xprt->xp_ltaddr.buf = mem_alloc(sizeof (struct sockaddr_storage));
145 	xprt->xp_ltaddr.maxlen = sizeof (struct sockaddr_storage);
146 	xprt->xp_ltaddr.len = slen;
147 	memcpy(xprt->xp_ltaddr.buf, &ss, slen);
148 
149 	xprt_register(xprt);
150 	return (xprt);
151 freedata:
152 	warnx(svc_dg_str, __no_mem_str);
153 	if (xprt) {
154 		if (su)
155 			mem_free(su, sizeof (*su));
156 		mem_free(xprt, sizeof (SVCXPRT));
157 	}
158 	return (NULL);
159 }
160 
161 /*ARGSUSED*/
162 static enum xprt_stat
163 svc_dg_stat(SVCXPRT *xprt)
164 {
165 	return (XPRT_IDLE);
166 }
167 
168 static bool_t
169 svc_dg_recv(SVCXPRT *xprt, struct rpc_msg *msg)
170 {
171 	struct svc_dg_data *su = su_data(xprt);
172 	XDR *xdrs = &(su->su_xdrs);
173 	char *reply;
174 	struct sockaddr_storage ss;
175 	socklen_t alen;
176 	size_t replylen;
177 	ssize_t rlen;
178 
179 again:
180 	alen = sizeof (struct sockaddr_storage);
181 	rlen = _recvfrom(xprt->xp_fd, rpc_buffer(xprt), su->su_iosz, 0,
182 	    (struct sockaddr *)(void *)&ss, &alen);
183 	if (rlen == -1 && errno == EINTR)
184 		goto again;
185 	if (rlen == -1 || (rlen < (ssize_t)(4 * sizeof (u_int32_t))))
186 		return (FALSE);
187 	if (xprt->xp_rtaddr.len < alen) {
188 		if (xprt->xp_rtaddr.len != 0)
189 			mem_free(xprt->xp_rtaddr.buf, xprt->xp_rtaddr.len);
190 		xprt->xp_rtaddr.buf = mem_alloc(alen);
191 		xprt->xp_rtaddr.len = alen;
192 	}
193 	memcpy(xprt->xp_rtaddr.buf, &ss, alen);
194 #ifdef PORTMAP
195 	if (ss.ss_family == AF_INET) {
196 		xprt->xp_raddr = *(struct sockaddr_in *)xprt->xp_rtaddr.buf;
197 		xprt->xp_addrlen = sizeof (struct sockaddr_in);
198 	}
199 #endif				/* PORTMAP */
200 	xdrs->x_op = XDR_DECODE;
201 	XDR_SETPOS(xdrs, 0);
202 	if (! xdr_callmsg(xdrs, msg)) {
203 		return (FALSE);
204 	}
205 	su->su_xid = msg->rm_xid;
206 	if (su->su_cache != NULL) {
207 		if (cache_get(xprt, msg, &reply, &replylen)) {
208 			_sendto(xprt->xp_fd, reply, replylen, 0,
209 			    (struct sockaddr *)(void *)&ss, alen);
210 			return (FALSE);
211 		}
212 	}
213 	return (TRUE);
214 }
215 
216 static bool_t
217 svc_dg_reply(SVCXPRT *xprt, struct rpc_msg *msg)
218 {
219 	struct svc_dg_data *su = su_data(xprt);
220 	XDR *xdrs = &(su->su_xdrs);
221 	bool_t stat = FALSE;
222 	size_t slen;
223 
224 	xdrs->x_op = XDR_ENCODE;
225 	XDR_SETPOS(xdrs, 0);
226 	msg->rm_xid = su->su_xid;
227 	if (xdr_replymsg(xdrs, msg)) {
228 		slen = XDR_GETPOS(xdrs);
229 		if (_sendto(xprt->xp_fd, rpc_buffer(xprt), slen, 0,
230 		    (struct sockaddr *)xprt->xp_rtaddr.buf,
231 		    (socklen_t)xprt->xp_rtaddr.len) == (ssize_t) slen) {
232 			stat = TRUE;
233 			if (su->su_cache)
234 				cache_set(xprt, slen);
235 		}
236 	}
237 	return (stat);
238 }
239 
240 static bool_t
241 svc_dg_getargs(SVCXPRT *xprt, xdrproc_t xdr_args, void *args_ptr)
242 {
243 	return (*xdr_args)(&(su_data(xprt)->su_xdrs), args_ptr);
244 }
245 
246 static bool_t
247 svc_dg_freeargs(SVCXPRT *xprt, xdrproc_t xdr_args, void *args_ptr)
248 {
249 	XDR *xdrs = &(su_data(xprt)->su_xdrs);
250 
251 	xdrs->x_op = XDR_FREE;
252 	return (*xdr_args)(xdrs, args_ptr);
253 }
254 
/*
 * Tear down a datagram transport: unhook it from the svc machinery,
 * close the socket, and release everything svc_dg_create() (and
 * svc_dg_recv()) allocated, finishing with the SVCXPRT itself.
 */
static void
svc_dg_destroy(SVCXPRT *xprt)
{
	struct svc_dg_data *su = su_data(xprt);

	xprt_unregister(xprt);
	if (xprt->xp_fd != -1)
		_close(xprt->xp_fd);
	XDR_DESTROY(&(su->su_xdrs));
	/* su->su_iosz sizes the buffer, so free the buffer before su. */
	mem_free(rpc_buffer(xprt), su->su_iosz);
	mem_free(su, sizeof (*su));
	/*
	 * NOTE(review): svc_dg_create() sets xp_rtaddr.maxlen but
	 * svc_dg_recv() reallocates the buffer updating only .len, so
	 * the size passed here may not match the live allocation --
	 * harmless if mem_free() ignores its size argument; confirm.
	 */
	if (xprt->xp_rtaddr.buf)
		mem_free(xprt->xp_rtaddr.buf, xprt->xp_rtaddr.maxlen);
	if (xprt->xp_ltaddr.buf)
		mem_free(xprt->xp_ltaddr.buf, xprt->xp_ltaddr.maxlen);
	if (xprt->xp_tp)
		free(xprt->xp_tp);
	mem_free(xprt, sizeof (SVCXPRT));
}
274 
275 static bool_t
276 /*ARGSUSED*/
277 svc_dg_control(SVCXPRT *xprt, const u_int rq, void *in)
278 {
279 	return (FALSE);
280 }
281 
282 static void
283 svc_dg_ops(SVCXPRT *xprt)
284 {
285 	static struct xp_ops ops;
286 	static struct xp_ops2 ops2;
287 
288 /* VARIABLES PROTECTED BY ops_lock: ops */
289 
290 	mutex_lock(&ops_lock);
291 	if (ops.xp_recv == NULL) {
292 		ops.xp_recv = svc_dg_recv;
293 		ops.xp_stat = svc_dg_stat;
294 		ops.xp_getargs = svc_dg_getargs;
295 		ops.xp_reply = svc_dg_reply;
296 		ops.xp_freeargs = svc_dg_freeargs;
297 		ops.xp_destroy = svc_dg_destroy;
298 		ops2.xp_control = svc_dg_control;
299 	}
300 	xprt->xp_ops = &ops;
301 	xprt->xp_ops2 = &ops2;
302 	mutex_unlock(&ops_lock);
303 }
304 
/*  The CACHING COMPONENT */

/*
 * Could have been a separate file, but some part of it depends upon the
 * private structure of the client handle.
 *
 * Fifo cache for cl server
 * Copies pointers to reply buffers into fifo cache
 * Buffers are sent again if retransmissions are detected.
 */

/* Hash-table load factor: the table has SPARSENESS * uc_size buckets. */
#define	SPARSENESS 4	/* 75% sparse */

/*
 * Typed allocation helpers over mem_alloc()/mem_free().
 * NOTE(review): sizeof (type) * (size) is not overflow-checked; fine
 * for the small cache sizes used here, but confirm for huge sizes.
 */
#define	ALLOC(type, size)	\
	(type *) mem_alloc((sizeof (type) * (size)))

#define	MEMZERO(addr, type, size)	 \
	memset((void *) (addr), 0, sizeof (type) * (int) (size))

#define	FREE(addr, type, size)	\
	mem_free((addr), (sizeof (type) * (size)))

/*
 * An entry in the cache
 */
typedef struct cache_node *cache_ptr;
struct cache_node {
	/*
	 * Index into cache is xid, proc, vers, prog and address
	 */
	u_int32_t cache_xid;
	rpcproc_t cache_proc;
	rpcvers_t cache_vers;
	rpcprog_t cache_prog;
	struct netbuf cache_addr;
	/*
	 * The cached reply and length
	 */
	char *cache_reply;
	size_t cache_replylen;
	/*
	 * Next node on the list, if there is a collision
	 */
	cache_ptr cache_next;
};

/*
 * The entire cache
 */
struct cl_cache {
	u_int uc_size;		/* size of cache */
	cache_ptr *uc_entries;	/* hash table of entries in cache */
	cache_ptr *uc_fifo;	/* fifo list of entries in cache */
	u_int uc_nextvictim;	/* points to next victim in fifo list */
	rpcprog_t uc_prog;	/* saved program number */
	rpcvers_t uc_vers;	/* saved version number */
	rpcproc_t uc_proc;	/* saved procedure number */
};


/*
 * the hashing function
 * NOTE(review): divides by uc_size; svc_dg_enablecache() does not
 * reject size == 0, so confirm callers always pass a nonzero size.
 */
#define	CACHE_LOC(transp, xid)	\
	(xid % (SPARSENESS * ((struct cl_cache *) \
		su_data(transp)->su_cache)->uc_size))
371 
/*
 * Enable use of the cache. Returns 1 on success, 0 on failure.
 * Note: there is no disable.
 */
/* Diagnostic strings for svc_dg_enablecache(). */
static const char cache_enable_str[] = "svc_enablecache: %s %s";
static const char alloc_err[] = "could not allocate cache ";
static const char enable_err[] = "cache already enabled";
379 
380 int
381 svc_dg_enablecache(SVCXPRT *transp, u_int size)
382 {
383 	struct svc_dg_data *su = su_data(transp);
384 	struct cl_cache *uc;
385 
386 	mutex_lock(&dupreq_lock);
387 	if (su->su_cache != NULL) {
388 		warnx(cache_enable_str, enable_err, " ");
389 		mutex_unlock(&dupreq_lock);
390 		return (0);
391 	}
392 	uc = ALLOC(struct cl_cache, 1);
393 	if (uc == NULL) {
394 		warnx(cache_enable_str, alloc_err, " ");
395 		mutex_unlock(&dupreq_lock);
396 		return (0);
397 	}
398 	uc->uc_size = size;
399 	uc->uc_nextvictim = 0;
400 	uc->uc_entries = ALLOC(cache_ptr, size * SPARSENESS);
401 	if (uc->uc_entries == NULL) {
402 		warnx(cache_enable_str, alloc_err, "data");
403 		FREE(uc, struct cl_cache, 1);
404 		mutex_unlock(&dupreq_lock);
405 		return (0);
406 	}
407 	MEMZERO(uc->uc_entries, cache_ptr, size * SPARSENESS);
408 	uc->uc_fifo = ALLOC(cache_ptr, size);
409 	if (uc->uc_fifo == NULL) {
410 		warnx(cache_enable_str, alloc_err, "fifo");
411 		FREE(uc->uc_entries, cache_ptr, size * SPARSENESS);
412 		FREE(uc, struct cl_cache, 1);
413 		mutex_unlock(&dupreq_lock);
414 		return (0);
415 	}
416 	MEMZERO(uc->uc_fifo, cache_ptr, size);
417 	su->su_cache = (char *)(void *)uc;
418 	mutex_unlock(&dupreq_lock);
419 	return (1);
420 }
421 
/*
 * Set an entry in the cache.  It assumes that the uc entry is set from
 * the earlier call to cache_get() for the same procedure.  This will
 * always happen because cache_get() is called by svc_dg_recv and
 * cache_set() is called by svc_dg_reply().  All this hoopla because the
 * right RPC parameters are not available at svc_dg_reply time.
 */

/* Diagnostic strings for cache_set(). */
static const char cache_set_str[] = "cache_set: %s";
static const char cache_set_err1[] = "victim not found";
static const char cache_set_err2[] = "victim alloc failed";
static const char cache_set_err3[] = "could not allocate new rpc buffer";
434 
435 static void
436 cache_set(SVCXPRT *xprt, size_t replylen)
437 {
438 	cache_ptr victim;
439 	cache_ptr *vicp;
440 	struct svc_dg_data *su = su_data(xprt);
441 	struct cl_cache *uc = (struct cl_cache *) su->su_cache;
442 	u_int loc;
443 	char *newbuf;
444 #ifdef RPC_CACHE_DEBUG
445 	struct netconfig *nconf;
446 	char *uaddr;
447 #endif
448 
449 	mutex_lock(&dupreq_lock);
450 	/*
451 	 * Find space for the new entry, either by
452 	 * reusing an old entry, or by mallocing a new one
453 	 */
454 	victim = uc->uc_fifo[uc->uc_nextvictim];
455 	if (victim != NULL) {
456 		loc = CACHE_LOC(xprt, victim->cache_xid);
457 		for (vicp = &uc->uc_entries[loc];
458 			*vicp != NULL && *vicp != victim;
459 			vicp = &(*vicp)->cache_next)
460 			;
461 		if (*vicp == NULL) {
462 			warnx(cache_set_str, cache_set_err1);
463 			mutex_unlock(&dupreq_lock);
464 			return;
465 		}
466 		*vicp = victim->cache_next;	/* remove from cache */
467 		newbuf = victim->cache_reply;
468 	} else {
469 		victim = ALLOC(struct cache_node, 1);
470 		if (victim == NULL) {
471 			warnx(cache_set_str, cache_set_err2);
472 			mutex_unlock(&dupreq_lock);
473 			return;
474 		}
475 		newbuf = mem_alloc(su->su_iosz);
476 		if (newbuf == NULL) {
477 			warnx(cache_set_str, cache_set_err3);
478 			FREE(victim, struct cache_node, 1);
479 			mutex_unlock(&dupreq_lock);
480 			return;
481 		}
482 	}
483 
484 	/*
485 	 * Store it away
486 	 */
487 #ifdef RPC_CACHE_DEBUG
488 	if (nconf = getnetconfigent(xprt->xp_netid)) {
489 		uaddr = taddr2uaddr(nconf, &xprt->xp_rtaddr);
490 		freenetconfigent(nconf);
491 		printf(
492 	"cache set for xid= %x prog=%d vers=%d proc=%d for rmtaddr=%s\n",
493 			su->su_xid, uc->uc_prog, uc->uc_vers,
494 			uc->uc_proc, uaddr);
495 		free(uaddr);
496 	}
497 #endif
498 	victim->cache_replylen = replylen;
499 	victim->cache_reply = rpc_buffer(xprt);
500 	rpc_buffer(xprt) = newbuf;
501 	xdrmem_create(&(su->su_xdrs), rpc_buffer(xprt),
502 			su->su_iosz, XDR_ENCODE);
503 	victim->cache_xid = su->su_xid;
504 	victim->cache_proc = uc->uc_proc;
505 	victim->cache_vers = uc->uc_vers;
506 	victim->cache_prog = uc->uc_prog;
507 	victim->cache_addr = xprt->xp_rtaddr;
508 	victim->cache_addr.buf = ALLOC(char, xprt->xp_rtaddr.len);
509 	memcpy(victim->cache_addr.buf, xprt->xp_rtaddr.buf,
510 	    (size_t)xprt->xp_rtaddr.len);
511 	loc = CACHE_LOC(xprt, victim->cache_xid);
512 	victim->cache_next = uc->uc_entries[loc];
513 	uc->uc_entries[loc] = victim;
514 	uc->uc_fifo[uc->uc_nextvictim++] = victim;
515 	uc->uc_nextvictim %= uc->uc_size;
516 	mutex_unlock(&dupreq_lock);
517 }
518 
/*
 * Try to get an entry from the cache
 * return 1 if found, 0 if not found and set the stage for cache_set()
 */
static int
cache_get(SVCXPRT *xprt, struct rpc_msg *msg, char **replyp, size_t *replylenp)
{
	u_int loc;
	cache_ptr ent;
	struct svc_dg_data *su = su_data(xprt);
	struct cl_cache *uc = (struct cl_cache *) su->su_cache;
#ifdef RPC_CACHE_DEBUG
	struct netconfig *nconf;
	char *uaddr;
#endif

	mutex_lock(&dupreq_lock);
	loc = CACHE_LOC(xprt, su->su_xid);
	/*
	 * Walk the hash chain for this xid looking for an exact match
	 * on xid, procedure, version, program, and caller address.
	 */
	for (ent = uc->uc_entries[loc]; ent != NULL; ent = ent->cache_next) {
		if (ent->cache_xid == su->su_xid &&
			ent->cache_proc == msg->rm_call.cb_proc &&
			ent->cache_vers == msg->rm_call.cb_vers &&
			ent->cache_prog == msg->rm_call.cb_prog &&
			ent->cache_addr.len == xprt->xp_rtaddr.len &&
			(memcmp(ent->cache_addr.buf, xprt->xp_rtaddr.buf,
				xprt->xp_rtaddr.len) == 0)) {
#ifdef RPC_CACHE_DEBUG
			if (nconf = getnetconfigent(xprt->xp_netid)) {
				uaddr = taddr2uaddr(nconf, &xprt->xp_rtaddr);
				freenetconfigent(nconf);
				printf(
	"cache entry found for xid=%x prog=%d vers=%d proc=%d for rmtaddr=%s\n",
					su->su_xid, msg->rm_call.cb_prog,
					msg->rm_call.cb_vers,
					msg->rm_call.cb_proc, uaddr);
				free(uaddr);
			}
#endif
			/*
			 * Duplicate request: hand back a pointer to the
			 * previously sent reply (still owned by the
			 * cache -- the caller must not free it).
			 */
			*replyp = ent->cache_reply;
			*replylenp = ent->cache_replylen;
			mutex_unlock(&dupreq_lock);
			return (1);
		}
	}
	/*
	 * Failed to find entry
	 * Remember a few things so we can do a set later
	 */
	uc->uc_proc = msg->rm_call.cb_proc;
	uc->uc_vers = msg->rm_call.cb_vers;
	uc->uc_prog = msg->rm_call.cb_prog;
	mutex_unlock(&dupreq_lock);
	return (0);
}
573