/*
 * Copyright (c) 2009, Sun Microsystems, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 * - Redistributions of source code must retain the above copyright notice,
 *   this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright notice,
 *   this list of conditions and the following disclaimer in the documentation
 *   and/or other materials provided with the distribution.
 * - Neither the name of Sun Microsystems, Inc. nor the names of its
 *   contributors may be used to endorse or promote products derived
 *   from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1986-1991 by Sun Microsystems Inc.
 */

//#include <sys/cdefs.h>

/*
 * svc_dg.c, Server side for connectionless RPC.
 *
 * Does some caching in the hopes of achieving execute-at-most-once semantics.
 */
#include <wintirpc.h>
//#include <pthread.h>
#include <reentrant.h>
#include <sys/types.h>
//#include <sys/socket.h>
#include <rpc/rpc.h>
#include <rpc/svc_dg.h>
#include <errno.h>
//#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#ifdef RPC_CACHE_DEBUG
#include <netconfig.h>
#include <netdir.h>
#endif
//#include <err.h>

#include "rpc_com.h"

#define	su_data(xprt)	((struct svc_dg_data *)(xprt->xp_p2))
#define	rpc_buffer(xprt) ((xprt)->xp_p1)

#ifndef MAX
#define	MAX(a, b)	(((a) > (b)) ? (a) : (b))
#endif

static void svc_dg_ops(SVCXPRT *);
static enum xprt_stat svc_dg_stat(SVCXPRT *);
static bool_t svc_dg_recv(SVCXPRT *, struct rpc_msg *);
static bool_t svc_dg_reply(SVCXPRT *, struct rpc_msg *);
static bool_t svc_dg_getargs(SVCXPRT *, xdrproc_t, void *);
static bool_t svc_dg_freeargs(SVCXPRT *, xdrproc_t, void *);
static void svc_dg_destroy(SVCXPRT *);
static bool_t svc_dg_control(SVCXPRT *, const u_int, void *);
static int cache_get(SVCXPRT *, struct rpc_msg *, char **, size_t *);
static void cache_set(SVCXPRT *, size_t);
int svc_dg_enablecache(SVCXPRT *, u_int);
/*
 * Usage:
 *	xprt = svc_dg_create(sock, sendsize, recvsize);
 * Does other connectionless-specific initializations.
 * Once *xprt is initialized, it is registered;
 * see (svc.h, xprt_register).  If recvsize or sendsize are 0, suitable
 * system defaults are chosen.
 * The routine returns NULL if a problem occurred.
 */
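/*
 * Illustrative usage sketch (not taken from this file): the socket `sock'
 * (already bound to the service address), the PROGNUM/VERSNUM constants,
 * and the `dispatch' routine are assumptions supplied by the caller.
 *
 *	SVCXPRT *xprt;
 *
 *	xprt = svc_dg_create(sock, 0, 0);	// 0, 0: use system defaults
 *	if (xprt == NULL)
 *		return (1);			// creation failed
 *	// NULL netconfig: register the dispatcher without touching rpcbind
 *	if (!svc_reg(xprt, PROGNUM, VERSNUM, dispatch, NULL)) {
 *		SVC_DESTROY(xprt);
 *		return (1);
 *	}
 *	svc_run();				// normally does not return
 */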
static const char svc_dg_str[] = "svc_dg_create: %s";
static const char svc_dg_err1[] = "could not get transport information";
static const char svc_dg_err2[] = " transport does not support data transfer";
static const char __no_mem_str[] = "out of memory";

SVCXPRT *
svc_dg_create(fd, sendsize, recvsize)
	int fd;
	u_int sendsize;
	u_int recvsize;
{
	SVCXPRT *xprt;
	struct svc_dg_data *su = NULL;
	struct __rpc_sockinfo si;
	struct sockaddr_storage ss;
	socklen_t slen;

	if (!__rpc_fd2sockinfo(fd, &si)) {
		// XXX warnx(svc_dg_str, svc_dg_err1);
		return (NULL);
	}
	/*
	 * Find the receive and the send size
	 */
	sendsize = __rpc_get_t_size(si.si_af, si.si_proto, (int)sendsize);
	recvsize = __rpc_get_t_size(si.si_af, si.si_proto, (int)recvsize);
	if ((sendsize == 0) || (recvsize == 0)) {
		// XXX warnx(svc_dg_str, svc_dg_err2);
		return (NULL);
	}

	xprt = mem_alloc(sizeof (SVCXPRT));
	if (xprt == NULL)
		goto freedata;
	memset(xprt, 0, sizeof (SVCXPRT));

	su = mem_alloc(sizeof (*su));
	if (su == NULL)
		goto freedata;
	su->su_iosz = ((MAX(sendsize, recvsize) + 3) / 4) * 4;
	if ((rpc_buffer(xprt) = mem_alloc(su->su_iosz)) == NULL)
		goto freedata;
	xdrmem_create(&(su->su_xdrs), rpc_buffer(xprt), su->su_iosz,
		XDR_DECODE);
	su->su_cache = NULL;
	xprt->xp_fd = fd;
	xprt->xp_p2 = su;
	xprt->xp_verf.oa_base = su->su_verfbody;
	svc_dg_ops(xprt);
	xprt->xp_rtaddr.maxlen = sizeof (struct sockaddr_storage);

	slen = sizeof ss;
	if (getsockname(fd, (struct sockaddr *)(void *)&ss, &slen) == SOCKET_ERROR)
		goto freedata;
	__rpc_set_netbuf(&xprt->xp_ltaddr, &ss, slen);

	xprt_register(xprt);
	return (xprt);
freedata:
	// XXX (void) warnx(svc_dg_str, __no_mem_str);
	if (xprt) {
		if (su) {
			/* also release the rpc buffer if it was already allocated */
			if (rpc_buffer(xprt))
				(void) mem_free(rpc_buffer(xprt), su->su_iosz);
			(void) mem_free(su, sizeof (*su));
		}
		(void) mem_free(xprt, sizeof (SVCXPRT));
	}
	return (NULL);
}

/*ARGSUSED*/
static enum xprt_stat
svc_dg_stat(xprt)
	SVCXPRT *xprt;
{
	return (XPRT_IDLE);
}

static bool_t
svc_dg_recv(xprt, msg)
	SVCXPRT *xprt;
	struct rpc_msg *msg;
{
	struct svc_dg_data *su = su_data(xprt);
	XDR *xdrs = &(su->su_xdrs);
	char *reply;
	struct sockaddr_storage ss;
	socklen_t alen;
	size_t replylen;
	ssize_t rlen;

again:
	alen = sizeof (struct sockaddr_storage);
	rlen = recvfrom(xprt->xp_fd, rpc_buffer(xprt), su->su_iosz, 0,
	    (struct sockaddr *)(void *)&ss, &alen);
	if (rlen == -1 && errno == EINTR)
		goto again;
	if (rlen == -1 || (rlen < (ssize_t)(4 * sizeof (u_int32_t))))
		return (FALSE);
	__rpc_set_netbuf(&xprt->xp_rtaddr, &ss, alen);

	__xprt_set_raddr(xprt, &ss);
	xdrs->x_op = XDR_DECODE;
	XDR_SETPOS(xdrs, 0);
	if (! xdr_callmsg(xdrs, msg)) {
		return (FALSE);
	}
	su->su_xid = msg->rm_xid;
	if (su->su_cache != NULL) {
		if (cache_get(xprt, msg, &reply, &replylen)) {
			(void)sendto(xprt->xp_fd, reply, replylen, 0,
			    (struct sockaddr *)(void *)&ss, alen);
			return (FALSE);
		}
	}
	return (TRUE);
}

static bool_t
svc_dg_reply(xprt, msg)
	SVCXPRT *xprt;
	struct rpc_msg *msg;
{
	struct svc_dg_data *su = su_data(xprt);
	XDR *xdrs = &(su->su_xdrs);
	bool_t stat = FALSE;
	size_t slen;

	xdrs->x_op = XDR_ENCODE;
	XDR_SETPOS(xdrs, 0);
	msg->rm_xid = su->su_xid;
	if (xdr_replymsg(xdrs, msg)) {
		slen = XDR_GETPOS(xdrs);
		if (sendto(xprt->xp_fd, rpc_buffer(xprt), slen, 0,
		    (struct sockaddr *)xprt->xp_rtaddr.buf,
		    (socklen_t)xprt->xp_rtaddr.len) == (ssize_t) slen) {
			stat = TRUE;
			if (su->su_cache)
				cache_set(xprt, slen);
		}
	}
	return (stat);
}

static bool_t
svc_dg_getargs(xprt, xdr_args, args_ptr)
	SVCXPRT *xprt;
	xdrproc_t xdr_args;
	void *args_ptr;
{
	return (*xdr_args)(&(su_data(xprt)->su_xdrs), args_ptr);
}

static bool_t
svc_dg_freeargs(xprt, xdr_args, args_ptr)
	SVCXPRT *xprt;
	xdrproc_t xdr_args;
	void *args_ptr;
{
	XDR *xdrs = &(su_data(xprt)->su_xdrs);

	xdrs->x_op = XDR_FREE;
	return (*xdr_args)(xdrs, args_ptr);
}

static void
svc_dg_destroy(xprt)
	SVCXPRT *xprt;
{
	struct svc_dg_data *su = su_data(xprt);

	xprt_unregister(xprt);
	if (xprt->xp_fd != -1)
		(void)closesocket(xprt->xp_fd);
	XDR_DESTROY(&(su->su_xdrs));
	(void) mem_free(rpc_buffer(xprt), su->su_iosz);
	(void) mem_free(su, sizeof (*su));
	if (xprt->xp_rtaddr.buf)
		(void) mem_free(xprt->xp_rtaddr.buf, xprt->xp_rtaddr.maxlen);
	if (xprt->xp_ltaddr.buf)
		(void) mem_free(xprt->xp_ltaddr.buf, xprt->xp_ltaddr.maxlen);
	if (xprt->xp_tp)
		(void) free(xprt->xp_tp);
	(void) mem_free(xprt, sizeof (SVCXPRT));
}

static bool_t
/*ARGSUSED*/
svc_dg_control(xprt, rq, in)
	SVCXPRT *xprt;
	const u_int rq;
	void *in;
{
	return (FALSE);
}

static void
svc_dg_ops(xprt)
	SVCXPRT *xprt;
{
	static struct xp_ops ops;
	static struct xp_ops2 ops2;
	extern mutex_t ops_lock;

/* VARIABLES PROTECTED BY ops_lock: ops */

	mutex_lock(&ops_lock);
	if (ops.xp_recv == NULL) {
		ops.xp_recv = svc_dg_recv;
		ops.xp_stat = svc_dg_stat;
		ops.xp_getargs = svc_dg_getargs;
		ops.xp_reply = svc_dg_reply;
		ops.xp_freeargs = svc_dg_freeargs;
		ops.xp_destroy = svc_dg_destroy;
		ops2.xp_control = svc_dg_control;
	}
	xprt->xp_ops = &ops;
	xprt->xp_ops2 = &ops2;
	mutex_unlock(&ops_lock);
}

/* The CACHING COMPONENT */

/*
 * Could have been a separate file, but some of it depends upon the
 * private structure of the transport handle.
 *
 * FIFO cache for the connectionless (datagram) server.
 * Pointers to reply buffers are copied into the FIFO cache, and the
 * buffers are sent again if retransmissions are detected.
 */
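/*
 * In outline, the pieces below cooperate as follows: svc_dg_recv() looks
 * the incoming call up with cache_get(); on a hit the cached reply is
 * resent at once and the request is not dispatched again, while on a miss
 * the call parameters are remembered so that svc_dg_reply() can file the
 * outgoing reply buffer with cache_set().
 */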

#define	SPARSENESS 4	/* 75% sparse */

#define	ALLOC(type, size)	\
	(type *) mem_alloc((sizeof (type) * (size)))

#define	MEMZERO(addr, type, size)	 \
	(void) memset((void *) (addr), 0, sizeof (type) * (int) (size))

#define	FREE(addr, type, size)	\
	mem_free((addr), (sizeof (type) * (size)))

/*
 * An entry in the cache
 */
typedef struct cache_node *cache_ptr;
struct cache_node {
	/*
	 * Index into cache is xid, proc, vers, prog and address
	 */
	u_int32_t cache_xid;
	rpcproc_t cache_proc;
	rpcvers_t cache_vers;
	rpcprog_t cache_prog;
	struct netbuf cache_addr;
	/*
	 * The cached reply and length
	 */
	char *cache_reply;
	size_t cache_replylen;
	/*
	 * Next node on the list, if there is a collision
	 */
	cache_ptr cache_next;
};

/*
 * The entire cache
 */
struct cl_cache {
	u_int uc_size;		/* size of cache */
	cache_ptr *uc_entries;	/* hash table of entries in cache */
	cache_ptr *uc_fifo;	/* fifo list of entries in cache */
	u_int uc_nextvictim;	/* points to next victim in fifo list */
	rpcprog_t uc_prog;	/* saved program number */
	rpcvers_t uc_vers;	/* saved version number */
	rpcproc_t uc_proc;	/* saved procedure number */
};


/*
 * the hashing function
 */
#define	CACHE_LOC(transp, xid)	\
	(xid % (SPARSENESS * ((struct cl_cache *) \
	    su_data(transp)->su_cache)->uc_size))
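/*
 * Worked example (the cache size 64 here is purely illustrative): a cache
 * enabled with svc_dg_enablecache(xprt, 64) has 64 * SPARSENESS == 256
 * hash chains, so a call with xid 0x12345 lands in chain
 * 0x12345 % 256 == 0x45.
 */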

extern mutex_t dupreq_lock;

/*
 * Enable use of the cache. Returns 1 on success, 0 on failure.
 * Note: there is no disable.
 */
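/*
 * Minimal usage sketch (the transport `xprt' and the size 64 are
 * illustrative assumptions): enable the duplicate-request cache right
 * after creating the transport, before requests start arriving.
 *
 *	if (!svc_dg_enablecache(xprt, 64))
 *		fprintf(stderr, "svc_dg_enablecache failed\n");
 *	// on failure the transport still works, just without replay caching
 */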
static const char cache_enable_str[] = "svc_enablecache: %s %s";
static const char alloc_err[] = "could not allocate cache ";
static const char enable_err[] = "cache already enabled";

int
svc_dg_enablecache(transp, size)
	SVCXPRT *transp;
	u_int size;
{
	struct svc_dg_data *su = su_data(transp);
	struct cl_cache *uc;

	mutex_lock(&dupreq_lock);
	if (su->su_cache != NULL) {
		// XXX (void) warnx(cache_enable_str, enable_err, " ");
		mutex_unlock(&dupreq_lock);
		return (0);
	}
	uc = ALLOC(struct cl_cache, 1);
	if (uc == NULL) {
		// XXX warnx(cache_enable_str, alloc_err, " ");
		mutex_unlock(&dupreq_lock);
		return (0);
	}
	uc->uc_size = size;
	uc->uc_nextvictim = 0;
	uc->uc_entries = ALLOC(cache_ptr, size * SPARSENESS);
	if (uc->uc_entries == NULL) {
		// XXX warnx(cache_enable_str, alloc_err, "data");
		FREE(uc, struct cl_cache, 1);
		mutex_unlock(&dupreq_lock);
		return (0);
	}
	MEMZERO(uc->uc_entries, cache_ptr, size * SPARSENESS);
	uc->uc_fifo = ALLOC(cache_ptr, size);
	if (uc->uc_fifo == NULL) {
		// XXX warnx(cache_enable_str, alloc_err, "fifo");
		FREE(uc->uc_entries, cache_ptr, size * SPARSENESS);
		FREE(uc, struct cl_cache, 1);
		mutex_unlock(&dupreq_lock);
		return (0);
	}
	MEMZERO(uc->uc_fifo, cache_ptr, size);
	su->su_cache = (char *)(void *)uc;
	mutex_unlock(&dupreq_lock);
	return (1);
}

/*
 * Set an entry in the cache.  It assumes that the uc entry was filled in
 * by the earlier call to cache_get() for the same procedure.  This will
 * always happen because cache_get() is called by svc_dg_recv() and
 * cache_set() is called by svc_dg_reply().  All this hoopla is needed
 * because the right RPC parameters are not available at svc_dg_reply()
 * time.
 */

static const char cache_set_str[] = "cache_set: %s";
static const char cache_set_err1[] = "victim not found";
static const char cache_set_err2[] = "victim alloc failed";
static const char cache_set_err3[] = "could not allocate new rpc buffer";

static void
cache_set(xprt, replylen)
	SVCXPRT *xprt;
	size_t replylen;
{
	cache_ptr victim;
	cache_ptr *vicp;
	struct svc_dg_data *su = su_data(xprt);
	struct cl_cache *uc = (struct cl_cache *) su->su_cache;
	u_int loc;
	char *newbuf;
#ifdef RPC_CACHE_DEBUG
	struct netconfig *nconf;
	char *uaddr;
#endif

	mutex_lock(&dupreq_lock);
	/*
	 * Find space for the new entry, either by
	 * reusing an old entry, or by mallocing a new one
	 */
	victim = uc->uc_fifo[uc->uc_nextvictim];
	if (victim != NULL) {
		loc = CACHE_LOC(xprt, victim->cache_xid);
		for (vicp = &uc->uc_entries[loc];
		    *vicp != NULL && *vicp != victim;
		    vicp = &(*vicp)->cache_next)
			;
		if (*vicp == NULL) {
			// XXX warnx(cache_set_str, cache_set_err1);
			mutex_unlock(&dupreq_lock);
			return;
		}
		*vicp = victim->cache_next;	/* remove from cache */
		newbuf = victim->cache_reply;
	} else {
		victim = ALLOC(struct cache_node, 1);
		if (victim == NULL) {
			// XXX warnx(cache_set_str, cache_set_err2);
			mutex_unlock(&dupreq_lock);
			return;
		}
		newbuf = mem_alloc(su->su_iosz);
		if (newbuf == NULL) {
			// XXX warnx(cache_set_str, cache_set_err3);
			FREE(victim, struct cache_node, 1);
			mutex_unlock(&dupreq_lock);
			return;
		}
	}

	/*
	 * Store it away
	 */
#ifdef RPC_CACHE_DEBUG
	if (nconf = getnetconfigent(xprt->xp_netid)) {
		uaddr = taddr2uaddr(nconf, &xprt->xp_rtaddr);
		freenetconfigent(nconf);
		printf(
	"cache set for xid= %x prog=%d vers=%d proc=%d for rmtaddr=%s\n",
			su->su_xid, uc->uc_prog, uc->uc_vers,
			uc->uc_proc, uaddr);
		free(uaddr);
	}
#endif
	victim->cache_replylen = replylen;
	victim->cache_reply = rpc_buffer(xprt);
	rpc_buffer(xprt) = newbuf;
	xdrmem_create(&(su->su_xdrs), rpc_buffer(xprt),
	    su->su_iosz, XDR_ENCODE);
	victim->cache_xid = su->su_xid;
	victim->cache_proc = uc->uc_proc;
	victim->cache_vers = uc->uc_vers;
	victim->cache_prog = uc->uc_prog;
	victim->cache_addr = xprt->xp_rtaddr;
	victim->cache_addr.buf = ALLOC(char, xprt->xp_rtaddr.len);
	(void) memcpy(victim->cache_addr.buf, xprt->xp_rtaddr.buf,
	    (size_t)xprt->xp_rtaddr.len);
	loc = CACHE_LOC(xprt, victim->cache_xid);
	victim->cache_next = uc->uc_entries[loc];
	uc->uc_entries[loc] = victim;
	uc->uc_fifo[uc->uc_nextvictim++] = victim;
	uc->uc_nextvictim %= uc->uc_size;
	mutex_unlock(&dupreq_lock);
}

/*
 * Try to get an entry from the cache
 * return 1 if found, 0 if not found and set the stage for cache_set()
 */
static int
cache_get(xprt, msg, replyp, replylenp)
	SVCXPRT *xprt;
	struct rpc_msg *msg;
	char **replyp;
	size_t *replylenp;
{
	u_int loc;
	cache_ptr ent;
	struct svc_dg_data *su = su_data(xprt);
	struct cl_cache *uc = (struct cl_cache *) su->su_cache;
#ifdef RPC_CACHE_DEBUG
	struct netconfig *nconf;
	char *uaddr;
#endif

	mutex_lock(&dupreq_lock);
	loc = CACHE_LOC(xprt, su->su_xid);
	for (ent = uc->uc_entries[loc]; ent != NULL; ent = ent->cache_next) {
		if (ent->cache_xid == su->su_xid &&
		    ent->cache_proc == msg->rm_call.cb_proc &&
		    ent->cache_vers == msg->rm_call.cb_vers &&
		    ent->cache_prog == msg->rm_call.cb_prog &&
		    ent->cache_addr.len == xprt->xp_rtaddr.len &&
		    (memcmp(ent->cache_addr.buf, xprt->xp_rtaddr.buf,
			xprt->xp_rtaddr.len) == 0)) {
#ifdef RPC_CACHE_DEBUG
			if (nconf = getnetconfigent(xprt->xp_netid)) {
				uaddr = taddr2uaddr(nconf, &xprt->xp_rtaddr);
				freenetconfigent(nconf);
				printf(
	"cache entry found for xid=%x prog=%d vers=%d proc=%d for rmtaddr=%s\n",
					su->su_xid, msg->rm_call.cb_prog,
					msg->rm_call.cb_vers,
					msg->rm_call.cb_proc, uaddr);
				free(uaddr);
			}
#endif
			*replyp = ent->cache_reply;
			*replylenp = ent->cache_replylen;
			mutex_unlock(&dupreq_lock);
			return (1);
		}
	}
	/*
	 * Failed to find entry
	 * Remember a few things so we can do a set later
	 */
	uc->uc_proc = msg->rm_call.cb_proc;
	uc->uc_vers = msg->rm_call.cb_vers;
	uc->uc_prog = msg->rm_call.cb_prog;
	mutex_unlock(&dupreq_lock);
	return (0);
}