1 /* $NetBSD: amq_subr.c,v 1.5 2022/08/24 05:01:13 christos Exp $ */
2
3 /*
4 * Copyright (c) 1997-2014 Erez Zadok
5 * Copyright (c) 1990 Jan-Simon Pendry
6 * Copyright (c) 1990 Imperial College of Science, Technology & Medicine
7 * Copyright (c) 1990 The Regents of the University of California.
8 * All rights reserved.
9 *
10 * This code is derived from software contributed to Berkeley by
11 * Jan-Simon Pendry at Imperial College, London.
12 *
13 * Redistribution and use in source and binary forms, with or without
14 * modification, are permitted provided that the following conditions
15 * are met:
16 * 1. Redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer.
18 * 2. Redistributions in binary form must reproduce the above copyright
19 * notice, this list of conditions and the following disclaimer in the
20 * documentation and/or other materials provided with the distribution.
21 * 3. Neither the name of the University nor the names of its contributors
22 * may be used to endorse or promote products derived from this software
23 * without specific prior written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
27 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
28 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
29 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
30 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
31 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
32 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
33 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
34 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35 * SUCH DAMAGE.
36 *
37 *
38 * File: am-utils/amd/amq_subr.c
39 *
40 */
41 /*
42 * Auxiliary routines for amq tool
43 */
44
45 #ifdef HAVE_CONFIG_H
46 # include <config.h>
47 #endif /* HAVE_CONFIG_H */
48 #include <am_defs.h>
49 #include <amd.h>
50
51 /* forward definitions */
52 bool_t xdr_amq_mount_tree_node(XDR *xdrs, amq_mount_tree *objp);
53 bool_t xdr_amq_mount_subtree(XDR *xdrs, amq_mount_tree *objp);
54
55
56 voidp
amqproc_null_1_svc(voidp argp,struct svc_req * rqstp)57 amqproc_null_1_svc(voidp argp, struct svc_req *rqstp)
58 {
59 static char res;
60
61 return (voidp) &res;
62 }
63
64
65 /*
66 * Return a sub-tree of mounts
67 */
68 amq_mount_tree_p *
amqproc_mnttree_1_svc(voidp argp,struct svc_req * rqstp)69 amqproc_mnttree_1_svc(voidp argp, struct svc_req *rqstp)
70 {
71 static am_node *mp;
72
73 mp = find_ap(*(char **) argp);
74 return (amq_mount_tree_p *) ((void *)&mp);
75 }
76
77
78 /*
79 * Unmount a single node
80 */
81 int *
amqproc_umnt_1_svc(voidp argp,struct svc_req * rqstp)82 amqproc_umnt_1_svc(voidp argp, struct svc_req *rqstp)
83 {
84 static int res = AMQ_UMNT_OK;
85 am_node *mp = find_ap(*(char **) argp);
86
87 if (mp)
88 forcibly_timeout_mp(mp);
89
90 return &res;
91 }
92
93
94 /*
95 * Synchronously unmount a single node - parent side.
96 */
97 int *
amqproc_sync_umnt_1_svc_parent(voidp argp,struct svc_req * rqstp)98 amqproc_sync_umnt_1_svc_parent(voidp argp, struct svc_req *rqstp)
99 {
100 amqproc_umnt_1_svc(argp, rqstp);
101 return NULL;
102 }
103
104
105 /*
106 * Synchronously unmount a single node - child side.
107 */
108 amq_sync_umnt *
amqproc_sync_umnt_1_svc_child(voidp argp,struct svc_req * rqstp)109 amqproc_sync_umnt_1_svc_child(voidp argp, struct svc_req *rqstp)
110 {
111 static amq_sync_umnt rv;
112 amq_sync_umnt buf;
113 ssize_t n;
114
115 am_node *mp = find_ap(*(char **) argp);
116
117 memset(&rv, 0, sizeof(rv));
118 rv.au_etype = AMQ_UMNT_READ;
119 if (mp && mp->am_fd[0] >= 0) {
120 n = read(mp->am_fd[0], &buf, sizeof(buf));
121 if (n == sizeof(buf))
122 rv = buf;
123 }
124 return &rv;
125 }
126
127
128 /*
129 * Synchronously unmount a single node - use if we can't fork (asynchronous).
130 */
131 amq_sync_umnt *
amqproc_sync_umnt_1_svc_async(voidp argp,struct svc_req * rqstp)132 amqproc_sync_umnt_1_svc_async(voidp argp, struct svc_req *rqstp)
133 {
134 static amq_sync_umnt rv;
135
136 memset(&rv, 0, sizeof(rv));
137 rv.au_etype = AMQ_UMNT_FORK;
138 rv.au_errno = errno;
139
140 amqproc_umnt_1_svc(argp, rqstp);
141
142 return &rv;
143 }
144
145
146 /*
147 * Return global statistics
148 */
149 amq_mount_stats *
amqproc_stats_1_svc(voidp argp,struct svc_req * rqstp)150 amqproc_stats_1_svc(voidp argp, struct svc_req *rqstp)
151 {
152 return (amq_mount_stats *) ((void *)&amd_stats);
153 }
154
155
156 /*
157 * Return the entire tree of mount nodes
158 */
159 amq_mount_tree_list *
amqproc_export_1_svc(voidp argp,struct svc_req * rqstp)160 amqproc_export_1_svc(voidp argp, struct svc_req *rqstp)
161 {
162 static amq_mount_tree_list aml;
163 static am_node *mp;
164
165 mp = get_exported_ap(0);
166 aml.amq_mount_tree_list_val = (amq_mount_tree_p *) ((void *) &mp);
167 aml.amq_mount_tree_list_len = 1; /* XXX */
168
169 return &aml;
170 }
171
172
173 int *
amqproc_setopt_1_svc(voidp argp,struct svc_req * rqstp)174 amqproc_setopt_1_svc(voidp argp, struct svc_req *rqstp)
175 {
176 static int rc;
177 amq_setopt *opt = (amq_setopt *) argp;
178
179 rc = 0;
180
181 switch (opt->as_opt) {
182
183 case AMOPT_DEBUG:
184 if (debug_option(opt->as_str))
185 rc = EINVAL;
186 break;
187
188 case AMOPT_LOGFILE:
189 if (gopt.logfile && opt->as_str
190 && STREQ(gopt.logfile, opt->as_str)) {
191 if (switch_to_logfile(opt->as_str, orig_umask, 0))
192 rc = EINVAL;
193 } else {
194 rc = EACCES;
195 }
196 break;
197
198 case AMOPT_XLOG:
199 if (switch_option(opt->as_str))
200 rc = EINVAL;
201 break;
202
203 case AMOPT_FLUSHMAPC:
204 if (amd_state == Run) {
205 plog(XLOG_INFO, "amq says flush cache");
206 do_mapc_reload = 0;
207 flush_nfs_fhandle_cache((fserver *) NULL);
208 flush_srvr_nfs_cache((fserver *) NULL);
209 }
210 break;
211 }
212
213 return &rc;
214 }
215
216
217 amq_mount_info_list *
amqproc_getmntfs_1_svc(voidp argp,struct svc_req * rqstp)218 amqproc_getmntfs_1_svc(voidp argp, struct svc_req *rqstp)
219 {
220 return (amq_mount_info_list *) ((void *)&mfhead); /* XXX */
221 }
222
223 extern qelem map_list_head;
224 amq_map_info_list *
amqproc_getmapinfo_1_svc(voidp argp,struct svc_req * rqstp)225 amqproc_getmapinfo_1_svc(voidp argp, struct svc_req *rqstp)
226 {
227 return (amq_map_info_list *) ((void *)&map_list_head); /* XXX */
228 }
229
230 amq_string *
amqproc_getvers_1_svc(voidp argp,struct svc_req * rqstp)231 amqproc_getvers_1_svc(voidp argp, struct svc_req *rqstp)
232 {
233 static amq_string res;
234
235 res = get_version_string();
236 return &res;
237 }
238
239
240 /* get PID of remote amd */
241 int *
amqproc_getpid_1_svc(voidp argp,struct svc_req * rqstp)242 amqproc_getpid_1_svc(voidp argp, struct svc_req *rqstp)
243 {
244 static int res;
245
246 res = getpid();
247 return &res;
248 }
249
250
251 /*
252 * Process PAWD string of remote pawd tool.
253 *
254 * We repeat the resolution of the string until the resolved string resolves
255 * to itself. This ensures that we follow path resolutions through all
256 * possible Amd mount points until we reach some sort of convergence. To
257 * prevent possible infinite loops, we break out of this loop if the strings
258 * do not converge after MAX_PAWD_TRIES times.
259 */
amq_string *
amqproc_pawd_1_svc(voidp argp, struct svc_req *rqstp)
{
  static amq_string res;	/* static: returned pointer must survive return */
#define MAX_PAWD_TRIES 10
  int index, len, maxagain = MAX_PAWD_TRIES;
  am_node *mp;
  char *mountpoint;
  char *dir = *(char **) argp;	/* the path string sent by pawd */
  static char tmp_buf[MAXPATHLEN];	/* static: res points into it */
  char prev_buf[MAXPATHLEN];	/* previous iteration's result, for convergence test */

  tmp_buf[0] = prev_buf[0] = '\0';	/* default is empty string: no match */
  do {
    /* scan every exported automount node for one whose mount point
     * is a path prefix of "dir" */
    for (mp = get_first_exported_ap(&index);
	 mp;
	 mp = get_next_exported_ap(&index)) {
      /* skip structural nodes that are not real mounts */
      if (STREQ(mp->am_al->al_mnt->mf_ops->fs_type, "toplvl"))
	continue;
      if (STREQ(mp->am_al->al_mnt->mf_ops->fs_type, "auto"))
	continue;
      /* prefer the symlink target if the node has one */
      mountpoint = (mp->am_link ? mp->am_link : mp->am_al->al_mnt->mf_mount);
      len = strlen(mountpoint);
      if (len == 0)
	continue;
      if (!NSTREQ(mountpoint, dir, len))
	continue;
      /* require the prefix match to end on a path-component boundary */
      if (dir[len] != '\0' && dir[len] != '/')
	continue;
      /* rewrite: replace the mountpoint prefix with the amd path */
      xstrlcpy(tmp_buf, mp->am_path, sizeof(tmp_buf));
      xstrlcat(tmp_buf, &dir[len], sizeof(tmp_buf));
      break;
    } /* end of "for" loop */
    /* once tmp_buf and prev_buf are equal, break out of "do" loop */
    if (STREQ(tmp_buf, prev_buf))
      break;
    else
      xstrlcpy(prev_buf, tmp_buf, sizeof(prev_buf));
  } while (--maxagain);
  /* check if we couldn't resolve the string after MAX_PAWD_TRIES times */
  if (maxagain <= 0)
    plog(XLOG_WARNING, "path \"%s\" did not resolve after %d tries",
	 tmp_buf, MAX_PAWD_TRIES);

  res = tmp_buf;
  return &res;
}
307
308
309 /*
310 * XDR routines.
311 */
312
313
314 bool_t
xdr_amq_setopt(XDR * xdrs,amq_setopt * objp)315 xdr_amq_setopt(XDR *xdrs, amq_setopt *objp)
316 {
317 if (!xdr_enum(xdrs, (enum_t *) ((voidp) &objp->as_opt))) {
318 return (FALSE);
319 }
320 if (!xdr_string(xdrs, &objp->as_str, AMQ_STRLEN)) {
321 return (FALSE);
322 }
323 return (TRUE);
324 }
325
326
327 /*
328 * More XDR routines - Should be used for OUTPUT ONLY.
329 */
330 bool_t
xdr_amq_mount_tree_node(XDR * xdrs,amq_mount_tree * objp)331 xdr_amq_mount_tree_node(XDR *xdrs, amq_mount_tree *objp)
332 {
333 am_node *mp = (am_node *) objp;
334 longlong_t mtime;
335
336 if (!xdr_amq_string(xdrs, &mp->am_al->al_mnt->mf_info)) {
337 return (FALSE);
338 }
339 if (!xdr_amq_string(xdrs, &mp->am_path)) {
340 return (FALSE);
341 }
342 if (!xdr_amq_string(xdrs, mp->am_link ? &mp->am_link : &mp->am_al->al_mnt->mf_mount)) {
343 return (FALSE);
344 }
345 if (!xdr_amq_string(xdrs, &mp->am_al->al_mnt->mf_ops->fs_type)) {
346 return (FALSE);
347 }
348 mtime = mp->am_stats.s_mtime;
349 if (!xdr_longlong_t(xdrs, &mtime)) {
350 return (FALSE);
351 }
352 if (!xdr_u_short(xdrs, &mp->am_stats.s_uid)) {
353 return (FALSE);
354 }
355 if (!xdr_int(xdrs, &mp->am_stats.s_getattr)) {
356 return (FALSE);
357 }
358 if (!xdr_int(xdrs, &mp->am_stats.s_lookup)) {
359 return (FALSE);
360 }
361 if (!xdr_int(xdrs, &mp->am_stats.s_readdir)) {
362 return (FALSE);
363 }
364 if (!xdr_int(xdrs, &mp->am_stats.s_readlink)) {
365 return (FALSE);
366 }
367 if (!xdr_int(xdrs, &mp->am_stats.s_statfs)) {
368 return (FALSE);
369 }
370 return (TRUE);
371 }
372
373
374 bool_t
xdr_amq_mount_subtree(XDR * xdrs,amq_mount_tree * objp)375 xdr_amq_mount_subtree(XDR *xdrs, amq_mount_tree *objp)
376 {
377 am_node *mp = (am_node *) objp;
378
379 if (!xdr_amq_mount_tree_node(xdrs, objp)) {
380 return (FALSE);
381 }
382 if (!xdr_pointer(xdrs,
383 (char **) ((voidp) &mp->am_osib),
384 sizeof(amq_mount_tree),
385 (XDRPROC_T_TYPE) xdr_amq_mount_subtree)) {
386 return (FALSE);
387 }
388 if (!xdr_pointer(xdrs,
389 (char **) ((voidp) &mp->am_child),
390 sizeof(amq_mount_tree),
391 (XDRPROC_T_TYPE) xdr_amq_mount_subtree)) {
392 return (FALSE);
393 }
394 return (TRUE);
395 }
396
397
398 bool_t
xdr_amq_mount_tree(XDR * xdrs,amq_mount_tree * objp)399 xdr_amq_mount_tree(XDR *xdrs, amq_mount_tree *objp)
400 {
401 am_node *mp = (am_node *) objp;
402 am_node *mnil = NULL;
403
404 if (!xdr_amq_mount_tree_node(xdrs, objp)) {
405 return (FALSE);
406 }
407 if (!xdr_pointer(xdrs,
408 (char **) ((voidp) &mnil),
409 sizeof(amq_mount_tree),
410 (XDRPROC_T_TYPE) xdr_amq_mount_subtree)) {
411 return (FALSE);
412 }
413 if (!xdr_pointer(xdrs,
414 (char **) ((voidp) &mp->am_child),
415 sizeof(amq_mount_tree),
416 (XDRPROC_T_TYPE) xdr_amq_mount_subtree)) {
417 return (FALSE);
418 }
419 return (TRUE);
420 }
421
422
423 bool_t
xdr_amq_mount_tree_p(XDR * xdrs,amq_mount_tree_p * objp)424 xdr_amq_mount_tree_p(XDR *xdrs, amq_mount_tree_p *objp)
425 {
426 if (!xdr_pointer(xdrs, (char **) objp, sizeof(amq_mount_tree), (XDRPROC_T_TYPE) xdr_amq_mount_tree)) {
427 return (FALSE);
428 }
429 return (TRUE);
430 }
431
432
433 bool_t
xdr_amq_mount_stats(XDR * xdrs,amq_mount_stats * objp)434 xdr_amq_mount_stats(XDR *xdrs, amq_mount_stats *objp)
435 {
436 if (!xdr_int(xdrs, &objp->as_drops)) {
437 return (FALSE);
438 }
439 if (!xdr_int(xdrs, &objp->as_stale)) {
440 return (FALSE);
441 }
442 if (!xdr_int(xdrs, &objp->as_mok)) {
443 return (FALSE);
444 }
445 if (!xdr_int(xdrs, &objp->as_merr)) {
446 return (FALSE);
447 }
448 if (!xdr_int(xdrs, &objp->as_uerr)) {
449 return (FALSE);
450 }
451 return (TRUE);
452 }
453
454
455
456 bool_t
xdr_amq_mount_tree_list(XDR * xdrs,amq_mount_tree_list * objp)457 xdr_amq_mount_tree_list(XDR *xdrs, amq_mount_tree_list *objp)
458 {
459 if (!xdr_array(xdrs,
460 (char **) ((voidp) &objp->amq_mount_tree_list_val),
461 (u_int *) &objp->amq_mount_tree_list_len,
462 ~0,
463 sizeof(amq_mount_tree_p),
464 (XDRPROC_T_TYPE) xdr_amq_mount_tree_p)) {
465 return (FALSE);
466 }
467 return (TRUE);
468 }
469
470
471 bool_t
xdr_amq_mount_info_qelem(XDR * xdrs,qelem * qhead)472 xdr_amq_mount_info_qelem(XDR *xdrs, qelem *qhead)
473 {
474 mntfs *mf;
475 u_int len = 0;
476
477 /*
478 * Compute length of list
479 */
480 for (mf = AM_LAST(mntfs, qhead); mf != HEAD(mntfs, qhead); mf = PREV(mntfs, mf)) {
481 if (!(mf->mf_fsflags & FS_AMQINFO))
482 continue;
483 len++;
484 }
485 xdr_u_int(xdrs, &len);
486
487 /*
488 * Send individual data items
489 */
490 for (mf = AM_LAST(mntfs, qhead); mf != HEAD(mntfs, qhead); mf = PREV(mntfs, mf)) {
491 int up;
492 if (!(mf->mf_fsflags & FS_AMQINFO))
493 continue;
494
495 if (!xdr_amq_string(xdrs, &mf->mf_ops->fs_type)) {
496 return (FALSE);
497 }
498 if (!xdr_amq_string(xdrs, &mf->mf_mount)) {
499 return (FALSE);
500 }
501 if (!xdr_amq_string(xdrs, &mf->mf_info)) {
502 return (FALSE);
503 }
504 if (!xdr_amq_string(xdrs, &mf->mf_server->fs_host)) {
505 return (FALSE);
506 }
507 if (!xdr_int(xdrs, &mf->mf_error)) {
508 return (FALSE);
509 }
510 if (!xdr_int(xdrs, &mf->mf_refc)) {
511 return (FALSE);
512 }
513 if (FSRV_ERROR(mf->mf_server) || FSRV_ISDOWN(mf->mf_server))
514 up = 0;
515 else if (FSRV_ISUP(mf->mf_server))
516 up = 1;
517 else
518 up = -1;
519 if (!xdr_int(xdrs, &up)) {
520 return (FALSE);
521 }
522 }
523 return (TRUE);
524 }
525
526 bool_t
xdr_amq_map_info_qelem(XDR * xdrs,qelem * qhead)527 xdr_amq_map_info_qelem(XDR *xdrs, qelem *qhead)
528 {
529 mnt_map *m;
530 u_int len = 0;
531 int x;
532 char *n;
533 longlong_t modify;
534
535 /*
536 * Compute length of list
537 */
538 ITER(m, mnt_map, qhead) {
539 len++;
540 }
541
542 if (!xdr_u_int(xdrs, &len))
543 return (FALSE);
544
545 /*
546 * Send individual data items
547 */
548 ITER(m, mnt_map, qhead) {
549 if (!xdr_amq_string(xdrs, &m->map_name)) {
550 return (FALSE);
551 }
552
553 n = m->wildcard ? m->wildcard : "";
554 if (!xdr_amq_string(xdrs, &n)) {
555 return (FALSE);
556 }
557
558 modify = m->modify;
559 if (!xdr_longlong_t(xdrs, &modify)) {
560 return (FALSE);
561 }
562
563 x = m->flags;
564 if (!xdr_int(xdrs, &x)) {
565 return (FALSE);
566 }
567
568 x = m->nentries;
569 if (!xdr_int(xdrs, &x)) {
570 return (FALSE);
571 }
572
573 x = m->reloads;
574 if (!xdr_int(xdrs, &x)) {
575 return (FALSE);
576 }
577
578 if (!xdr_int(xdrs, &m->refc)) {
579 return (FALSE);
580 }
581
582 if (m->isup)
583 x = (*m->isup)(m, m->map_name);
584 else
585 x = -1;
586 if (!xdr_int(xdrs, &x)) {
587 return (FALSE);
588 }
589 }
590 return (TRUE);
591 }
592
593 bool_t
xdr_pri_free(XDRPROC_T_TYPE xdr_args,caddr_t args_ptr)594 xdr_pri_free(XDRPROC_T_TYPE xdr_args, caddr_t args_ptr)
595 {
596 XDR xdr;
597
598 xdr.x_op = XDR_FREE;
599 return ((*xdr_args) (&xdr, (caddr_t *) args_ptr));
600 }
601