xref: /original-bsd/usr.sbin/amd/amd/amq_subr.c (revision fbcc2ded)
1 /*
2  * $Id: amq_subr.c,v 5.2 90/06/23 22:19:20 jsp Rel $
3  *
4  * Copyright (c) 1990 Jan-Simon Pendry
5  * Copyright (c) 1990 Imperial College of Science, Technology & Medicine
6  * Copyright (c) 1990 The Regents of the University of California.
7  * All rights reserved.
8  *
9  * This code is derived from software contributed to Berkeley by
10  * Jan-Simon Pendry at Imperial College, London.
11  *
12  * %sccs.include.redist.c%
13  *
14  *	@(#)amq_subr.c	5.1 (Berkeley) 06/29/90
15  */
/*
 * Auxiliary routines for amq tool
 */
19 
20 #include "am.h"
21 #include "amq.h"
22 
23 #include <sys/param.h>
24 
25 /*ARGSUSED*/
26 voidp
27 amqproc_null_1(argp, rqstp)
28 voidp argp;
29 struct svc_req *rqstp;
30 {
31 	static char res;
32 
33 	return (voidp) &res;
34 }
35 
36 /*
37  * Return a sub-tree of mounts
38  */
39 /*ARGSUSED*/
40 amq_mount_tree_p *
41 amqproc_mnttree_1(argp, rqstp)
42 voidp argp;
43 struct svc_req *rqstp;
44 {
45 	static am_node *mp;
46 	mp = find_ap(*(char **) argp);
47 	return (amq_mount_tree_p *) &mp;
48 }
49 
50 /*
51  * Unmount a single node
52  */
53 /*ARGSUSED*/
54 voidp
55 amqproc_umnt_1(argp, rqstp)
56 voidp argp;
57 struct svc_req *rqstp;
58 {
59 	static char res;
60 	am_node *mp = find_ap(*(char **) argp);
61 	if (mp)
62 		forcibly_timeout_mp(mp);
63 
64 	return (voidp) &res;
65 }
66 
67 /*
68  * Return global statistics
69  */
70 /*ARGSUSED*/
71 amq_mount_stats *
72 amqproc_stats_1(argp, rqstp)
73 voidp argp;
74 struct svc_req *rqstp;
75 {
76 	return (amq_mount_stats *) &amd_stats;
77 }
78 
/*
 * Return the entire tree of mount nodes
 */
/*ARGSUSED*/
amq_mount_tree_list *
amqproc_export_1(argp, rqstp)
voidp argp;
struct svc_req *rqstp;
{
	/* Static so the address remains valid after return (RPC convention). */
	static amq_mount_tree_list aml;

#ifdef oldcode
	/* Collect every exported node flagged as a root into a scratch
	 * vector, then hand that whole vector back. */
	static am_node **mvec;
	int i;
	int n = 0;

	mvec = (struct am_node **)
		xrealloc(mvec, (1+last_used_map) * sizeof(am_node *));
	for (i = last_used_map; i >= 0; --i) {
		am_node *mp = exported_ap[i];
		if (mp && (mp->am_flags & AMF_ROOT))
			mvec[n++] = mp;
	}

	aml.amq_mount_tree_list_val = (amq_mount_tree_p *) mvec;
	aml.amq_mount_tree_list_len = n;
#else
	/* Current code returns only the first exported node.
	 * NOTE(review): assumes exported_ap[0] is the sole root of the
	 * tree — confirm; the XXX marks this as a known shortcut. */
	aml.amq_mount_tree_list_val = (amq_mount_tree_p *) &exported_ap[0];
	aml.amq_mount_tree_list_len = 1;	/* XXX */
#endif /* oldcode */
	return &aml;
}
111 
/*
 * Change a runtime option of the daemon on behalf of amq.
 * Returns a pointer to a static int holding 0 on success or an
 * errno value (EINVAL/EACCES) on failure.
 */
int *
amqproc_setopt_1(argp, rqstp)
voidp argp;
struct svc_req *rqstp;
{
	/* Static so the address survives return (Sun RPC convention). */
	static int rc;

	amq_setopt *opt = (amq_setopt *) argp;

	rc = 0;
	switch (opt->as_opt) {
	case AMOPT_DEBUG:
		/* Adjust debug flags; only supported in DEBUG builds. */
#ifdef DEBUG
		if (debug_option(opt->as_str))
			rc = EINVAL;
#else
		rc = EINVAL;
#endif /* DEBUG */
		break;

	case AMOPT_LOGFILE:
		/* Switching logfiles is not yet enabled; refuse. */
#ifdef not_yet
		if (switch_to_logfile(opt->as_str))
			rc = EINVAL;
#else
		rc = EACCES;
#endif /* not_yet */
		break;

	case AMOPT_XLOG:
		/* Change the set of logged message classes. */
		if (switch_option(opt->as_str))
			rc = EINVAL;
		break;

	case AMOPT_FLUSHMAPC:
		/* Force a map-cache reload at the next opportunity,
		 * but only once the daemon is fully running.
		 * (Unknown option values fall through with rc == 0.) */
		if (amd_state == Run) {
			plog(XLOG_INFO, "amq says flush cache");
			do_mapc_reload = 0;
		}
		break;
	}
	return &rc;
}
155 
156 amq_mount_info_list *
157 amqproc_getmntfs_1(argp, rqstp)
158 voidp argp;
159 struct svc_req *rqstp;
160 {
161 extern qelem mfhead;
162 	return (amq_mount_info_list *) &mfhead;	/* XXX */
163 }
164 
165 /*
166  * XDR routines.
167  */
168 bool_t
169 xdr_amq_string(xdrs, objp)
170 	XDR *xdrs;
171 	amq_string *objp;
172 {
173 	if (!xdr_string(xdrs, objp, AMQ_STRLEN)) {
174 		return (FALSE);
175 	}
176 	return (TRUE);
177 }
178 
179 bool_t
180 xdr_amq_setopt(xdrs, objp)
181 	XDR *xdrs;
182 	amq_setopt *objp;
183 {
184 	if (!xdr_enum(xdrs, (enum_t *)&objp->as_opt)) {
185 		return (FALSE);
186 	}
187 	if (!xdr_string(xdrs, &objp->as_str, AMQ_STRLEN)) {
188 		return (FALSE);
189 	}
190 	return (TRUE);
191 }
192 
/*
 * More XDR routines  - Should be used for OUTPUT ONLY.
 */
bool_t
xdr_amq_mount_tree(xdrs, objp)
	XDR *xdrs;
	amq_mount_tree *objp;
{
	/*
	 * The am_node is serialised field-by-field into the wire form of
	 * an amq_mount_tree; the field order here must match the client's
	 * decode order exactly, so do not reorder these calls.
	 * NOTE(review): the cast below treats the incoming amq_mount_tree
	 * pointer as an am_node — valid only because this routine is used
	 * for output of am_nodes, per the header comment above.
	 */
	am_node *mp = (am_node *) objp;

	if (!xdr_amq_string(xdrs, &mp->am_mnt->mf_info)) {
		return (FALSE);
	}
	if (!xdr_amq_string(xdrs, &mp->am_path)) {
		return (FALSE);
	}
	/* Send the link target if this node is a symlink, else the mount point. */
	if (!xdr_amq_string(xdrs, mp->am_link ? &mp->am_link : &mp->am_mnt->mf_mount)) {
		return (FALSE);
	}
	if (!xdr_amq_string(xdrs, &mp->am_mnt->mf_ops->fs_type)) {
		return (FALSE);
	}
	if (!xdr_long(xdrs, &mp->am_stats.s_mtime)) {
		return (FALSE);
	}
	if (!xdr_u_short(xdrs, &mp->am_stats.s_uid)) {
		return (FALSE);
	}
	/* Per-node operation counters. */
	if (!xdr_int(xdrs, &mp->am_stats.s_getattr)) {
		return (FALSE);
	}
	if (!xdr_int(xdrs, &mp->am_stats.s_lookup)) {
		return (FALSE);
	}
	if (!xdr_int(xdrs, &mp->am_stats.s_readdir)) {
		return (FALSE);
	}
	if (!xdr_int(xdrs, &mp->am_stats.s_readlink)) {
		return (FALSE);
	}
	if (!xdr_int(xdrs, &mp->am_stats.s_statfs)) {
		return (FALSE);
	}
	/* Siblings and children are sent recursively via xdr_pointer. */
	if (!xdr_pointer(xdrs, (char **)&mp->am_osib, sizeof(amq_mount_tree), xdr_amq_mount_tree)) {
		return (FALSE);
	}
	if (!xdr_pointer(xdrs, (char **)&mp->am_child, sizeof(amq_mount_tree), xdr_amq_mount_tree)) {
		return (FALSE);
	}
	return (TRUE);
}
244 
245 bool_t
246 xdr_amq_mount_tree_p(xdrs, objp)
247 	XDR *xdrs;
248 	amq_mount_tree_p *objp;
249 {
250 	if (!xdr_pointer(xdrs, (char **)objp, sizeof(amq_mount_tree), xdr_amq_mount_tree)) {
251 		return (FALSE);
252 	}
253 	return (TRUE);
254 }
255 
256 
257 bool_t
258 xdr_amq_mount_stats(xdrs, objp)
259 	XDR *xdrs;
260 	amq_mount_stats *objp;
261 {
262 	if (!xdr_int(xdrs, &objp->as_drops)) {
263 		return (FALSE);
264 	}
265 	if (!xdr_int(xdrs, &objp->as_stale)) {
266 		return (FALSE);
267 	}
268 	if (!xdr_int(xdrs, &objp->as_mok)) {
269 		return (FALSE);
270 	}
271 	if (!xdr_int(xdrs, &objp->as_merr)) {
272 		return (FALSE);
273 	}
274 	if (!xdr_int(xdrs, &objp->as_uerr)) {
275 		return (FALSE);
276 	}
277 	return (TRUE);
278 }
279 
280 
281 bool_t
282 xdr_amq_mount_tree_list(xdrs, objp)
283 	XDR *xdrs;
284 	amq_mount_tree_list *objp;
285 {
286 	 if (!xdr_array(xdrs, (char **)&objp->amq_mount_tree_list_val, (u_int *)&objp->amq_mount_tree_list_len, ~0, sizeof(amq_mount_tree_p), xdr_amq_mount_tree_p)) {
287 		return (FALSE);
288 	}
289 	return (TRUE);
290 }
291 
292 xdr_amq_mount_info_qelem(xdrs, qhead)
293 	XDR *xdrs;
294 	qelem *qhead;
295 {
296 	/*
297 	 * Compute length of list
298 	 */
299 	mntfs *mf;
300 	u_int len = 0;
301 	for (mf = LAST(mntfs, qhead); mf != HEAD(mntfs, qhead); mf = PREV(mntfs, mf)) {
302 		if (!(mf->mf_ops->fs_flags & FS_AMQINFO))
303 			continue;
304 		len++;
305 	}
306 	xdr_u_int(xdrs, &len);
307 
308 	/*
309 	 * Send individual data items
310 	 */
311 	for (mf = LAST(mntfs, qhead); mf != HEAD(mntfs, qhead); mf = PREV(mntfs, mf)) {
312 		int up;
313 		if (!(mf->mf_ops->fs_flags & FS_AMQINFO))
314 			continue;
315 
316 		if (!xdr_amq_string(xdrs, &mf->mf_ops->fs_type)) {
317 			return (FALSE);
318 		}
319 		if (!xdr_amq_string(xdrs, &mf->mf_mount)) {
320 			return (FALSE);
321 		}
322 		if (!xdr_amq_string(xdrs, &mf->mf_info)) {
323 			return (FALSE);
324 		}
325 		if (!xdr_amq_string(xdrs, &mf->mf_server->fs_host)) {
326 			return (FALSE);
327 		}
328 		if (!xdr_int(xdrs, &mf->mf_error)) {
329 			return (FALSE);
330 		}
331 		if (!xdr_int(xdrs, &mf->mf_refc)) {
332 			return (FALSE);
333 		}
334 		if (mf->mf_server->fs_flags & FSF_ERROR)
335 			up = 0;
336 		else switch (mf->mf_server->fs_flags & (FSF_DOWN|FSF_VALID)) {
337 		case FSF_DOWN|FSF_VALID: up = 0; break;
338 		case FSF_VALID: up = 1; break;
339 		default: up = -1; break;
340 		}
341 		if (!xdr_int(xdrs, &up)) {
342 			return (FALSE);
343 		}
344 	}
345 	return (TRUE);
346 }
347