/*
 * Copyright (c) 1990 Jan-Simon Pendry
 * Copyright (c) 1990 Imperial College of Science, Technology & Medicine
 * Copyright (c) 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Jan-Simon Pendry at Imperial College, London.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)amq_subr.c	8.1 (Berkeley) 06/06/93
 *
 * $Id: amq_subr.c,v 5.2.2.1 1992/02/09 15:08:18 jsp beta $
 *
 */
/*
 * Auxiliary routines for amq tool.
 *
 * The amqproc_*_1 functions are the server-side dispatch targets for the
 * AMQ RPC program; each follows the SunRPC convention of returning a
 * pointer to static storage that remains valid until the next call.
 * The xdr_* functions serialize the reply structures onto the wire.
 */

#include "am.h"
#include "amq.h"
#include <ctype.h>

/*
 * NULLPROC: standard RPC ping.  Returns a pointer to a static dummy
 * byte so the dispatcher sends an (empty) reply.
 */
/*ARGSUSED*/
voidp
amqproc_null_1(argp, rqstp)
voidp argp;
struct svc_req *rqstp;
{
	static char res;

	return (voidp) &res;
}

/*
 * Return a sub-tree of mounts.
 * argp is a pointer to a path string; find_ap() maps it to the
 * corresponding am_node (or 0).  The am_node pointer is handed back
 * cast as an amq_mount_tree_p; serialization is done by
 * xdr_amq_mount_tree_p below.
 */
/*ARGSUSED*/
amq_mount_tree_p *
amqproc_mnttree_1(argp, rqstp)
voidp argp;
struct svc_req *rqstp;
{
	static am_node *mp;
	mp = find_ap(*(char **) argp);
	return (amq_mount_tree_p *) &mp;
}

/*
 * Unmount a single node.
 * Looks up the path in the mount table and, if found, forces an
 * immediate timeout of that node (which triggers the unmount).
 * No error is reported to the caller if the path is unknown.
 */
/*ARGSUSED*/
voidp
amqproc_umnt_1(argp, rqstp)
voidp argp;
struct svc_req *rqstp;
{
	static char res;
	am_node *mp = find_ap(*(char **) argp);
	if (mp)
		forcibly_timeout_mp(mp);

	return (voidp) &res;
}

/*
 * Return global statistics.
 * The global amd_stats structure is cast directly to the wire type;
 * NOTE(review): this relies on the two structures having matching
 * layout for the fields emitted by xdr_amq_mount_stats — confirm
 * against the declarations in am.h/amq.h.
 */
/*ARGSUSED*/
amq_mount_stats *
amqproc_stats_1(argp, rqstp)
voidp argp;
struct svc_req *rqstp;
{
	return (amq_mount_stats *) &amd_stats;
}

/*
 * Return the entire tree of mount nodes.
 * Only the first entry of exported_ap[] is exported (length hard-wired
 * to 1) — marked XXX by the original author.
 */
/*ARGSUSED*/
amq_mount_tree_list *
amqproc_export_1(argp, rqstp)
voidp argp;
struct svc_req *rqstp;
{
	static amq_mount_tree_list aml;

	aml.amq_mount_tree_list_val = (amq_mount_tree_p *) &exported_ap[0];
	aml.amq_mount_tree_list_len = 1;	/* XXX */

	return &aml;
}

/*
 * Set a runtime option.  Returns 0 on success, EINVAL for a bad
 * option string, EACCES where the operation is compiled out.
 * An unrecognized as_opt value falls through the switch and reports
 * success (rc stays 0) — NOTE(review): there is no default case.
 */
int *
amqproc_setopt_1(argp, rqstp)
voidp argp;
struct svc_req *rqstp;
{
	static int rc;

	amq_setopt *opt = (amq_setopt *) argp;

	rc = 0;
	switch (opt->as_opt) {
	case AMOPT_DEBUG:
#ifdef DEBUG
		if (debug_option(opt->as_str))
			rc = EINVAL;
#else
		/* Debugging support not compiled in */
		rc = EINVAL;
#endif /* DEBUG */
		break;

	case AMOPT_LOGFILE:
#ifdef not_yet
		if (switch_to_logfile(opt->as_str))
			rc = EINVAL;
#else
		/* Switching logfiles at runtime is disabled */
		rc = EACCES;
#endif /* not_yet */
		break;

	case AMOPT_XLOG:
		/* Change the logging option string */
		if (switch_option(opt->as_str))
			rc = EINVAL;
		break;

	case AMOPT_FLUSHMAPC:
		/* Flush the map and file-handle caches (only while running) */
		if (amd_state == Run) {
			plog(XLOG_INFO, "amq says flush cache");
			do_mapc_reload = 0;
			flush_nfs_fhandle_cache((fserver *) 0);
			flush_srvr_nfs_cache();
		}
		break;
	}
	return &rc;
}

/*
 * Return the list of mounted filesystems.
 * Hands back the head of the global mntfs queue cast to the wire type;
 * actual serialization is done by xdr_amq_mount_info_qelem (XXX by
 * the original author: the cast side-steps the generated XDR type).
 */
amq_mount_info_list *
amqproc_getmntfs_1(argp, rqstp)
voidp argp;
struct svc_req *rqstp;
{
	extern qelem mfhead;
	return (amq_mount_info_list *) &mfhead;	/* XXX */
}

/*
 * Minimal access check for privileged operations.
 * The caller must come from a reserved port (< 1024) AND from either
 * the loopback address (0x7f000001 == 127.0.0.1) or this host's own
 * address.  Denied requests are logged.  Returns 1 if OK, 0 if denied.
 */
static int ok_security(rqstp)
struct svc_req *rqstp;
{
	struct sockaddr_in *sin;

	sin = svc_getcaller(rqstp->rq_xprt);
	if (ntohs(sin->sin_port) >= 1024 ||
			!(sin->sin_addr.s_addr == htonl(0x7f000001) ||
			sin->sin_addr.s_addr == myipaddr.s_addr)) {
		char dq[20];
		plog(XLOG_INFO, "AMQ request from %s.%d DENIED",
			inet_dquad(dq, sin->sin_addr.s_addr),
			ntohs(sin->sin_port));
		return(0);
	}
	return(1);
}

/*
 * Mount a filesystem on request from amq.
 * The argument string is "key<whitespace>value"; it is split in place
 * at the first whitespace.  A string with no whitespace-separated
 * value is rejected with EINVAL.  On a negative mount result this
 * returns 0 (NULL), which tells the RPC dispatcher to send no reply —
 * presumably the reply is delivered later when the background mount
 * completes; TODO(review) confirm against the dispatcher code.
 */
int *
amqproc_mount_1(argp, rqstp)
voidp argp;
struct svc_req *rqstp;
{
	static int rc;
	char *s = *(amq_string *) argp;
	char *cp;

	plog(XLOG_INFO, "amq requested mount of %s", s);
	/*
	 * Minimalist security check.
	 */
	if (!ok_security(rqstp)) {
		rc = EACCES;
		return &rc;
	}

	/*
	 * Find end of key
	 */
	for (cp = (char *) s; *cp&&(!isascii(*cp)||!isspace(*cp)); cp++)
		;

	if (!*cp) {
		plog(XLOG_INFO, "amqproc_mount: Invalid arguments");
		rc = EINVAL;
		return &rc;
	}
	*cp++ = '\0';

	/*
	 * Find start of value
	 */
	while (*cp && isascii(*cp) && isspace(*cp))
		cp++;

	/* Record the key/value pair in the root map, then mount it */
	root_newmap(s, cp, (char *) 0);
	rc = mount_auto_node(s, (voidp) root_node);
	if (rc < 0)
		return 0;
	return &rc;
}

/*
 * Return the amd version string (static, shared with all callers).
 */
amq_string *
amqproc_getvers_1(argp, rqstp)
voidp argp;
struct svc_req *rqstp;
{
	static amq_string res;
	res = version;
	return &res;
}

/*
 * XDR routines.
 */

/* Serialize a counted string, bounded by AMQ_STRLEN. */
bool_t
xdr_amq_string(xdrs, objp)
XDR *xdrs;
amq_string *objp;
{
	if (!xdr_string(xdrs, objp, AMQ_STRLEN)) {
		return (FALSE);
	}
	return (TRUE);
}

/* Serialize a set-option request: the option selector plus its string. */
bool_t
xdr_amq_setopt(xdrs, objp)
XDR *xdrs;
amq_setopt *objp;
{
	if (!xdr_enum(xdrs, (enum_t *)&objp->as_opt)) {
		return (FALSE);
	}
	if (!xdr_string(xdrs, &objp->as_str, AMQ_STRLEN)) {
		return (FALSE);
	}
	return (TRUE);
}

/*
 * More XDR routines - Should be used for OUTPUT ONLY.
 *
 * These encode an am_node directly onto the wire in the field order
 * expected for an amq_mount_tree, rather than building a separate
 * reply structure — NOTE(review): the emission order below must stay
 * in sync with the amq_mount_tree XDR definition.
 */

/* Emit the per-node fields: info, path, link/mount point, fs type,
 * mount time, uid, and the five per-operation counters. */
bool_t
xdr_amq_mount_tree_node(xdrs, objp)
XDR *xdrs;
amq_mount_tree *objp;
{
	am_node *mp = (am_node *) objp;

	if (!xdr_amq_string(xdrs, &mp->am_mnt->mf_info)) {
		return (FALSE);
	}
	if (!xdr_amq_string(xdrs, &mp->am_path)) {
		return (FALSE);
	}
	/* A symlink target takes precedence over the mount point */
	if (!xdr_amq_string(xdrs, mp->am_link ?
			&mp->am_link : &mp->am_mnt->mf_mount)) {
		return (FALSE);
	}
	if (!xdr_amq_string(xdrs, &mp->am_mnt->mf_ops->fs_type)) {
		return (FALSE);
	}
	if (!xdr_long(xdrs, &mp->am_stats.s_mtime)) {
		return (FALSE);
	}
	if (!xdr_u_short(xdrs, &mp->am_stats.s_uid)) {
		return (FALSE);
	}
	if (!xdr_int(xdrs, &mp->am_stats.s_getattr)) {
		return (FALSE);
	}
	if (!xdr_int(xdrs, &mp->am_stats.s_lookup)) {
		return (FALSE);
	}
	if (!xdr_int(xdrs, &mp->am_stats.s_readdir)) {
		return (FALSE);
	}
	if (!xdr_int(xdrs, &mp->am_stats.s_readlink)) {
		return (FALSE);
	}
	if (!xdr_int(xdrs, &mp->am_stats.s_statfs)) {
		return (FALSE);
	}
	return (TRUE);
}

/* Recursively emit a node, then its older sibling and its children. */
bool_t
xdr_amq_mount_subtree(xdrs, objp)
XDR *xdrs;
amq_mount_tree *objp;
{
	am_node *mp = (am_node *) objp;

	if (!xdr_amq_mount_tree_node(xdrs, objp)) {
		return (FALSE);
	}
	if (!xdr_pointer(xdrs, (char **)&mp->am_osib, sizeof(amq_mount_tree), xdr_amq_mount_subtree)) {
		return (FALSE);
	}
	if (!xdr_pointer(xdrs, (char **)&mp->am_child, sizeof(amq_mount_tree), xdr_amq_mount_subtree)) {
		return (FALSE);
	}
	return (TRUE);
}

/*
 * Emit a tree rooted at this node.  A null pointer (mnil) is emitted
 * in place of the sibling link so the root's own siblings are NOT
 * included — only this node and its descendants go out.
 */
bool_t
xdr_amq_mount_tree(xdrs, objp)
XDR *xdrs;
amq_mount_tree *objp;
{
	am_node *mp = (am_node *) objp;
	am_node *mnil = 0;

	if (!xdr_amq_mount_tree_node(xdrs, objp)) {
		return (FALSE);
	}
	if (!xdr_pointer(xdrs, (char **)&mnil, sizeof(amq_mount_tree), xdr_amq_mount_subtree)) {
		return (FALSE);
	}
	if (!xdr_pointer(xdrs, (char **)&mp->am_child, sizeof(amq_mount_tree), xdr_amq_mount_subtree)) {
		return (FALSE);
	}
	return (TRUE);
}

/* Emit an optional pointer to a mount tree (null maps to "no tree"). */
bool_t
xdr_amq_mount_tree_p(xdrs, objp)
XDR *xdrs;
amq_mount_tree_p *objp;
{
	if (!xdr_pointer(xdrs, (char **)objp, sizeof(amq_mount_tree), xdr_amq_mount_tree)) {
		return (FALSE);
	}
	return (TRUE);
}


/* Emit the five global counters of an amq_mount_stats. */
bool_t
xdr_amq_mount_stats(xdrs, objp)
XDR *xdrs;
amq_mount_stats *objp;
{
	if (!xdr_int(xdrs, &objp->as_drops)) {
		return (FALSE);
	}
	if (!xdr_int(xdrs, &objp->as_stale)) {
		return (FALSE);
	}
	if (!xdr_int(xdrs, &objp->as_mok)) {
		return (FALSE);
	}
	if (!xdr_int(xdrs, &objp->as_merr)) {
		return (FALSE);
	}
	if (!xdr_int(xdrs, &objp->as_uerr)) {
		return (FALSE);
	}
	return (TRUE);
}


/* Emit a variable-length array of mount-tree pointers. */
bool_t
xdr_amq_mount_tree_list(xdrs, objp)
XDR *xdrs;
amq_mount_tree_list *objp;
{
	if (!xdr_array(xdrs, (char **)&objp->amq_mount_tree_list_val, (u_int *)&objp->amq_mount_tree_list_len, ~0, sizeof(amq_mount_tree_p), xdr_amq_mount_tree_p)) {
		return (FALSE);
	}
	return (TRUE);
}

/*
 * Emit the mounted-filesystem list directly from the mntfs queue.
 * Walks the queue twice (tail to head via LAST/PREV): first to count
 * the entries whose filesystem type participates in amq info
 * (FS_AMQINFO), then to emit each entry's type, mount point, info,
 * server host, error code, reference count, and an up/down indicator.
 * NOTE(review): the return of the leading xdr_u_int(&len) is not
 * checked; a failure there is only caught by the per-entry calls.
 */
bool_t
xdr_amq_mount_info_qelem(xdrs, qhead)
XDR *xdrs;
qelem *qhead;
{
	/*
	 * Compute length of list
	 */
	mntfs *mf;
	u_int len = 0;
	for (mf = LAST(mntfs, qhead); mf != HEAD(mntfs, qhead); mf = PREV(mntfs, mf)) {
		if (!(mf->mf_ops->fs_flags & FS_AMQINFO))
			continue;
		len++;
	}
	xdr_u_int(xdrs, &len);

	/*
	 * Send individual data items
	 */
	for (mf = LAST(mntfs, qhead); mf != HEAD(mntfs, qhead); mf = PREV(mntfs, mf)) {
		int up;
		if (!(mf->mf_ops->fs_flags & FS_AMQINFO))
			continue;

		if (!xdr_amq_string(xdrs, &mf->mf_ops->fs_type)) {
			return (FALSE);
		}
		if (!xdr_amq_string(xdrs, &mf->mf_mount)) {
			return (FALSE);
		}
		if (!xdr_amq_string(xdrs, &mf->mf_info)) {
			return (FALSE);
		}
		if (!xdr_amq_string(xdrs, &mf->mf_server->fs_host)) {
			return (FALSE);
		}
		if (!xdr_int(xdrs, &mf->mf_error)) {
			return (FALSE);
		}
		if (!xdr_int(xdrs, &mf->mf_refc)) {
			return (FALSE);
		}
		/*
		 * Server state: 0 = down (error or marked down),
		 * 1 = up, -1 = unknown (not yet validated).
		 */
		if (mf->mf_server->fs_flags & FSF_ERROR)
			up = 0;
		else switch (mf->mf_server->fs_flags & (FSF_DOWN|FSF_VALID)) {
		case FSF_DOWN|FSF_VALID: up = 0; break;
		case FSF_VALID: up = 1; break;
		default: up = -1; break;
		}
		if (!xdr_int(xdrs, &up)) {
			return (FALSE);
		}
	}
	return (TRUE);
}