xref: /dragonfly/sys/vfs/autofs/autofs.c (revision c87dd536)
/*-
 * Copyright (c) 2016 Tomohiro Kusumi <kusumi.tomohiro@gmail.com>
 * Copyright (c) 2016 The DragonFly Project
 * Copyright (c) 2014 The FreeBSD Foundation
 * All rights reserved.
 *
 * This software was developed by Edward Tomasz Napierala under sponsorship
 * from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */
/*-
 * Copyright (c) 1989, 1991, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Rick Macklem at The University of Guelph.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/sysctl.h>
#include <sys/queue.h>
#include <sys/signalvar.h>
#include <sys/refcount.h>
#include <sys/spinlock2.h>
#include <sys/kern_syscall.h>

#include "autofs.h"
#include "autofs_ioctl.h"

MALLOC_DEFINE(M_AUTOFS, "autofs", "Automounter filesystem");

struct objcache *autofs_request_objcache = NULL;
struct objcache *autofs_node_objcache = NULL;

static d_open_t		autofs_open;
static d_close_t	autofs_close;
static d_ioctl_t	autofs_ioctl;

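/*
 * Entry points for the autofs character device, which automountd(8)
 * opens to receive and complete mount requests.
 */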
struct dev_ops autofs_ops = {
	{ "autofs", 0, 0 },
	.d_open		= autofs_open,
	.d_close	= autofs_close,
	.d_ioctl	= autofs_ioctl,
};

/*
 * List of signals that can interrupt an autofs trigger.
 */
static int autofs_sig_set[] = {
	SIGINT,
	SIGTERM,
	SIGHUP,
	SIGKILL,
	SIGQUIT
};

struct autofs_softc	*autofs_softc = NULL;

SYSCTL_NODE(_vfs, OID_AUTO, autofs, CTLFLAG_RD, 0, "Automounter filesystem");
int autofs_debug = 1;
TUNABLE_INT("vfs.autofs.debug", &autofs_debug);
SYSCTL_INT(_vfs_autofs, OID_AUTO, debug, CTLFLAG_RW,
    &autofs_debug, 1, "Enable debug messages");
#if 0
int autofs_mount_on_stat = 0;
TUNABLE_INT("vfs.autofs.mount_on_stat", &autofs_mount_on_stat);
SYSCTL_INT(_vfs_autofs, OID_AUTO, mount_on_stat, CTLFLAG_RW,
    &autofs_mount_on_stat, 0, "Trigger mount on stat(2) on mountpoint");
#endif
static int autofs_timeout = 30;
TUNABLE_INT("vfs.autofs.timeout", &autofs_timeout);
SYSCTL_INT(_vfs_autofs, OID_AUTO, timeout, CTLFLAG_RW,
    &autofs_timeout, 30, "Number of seconds to wait for automountd(8)");
static int autofs_cache = 600;
TUNABLE_INT("vfs.autofs.cache", &autofs_cache);
SYSCTL_INT(_vfs_autofs, OID_AUTO, cache, CTLFLAG_RW,
    &autofs_cache, 600, "Number of seconds to wait before reinvoking "
    "automountd(8) for any given file or directory");
static int autofs_retry_attempts = 3;
TUNABLE_INT("vfs.autofs.retry_attempts", &autofs_retry_attempts);
SYSCTL_INT(_vfs_autofs, OID_AUTO, retry_attempts, CTLFLAG_RW,
    &autofs_retry_attempts, 3, "Number of attempts before failing mount");
static int autofs_retry_delay = 1;
TUNABLE_INT("vfs.autofs.retry_delay", &autofs_retry_delay);
SYSCTL_INT(_vfs_autofs, OID_AUTO, retry_delay, CTLFLAG_RW,
    &autofs_retry_delay, 1, "Number of seconds before retrying");
static int autofs_interruptible = 1;
TUNABLE_INT("vfs.autofs.interruptible", &autofs_interruptible);
SYSCTL_INT(_vfs_autofs, OID_AUTO, interruptible, CTLFLAG_RW,
    &autofs_interruptible, 1, "Allow requests to be interrupted by signal");

static __inline pid_t
proc_pgid(const struct proc *p)
{
	return (p->p_pgrp->pg_id);
}

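/*
 * Name comparison for the red-black tree of child nodes under a directory.
 */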
static int
autofs_node_cmp(const struct autofs_node *a, const struct autofs_node *b)
{
	return (strcmp(a->an_name, b->an_name));
}

RB_GENERATE(autofs_node_tree, autofs_node, an_link, autofs_node_cmp);

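/*
 * Return true when the calling process belongs to the process group of
 * the automountd(8) instance that has the autofs device open, so that
 * its own filesystem accesses never block waiting for a mount.
 */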
bool
autofs_ignore_thread(void)
{
	struct proc *curp = curproc;

	if (autofs_softc->sc_dev_opened == false)
		return (false);

	lwkt_gettoken(&curp->p_token);
	if (autofs_softc->sc_dev_sid == proc_pgid(curp)) {
		lwkt_reltoken(&curp->p_token);
		return (true);
	}
	lwkt_reltoken(&curp->p_token);

	return (false);
}

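/*
 * Build the full path of a node by walking up its parent chain and
 * prepending the mount point (am_on).  The result is allocated with
 * M_AUTOFS and must be freed by the caller.
 */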
char *
autofs_path(struct autofs_node *anp)
{
	struct autofs_mount *amp = anp->an_mount;
	size_t len;
	char *path, *tmp;

	path = kstrdup("", M_AUTOFS);
	for (; anp->an_parent != NULL; anp = anp->an_parent) {
		len = strlen(anp->an_name) + strlen(path) + 2;
		tmp = kmalloc(len, M_AUTOFS, M_WAITOK);
		ksnprintf(tmp, len, "%s/%s", anp->an_name, path);
		kfree(path, M_AUTOFS);
		path = tmp;
	}

	len = strlen(amp->am_on) + strlen(path) + 2;
	tmp = kmalloc(len, M_AUTOFS, M_WAITOK);
	ksnprintf(tmp, len, "%s/%s", amp->am_on, path);
	kfree(path, M_AUTOFS);
	path = tmp;

	return (path);
}

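/*
 * Timeout task scheduled when a request is created.  If automountd(8)
 * does not answer within autofs_timeout seconds, mark the request as
 * done with ETIMEDOUT and wake up the waiters.
 */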
static void
autofs_task(void *context, int pending)
{
	struct autofs_request *ar = context;

	mtx_lock_ex_quick(&autofs_softc->sc_lock);
	AUTOFS_WARN("request %d for %s timed out after %d seconds",
	    ar->ar_id, ar->ar_path, autofs_timeout);

	ar->ar_error = ETIMEDOUT;
	ar->ar_wildcards = true;
	ar->ar_done = true;
	ar->ar_in_progress = false;
	cv_broadcast(&autofs_softc->sc_cv);
	mtx_unlock_ex(&autofs_softc->sc_lock);
}

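/*
 * Return true if a lookup of the given component under this node can be
 * satisfied from cache, without waking up automountd(8).
 */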
bool
autofs_cached(struct autofs_node *anp, const char *component, int componentlen)
{
	struct autofs_mount *amp = anp->an_mount;

	KKASSERT(mtx_notlocked(&amp->am_lock));

	/*
	 * For the root node, even if the node is marked as cached, we
	 * still need to request automountd(8) assistance when the
	 * requested top-level directory does not exist.  This is
	 * necessary for wildcard indirect map keys to work.  We don't
	 * do this if we know that there are no wildcards.
	 */
	if (anp->an_parent == NULL && componentlen != 0 && anp->an_wildcards) {
		int error;
		KKASSERT(amp->am_root == anp);
		mtx_lock_sh_quick(&amp->am_lock);
		error = autofs_node_find(anp, component, componentlen, NULL);
		mtx_unlock_sh(&amp->am_lock);
		if (error)
			return (false);
	}

	return (anp->an_cached);
}

static void
autofs_cache_callout(void *context)
{
	struct autofs_node *anp = context;

	autofs_node_uncache(anp);
}

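/*
 * Drop the cached state of the root node and its direct children so that
 * the next lookup consults automountd(8) again.
 */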
void
autofs_flush(struct autofs_mount *amp)
{
	struct autofs_node *anp = amp->am_root;
	struct autofs_node *child;

	mtx_lock_ex_quick(&amp->am_lock);
	RB_FOREACH(child, autofs_node_tree, &anp->an_children) {
		autofs_node_uncache(child);
	}
	autofs_node_uncache(amp->am_root);
	mtx_unlock_ex(&amp->am_lock);

	AUTOFS_DEBUG("%s flushed", amp->am_on);
}

/*
 * The set/restore sigmask functions are used to (temporarily) overwrite
 * the thread sigmask during triggering.
 */
static void
autofs_set_sigmask(sigset_t *oldset)
{
	struct lwp *lp = curthread->td_lwp;
	sigset_t newset;
	int i;

	SIGFILLSET(newset);
	/* Remove the autofs set of signals from newset */
	lwkt_gettoken(&lp->lwp_token);
	for (i = 0; i < nitems(autofs_sig_set); i++) {
		/*
		 * But make sure we leave the ones already masked
		 * by the process, i.e. remove the signal from the
		 * temporary signalmask only if it wasn't already
		 * in sigmask.
		 */
		if (!SIGISMEMBER(lp->lwp_sigmask, autofs_sig_set[i]) &&
		    !SIGISMEMBER(lp->lwp_proc->p_sigacts->ps_sigignore,
		    autofs_sig_set[i])) {
			SIGDELSET(newset, autofs_sig_set[i]);
		}
	}
	kern_sigprocmask(SIG_SETMASK, &newset, oldset);
	lwkt_reltoken(&lp->lwp_token);
}

static void
autofs_restore_sigmask(sigset_t *set)
{
	kern_sigprocmask(SIG_SETMASK, set, NULL);
}

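/*
 * Send a single request to automountd(8) and sleep until it completes,
 * fails or times out.  An identical request already in flight is reused
 * instead of queueing a duplicate.  On success the node is marked as
 * cached for autofs_cache seconds (when caching is enabled).  Called
 * with sc_lock held exclusively.
 */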
static int
autofs_trigger_one(struct autofs_node *anp,
    const char *component, int componentlen)
{
#define _taskqueue_thread (taskqueue_thread[mycpuid])
	struct autofs_mount *amp = anp->an_mount;
	struct autofs_request *ar;
	char *key, *path;
	int error = 0, request_error;
	bool wildcards;

	KKASSERT(mtx_islocked_ex(&autofs_softc->sc_lock));

	if (anp->an_parent == NULL) {
		key = kstrndup(component, componentlen, M_AUTOFS);
	} else {
		struct autofs_node *firstanp;
		for (firstanp = anp; firstanp->an_parent->an_parent != NULL;
		    firstanp = firstanp->an_parent)
			continue;
		key = kstrdup(firstanp->an_name, M_AUTOFS);
	}

	path = autofs_path(anp);

	TAILQ_FOREACH(ar, &autofs_softc->sc_requests, ar_next) {
		if (strcmp(ar->ar_path, path))
			continue;
		if (strcmp(ar->ar_key, key))
			continue;

		KASSERT(strcmp(ar->ar_from, amp->am_from) == 0,
		    ("from changed; %s != %s", ar->ar_from, amp->am_from));
		KASSERT(strcmp(ar->ar_prefix, amp->am_prefix) == 0,
		    ("prefix changed; %s != %s",
		     ar->ar_prefix, amp->am_prefix));
		KASSERT(strcmp(ar->ar_options, amp->am_options) == 0,
		    ("options changed; %s != %s",
		     ar->ar_options, amp->am_options));
		break;
	}

	if (ar != NULL) {
		refcount_acquire(&ar->ar_refcount);
	} else {
		/*
		 * All struct fields must be initialized.
		 */
		ar = objcache_get(autofs_request_objcache, M_WAITOK);
		ar->ar_mount = amp;
		ar->ar_id = autofs_softc->sc_last_request_id++;
		ar->ar_done = false;
		ar->ar_error = 0;
		ar->ar_wildcards = false;
		ar->ar_in_progress = false;
		strlcpy(ar->ar_from, amp->am_from, sizeof(ar->ar_from));
		strlcpy(ar->ar_path, path, sizeof(ar->ar_path));
		strlcpy(ar->ar_prefix, amp->am_prefix, sizeof(ar->ar_prefix));
		strlcpy(ar->ar_key, key, sizeof(ar->ar_key));
		strlcpy(ar->ar_options,
		    amp->am_options, sizeof(ar->ar_options));
		TIMEOUT_TASK_INIT(_taskqueue_thread, &ar->ar_task, 0,
		    autofs_task, ar);
		taskqueue_enqueue_timeout(_taskqueue_thread, &ar->ar_task,
		    autofs_timeout * hz);
		refcount_init(&ar->ar_refcount, 1);
		TAILQ_INSERT_TAIL(&autofs_softc->sc_requests, ar, ar_next);
	}

	cv_broadcast(&autofs_softc->sc_cv);
	while (ar->ar_done == false) {
		if (autofs_interruptible) {
			sigset_t oldset;
			autofs_set_sigmask(&oldset);
			error = cv_mtx_wait_sig(&autofs_softc->sc_cv,
			    &autofs_softc->sc_lock);
			autofs_restore_sigmask(&oldset);
			if (error) {
				AUTOFS_WARN("cv_mtx_wait_sig for %s failed "
				    "with error %d", ar->ar_path, error);
				break;
			}
		} else {
			cv_mtx_wait(&autofs_softc->sc_cv,
			    &autofs_softc->sc_lock);
		}
	}

	request_error = ar->ar_error;
	if (request_error)
		AUTOFS_WARN("request for %s completed with error %d",
		    ar->ar_path, request_error);

	wildcards = ar->ar_wildcards;

	/*
	 * Check if this is the last reference.
	 */
	if (refcount_release(&ar->ar_refcount)) {
		TAILQ_REMOVE(&autofs_softc->sc_requests, ar, ar_next);
		mtx_unlock_ex(&autofs_softc->sc_lock);
		taskqueue_cancel_timeout(_taskqueue_thread, &ar->ar_task, NULL);
		taskqueue_drain_timeout(_taskqueue_thread, &ar->ar_task);
		objcache_put(autofs_request_objcache, ar);
		mtx_lock_ex_quick(&autofs_softc->sc_lock);
	}

	/*
	 * Note that we do not do negative caching on purpose.  This
	 * way the user can retry access at any time, e.g. after fixing
	 * the failure reason, without waiting for the cache timer to expire.
	 */
	if (error == 0 && request_error == 0 && autofs_cache > 0) {
		autofs_node_cache(anp);
		anp->an_wildcards = wildcards;
		callout_reset(&anp->an_callout, autofs_cache * hz,
		    autofs_cache_callout, anp);
	}

	kfree(key, M_AUTOFS);
	kfree(path, M_AUTOFS);

	if (error)
		return (error);
	return (request_error);
}

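/*
 * Trigger a mount, retrying failed requests up to autofs_retry_attempts
 * times with autofs_retry_delay seconds between attempts.  A request
 * interrupted by a signal is not retried.
 */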
int
autofs_trigger(struct autofs_node *anp,
    const char *component, int componentlen)
{
	for (;;) {
		int error, dummy;

		error = autofs_trigger_one(anp, component, componentlen);
		if (error == 0) {
			anp->an_retries = 0;
			return (0);
		}
		if (error == EINTR || error == ERESTART) {
			AUTOFS_DEBUG("trigger interrupted by signal, "
			    "not retrying");
			anp->an_retries = 0;
			return (error);
		}
		anp->an_retries++;
		if (anp->an_retries >= autofs_retry_attempts) {
			AUTOFS_DEBUG("trigger failed %d times; returning "
			    "error %d", anp->an_retries, error);
			anp->an_retries = 0;
			return (error);
		}
		AUTOFS_DEBUG("trigger failed with error %d; will retry in "
		    "%d seconds, %d attempts left", error, autofs_retry_delay,
		    autofs_retry_attempts - anp->an_retries);
		mtx_unlock_ex(&autofs_softc->sc_lock);
		tsleep(&dummy, 0, "autofs_retry", autofs_retry_delay * hz);
		mtx_lock_ex_quick(&autofs_softc->sc_lock);
	}
}

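/*
 * AUTOFSREQUEST handler: wait for a request that is neither completed nor
 * already being handled, mark it in progress and copy it out for
 * automountd(8).  The daemon's process group is recorded so that
 * autofs_ignore_thread() can recognize it later.
 */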
static int
autofs_ioctl_request(struct autofs_daemon_request *adr)
{
	struct proc *curp = curproc;
	struct autofs_request *ar;

	mtx_lock_ex_quick(&autofs_softc->sc_lock);
	for (;;) {
		int error;
		TAILQ_FOREACH(ar, &autofs_softc->sc_requests, ar_next) {
			if (ar->ar_done)
				continue;
			if (ar->ar_in_progress)
				continue;
			break;
		}

		if (ar != NULL)
			break;

		error = cv_mtx_wait_sig(&autofs_softc->sc_cv,
		    &autofs_softc->sc_lock);
		if (error) {
			mtx_unlock_ex(&autofs_softc->sc_lock);
			return (error);
		}
	}

	ar->ar_in_progress = true;

	adr->adr_id = ar->ar_id;
	strlcpy(adr->adr_from, ar->ar_from, sizeof(adr->adr_from));
	strlcpy(adr->adr_path, ar->ar_path, sizeof(adr->adr_path));
	strlcpy(adr->adr_prefix, ar->ar_prefix, sizeof(adr->adr_prefix));
	strlcpy(adr->adr_key, ar->ar_key, sizeof(adr->adr_key));
	strlcpy(adr->adr_options, ar->ar_options, sizeof(adr->adr_options));

	mtx_unlock_ex(&autofs_softc->sc_lock);

	lwkt_gettoken(&curp->p_token);
	autofs_softc->sc_dev_sid = proc_pgid(curp);
	lwkt_reltoken(&curp->p_token);

	return (0);
}

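/*
 * AUTOFSDONE handler: look up the request by ID, record the completion
 * status reported by automountd(8) and wake up the threads sleeping in
 * autofs_trigger_one().
 */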
static int
autofs_ioctl_done(struct autofs_daemon_done *add)
{
	struct autofs_request *ar;

	mtx_lock_ex_quick(&autofs_softc->sc_lock);
	TAILQ_FOREACH(ar, &autofs_softc->sc_requests, ar_next) {
		if (ar->ar_id == add->add_id)
			break;
	}

	if (ar == NULL) {
		mtx_unlock_ex(&autofs_softc->sc_lock);
		AUTOFS_DEBUG("id %d not found", add->add_id);
		return (ESRCH);
	}

	ar->ar_error = add->add_error;
	ar->ar_wildcards = add->add_wildcards;
	ar->ar_done = true;
	ar->ar_in_progress = false;
	cv_broadcast(&autofs_softc->sc_cv);

	mtx_unlock_ex(&autofs_softc->sc_lock);

	return (0);
}

static int
autofs_open(struct dev_open_args *ap)
{
	mtx_lock_ex_quick(&autofs_softc->sc_lock);
	/*
	 * We must never block automountd(8) and its descendants, and we
	 * use the session ID to determine that: we store the session ID
	 * of the process that opened the device and then compare it with
	 * the session IDs of triggering processes.  This means running a
	 * second automountd(8) instance would break the previous one.
	 * The check below prevents that from happening.
	 */
	if (autofs_softc->sc_dev_opened) {
		mtx_unlock_ex(&autofs_softc->sc_lock);
		return (EBUSY);
	}

	autofs_softc->sc_dev_opened = true;
	mtx_unlock_ex(&autofs_softc->sc_lock);

	return (0);
}

static int
autofs_close(struct dev_close_args *ap)
{
	mtx_lock_ex_quick(&autofs_softc->sc_lock);
	KASSERT(autofs_softc->sc_dev_opened, ("not opened?"));
	autofs_softc->sc_dev_opened = false;
	mtx_unlock_ex(&autofs_softc->sc_lock);

	return (0);
}

static int
autofs_ioctl(struct dev_ioctl_args *ap)
{
	u_long cmd = ap->a_cmd;
	void *arg = ap->a_data;

	KASSERT(autofs_softc->sc_dev_opened, ("not opened?"));

	switch (cmd) {
	case AUTOFSREQUEST:
		return (autofs_ioctl_request(
		    (struct autofs_daemon_request *)arg));
	case AUTOFSDONE:
		return (autofs_ioctl_done(
		    (struct autofs_daemon_done *)arg));
	default:
		AUTOFS_DEBUG("invalid cmd %lx", cmd);
		return (EINVAL);
	}
	return (EINVAL);
}

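/*
 * Sleep on the condition variable, releasing the mtx (when non-NULL)
 * while asleep.  Signals wake the sleeper when wakesig is set; a timo
 * of 0 means no timeout.  Returns the *sleep() error code.
 */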
int
_cv_mtx_timedwait(struct cv *c, struct mtx *mtx, int timo, int wakesig)
{
	int flags = wakesig ? PCATCH : 0;
	int error;

	/*
	 * Can interlock without critical section/spinlock as long
	 * as we don't block before calling *sleep().  PINTERLOCKED
	 * must be passed to the *sleep() to use the manual interlock
	 * (else a new one is created which opens a timing race).
	 */
	tsleep_interlock(c, flags);

	spin_lock(&c->cv_lock);
	c->cv_waiters++;
	spin_unlock(&c->cv_lock);

	if (mtx)
		error = mtxsleep(c, mtx, flags | PINTERLOCKED, c->cv_desc,
		    timo);
	else
		error = tsleep(c, flags | PINTERLOCKED, c->cv_desc, timo);

	return (error);
}