/*-
 * Copyright (c) 2016 Tomohiro Kusumi <tkusumi@netbsd.org>
 * Copyright (c) 2016 The DragonFly Project
 * Copyright (c) 2014 The FreeBSD Foundation
 * All rights reserved.
 *
 * This software was developed by Edward Tomasz Napierala under sponsorship
 * from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */
/*-
 * Copyright (c) 1989, 1991, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Rick Macklem at The University of Guelph.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/sysctl.h>
#include <sys/queue.h>
#include <sys/signalvar.h>
#include <sys/refcount.h>
#include <sys/spinlock2.h>
#include <sys/kern_syscall.h>

#include "autofs.h"
#include "autofs_ioctl.h"

MALLOC_DEFINE(M_AUTOFS, "autofs", "Automounter filesystem");

struct objcache *autofs_request_objcache = NULL;
struct objcache *autofs_node_objcache = NULL;

static d_open_t		autofs_open;
static d_close_t	autofs_close;
static d_ioctl_t	autofs_ioctl;

struct dev_ops autofs_ops = {
	{ "autofs", 0, D_MPSAFE },
	.d_open		= autofs_open,
	.d_close	= autofs_close,
	.d_ioctl	= autofs_ioctl,
};

/*
 * List of signals that can interrupt an autofs trigger.
 */
static int autofs_sig_set[] = {
	SIGINT,
	SIGTERM,
	SIGHUP,
	SIGKILL,
	SIGQUIT
};

struct autofs_softc	*autofs_softc = NULL;

SYSCTL_NODE(_vfs, OID_AUTO, autofs, CTLFLAG_RD, 0, "Automounter filesystem");
int autofs_debug = 1;
TUNABLE_INT("vfs.autofs.debug", &autofs_debug);
SYSCTL_INT(_vfs_autofs, OID_AUTO, debug, CTLFLAG_RW, &autofs_debug, 1,
    "Enable debug messages");
#if 0
int autofs_mount_on_stat = 0;
TUNABLE_INT("vfs.autofs.mount_on_stat", &autofs_mount_on_stat);
SYSCTL_INT(_vfs_autofs, OID_AUTO, mount_on_stat, CTLFLAG_RW,
    &autofs_mount_on_stat, 0, "Trigger mount on stat(2) on mountpoint");
#endif
static int autofs_timeout = 30;
TUNABLE_INT("vfs.autofs.timeout", &autofs_timeout);
SYSCTL_INT(_vfs_autofs, OID_AUTO, timeout, CTLFLAG_RW, &autofs_timeout, 30,
    "Number of seconds to wait for automountd(8)");
static int autofs_cache = 600;
TUNABLE_INT("vfs.autofs.cache", &autofs_cache);
SYSCTL_INT(_vfs_autofs, OID_AUTO, cache, CTLFLAG_RW, &autofs_cache, 600,
    "Number of seconds to wait before reinvoking automountd(8) for any given "
    "file or directory");
static int autofs_retry_attempts = 3;
TUNABLE_INT("vfs.autofs.retry_attempts", &autofs_retry_attempts);
SYSCTL_INT(_vfs_autofs, OID_AUTO, retry_attempts, CTLFLAG_RW,
    &autofs_retry_attempts, 3, "Number of attempts before failing mount");
static int autofs_retry_delay = 1;
TUNABLE_INT("vfs.autofs.retry_delay", &autofs_retry_delay);
SYSCTL_INT(_vfs_autofs, OID_AUTO, retry_delay, CTLFLAG_RW, &autofs_retry_delay,
    1, "Number of seconds before retrying");
static int autofs_interruptible = 1;
TUNABLE_INT("vfs.autofs.interruptible", &autofs_interruptible);
SYSCTL_INT(_vfs_autofs, OID_AUTO, interruptible, CTLFLAG_RW,
    &autofs_interruptible, 1, "Allow requests to be interrupted by signal");

static __inline pid_t
proc_pgid(const struct proc *p)
{
	return (p->p_pgrp->pg_id);
}

static int
autofs_node_cmp(const struct autofs_node *a, const struct autofs_node *b)
{
	return (strcmp(a->an_name, b->an_name));
}

RB_GENERATE(autofs_node_tree, autofs_node, an_link, autofs_node_cmp);

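/*
 * Return true if the current process belongs to the process group
 * recorded for automountd(8) (see autofs_ioctl_request()), in which
 * case its filesystem activity must not trigger a mount request.
 */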
bool
autofs_ignore_thread(void)
{
	struct proc *curp = curproc;

	if (autofs_softc->sc_dev_opened == false)
		return (false);

	lwkt_gettoken(&curp->p_token);
	if (autofs_softc->sc_dev_sid == proc_pgid(curp)) {
		lwkt_reltoken(&curp->p_token);
		return (true);
	}
	lwkt_reltoken(&curp->p_token);

	return (false);
}

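/*
 * Build the full path for a node by walking up its parents and
 * prepending the mount point (am_on).  The returned string is
 * allocated with M_AUTOFS and must be kfree()d by the caller.
 */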
char *
autofs_path(struct autofs_node *anp)
{
	struct autofs_mount *amp = anp->an_mount;
	size_t len;
	char *path, *tmp;

	path = kstrdup("", M_AUTOFS);
	for (; anp->an_parent != NULL; anp = anp->an_parent) {
		len = strlen(anp->an_name) + strlen(path) + 2;
		tmp = kmalloc(len, M_AUTOFS, M_WAITOK);
		ksnprintf(tmp, len, "%s/%s", anp->an_name, path);
		kfree(path, M_AUTOFS);
		path = tmp;
	}

	len = strlen(amp->am_on) + strlen(path) + 2;
	tmp = kmalloc(len, M_AUTOFS, M_WAITOK);
	ksnprintf(tmp, len, "%s/%s", amp->am_on, path);
	kfree(path, M_AUTOFS);
	path = tmp;

	return (path);
}

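/*
 * Timeout task armed when a request is queued.  If automountd(8) does
 * not complete the request within autofs_timeout seconds, mark it as
 * done with ETIMEDOUT and wake up the threads waiting on it.
 */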
static void
autofs_task(void *context, int pending)
{
	struct autofs_request *ar = context;

	mtx_lock_ex_quick(&autofs_softc->sc_lock);
	AUTOFS_WARN("request %d for %s timed out after %d seconds",
	    ar->ar_id, ar->ar_path, autofs_timeout);

	ar->ar_error = ETIMEDOUT;
	ar->ar_wildcards = true;
	ar->ar_done = true;
	ar->ar_in_progress = false;
	cv_broadcast(&autofs_softc->sc_cv);
	mtx_unlock_ex(&autofs_softc->sc_lock);
}

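/*
 * Return whether the node is cached, i.e. whether a lookup can be
 * satisfied without asking automountd(8) for assistance.
 */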
bool
autofs_cached(struct autofs_node *anp, const char *component, int componentlen)
{
	struct autofs_mount *amp = anp->an_mount;

	KKASSERT(mtx_notlocked(&amp->am_lock));

	/*
	 * For the root node we need to request automountd(8) assistance
	 * even if the node is marked as cached, whenever the requested
	 * top-level directory does not exist.  This is necessary for
	 * wildcard indirect map keys to work.  We don't do this if we
	 * know that there are no wildcards.
	 */
	if (anp->an_parent == NULL && componentlen != 0 && anp->an_wildcards) {
		int error;
		KKASSERT(amp->am_root == anp);
		mtx_lock_sh_quick(&amp->am_lock);
		error = autofs_node_find(anp, component, componentlen, NULL);
		mtx_unlock_sh(&amp->am_lock);
		if (error)
			return (false);
	}

	return (anp->an_cached);
}

static void
autofs_cache_callout(void *context)
{
	struct autofs_node *anp = context;

	autofs_node_uncache(anp);
}

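/*
 * Uncache the root node and all of its direct children, so that the
 * next lookup triggers automountd(8) again.
 */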
void
autofs_flush(struct autofs_mount *amp)
{
	struct autofs_node *anp = amp->am_root;
	struct autofs_node *child;

	mtx_lock_ex_quick(&amp->am_lock);
	RB_FOREACH(child, autofs_node_tree, &anp->an_children)
		autofs_node_uncache(child);
	autofs_node_uncache(amp->am_root);
	mtx_unlock_ex(&amp->am_lock);

	AUTOFS_DEBUG("%s flushed", amp->am_on);
}

/*
 * The set/restore sigmask functions are used to (temporarily) overwrite
 * the thread sigmask during triggering.
 */
static void
autofs_set_sigmask(sigset_t *oldset)
{
	struct lwp *lp = curthread->td_lwp;
	sigset_t newset;
	int i;

	SIGFILLSET(newset);
	/* Remove the autofs set of signals from newset */
	lwkt_gettoken(&lp->lwp_token);
	for (i = 0; i < nitems(autofs_sig_set); i++) {
		/*
		 * But make sure we leave the ones already masked
		 * by the process, i.e. remove the signal from the
		 * temporary signalmask only if it wasn't already
		 * in sigmask.
		 */
		if (!SIGISMEMBER(lp->lwp_sigmask, autofs_sig_set[i]) &&
		    !SIGISMEMBER(lp->lwp_proc->p_sigacts->ps_sigignore,
		    autofs_sig_set[i]))
			SIGDELSET(newset, autofs_sig_set[i]);
	}
	kern_sigprocmask(SIG_SETMASK, &newset, oldset);
	lwkt_reltoken(&lp->lwp_token);
}

static void
autofs_restore_sigmask(sigset_t *set)
{
	kern_sigprocmask(SIG_SETMASK, set, NULL);
}

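/*
 * Queue a single request for automountd(8), or join an identical
 * request that is already pending, then sleep until the daemon
 * completes it or the timeout task fires.  On success the node is
 * cached for autofs_cache seconds.  Called with sc_lock held
 * exclusively; the lock may be dropped and reacquired.
 */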
static int
autofs_trigger_one(struct autofs_node *anp, const char *component,
    int componentlen)
{
#define _taskqueue_thread (taskqueue_thread[mycpuid])
	struct autofs_mount *amp = anp->an_mount;
	struct autofs_request *ar;
	char *key, *path;
	int error = 0, request_error;
	bool wildcards;

	KKASSERT(mtx_islocked_ex(&autofs_softc->sc_lock));

	if (anp->an_parent == NULL) {
		key = kstrndup(component, componentlen, M_AUTOFS);
	} else {
		struct autofs_node *firstanp;
		for (firstanp = anp; firstanp->an_parent->an_parent != NULL;
		    firstanp = firstanp->an_parent)
			continue;
		key = kstrdup(firstanp->an_name, M_AUTOFS);
	}

	path = autofs_path(anp);

	TAILQ_FOREACH(ar, &autofs_softc->sc_requests, ar_next) {
		if (strcmp(ar->ar_path, path) || strcmp(ar->ar_key, key))
			continue;
		KASSERT(strcmp(ar->ar_from, amp->am_from) == 0,
		    ("from changed; %s != %s", ar->ar_from, amp->am_from));
		KASSERT(strcmp(ar->ar_prefix, amp->am_prefix) == 0,
		    ("prefix changed; %s != %s",
		     ar->ar_prefix, amp->am_prefix));
		KASSERT(strcmp(ar->ar_options, amp->am_options) == 0,
		    ("options changed; %s != %s",
		     ar->ar_options, amp->am_options));
		break;
	}

	if (ar != NULL) {
		refcount_acquire(&ar->ar_refcount);
	} else {
		/*
		 * All struct fields must be initialized.
		 */
		ar = objcache_get(autofs_request_objcache, M_WAITOK);
		ar->ar_mount = amp;
		ar->ar_id = autofs_softc->sc_last_request_id++;
		ar->ar_done = false;
		ar->ar_error = 0;
		ar->ar_wildcards = false;
		ar->ar_in_progress = false;
		strlcpy(ar->ar_from, amp->am_from, sizeof(ar->ar_from));
		strlcpy(ar->ar_path, path, sizeof(ar->ar_path));
		strlcpy(ar->ar_prefix, amp->am_prefix, sizeof(ar->ar_prefix));
		strlcpy(ar->ar_key, key, sizeof(ar->ar_key));
		strlcpy(ar->ar_options, amp->am_options,
		    sizeof(ar->ar_options));
		TIMEOUT_TASK_INIT(_taskqueue_thread, &ar->ar_task, 0,
		    autofs_task, ar);
		taskqueue_enqueue_timeout(_taskqueue_thread, &ar->ar_task,
		    autofs_timeout * hz);
		refcount_init(&ar->ar_refcount, 1);
		TAILQ_INSERT_TAIL(&autofs_softc->sc_requests, ar, ar_next);
	}

	cv_broadcast(&autofs_softc->sc_cv);
	while (ar->ar_done == false) {
		if (autofs_interruptible) {
			sigset_t oldset;
			autofs_set_sigmask(&oldset);
			error = cv_mtx_wait_sig(&autofs_softc->sc_cv,
			    &autofs_softc->sc_lock);
			autofs_restore_sigmask(&oldset);
			if (error) {
				AUTOFS_WARN("cv_mtx_wait_sig for %s failed "
				    "with error %d", ar->ar_path, error);
				break;
			}
		} else {
			cv_mtx_wait(&autofs_softc->sc_cv,
			    &autofs_softc->sc_lock);
		}
	}

	request_error = ar->ar_error;
	if (request_error)
		AUTOFS_WARN("request for %s completed with error %d, "
		    "pid %d (%s)", ar->ar_path, request_error,
		    curproc->p_pid, curproc->p_comm);

	wildcards = ar->ar_wildcards;

	/*
	 * Check if this is the last reference.
	 */
	if (refcount_release(&ar->ar_refcount)) {
		TAILQ_REMOVE(&autofs_softc->sc_requests, ar, ar_next);
		mtx_unlock_ex(&autofs_softc->sc_lock);
		taskqueue_cancel_timeout(_taskqueue_thread, &ar->ar_task, NULL);
		taskqueue_drain_timeout(_taskqueue_thread, &ar->ar_task);
		objcache_put(autofs_request_objcache, ar);
		mtx_lock_ex_quick(&autofs_softc->sc_lock);
	}

	/*
	 * Note that we do not do negative caching on purpose.  This
	 * way the user can retry access at any time, e.g. after fixing
	 * the failure reason, without waiting for cache timer to expire.
	 */
	if (error == 0 && request_error == 0 && autofs_cache > 0) {
		autofs_node_cache(anp);
		anp->an_wildcards = wildcards;
		callout_reset(&anp->an_callout, autofs_cache * hz,
		    autofs_cache_callout, anp);
	}

	kfree(key, M_AUTOFS);
	kfree(path, M_AUTOFS);

	if (error)
		return (error);
	return (request_error);
}

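/*
 * Trigger automountd(8) for the given node, retrying up to
 * autofs_retry_attempts times with autofs_retry_delay seconds between
 * attempts.  Interruption by a signal stops the retries immediately.
 */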
int
autofs_trigger(struct autofs_node *anp, const char *component, int componentlen)
{
	for (;;) {
		int error, dummy;

		error = autofs_trigger_one(anp, component, componentlen);
		if (error == 0) {
			anp->an_retries = 0;
			return (0);
		}
		if (error == EINTR || error == ERESTART) {
			AUTOFS_DEBUG("trigger interrupted by signal, "
			    "not retrying");
			anp->an_retries = 0;
			return (error);
		}
		anp->an_retries++;
		if (anp->an_retries >= autofs_retry_attempts) {
			AUTOFS_DEBUG("trigger failed %d times; returning "
			    "error %d", anp->an_retries, error);
			anp->an_retries = 0;
			return (error);
		}
		AUTOFS_DEBUG("trigger failed with error %d; will retry in "
		    "%d seconds, %d attempts left", error, autofs_retry_delay,
		    autofs_retry_attempts - anp->an_retries);
		mtx_unlock_ex(&autofs_softc->sc_lock);
		tsleep(&dummy, 0, "autofs_retry", autofs_retry_delay * hz);
		mtx_lock_ex_quick(&autofs_softc->sc_lock);
	}
}

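/*
 * AUTOFSREQUEST handler: wait for a request that is neither done nor
 * in progress, mark it in progress, and copy its details out to
 * automountd(8).  The daemon's process group id is recorded in
 * sc_dev_sid so that autofs_ignore_thread() can recognize it later.
 */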
static int
autofs_ioctl_request(struct autofs_daemon_request *adr)
{
	struct proc *curp = curproc;
	struct autofs_request *ar;

	mtx_lock_ex_quick(&autofs_softc->sc_lock);
	for (;;) {
		int error;
		TAILQ_FOREACH(ar, &autofs_softc->sc_requests, ar_next) {
			if (ar->ar_done || ar->ar_in_progress)
				continue;
			break;
		}
		if (ar != NULL)
			break; /* found (!done && !in_progress) */

		error = cv_mtx_wait_sig(&autofs_softc->sc_cv,
		    &autofs_softc->sc_lock);
		if (error) {
			mtx_unlock_ex(&autofs_softc->sc_lock);
			return (error);
		}
	}

	ar->ar_in_progress = true;

	adr->adr_id = ar->ar_id;
	strlcpy(adr->adr_from, ar->ar_from, sizeof(adr->adr_from));
	strlcpy(adr->adr_path, ar->ar_path, sizeof(adr->adr_path));
	strlcpy(adr->adr_prefix, ar->ar_prefix, sizeof(adr->adr_prefix));
	strlcpy(adr->adr_key, ar->ar_key, sizeof(adr->adr_key));
	strlcpy(adr->adr_options, ar->ar_options, sizeof(adr->adr_options));

	mtx_unlock_ex(&autofs_softc->sc_lock);

	lwkt_gettoken(&curp->p_token);
	autofs_softc->sc_dev_sid = proc_pgid(curp);
	lwkt_reltoken(&curp->p_token);

	return (0);
}

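/*
 * AUTOFSDONE handler: automountd(8) reports completion of the request
 * identified by add_id; record the result and wake up the threads
 * waiting in autofs_trigger_one().
 */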
static int
autofs_ioctl_done(struct autofs_daemon_done *add)
{
	struct autofs_request *ar;

	mtx_lock_ex_quick(&autofs_softc->sc_lock);
	TAILQ_FOREACH(ar, &autofs_softc->sc_requests, ar_next)
		if (ar->ar_id == add->add_id)
			break;

	if (ar == NULL) {
		mtx_unlock_ex(&autofs_softc->sc_lock);
		AUTOFS_DEBUG("id %d not found", add->add_id);
		return (ESRCH);
	}

	ar->ar_error = add->add_error;
	ar->ar_wildcards = add->add_wildcards;
	ar->ar_done = true;
	ar->ar_in_progress = false;
	cv_broadcast(&autofs_softc->sc_cv);

	mtx_unlock_ex(&autofs_softc->sc_lock);

	return (0);
}

static int
autofs_open(struct dev_open_args *ap)
{
	mtx_lock_ex_quick(&autofs_softc->sc_lock);
	/*
	 * We must never block automountd(8) and its descendants, and we
	 * use the session ID to determine that: we store the session id
	 * of the process that opened the device, and then compare it with
	 * the session ids of triggering processes.  This means running a
	 * second automountd(8) instance would break the previous one.
	 * The check below prevents that from happening.
	 */
	if (autofs_softc->sc_dev_opened) {
		mtx_unlock_ex(&autofs_softc->sc_lock);
		return (EBUSY);
	}

	autofs_softc->sc_dev_opened = true;
	mtx_unlock_ex(&autofs_softc->sc_lock);

	return (0);
}

static int
autofs_close(struct dev_close_args *ap)
{
	mtx_lock_ex_quick(&autofs_softc->sc_lock);
	KASSERT(autofs_softc->sc_dev_opened, ("not opened?"));
	autofs_softc->sc_dev_opened = false;
	mtx_unlock_ex(&autofs_softc->sc_lock);

	return (0);
}

static int
autofs_ioctl(struct dev_ioctl_args *ap)
{
	unsigned long cmd = ap->a_cmd;
	void *arg = ap->a_data;

	KASSERT(autofs_softc->sc_dev_opened, ("not opened?"));

	switch (cmd) {
	case AUTOFSREQUEST:
		return (autofs_ioctl_request(
		    (struct autofs_daemon_request *)arg));
	case AUTOFSDONE:
		return (autofs_ioctl_done((struct autofs_daemon_done *)arg));
	default:
		AUTOFS_DEBUG("invalid cmd %lx", cmd);
		return (EINVAL);
	}
	return (EINVAL);
}