1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 /*
30  *	syseventd - The system event daemon
31  *
32  *		This daemon dispatches event buffers received from the
33  *		kernel to all interested SLM clients.  SLMs in turn
34  *		deliver the buffers to their particular application
35  *		clients.
36  */
37 #include <stdio.h>
38 #include <sys/types.h>
39 #include <dirent.h>
40 #include <stdarg.h>
41 #include <stddef.h>
42 #include <stdlib.h>
43 #include <dlfcn.h>
44 #include <door.h>
45 #include <errno.h>
46 #include <fcntl.h>
47 #include <signal.h>
48 #include <strings.h>
49 #include <unistd.h>
50 #include <synch.h>
51 #include <syslog.h>
52 #include <thread.h>
53 #include <libsysevent.h>
54 #include <limits.h>
55 #include <locale.h>
56 #include <sys/sysevent.h>
57 #include <sys/sysevent_impl.h>
58 #include <sys/modctl.h>
59 #include <sys/stat.h>
60 #include <sys/systeminfo.h>
61 #include <sys/wait.h>
62 
63 #include "sysevent_signal.h"
64 #include "syseventd.h"
65 #include "message.h"
66 
67 extern int insert_client(void *client, int client_type, int retry_limit);
68 extern void delete_client(int id);
69 extern void initialize_client_tbl(void);
70 
71 extern struct sysevent_client *sysevent_client_tbl[];
72 extern mutex_t client_tbl_lock;
73 
#define	DEBUG_LEVEL_FORK	9	/* daemon runs in the background at */
					/* all debug levels below this value */
76 
77 int debug_level = 0;
78 char *root_dir = "";			/* Relative root for lock and door */
79 
80 /* Maximum number of outstanding events dispatched */
81 #define	SE_EVENT_DISPATCH_CNT	100
82 
83 static int upcall_door;			/* Kernel event door */
84 static int door_upcall_retval;		/* Kernel event posting return value */
85 static int fini_pending = 0;		/* fini pending flag */
86 static int deliver_buf = 0;		/* Current event buffer from kernel */
87 static int dispatch_buf = 0;		/* Current event buffer dispatched */
88 static sysevent_t **eventbuf; 		/* Global array of event buffers */
89 static struct ev_completion *event_compq;	/* Event completion queue */
90 static mutex_t ev_comp_lock;		/* Event completion queue lock */
91 static mutex_t err_mutex;		/* error logging lock */
92 static mutex_t door_lock;		/* sync door return access */
93 static rwlock_t mod_unload_lock;		/* sync module unloading */
94 
95 /* declarations and definitions for avoiding multiple daemons running */
96 #define	DAEMON_LOCK_FILE "/etc/sysevent/syseventd_lock"
97 char local_lock_file[PATH_MAX + 1];
98 static int hold_daemon_lock;
99 static int daemon_lock_fd;
100 
101 /*
102  * sema_eventbuf - guards against the global buffer eventbuf
103  *	being written to before it has been dispatched to clients
104  *
105  * sema_dispatch - synchronizes between the kernel uploading thread
106  *	(producer) and the userland dispatch_message thread (consumer).
107  *
108  * sema_resource - throttles outstanding event consumption.
109  *
110  * event_comp_cv - synchronizes threads waiting for the event completion queue
111  *			to empty or become active.
112  */
113 static sema_t sema_eventbuf, sema_dispatch, sema_resource;
114 static cond_t event_comp_cv;
115 
116 /* Self-tuning concurrency level */
117 #define	MIN_CONCURRENCY_LEVEL	4
118 static int concurrency_level = MIN_CONCURRENCY_LEVEL;
119 
120 
121 /* SLM defines */
122 #define	MODULE_SUFFIX	".so"
123 #define	EVENT_FINI	"slm_fini"
124 #define	EVENT_INIT	"slm_init"
125 
126 #define	SE_TIMEOUT	60	/* Client dispatch timeout (seconds) */
127 
128 /* syslog message related */
129 static int logflag = 0;
130 static char *prog;
131 
132 /* function prototypes */
133 static void door_upcall(void *cookie, char *args, size_t alen, door_desc_t *ddp,
134 	uint_t ndid);
135 static void dispatch_message(void);
136 static int dispatch(void);
137 static void event_completion_thr(void);
138 static void usage(void);
139 
140 static void syseventd_init(void);
141 static void syseventd_fini(int sig);
142 
143 static pid_t enter_daemon_lock(void);
144 static void exit_daemon_lock(void);
145 
static void
usage(void)
{
148 	(void) fprintf(stderr, "usage: syseventd [-d <debug_level>] "
149 	    "[-r <root_dir>]\n");
150 	(void) fprintf(stderr, "higher debug levels get progressively ");
151 	(void) fprintf(stderr, "more detailed debug information.\n");
152 	(void) fprintf(stderr, "syseventd will run in background if ");
153 	(void) fprintf(stderr, "run with a debug_level less than %d.\n",
154 	    DEBUG_LEVEL_FORK);
155 	exit(2);
156 }
157 
158 
159 /* common exit function which ensures releasing locks */
160 void
161 syseventd_exit(int status)
162 {
163 	syseventd_print(1, "exit status = %d\n", status);
164 
165 	if (hold_daemon_lock) {
166 		exit_daemon_lock();
167 	}
168 
169 	exit(status);
170 }
171 
172 
173 /*
174  * hup_handler - SIGHUP handler.  SIGHUP is used to force a reload of
175  *		 all SLMs.  During fini, events are drained from all
176  *		 client event queues.  The events that have been consumed
177  *		 by all clients are freed from the kernel event queue.
178  *
179  *		 Events that have not yet been delivered to all clients
180  *		 are not freed and will be replayed after all SLMs have
181  *		 been (re)loaded.
182  *
183  *		 After all client event queues have been drained, each
184  *		 SLM client is unloaded.  The init phase will (re)load
185  *		 each SLM and initiate event replay and delivery from
186  *		 the kernel.
187  *
188  */
189 /*ARGSUSED*/
190 static void
191 hup_handler(int sig)
192 {
193 	syseventd_err_print(SIGHUP_CAUGHT);
194 	(void) fflush(0);
195 	syseventd_fini(sig);
196 	syseventd_init();
197 	syseventd_err_print(DAEMON_RESTARTED);
198 	(void) fflush(0);
199 }
200 
201 /*
202  * Fault handler for other signals caught
203  */
204 /*ARGSUSED*/
205 static void
206 flt_handler(int sig)
207 {
208 	char signame[SIG2STR_MAX];
209 
210 	if (sig2str(sig, signame) == -1) {
211 		syseventd_err_print(UNKNOWN_SIGNAL_CAUGHT, sig);
212 	}
213 
214 	(void) se_signal_sethandler(sig, SIG_DFL, NULL);
215 
216 	switch (sig) {
217 		case SIGINT:
218 		case SIGSTOP:
219 		case SIGTERM:
220 			/* Close kernel door */
221 			(void) door_revoke(upcall_door);
222 
223 			/* Gracefully exit current event delivery threads */
224 			syseventd_fini(sig);
225 
226 			(void) fflush(0);
227 			(void) se_signal_unblockall();
228 			syseventd_exit(1);
229 			/*NOTREACHED*/
230 		default:
231 			syseventd_err_print(FATAL_ERROR);
232 			(void) fflush(0);
233 
234 	}
235 }
236 
/*
 * Runs in the daemon parent process only.
 * The child sends SIGUSR1 to indicate successful daemon initialization;
 * this is the normal and expected exit path of the daemon parent.
 */
242 /*ARGSUSED*/
243 static void
244 sigusr1(int sig)
245 {
246 	syseventd_exit(0);
247 }
248 
249 static void
250 sigwait_thr()
251 {
252 	int	sig;
253 	int	err;
254 	sigset_t signal_set;
255 
256 	for (;;) {
257 		syseventd_print(3, "sigwait thread waiting for signal\n");
258 		(void) sigfillset(&signal_set);
259 		err = sigwait(&signal_set, &sig);
260 		if (err) {
261 			syseventd_exit(2);
262 		}
263 
264 		/*
265 		 * Block all signals until the signal handler completes
266 		 */
267 		if (sig == SIGHUP) {
268 			hup_handler(sig);
269 		} else {
270 			flt_handler(sig);
271 		}
272 	}
273 	/* NOTREACHED */
274 }
275 
276 static void
277 set_root_dir(char *dir)
278 {
279 	root_dir = malloc(strlen(dir) + 1);
280 	if (root_dir == NULL) {
281 		syseventd_err_print(INIT_ROOT_DIR_ERR, strerror(errno));
282 		syseventd_exit(2);
283 	}
284 	(void) strcpy(root_dir, dir);
285 }
286 
287 int
288 main(int argc, char **argv)
289 {
290 	int i, c;
291 	int fd;
292 	pid_t pid;
293 	int has_forked = 0;
294 	extern char *optarg;
295 
296 	(void) setlocale(LC_ALL, "");
297 	(void) textdomain(TEXT_DOMAIN);
298 
299 	if (getuid() != 0) {
300 		(void) fprintf(stderr, "Must be root to run syseventd\n");
301 		syseventd_exit(1);
302 	}
303 
304 	if (argc > 5) {
305 		usage();
306 	}
307 
308 	if ((prog = strrchr(argv[0], '/')) == NULL) {
309 		prog = argv[0];
310 	} else {
311 		prog++;
312 	}
313 
314 	if ((c = getopt(argc, argv, "d:r:")) != EOF) {
315 		switch (c) {
316 		case 'd':
317 			debug_level = atoi(optarg);
318 			break;
319 		case 'r':
320 			/*
321 			 * Private flag for suninstall to run
322 			 * daemon during install.
323 			 */
324 			set_root_dir(optarg);
325 			break;
326 		case '?':
327 		default:
328 			usage();
329 		}
330 	}
331 
	/* daemonize ourselves */
333 	if (debug_level < DEBUG_LEVEL_FORK) {
334 
335 		sigset_t mask;
336 
337 		(void) sigset(SIGUSR1, sigusr1);
338 
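		/*
		 * Block SIGUSR1 before forking so that, if the child
		 * signals before the parent is waiting in waitpid(),
		 * the signal stays pending rather than being lost.
		 */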
339 		(void) sigemptyset(&mask);
340 		(void) sigaddset(&mask, SIGUSR1);
341 		(void) sigprocmask(SIG_BLOCK, &mask, NULL);
342 
343 		if ((pid = fork()) == (pid_t)-1) {
344 			(void) fprintf(stderr,
345 			    "syseventd: fork failed - %s\n", strerror(errno));
346 			syseventd_exit(1);
347 		}
348 
349 		if (pid != 0) {
350 			/*
351 			 * parent
352 			 * handshake with the daemon so that dependents
353 			 * of the syseventd service don't start up until
354 			 * the service is actually functional
355 			 */
356 			int status;
357 			(void) sigprocmask(SIG_UNBLOCK, &mask, NULL);
358 
359 			if (waitpid(pid, &status, 0) != pid) {
				/*
				 * waitpid() was interrupted by the child's
				 * SIGUSR1 signal, indicating successful
				 * daemon initialization
				 */
364 				syseventd_exit(0);
365 			}
366 			/* child exited implying unsuccessful startup */
367 			syseventd_exit(1);
368 		}
369 
370 		/* child */
371 
372 		has_forked = 1;
373 		(void) sigset(SIGUSR1, SIG_DFL);
374 		(void) sigprocmask(SIG_UNBLOCK, &mask, NULL);
375 
376 		(void) chdir("/");
377 		(void) setsid();
378 		if (debug_level <= 1) {
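			/*
			 * Redirect stdio to /dev/null: closefrom(0) closes
			 * every descriptor, so the open() below returns
			 * fd 0; stdout and stderr are then duped onto it.
			 */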
379 			closefrom(0);
380 			fd = open("/dev/null", 0);
381 			(void) dup2(fd, 1);
382 			(void) dup2(fd, 2);
383 			logflag = 1;
384 		}
385 	}
386 
387 	openlog("syseventd", LOG_PID, LOG_DAEMON);
388 
389 	(void) mutex_init(&err_mutex, USYNC_THREAD, NULL);
390 
391 	syseventd_print(8,
392 	    "syseventd started, debug level = %d\n", debug_level);
393 
394 	/* only one instance of syseventd can run at a time */
395 	if ((pid = enter_daemon_lock()) != getpid()) {
396 		syseventd_print(1,
397 		    "event daemon pid %ld already running\n", pid);
398 		exit(3);
399 	}
400 
401 	/* initialize semaphores and eventbuf */
402 	(void) sema_init(&sema_eventbuf, SE_EVENT_DISPATCH_CNT,
403 	    USYNC_THREAD, NULL);
404 	(void) sema_init(&sema_dispatch, 0, USYNC_THREAD, NULL);
405 	(void) sema_init(&sema_resource, SE_EVENT_DISPATCH_CNT,
406 	    USYNC_THREAD, NULL);
407 	(void) cond_init(&event_comp_cv, USYNC_THREAD, NULL);
408 	eventbuf = (sysevent_t **)calloc(SE_EVENT_DISPATCH_CNT,
409 	    sizeof (sysevent_t *));
410 	if (eventbuf == NULL) {
411 		syseventd_print(1, "Unable to allocate event buffer array\n");
412 		exit(2);
413 	}
414 	for (i = 0; i < SE_EVENT_DISPATCH_CNT; ++i) {
415 		eventbuf[i] = malloc(LOGEVENT_BUFSIZE);
416 		if (eventbuf[i] == NULL) {
417 			syseventd_print(1, "Unable to allocate event "
418 			    "buffers\n");
419 			exit(2);
420 		}
421 	}
422 
423 	(void) mutex_init(&client_tbl_lock, USYNC_THREAD, NULL);
424 	(void) mutex_init(&ev_comp_lock, USYNC_THREAD, NULL);
425 	(void) mutex_init(&door_lock, USYNC_THREAD, NULL);
426 	(void) rwlock_init(&mod_unload_lock, USYNC_THREAD, NULL);
427 
428 	event_compq = NULL;
429 
430 	syseventd_print(8, "start the message thread running\n");
431 
432 	/*
	 * Block all signals to all threads, including the main thread.
434 	 * The sigwait_thr thread will process any signals and initiate
435 	 * a graceful recovery if possible.
436 	 */
437 	if (se_signal_blockall() < 0) {
438 		syseventd_err_print(INIT_SIG_BLOCK_ERR);
439 		syseventd_exit(2);
440 	}
441 
442 	if (thr_create(NULL, NULL, (void *(*)(void *))dispatch_message,
443 	    (void *)0, 0, NULL) < 0) {
444 		syseventd_err_print(INIT_THR_CREATE_ERR, strerror(errno));
445 		syseventd_exit(2);
446 	}
447 	if (thr_create(NULL, NULL,
448 	    (void *(*)(void *))event_completion_thr, NULL,
449 	    THR_BOUND, NULL) != 0) {
450 		syseventd_err_print(INIT_THR_CREATE_ERR, strerror(errno));
451 		syseventd_exit(2);
452 	}
453 	/* Create signal catching thread */
454 	if (thr_create(NULL, NULL, (void *(*)(void *))sigwait_thr,
455 	    NULL, 0, NULL) < 0) {
456 		syseventd_err_print(INIT_THR_CREATE_ERR, strerror(errno));
457 		syseventd_exit(2);
458 	}
459 
460 	setbuf(stdout, (char *)NULL);
461 
462 	/* Initialize and load SLM clients */
463 	initialize_client_tbl();
464 	syseventd_init();
465 
466 	/* signal parent to indicate successful daemon initialization */
467 	if (has_forked) {
468 		if (kill(getppid(), SIGUSR1) != 0) {
469 			syseventd_err_print(
470 			    "signal to the parent failed - %s\n",
471 			    strerror(errno));
472 			syseventd_exit(2);
473 		}
474 	}
475 
476 	syseventd_print(8, "Pausing\n");
477 
478 	for (;;) {
479 		(void) pause();
480 	}
481 	/* NOTREACHED */
482 	return (0);
483 }
484 
485 /*
486  * door_upcall - called from the kernel via kernel sysevent door
487  *		to upload event(s).
488  *
 *		This routine should never block.  If resources are
 *		not available to immediately accept the event buffer,
 *		EAGAIN is returned to the kernel.
492  *
493  *		Once resources are available, the kernel is notified
494  *		via a modctl interface to resume event delivery to
495  *		syseventd.
496  *
497  */
498 /*ARGSUSED*/
499 static void
500 door_upcall(void *cookie, char *args, size_t alen,
501     door_desc_t *ddp, uint_t ndid)
502 {
503 	sysevent_t *ev;
504 	int rval;
505 
506 
507 	(void) mutex_lock(&door_lock);
508 	if (args == NULL) {
509 		rval = EINVAL;
510 	} else if (sema_trywait(&sema_eventbuf)) {
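		/*
		 * No event buffer slot is free (non-blocking check);
		 * ask the kernel to retry this event later.
		 */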
511 		ev = (sysevent_t *)
512 		    &((log_event_upcall_arg_t *)(void *)args)->buf;
513 		syseventd_print(2, "door_upcall: busy event %llx "
514 		    "retry\n", sysevent_get_seq(ev));
515 		rval = door_upcall_retval = EAGAIN;
516 	} else {
517 		/*
518 		 * Copy received message to local buffer.
519 		 */
520 		size_t size;
521 		ev = (sysevent_t *)
522 		    &((log_event_upcall_arg_t *)(void *)args)->buf;
523 
524 		syseventd_print(2, "door_upcall: event %llx in eventbuf %d\n",
525 		    sysevent_get_seq(ev), deliver_buf);
526 		size = sysevent_get_size(ev) > LOGEVENT_BUFSIZE ?
527 		    LOGEVENT_BUFSIZE : sysevent_get_size(ev);
528 		(void) bcopy(ev, eventbuf[deliver_buf], size);
529 		deliver_buf = (deliver_buf + 1) % SE_EVENT_DISPATCH_CNT;
530 		rval = 0;
531 		(void) sema_post(&sema_dispatch);
532 	}
533 
534 	(void) mutex_unlock(&door_lock);
535 
536 	/*
537 	 * Filling in return values for door_return
538 	 */
539 	(void) door_return((void *)&rval, sizeof (rval), NULL, 0);
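	/*
	 * door_return() does not return to the caller on success; the
	 * second call serves as a fallback if the first fails.
	 */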
540 	(void) door_return(NULL, 0, NULL, 0);
541 }
542 
543 /*
544  * dispatch_message - dispatch message thread
 *			This thread spins until an event buffer is
 *			delivered from the kernel.
547  *
548  *			It will wait to dispatch an event to any clients
549  *			until adequate resources are available to process
550  *			the event buffer.
551  */
552 static void
553 dispatch_message(void)
554 {
555 	int error;
556 
557 	for (;;) {
558 		syseventd_print(3, "dispatch_message: thread started\n");
559 		/*
560 		 * Spin till a message comes
561 		 */
562 		while (sema_wait(&sema_dispatch) != 0) {
563 			syseventd_print(1,
564 			    "dispatch_message: sema_wait failed\n");
565 			(void) sleep(1);
566 		}
567 
568 		syseventd_print(3, "dispatch_message: sema_dispatch\n");
569 
570 		/*
571 		 * Wait for available resources
572 		 */
573 		while (sema_wait(&sema_resource) != 0) {
574 			syseventd_print(1, "dispatch_message: sema_wait "
575 			    "failed\n");
576 			(void) sleep(1);
577 		}
578 
579 		syseventd_print(2, "dispatch_message: eventbuf %d\n",
580 		    dispatch_buf);
581 
582 		/*
583 		 * Client dispatch
584 		 */
585 		do {
586 			error = dispatch();
587 		} while (error == EAGAIN);
588 
589 		syseventd_print(2, "eventbuf %d dispatched\n", dispatch_buf);
590 		dispatch_buf = (dispatch_buf + 1) % SE_EVENT_DISPATCH_CNT;
591 
		/*
		 * If the kernel previously received a busy (EAGAIN)
		 * response, kickstart its delivery thread to resume
		 * event delivery.  door_lock blocks the kernel upcall,
		 * so we hold it for the shortest time possible.
		 */
598 		(void) mutex_lock(&door_lock);
599 		if (door_upcall_retval == EAGAIN && !fini_pending) {
600 			syseventd_print(3, "dispatch_message: retrigger "
601 			    "door_upcall_retval = %d\n",
602 			    door_upcall_retval);
603 			(void) modctl(MODEVENTS, (uintptr_t)MODEVENTS_FLUSH,
604 			    NULL, NULL, NULL, 0);
605 			door_upcall_retval = 0;
606 		}
607 		(void) mutex_unlock(&door_lock);
608 	}
609 	/* NOTREACHED */
610 }
611 
612 /*
613  * drain_eventq - Called to drain all pending events from the client's
614  *		event queue.
615  */
616 static void
617 drain_eventq(struct sysevent_client *scp, int status)
618 {
619 	struct event_dispatch_pkg *d_pkg;
620 	struct event_dispatchq *eventq, *eventq_next;
621 
622 	syseventd_print(3, "Draining eventq for client %d\n",
623 	    scp->client_num);
624 
625 	eventq = scp->eventq;
626 	while (eventq) {
		/*
		 * Mark events not yet dispatched as completed with the
		 * given error status; events already dispatched are
		 * completed by the delivery thread.
		 */
631 		d_pkg = eventq->d_pkg;
632 
633 		syseventd_print(4, "drain event 0X%llx for client %d\n",
634 		    sysevent_get_seq(d_pkg->ev), scp->client_num);
635 
636 		if (d_pkg->completion_state == SE_NOT_DISPATCHED) {
637 			d_pkg->completion_status = status;
638 			d_pkg->completion_state = SE_COMPLETE;
639 			(void) sema_post(d_pkg->completion_sema);
640 		}
641 
642 		eventq_next = eventq->next;
643 		free(eventq);
644 		eventq = eventq_next;
645 		scp->eventq = eventq;
646 	}
647 }
648 
649 /*
650  * client_deliver_event_thr - Client delivery thread
651  *				This thread will process any events on this
652  *				client's eventq.
653  */
654 static void
655 client_deliver_event_thr(void *arg)
656 {
657 	int flag, error, i;
658 	sysevent_t *ev;
659 	hrtime_t now;
660 	module_t *mod;
661 	struct event_dispatchq *eventq;
662 	struct sysevent_client *scp;
663 	struct event_dispatch_pkg *d_pkg;
664 
665 	scp = (struct sysevent_client *)arg;
666 	mod = (module_t *)scp->client_data;
667 
668 	(void) mutex_lock(&scp->client_lock);
669 	for (;;) {
670 		while (scp->eventq == NULL) {
671 
672 			/*
673 			 * Client has been suspended or unloaded, go no further.
674 			 */
675 			if (fini_pending) {
676 				scp->client_flags &= ~SE_CLIENT_THR_RUNNING;
677 				syseventd_print(3, "Client %d delivery thread "
678 				    "exiting flags: 0X%x\n",
679 				    scp->client_num, scp->client_flags);
680 				(void) mutex_unlock(&scp->client_lock);
681 				return;
682 			}
683 
684 			(void) cond_wait(&scp->client_cv, &scp->client_lock);
685 
686 		}
687 
688 		/*
689 		 * Process events from the head of the eventq, eventq is locked
690 		 * going into the processing.
691 		 */
692 		eventq = scp->eventq;
693 		while (eventq != NULL) {
694 			d_pkg = eventq->d_pkg;
695 			d_pkg->completion_state = SE_OUTSTANDING;
696 			(void) mutex_unlock(&scp->client_lock);
697 
698 
699 			flag = error = 0;
700 			ev = d_pkg->ev;
701 
702 			syseventd_print(3, "Start delivery for client %d "
703 			    "with retry count %d\n",
704 			    scp->client_num, d_pkg->retry_count);
705 
			/*
			 * Attempt delivery up to the client's retry limit.
			 * On the final attempt, indicate that no further
			 * retries are allowed.
			 */
710 			for (i = 0; i <= scp->retry_limit; ++i) {
711 				if (i == scp->retry_limit)
712 					flag = SE_NO_RETRY;
713 
714 				/* Start the clock for the event delivery */
715 				d_pkg->start_time = gethrtime();
716 
717 				syseventd_print(9, "Deliver to module client "
718 				    "%s\n", mod->name);
719 
720 				error = mod->deliver_event(ev, flag);
721 
722 				/* Can not allow another retry */
723 				if (i == scp->retry_limit)
724 					error = 0;
725 
726 				/* Stop the clock */
727 				now = gethrtime();
728 
729 				/*
730 				 * Suspend event processing and drain the
731 				 * event q for latent clients
732 				 */
733 				if (now - d_pkg->start_time >
734 				    ((hrtime_t)SE_TIMEOUT * NANOSEC)) {
735 					syseventd_print(1, "Unresponsive "
736 					    "client %d: Draining eventq and "
737 					    "suspending event delivery\n",
738 					    scp->client_num);
739 					(void) mutex_lock(&scp->client_lock);
740 					scp->client_flags &=
741 					    ~SE_CLIENT_THR_RUNNING;
742 					scp->client_flags |=
743 					    SE_CLIENT_SUSPENDED;
744 
745 					/* Cleanup current event */
746 					d_pkg->completion_status = EFAULT;
747 					d_pkg->completion_state = SE_COMPLETE;
748 					(void) sema_post(
749 					    d_pkg->completion_sema);
750 
751 					/*
752 					 * Drain the remaining events from the
753 					 * queue.
754 					 */
755 					drain_eventq(scp, EINVAL);
756 					(void) mutex_unlock(&scp->client_lock);
757 					return;
758 				}
759 
760 				/* Event delivery retry requested */
761 				if (fini_pending || error != EAGAIN) {
762 					break;
763 				} else {
764 					(void) sleep(SE_RETRY_TIME);
765 				}
766 			}
767 
768 			(void) mutex_lock(&scp->client_lock);
769 			d_pkg->completion_status = error;
770 			d_pkg->completion_state = SE_COMPLETE;
771 			(void) sema_post(d_pkg->completion_sema);
772 
773 			/* Update eventq pointer */
774 			if (scp->eventq != NULL) {
775 				scp->eventq = eventq->next;
776 				free(eventq);
777 				eventq = scp->eventq;
778 			} else {
779 				free(eventq);
780 				break;
781 			}
782 
783 			syseventd_print(3, "Completed delivery with "
784 			    "error %d\n", error);
785 		}
786 
787 		syseventd_print(3, "No more events to process for client %d\n",
788 		    scp->client_num);
789 
790 		/* Return if this was a synchronous delivery */
791 		if (!SE_CLIENT_IS_THR_RUNNING(scp)) {
792 			(void) mutex_unlock(&scp->client_lock);
793 			return;
794 		}
795 
796 	}
797 }
798 
799 /*
800  * client_deliver_event - Client specific event delivery
801  *			This routine will allocate and initialize the
 *			necessary per-client dispatch data.
803  *
804  *			If the eventq is not empty, it may be assumed that
805  *			a delivery thread exists for this client and the
806  *			dispatch data is appended to the eventq.
807  *
808  *			The dispatch package is freed by the event completion
809  *			thread (event_completion_thr) and the eventq entry
810  *			is freed by the event delivery thread.
811  */
812 static struct event_dispatch_pkg *
813 client_deliver_event(struct sysevent_client *scp, sysevent_t *ev,
814 	sema_t *completion_sema)
815 {
816 	size_t ev_sz = sysevent_get_size(ev);
817 	struct event_dispatchq *newq, *tmp;
818 	struct event_dispatch_pkg *d_pkg;
819 
820 	syseventd_print(3, "client_deliver_event: id 0x%llx size %d\n",
821 	    (longlong_t)sysevent_get_seq(ev), ev_sz);
822 	if (debug_level == 9) {
823 		se_print(stdout, ev);
824 	}
825 
826 	/*
827 	 * Check for suspended client
828 	 */
829 	(void) mutex_lock(&scp->client_lock);
830 	if (SE_CLIENT_IS_SUSPENDED(scp) || !SE_CLIENT_IS_THR_RUNNING(scp)) {
831 		(void) mutex_unlock(&scp->client_lock);
832 		return (NULL);
833 	}
834 
835 	/*
836 	 * Allocate a new dispatch package and eventq entry
837 	 */
838 	newq = (struct event_dispatchq *)malloc(
839 	    sizeof (struct event_dispatchq));
840 	if (newq == NULL) {
841 		(void) mutex_unlock(&scp->client_lock);
842 		return (NULL);
843 	}
844 
845 	d_pkg = (struct event_dispatch_pkg *)malloc(
846 	    sizeof (struct event_dispatch_pkg));
847 	if (d_pkg == NULL) {
848 		free(newq);
849 		(void) mutex_unlock(&scp->client_lock);
850 		return (NULL);
851 	}
852 
853 	/* Initialize the dispatch package */
854 	d_pkg->scp = scp;
855 	d_pkg->retry_count = 0;
856 	d_pkg->completion_status = 0;
857 	d_pkg->completion_state = SE_NOT_DISPATCHED;
858 	d_pkg->completion_sema = completion_sema;
859 	d_pkg->ev = ev;
860 	newq->d_pkg = d_pkg;
861 	newq->next = NULL;
862 
863 	if (scp->eventq != NULL) {
864 
865 		/* Add entry to the end of the eventq */
866 		tmp = scp->eventq;
867 		while (tmp->next != NULL)
868 			tmp = tmp->next;
869 		tmp->next = newq;
870 	} else {
871 		/* event queue empty, wakeup delivery thread */
872 		scp->eventq = newq;
873 		(void) cond_signal(&scp->client_cv);
874 	}
875 	(void) mutex_unlock(&scp->client_lock);
876 
877 	return (d_pkg);
878 }
879 
880 /*
881  * event_completion_thr - Event completion thread.  This thread routine
 *			waits for all client delivery threads to complete
883  *			delivery of a particular event.
884  */
885 static void
886 event_completion_thr()
887 {
888 	int ret, i, client_count, ok_to_free;
889 	sysevent_id_t eid;
890 	struct sysevent_client *scp;
891 	struct ev_completion *ev_comp;
892 	struct event_dispatchq *dispatchq;
893 	struct event_dispatch_pkg *d_pkg;
894 
895 	(void) mutex_lock(&ev_comp_lock);
896 	for (;;) {
897 		while (event_compq == NULL) {
898 			(void) cond_wait(&event_comp_cv, &ev_comp_lock);
899 		}
900 
901 		/*
902 		 * Process event completions from the head of the
903 		 * completion queue
904 		 */
905 		ev_comp = event_compq;
906 		while (ev_comp) {
907 			(void) mutex_unlock(&ev_comp_lock);
908 			eid.eid_seq = sysevent_get_seq(ev_comp->ev);
909 			sysevent_get_time(ev_comp->ev, &eid.eid_ts);
910 			client_count = ev_comp->client_count;
911 			ok_to_free = 1;
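			/*
			 * The kernel copy of the event is freed only if no
			 * client requested a retry (EAGAIN) for it.
			 */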
912 
913 			syseventd_print(3, "Wait for event completion of "
914 			    "event 0X%llx on %d clients\n",
915 			    eid.eid_seq, client_count);
916 
917 			while (client_count) {
918 				syseventd_print(9, "Waiting for %d clients on "
919 				    "event id 0X%llx\n", client_count,
920 				    eid.eid_seq);
921 
922 				(void) sema_wait(&ev_comp->client_sema);
923 				--client_count;
924 			}
925 
926 			syseventd_print(3, "Cleaning up clients for event "
927 			    "0X%llx\n", eid.eid_seq);
928 			dispatchq = ev_comp->dispatch_list;
929 			while (dispatchq != NULL) {
930 				d_pkg = dispatchq->d_pkg;
931 				scp = d_pkg->scp;
932 
933 				if (d_pkg->completion_status == EAGAIN)
934 					ok_to_free = 0;
935 
936 				syseventd_print(4, "Delivery of 0X%llx "
937 				    "complete for client %d retry count %d "
938 				    "status %d\n", eid.eid_seq,
939 				    scp->client_num,
940 				    d_pkg->retry_count,
941 				    d_pkg->completion_status);
942 
943 				free(d_pkg);
944 				ev_comp->dispatch_list = dispatchq->next;
945 				free(dispatchq);
946 				dispatchq = ev_comp->dispatch_list;
947 			}
948 
949 			if (ok_to_free) {
950 				for (i = 0; i < MAX_MODCTL_RETRY; ++i) {
951 					if ((ret = modctl(MODEVENTS,
952 					    (uintptr_t)MODEVENTS_FREEDATA,
953 					    (uintptr_t)&eid, NULL,
954 					    NULL, 0)) != 0) {
955 						syseventd_print(1, "attempting "
956 						    "to free event 0X%llx\n",
957 						    eid.eid_seq);
958 
959 						/*
960 						 * Kernel may need time to
961 						 * move this event buffer to
962 						 * the sysevent sent queue
963 						 */
964 						(void) sleep(1);
965 					} else {
966 						break;
967 					}
968 				}
969 				if (ret) {
970 					syseventd_print(1, "Unable to free "
971 					    "event 0X%llx from the "
972 					    "kernel\n", eid.eid_seq);
973 				}
974 			} else {
975 				syseventd_print(1, "Not freeing event 0X%llx\n",
976 				    eid.eid_seq);
977 			}
978 
979 			syseventd_print(2, "Event delivery complete for id "
980 			    "0X%llx\n", eid.eid_seq);
981 
982 			(void) mutex_lock(&ev_comp_lock);
983 			event_compq = ev_comp->next;
984 			free(ev_comp->ev);
985 			free(ev_comp);
986 			ev_comp = event_compq;
987 			(void) sema_post(&sema_resource);
988 		}
989 
990 		/*
991 		 * Event completion queue is empty, signal possible unload
992 		 * operation
993 		 */
994 		(void) cond_signal(&event_comp_cv);
995 
996 		syseventd_print(3, "No more events\n");
997 	}
998 }
999 
1000 /*
1001  * dispatch - Dispatch the current event buffer to all valid SLM clients.
1002  */
1003 static int
1004 dispatch(void)
1005 {
1006 	int ev_sz, i, client_count = 0;
1007 	sysevent_t *new_ev;
1008 	sysevent_id_t eid;
1009 	struct ev_completion *ev_comp, *tmp;
1010 	struct event_dispatchq *dispatchq, *client_list;
1011 	struct event_dispatch_pkg *d_pkg;
1012 
1013 	/* Check for module unload operation */
1014 	if (rw_tryrdlock(&mod_unload_lock) != 0) {
1015 		syseventd_print(2, "unload in progress abort delivery\n");
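		/* Return the buffer slot and the resource reservation */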
1016 		(void) sema_post(&sema_eventbuf);
1017 		(void) sema_post(&sema_resource);
1018 		return (0);
1019 	}
1020 
1021 	syseventd_print(3, "deliver dispatch buffer %d", dispatch_buf);
1022 	eid.eid_seq = sysevent_get_seq(eventbuf[dispatch_buf]);
1023 	sysevent_get_time(eventbuf[dispatch_buf], &eid.eid_ts);
1024 	syseventd_print(3, "deliver msg id: 0x%llx\n", eid.eid_seq);
1025 
1026 	/*
1027 	 * ev_comp is used to hold event completion data.  It is freed
1028 	 * by the event completion thread (event_completion_thr).
1029 	 */
1030 	ev_comp = (struct ev_completion *)
1031 	    malloc(sizeof (struct ev_completion));
1032 	if (ev_comp == NULL) {
1033 		(void) rw_unlock(&mod_unload_lock);
1034 		syseventd_print(1, "Can not allocate event completion buffer "
1035 		    "for event id 0X%llx\n", eid.eid_seq);
1036 		return (EAGAIN);
1037 	}
1038 	ev_comp->dispatch_list = NULL;
1039 	ev_comp->next = NULL;
1040 	(void) sema_init(&ev_comp->client_sema, 0, USYNC_THREAD, NULL);
1041 
1042 	ev_sz = sysevent_get_size(eventbuf[dispatch_buf]);
1043 	new_ev = calloc(1, ev_sz);
1044 	if (new_ev == NULL) {
1045 		free(ev_comp);
1046 		(void) rw_unlock(&mod_unload_lock);
		syseventd_print(1, "Can not allocate new event buffer "
		    "for event id 0X%llx\n", eid.eid_seq);
1049 		return (EAGAIN);
1050 	}
1051 
1052 
	/*
	 * For long messages (larger than LOGEVENT_BUFSIZE), the upcall
	 * buffer holds only a truncated copy; fetch the full event data
	 * from the kernel.
	 */
1056 	if (ev_sz > LOGEVENT_BUFSIZE) {
1057 		int ret = 0;
1058 
1059 		/* Ok to release eventbuf for next event buffer from kernel */
1060 		(void) sema_post(&sema_eventbuf);
1061 
1062 		for (i = 0; i < MAX_MODCTL_RETRY; ++i) {
1063 			if ((ret = modctl(MODEVENTS,
1064 			    (uintptr_t)MODEVENTS_GETDATA,
1065 			    (uintptr_t)&eid,
1066 			    (uintptr_t)ev_sz,
1067 			    (uintptr_t)new_ev, 0))
1068 			    == 0)
1069 				break;
1070 			else
1071 				(void) sleep(1);
1072 		}
1073 		if (ret) {
1074 			syseventd_print(1, "GET_DATA failed for 0X%llx:%llx\n",
1075 			    eid.eid_ts, eid.eid_seq);
1076 			free(new_ev);
1077 			free(ev_comp);
1078 			(void) rw_unlock(&mod_unload_lock);
1079 			return (EAGAIN);
1080 		}
1081 	} else {
1082 		(void) bcopy(eventbuf[dispatch_buf], new_ev, ev_sz);
1083 		/* Ok to release eventbuf for next event buffer from kernel */
1084 		(void) sema_post(&sema_eventbuf);
1085 	}
1086 
1087 
1088 	/*
1089 	 * Deliver a copy of eventbuf to clients so
1090 	 * eventbuf can be used for the next message
1091 	 */
1092 	for (i = 0; i < MAX_SLM; ++i) {
1093 
1094 		/* Don't bother for suspended or unloaded clients */
1095 		if (!SE_CLIENT_IS_LOADED(sysevent_client_tbl[i]) ||
1096 		    SE_CLIENT_IS_SUSPENDED(sysevent_client_tbl[i]))
1097 			continue;
1098 
1099 		/*
1100 		 * Allocate event dispatch queue entry.  All queue entries
1101 		 * are freed by the event completion thread as client
1102 		 * delivery completes.
1103 		 */
1104 		dispatchq = (struct event_dispatchq *)malloc(
1105 		    sizeof (struct event_dispatchq));
1106 		if (dispatchq == NULL) {
			syseventd_print(1, "Can not allocate dispatch q "
			    "for event id 0X%llx client %d\n", eid.eid_seq, i);
1109 			continue;
1110 		}
1111 		dispatchq->next = NULL;
1112 
1113 		/* Initiate client delivery */
1114 		d_pkg = client_deliver_event(sysevent_client_tbl[i],
1115 		    new_ev, &ev_comp->client_sema);
1116 		if (d_pkg == NULL) {
1117 			syseventd_print(1, "Can not allocate dispatch "
1118 			    "package for event id 0X%llx client %d\n",
1119 			    eid.eid_seq, i);
1120 			free(dispatchq);
1121 			continue;
1122 		}
1123 		dispatchq->d_pkg = d_pkg;
1124 		++client_count;
1125 
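		/* Append to the dispatch list; client_list tracks the tail */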
1126 		if (ev_comp->dispatch_list == NULL) {
1127 			ev_comp->dispatch_list = dispatchq;
1128 			client_list = dispatchq;
1129 		} else {
1130 			client_list->next = dispatchq;
1131 			client_list = client_list->next;
1132 		}
1133 	}
1134 
1135 	ev_comp->client_count = client_count;
1136 	ev_comp->ev = new_ev;
1137 
1138 	(void) mutex_lock(&ev_comp_lock);
1139 
1140 	if (event_compq == NULL) {
1141 		syseventd_print(3, "Wakeup event completion thread for "
1142 		    "id 0X%llx\n", eid.eid_seq);
1143 		event_compq = ev_comp;
1144 		(void) cond_signal(&event_comp_cv);
1145 	} else {
1146 
1147 		/* Add entry to the end of the event completion queue */
1148 		tmp = event_compq;
1149 		while (tmp->next != NULL)
1150 			tmp = tmp->next;
1151 		tmp->next = ev_comp;
1152 		syseventd_print(3, "event added to completion queue for "
1153 		    "id 0X%llx\n", eid.eid_seq);
1154 	}
1155 	(void) mutex_unlock(&ev_comp_lock);
1156 	(void) rw_unlock(&mod_unload_lock);
1157 
1158 	return (0);
1159 }
1160 
1161 #define	MODULE_DIR_HW	"/usr/platform/%s/lib/sysevent/modules/"
1162 #define	MODULE_DIR_GEN	"/usr/lib/sysevent/modules/"
1163 #define	MOD_DIR_NUM	3
1164 static char dirname[MOD_DIR_NUM][MAXPATHLEN];
1165 
1166 static char *
1167 dir_num2name(int dirnum)
1168 {
1169 	char infobuf[MAXPATHLEN];
1170 
1171 	if (dirnum >= MOD_DIR_NUM)
1172 		return (NULL);
1173 
1174 	if (dirname[0][0] == '\0') {
1175 		if (sysinfo(SI_PLATFORM, infobuf, MAXPATHLEN) == -1) {
1176 			syseventd_print(1, "dir_num2name: "
1177 			    "sysinfo error %s\n", strerror(errno));
1178 			return (NULL);
1179 		} else if (snprintf(dirname[0], sizeof (dirname[0]),
1180 		    MODULE_DIR_HW, infobuf) >= sizeof (dirname[0])) {
1181 			syseventd_print(1, "dir_num2name: "
1182 			    "platform name too long: %s\n",
1183 			    infobuf);
1184 			return (NULL);
1185 		}
1186 		if (sysinfo(SI_MACHINE, infobuf, MAXPATHLEN) == -1) {
1187 			syseventd_print(1, "dir_num2name: "
1188 			    "sysinfo error %s\n", strerror(errno));
1189 			return (NULL);
1190 		} else if (snprintf(dirname[1], sizeof (dirname[1]),
1191 		    MODULE_DIR_HW, infobuf) >= sizeof (dirname[1])) {
1192 			syseventd_print(1, "dir_num2name: "
1193 			    "machine name too long: %s\n",
1194 			    infobuf);
1195 			return (NULL);
1196 		}
1197 		(void) strcpy(dirname[2], MODULE_DIR_GEN);
1198 	}
1199 
1200 	return (dirname[dirnum]);
1201 }
1202 
1203 
1204 /*
 * load_modules - Load modules found in the common syseventd module
 *		directories.  Modules that do not provide valid interfaces
 *		are rejected.
1207  */
1208 static void
1209 load_modules(char *dirname)
1210 {
1211 	int client_id;
1212 	DIR *mod_dir;
1213 	module_t *mod;
1214 	struct dirent *entp;
1215 	struct slm_mod_ops *mod_ops;
1216 	struct sysevent_client *scp;
1217 
1218 	if (dirname == NULL)
1219 		return;
1220 
1221 	/* Return silently if module directory does not exist */
1222 	if ((mod_dir = opendir(dirname)) == NULL) {
1223 		syseventd_print(1, "Unable to open module directory %s: %s\n",
1224 		    dirname, strerror(errno));
1225 		return;
1226 	}
1227 
1228 	syseventd_print(3, "loading modules from %s\n", dirname);
1229 
1230 	/*
1231 	 * Go through directory, looking for files ending with .so
1232 	 */
1233 	while ((entp = readdir(mod_dir)) != NULL) {
1234 		void *dlh, *f;
1235 		char *tmp, modpath[MAXPATHLEN];
1236 
1237 		if (((tmp = strstr(entp->d_name, MODULE_SUFFIX)) == NULL) ||
1238 		    (tmp[strlen(MODULE_SUFFIX)] != '\0')) {
1239 			continue;
1240 		}
1241 
1242 		if (snprintf(modpath, sizeof (modpath), "%s%s",
1243 		    dirname, entp->d_name) >= sizeof (modpath)) {
1244 			syseventd_err_print(INIT_PATH_ERR, modpath);
1245 			continue;
1246 		}
1247 		if ((dlh = dlopen(modpath, RTLD_LAZY)) == NULL) {
1248 			syseventd_err_print(LOAD_MOD_DLOPEN_ERR,
1249 			    modpath, dlerror());
1250 			continue;
1251 		} else if ((f = dlsym(dlh, EVENT_INIT)) == NULL) {
1252 			syseventd_err_print(LOAD_MOD_NO_INIT,
1253 			    modpath, dlerror());
1254 			(void) dlclose(dlh);
1255 			continue;
1256 		}
1257 
1258 		mod = malloc(sizeof (*mod));
1259 		if (mod == NULL) {
1260 			syseventd_err_print(LOAD_MOD_ALLOC_ERR, "mod",
1261 			    strerror(errno));
1262 			(void) dlclose(dlh);
1263 			continue;
1264 		}
1265 
1266 		mod->name = strdup(entp->d_name);
1267 		if (mod->name == NULL) {
1268 			syseventd_err_print(LOAD_MOD_ALLOC_ERR, "mod->name",
1269 			    strerror(errno));
1270 			(void) dlclose(dlh);
1271 			free(mod);
1272 			continue;
1273 		}
1274 
1275 		mod->dlhandle = dlh;
1276 		mod->event_mod_init = (struct slm_mod_ops *(*)())f;
1277 
1278 		/* load in other module functions */
1279 		mod->event_mod_fini = (void (*)())dlsym(dlh, EVENT_FINI);
1280 		if (mod->event_mod_fini == NULL) {
1281 			syseventd_err_print(LOAD_MOD_DLSYM_ERR, mod->name,
1282 			    dlerror());
1283 			free(mod->name);
1284 			free(mod);
1285 			(void) dlclose(dlh);
1286 			continue;
1287 		}
1288 
1289 		/* Call module init routine */
1290 		if ((mod_ops = mod->event_mod_init()) == NULL) {
1291 			syseventd_err_print(LOAD_MOD_EINVAL, mod->name);
1292 			free(mod->name);
1293 			free(mod);
1294 			(void) dlclose(dlh);
1295 			continue;
1296 		}
1297 		if (mod_ops->major_version != SE_MAJOR_VERSION) {
1298 			syseventd_err_print(LOAD_MOD_VERSION_MISMATCH,
1299 			    mod->name, SE_MAJOR_VERSION,
1300 			    mod_ops->major_version);
1301 			mod->event_mod_fini();
1302 			free(mod->name);
1303 			free(mod);
1304 			(void) dlclose(dlh);
1305 			continue;
1306 		}
1307 
1308 		mod->deliver_event = mod_ops->deliver_event;
1309 		/* Add module entry to client list */
1310 		if ((client_id = insert_client((void *)mod, SLM_CLIENT,
1311 		    (mod_ops->retry_limit <= SE_MAX_RETRY_LIMIT ?
1312 		    mod_ops->retry_limit : SE_MAX_RETRY_LIMIT))) < 0) {
1313 			syseventd_err_print(LOAD_MOD_ALLOC_ERR, "insert_client",
1314 			    strerror(errno));
1315 			mod->event_mod_fini();
1316 			free(mod->name);
1317 			free(mod);
1318 			(void) dlclose(dlh);
1319 			continue;
1320 		}
1321 
1322 		scp = sysevent_client_tbl[client_id];
1323 		++concurrency_level;
1324 		(void) thr_setconcurrency(concurrency_level);
1325 		if (thr_create(NULL, 0,
1326 		    (void *(*)(void *))client_deliver_event_thr,
1327 		    (void *)scp, THR_BOUND, &scp->tid) != 0) {
1328 
1329 			syseventd_err_print(LOAD_MOD_ALLOC_ERR, "insert_client",
1330 			    strerror(errno));
1331 			mod->event_mod_fini();
1332 			free(mod->name);
1333 			free(mod);
1334 			(void) dlclose(dlh);
1335 			continue;
1336 		}
1337 		scp->client_flags |= SE_CLIENT_THR_RUNNING;
1338 
1339 		syseventd_print(3, "loaded module %s\n", entp->d_name);
1340 	}
1341 
1342 	(void) closedir(mod_dir);
1343 	syseventd_print(3, "modules loaded\n");
1344 }
1345 
1346 /*
1347  * unload_modules - modules are unloaded prior to graceful shutdown or
1348  *			before restarting the daemon upon receipt of
1349  *			SIGHUP.
1350  */
1351 static void
1352 unload_modules(int sig)
1353 {
1354 	int			i, count, done;
1355 	module_t		*mod;
1356 	struct sysevent_client	*scp;
1357 
1358 	/*
1359 	 * unload modules that are ready, skip those that have not
1360 	 * drained their event queues.
1361 	 */
1362 	count = done = 0;
1363 	while (done < MAX_SLM) {
1364 		/* Don't wait indefinitely for unresponsive clients */
1365 		if (sig != SIGHUP && count > SE_TIMEOUT) {
1366 			break;
1367 		}
1368 
1369 		done = 0;
1370 
1371 		/* Shutdown clients */
1372 		for (i = 0; i < MAX_SLM; ++i) {
1373 			scp = sysevent_client_tbl[i];
1374 			if (mutex_trylock(&scp->client_lock) == 0) {
1375 				if (scp->client_type != SLM_CLIENT ||
1376 				    scp->client_data == NULL) {
1377 					(void) mutex_unlock(&scp->client_lock);
1378 					done++;
1379 					continue;
1380 				}
1381 			} else {
1382 				syseventd_print(3, "Skipping unload of "
1383 				    "client %d: client locked\n",
1384 				    scp->client_num);
1385 				continue;
1386 			}
1387 
1388 			/*
1389 			 * Drain the eventq and wait for delivery thread to
1390 			 * cleanly exit
1391 			 */
1392 			drain_eventq(scp, EAGAIN);
1393 			(void) cond_signal(&scp->client_cv);
1394 			(void) mutex_unlock(&scp->client_lock);
1395 			(void) thr_join(scp->tid, NULL, NULL);
1396 
1397 			/*
1398 			 * It is now safe to unload the module
1399 			 */
1400 			mod = (module_t *)scp->client_data;
1401 			syseventd_print(2, "Unload %s\n", mod->name);
1402 			mod->event_mod_fini();
1403 			(void) dlclose(mod->dlhandle);
1404 			free(mod->name);
1405 			(void) mutex_lock(&client_tbl_lock);
1406 			delete_client(i);
1407 			(void) mutex_unlock(&client_tbl_lock);
1408 			++done;
1409 
1410 		}
1411 		++count;
1412 		(void) sleep(1);
1413 	}
1414 
1415 	/*
1416 	 * Wait for event completions
1417 	 */
1418 	syseventd_print(2, "waiting for event completions\n");
1419 	(void) mutex_lock(&ev_comp_lock);
1420 	while (event_compq != NULL) {
1421 		(void) cond_wait(&event_comp_cv, &ev_comp_lock);
1422 	}
1423 	(void) mutex_unlock(&ev_comp_lock);
1424 }
1425 
1426 /*
1427  * syseventd_init - Called at daemon (re)start-up time to load modules
1428  *			and kickstart the kernel delivery engine.
1429  */
1430 static void
1431 syseventd_init()
1432 {
1433 	int i, fd;
1434 	char local_door_file[PATH_MAX + 1];
1435 
1436 	fini_pending = 0;
1437 
1438 	concurrency_level = MIN_CONCURRENCY_LEVEL;
1439 	(void) thr_setconcurrency(concurrency_level);
1440 
1441 	/*
1442 	 * Load client modules for event delivering
1443 	 */
1444 	for (i = 0; i < MOD_DIR_NUM; ++i) {
1445 		load_modules(dir_num2name(i));
1446 	}
1447 
1448 	/*
1449 	 * Create kernel delivery door service
1450 	 */
1451 	syseventd_print(8, "Create a door for kernel upcalls\n");
1452 	if (snprintf(local_door_file, sizeof (local_door_file), "%s%s",
1453 	    root_dir, LOGEVENT_DOOR_UPCALL) >= sizeof (local_door_file)) {
1454 		syseventd_err_print(INIT_PATH_ERR, local_door_file);
1455 		syseventd_exit(5);
1456 	}
1457 
1458 	/*
1459 	 * Remove door file for robustness.
1460 	 */
1461 	if (unlink(local_door_file) != 0)
1462 		syseventd_print(8, "Unlink of %s failed.\n", local_door_file);
1463 
1464 	fd = open(local_door_file, O_CREAT|O_RDWR, S_IREAD|S_IWRITE);
1465 	if ((fd == -1) && (errno != EEXIST)) {
1466 		syseventd_err_print(INIT_OPEN_DOOR_ERR, strerror(errno));
1467 		syseventd_exit(5);
1468 	}
1469 	(void) close(fd);
1470 
1471 	upcall_door = door_create(door_upcall, NULL,
1472 	    DOOR_REFUSE_DESC | DOOR_NO_CANCEL);
1473 	if (upcall_door == -1) {
1474 		syseventd_err_print(INIT_CREATE_DOOR_ERR, strerror(errno));
1475 		syseventd_exit(5);
1476 	}
1477 
1478 	(void) fdetach(local_door_file);
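	/*
	 * fattach() is retried for as long as it fails with EBUSY, e.g.
	 * while the door name is still in use.
	 */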
1479 retry:
1480 	if (fattach(upcall_door, local_door_file) != 0) {
1481 		if (errno == EBUSY)
1482 			goto retry;
1483 		syseventd_err_print(INIT_FATTACH_ERR, strerror(errno));
1484 		(void) door_revoke(upcall_door);
1485 		syseventd_exit(5);
1486 	}
1487 
1488 	/*
1489 	 * Tell kernel the door name and start delivery
1490 	 */
1491 	syseventd_print(2,
1492 	    "local_door_file = %s\n", local_door_file);
1493 	if (modctl(MODEVENTS,
1494 	    (uintptr_t)MODEVENTS_SET_DOOR_UPCALL_FILENAME,
1495 	    (uintptr_t)local_door_file, NULL, NULL, 0) < 0) {
1496 		syseventd_err_print(INIT_DOOR_NAME_ERR, strerror(errno));
1497 		syseventd_exit(6);
1498 	}
1499 
1500 	door_upcall_retval = 0;
1501 
1502 	if (modctl(MODEVENTS, (uintptr_t)MODEVENTS_FLUSH, NULL, NULL, NULL, 0)
1503 	    < 0) {
1504 		syseventd_err_print(KERNEL_REPLAY_ERR, strerror(errno));
1505 		syseventd_exit(7);
1506 	}
1507 }
1508 
1509 /*
1510  * syseventd_fini - shut down daemon, but do not exit
1511  */
1512 static void
1513 syseventd_fini(int sig)
1514 {
1515 	/*
1516 	 * Indicate that event queues should be drained and no
1517 	 * additional events be accepted
1518 	 */
1519 	fini_pending = 1;
1520 
1521 	/* Close the kernel event door to halt delivery */
1522 	(void) door_revoke(upcall_door);
1523 
1524 	syseventd_print(1, "Unloading modules\n");
1525 	(void) rw_wrlock(&mod_unload_lock);
1526 	unload_modules(sig);
1527 	(void) rw_unlock(&mod_unload_lock);
1528 
1529 }
1530 
1531 /*
1532  * enter_daemon_lock - lock the daemon file lock
1533  *
1534  * Use an advisory lock to ensure that only one daemon process is active
1535  * in the system at any point in time.	If the lock is held by another
1536  * process, do not block but return the pid owner of the lock to the
1537  * caller immediately.	The lock is cleared if the holding daemon process
1538  * exits for any reason even if the lock file remains, so the daemon can
1539  * be restarted if necessary.  The lock file is DAEMON_LOCK_FILE.
1540  */
1541 static pid_t
1542 enter_daemon_lock(void)
1543 {
1544 	struct flock	lock;
1545 
1546 	syseventd_print(8, "enter_daemon_lock: lock file = %s\n",
1547 	    DAEMON_LOCK_FILE);
1548 
1549 	if (snprintf(local_lock_file, sizeof (local_lock_file), "%s%s",
1550 	    root_dir, DAEMON_LOCK_FILE) >= sizeof (local_lock_file)) {
1551 		syseventd_err_print(INIT_PATH_ERR, local_lock_file);
1552 		syseventd_exit(8);
1553 	}
1554 	daemon_lock_fd = open(local_lock_file, O_CREAT|O_RDWR, 0644);
1555 	if (daemon_lock_fd < 0) {
1556 		syseventd_err_print(INIT_LOCK_OPEN_ERR,
1557 		    local_lock_file, strerror(errno));
1558 		syseventd_exit(8);
1559 	}
1560 
1561 	lock.l_type = F_WRLCK;
1562 	lock.l_whence = SEEK_SET;
1563 	lock.l_start = 0;
1564 	lock.l_len = 0;
1565 
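	/*
	 * If another process already holds the lock, F_SETLK fails;
	 * query the holder with F_GETLK and return its pid.
	 */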
1566 	if (fcntl(daemon_lock_fd, F_SETLK, &lock) == -1) {
1567 		if (fcntl(daemon_lock_fd, F_GETLK, &lock) == -1) {
1568 			syseventd_err_print(INIT_LOCK_ERR,
1569 			    local_lock_file, strerror(errno));
1570 			exit(2);
1571 		}
1572 		return (lock.l_pid);
1573 	}
1574 	hold_daemon_lock = 1;
1575 
1576 	return (getpid());
1577 }
1578 
1579 /*
1580  * exit_daemon_lock - release the daemon file lock
1581  */
1582 static void
1583 exit_daemon_lock(void)
1584 {
1585 	struct flock lock;
1586 
1587 	lock.l_type = F_UNLCK;
1588 	lock.l_whence = SEEK_SET;
1589 	lock.l_start = 0;
1590 	lock.l_len = 0;
1591 
1592 	if (fcntl(daemon_lock_fd, F_SETLK, &lock) == -1) {
1593 		syseventd_err_print(INIT_UNLOCK_ERR,
1594 		    local_lock_file, strerror(errno));
1595 	}
1596 
1597 	if (close(daemon_lock_fd) == -1) {
1598 		syseventd_err_print(INIT_LOCK_CLOSE_ERR,
1599 		    local_lock_file, strerror(errno));
1600 		exit(-1);
1601 	}
1602 }
1603 
1604 /*
1605  * syseventd_err_print - print error messages to the terminal if not
1606  *			yet daemonized or to syslog.
1607  */
1608 /*PRINTFLIKE1*/
1609 void
1610 syseventd_err_print(char *message, ...)
1611 {
1612 	va_list ap;
1613 
1614 	(void) mutex_lock(&err_mutex);
1615 	va_start(ap, message);
1616 
1617 	if (logflag) {
1618 		(void) vsyslog(LOG_ERR, message, ap);
1619 	} else {
1620 		(void) fprintf(stderr, "%s: ", prog);
1621 		(void) vfprintf(stderr, message, ap);
1622 	}
1623 	va_end(ap);
1624 	(void) mutex_unlock(&err_mutex);
1625 }
1626 
1627 /*
 * syseventd_print - print messages to the terminal or to syslog.
 *			The following debug levels are implemented:
 *
 * 1 - transient errors that do not affect normal program flow
 * 2 - upcall/dispatch interaction
 * 3 - program flow trace as each message goes through the daemon
 * 8 - all the nitty-gritty details of startup and shutdown
1635  * 9 - very verbose event flow tracing (no daemonization of syseventd)
1636  *
1637  */
1638 /*PRINTFLIKE2*/
1639 void
1640 syseventd_print(int level, char *message, ...)
1641 {
1642 	va_list ap;
1643 	static int newline = 1;
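	/*
	 * newline tracks whether the previous message ended in a newline;
	 * when writing to stdout, the "prog[pid]:" prefix is printed only
	 * at the start of a new line.
	 */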
1644 
1645 	if (level > debug_level) {
1646 		return;
1647 	}
1648 
1649 	(void) mutex_lock(&err_mutex);
1650 	va_start(ap, message);
1651 	if (logflag) {
1652 		(void) syslog(LOG_DEBUG, "%s[%ld]: ",
1653 		    prog, getpid());
1654 		(void) vsyslog(LOG_DEBUG, message, ap);
1655 	} else {
1656 		if (newline) {
1657 			(void) fprintf(stdout, "%s[%ld]: ",
1658 			    prog, getpid());
1659 			(void) vfprintf(stdout, message, ap);
1660 		} else {
1661 			(void) vfprintf(stdout, message, ap);
1662 		}
1663 	}
1664 	if (message[strlen(message)-1] == '\n') {
1665 		newline = 1;
1666 	} else {
1667 		newline = 0;
1668 	}
1669 	va_end(ap);
1670 	(void) mutex_unlock(&err_mutex);
1671 }
1672