1 /* $NetBSD: dmeventd.c,v 1.1.1.1 2008/12/22 00:18:53 haad Exp $ */
2
3 /*
4 * Copyright (C) 2005-2007 Red Hat, Inc. All rights reserved.
5 *
6 * This file is part of the device-mapper userspace tools.
7 *
8 * This copyrighted material is made available to anyone wishing to use,
9 * modify, copy, or redistribute it subject to the terms and conditions
10 * of the GNU Lesser General Public License v.2.1.
11 *
12 * You should have received a copy of the GNU Lesser General Public License
13 * along with this program; if not, write to the Free Software Foundation,
14 * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
15 */
16
17 /*
18 * dmeventd - dm event daemon to monitor active mapped devices
19 */
20
21 #define _GNU_SOURCE
22 #define _FILE_OFFSET_BITS 64
23
24 #include "configure.h"
25 #include "libdevmapper.h"
26 #include "libdevmapper-event.h"
27 #include "dmeventd.h"
28 //#include "libmultilog.h"
29 #include "dm-logging.h"
30
31 #include <dlfcn.h>
32 #include <errno.h>
33 #include <pthread.h>
34 #include <sys/file.h>
35 #include <sys/stat.h>
36 #include <sys/wait.h>
37 #include <sys/time.h>
38 #include <sys/resource.h>
39 #include <unistd.h>
40 #include <signal.h>
41 #include <arpa/inet.h> /* for htonl, ntohl */
42
43 #ifdef linux
44 # include <malloc.h>
45
46 # define OOM_ADJ_FILE "/proc/self/oom_adj"
47
48 /* From linux/oom.h */
49 # define OOM_DISABLE (-17)
50 # define OOM_ADJUST_MIN (-16)
51
52 #endif
53
54 /* FIXME We use syslog for now, because multilog is not yet implemented */
55 #include <syslog.h>
56
static volatile sig_atomic_t _exit_now = 0;	/* set to '1' when signal is given to exit */
static volatile sig_atomic_t _thread_registries_empty = 1;	/* registries are empty initially */
static int _debug = 0;

/* List (un)link macros. */
#define	LINK(x, head)		dm_list_add(head, &(x)->list)
#define	LINK_DSO(dso)		LINK(dso, &_dso_registry)
#define	LINK_THREAD(thread)	LINK(thread, &_thread_registry)

#define	UNLINK(x)		dm_list_del(&(x)->list)
#define	UNLINK_DSO(x)		UNLINK(x)
#define	UNLINK_THREAD(x)	UNLINK(x)

#define DAEMON_NAME  "dmeventd"

/*
   Global mutex for thread list access. Has to be held when:
   - iterating thread list
   - adding or removing elements from thread list
   - changing or reading thread_status's fields:
     processing, status, events
   Use _lock_mutex() and _unlock_mutex() to hold/release it
*/
static pthread_mutex_t _global_mutex;

/*
  There are three states a thread can attain (see struct
  thread_status, field int status):

  - DM_THREAD_RUNNING: thread has started up and is either working or
  waiting for events... transitions to either SHUTDOWN or DONE
  - DM_THREAD_SHUTDOWN: thread is still doing something, but it is
  supposed to terminate (and transition to DONE) as soon as it
  finishes whatever it was doing at the point of flipping state to
  SHUTDOWN... the thread is still on the thread list
  - DM_THREAD_DONE: thread has terminated and has been moved over to
  unused thread list, cleanup pending
 */
#define DM_THREAD_RUNNING  0
#define DM_THREAD_SHUTDOWN 1
#define DM_THREAD_DONE     2

/* Monitoring threads get a small, fully preallocated stack. */
#define THREAD_STACK_SIZE (300*1024)

#define DEBUGLOG(fmt, args...) _debuglog(fmt, ## args)
102
103 /* Data kept about a DSO. */
104 struct dso_data {
105 struct dm_list list;
106
107 char *dso_name; /* DSO name (eg, "evms", "dmraid", "lvm2"). */
108
109 void *dso_handle; /* Opaque handle as returned from dlopen(). */
110 unsigned int ref_count; /* Library reference count. */
111
112 /*
113 * Event processing.
114 *
115 * The DSO can do whatever appropriate steps if an event
116 * happens such as changing the mapping in case a mirror
117 * fails, update the application metadata etc.
118 *
119 * This function gets a dm_task that is a result of
120 * DM_DEVICE_WAITEVENT ioctl (results equivalent to
121 * DM_DEVICE_STATUS). It should not destroy it.
122 * The caller must dispose of the task.
123 */
124 void (*process_event)(struct dm_task *dmt, enum dm_event_mask event, void **user);
125
126 /*
127 * Device registration.
128 *
129 * When an application registers a device for an event, the DSO
130 * can carry out appropriate steps so that a later call to
131 * the process_event() function is sane (eg, read metadata
132 * and activate a mapping).
133 */
134 int (*register_device)(const char *device, const char *uuid, int major,
135 int minor, void **user);
136
137 /*
138 * Device unregistration.
139 *
140 * In case all devices of a mapping (eg, RAID10) are unregistered
141 * for events, the DSO can recognize this and carry out appropriate
142 * steps (eg, deactivate mapping, metadata update).
143 */
144 int (*unregister_device)(const char *device, const char *uuid,
145 int major, int minor, void **user);
146 };
147 static DM_LIST_INIT(_dso_registry);
148
149 /* Structure to keep parsed register variables from client message. */
150 struct message_data {
151 char *id;
152 char *dso_name; /* Name of DSO. */
153 char *device_uuid; /* Mapped device path. */
154 union {
155 char *str; /* Events string as fetched from message. */
156 enum dm_event_mask field; /* Events bitfield. */
157 } events;
158 union {
159 char *str;
160 uint32_t secs;
161 } timeout;
162 struct dm_event_daemon_message *msg; /* Pointer to message buffer. */
163 };
164
165 /*
166 * Housekeeping of thread+device states.
167 *
168 * One thread per mapped device which can block on it until an event
169 * occurs and the event processing function of the DSO gets called.
170 */
171 struct thread_status {
172 struct dm_list list;
173
174 pthread_t thread;
175
176 struct dso_data *dso_data; /* DSO this thread accesses. */
177
178 struct {
179 char *uuid;
180 char *name;
181 int major, minor;
182 } device;
183 uint32_t event_nr; /* event number */
184 int processing; /* Set when event is being processed */
185
186 int status; /* see DM_THREAD_{RUNNING,SHUTDOWN,DONE}
187 constants above */
188 enum dm_event_mask events; /* bitfield for event filter. */
189 enum dm_event_mask current_events; /* bitfield for occured events. */
190 struct dm_task *current_task;
191 time_t next_time;
192 uint32_t timeout;
193 struct dm_list timeout_list;
194 void *dso_private; /* dso per-thread status variable */
195 };
196 static DM_LIST_INIT(_thread_registry);
197 static DM_LIST_INIT(_thread_registry_unused);
198
199 static int _timeout_running;
200 static DM_LIST_INIT(_timeout_registry);
201 static pthread_mutex_t _timeout_mutex = PTHREAD_MUTEX_INITIALIZER;
202 static pthread_cond_t _timeout_cond = PTHREAD_COND_INITIALIZER;
203
_debuglog(const char * fmt,...)204 static void _debuglog(const char *fmt, ...)
205 {
206 time_t P;
207 va_list ap;
208
209 if (!_debug)
210 return;
211
212 va_start(ap,fmt);
213
214 time(&P);
215 fprintf(stderr, "dmeventd[%p]: %.15s ", (void *) pthread_self(), ctime(&P)+4 );
216 vfprintf(stderr, fmt, ap);
217 fprintf(stderr, "\n");
218
219 va_end(ap);
220 }
221
222 /* Allocate/free the status structure for a monitoring thread. */
_alloc_thread_status(struct message_data * data,struct dso_data * dso_data)223 static struct thread_status *_alloc_thread_status(struct message_data *data,
224 struct dso_data *dso_data)
225 {
226 struct thread_status *ret = (typeof(ret)) dm_malloc(sizeof(*ret));
227
228 if (!ret)
229 return NULL;
230
231 memset(ret, 0, sizeof(*ret));
232 if (!(ret->device.uuid = dm_strdup(data->device_uuid))) {
233 dm_free(ret);
234 return NULL;
235 }
236
237 ret->current_task = NULL;
238 ret->device.name = NULL;
239 ret->device.major = ret->device.minor = 0;
240 ret->dso_data = dso_data;
241 ret->events = data->events.field;
242 ret->timeout = data->timeout.secs;
243 dm_list_init(&ret->timeout_list);
244
245 return ret;
246 }
247
_free_thread_status(struct thread_status * thread)248 static void _free_thread_status(struct thread_status *thread)
249 {
250 if (thread->current_task)
251 dm_task_destroy(thread->current_task);
252 dm_free(thread->device.uuid);
253 dm_free(thread->device.name);
254 dm_free(thread);
255 }
256
257 /* Allocate/free DSO data. */
_alloc_dso_data(struct message_data * data)258 static struct dso_data *_alloc_dso_data(struct message_data *data)
259 {
260 struct dso_data *ret = (typeof(ret)) dm_malloc(sizeof(*ret));
261
262 if (!ret)
263 return NULL;
264
265 memset(ret, 0, sizeof(*ret));
266 if (!(ret->dso_name = dm_strdup(data->dso_name))) {
267 dm_free(ret);
268 return NULL;
269 }
270
271 return ret;
272 }
273
274 /* Create a device monitoring thread. */
_pthread_create_smallstack(pthread_t * t,void * (* fun)(void *),void * arg)275 static int _pthread_create_smallstack(pthread_t *t, void *(*fun)(void *), void *arg)
276 {
277 pthread_attr_t attr;
278 pthread_attr_init(&attr);
279 /*
280 * We use a smaller stack since it gets preallocated in its entirety
281 */
282 pthread_attr_setstacksize(&attr, THREAD_STACK_SIZE);
283 return pthread_create(t, &attr, fun, arg);
284 }
285
_free_dso_data(struct dso_data * data)286 static void _free_dso_data(struct dso_data *data)
287 {
288 dm_free(data->dso_name);
289 dm_free(data);
290 }
291
292 /*
293 * Fetch a string off src and duplicate it into *ptr.
294 * Pay attention to zero-length strings.
295 */
296 /* FIXME? move to libdevmapper to share with the client lib (need to
297 make delimiter a parameter then) */
_fetch_string(char ** ptr,char ** src,const int delimiter)298 static int _fetch_string(char **ptr, char **src, const int delimiter)
299 {
300 int ret = 0;
301 char *p;
302 size_t len;
303
304 if ((p = strchr(*src, delimiter)))
305 *p = 0;
306
307 if ((*ptr = dm_strdup(*src))) {
308 if ((len = strlen(*ptr)))
309 *src += len;
310 else {
311 dm_free(*ptr);
312 *ptr = NULL;
313 }
314
315 (*src)++;
316 ret = 1;
317 }
318
319 if (p)
320 *p = delimiter;
321
322 return ret;
323 }
324
325 /* Free message memory. */
_free_message(struct message_data * message_data)326 static void _free_message(struct message_data *message_data)
327 {
328 if (message_data->id)
329 dm_free(message_data->id);
330 if (message_data->dso_name)
331 dm_free(message_data->dso_name);
332
333 if (message_data->device_uuid)
334 dm_free(message_data->device_uuid);
335
336 }
337
338 /* Parse a register message from the client. */
_parse_message(struct message_data * message_data)339 static int _parse_message(struct message_data *message_data)
340 {
341 int ret = 0;
342 char *p = message_data->msg->data;
343 struct dm_event_daemon_message *msg = message_data->msg;
344
345 if (!msg->data)
346 return 0;
347
348 /*
349 * Retrieve application identifier, mapped device
350 * path and events # string from message.
351 */
352 if (_fetch_string(&message_data->id, &p, ' ') &&
353 _fetch_string(&message_data->dso_name, &p, ' ') &&
354 _fetch_string(&message_data->device_uuid, &p, ' ') &&
355 _fetch_string(&message_data->events.str, &p, ' ') &&
356 _fetch_string(&message_data->timeout.str, &p, ' ')) {
357 if (message_data->events.str) {
358 enum dm_event_mask i = atoi(message_data->events.str);
359
360 /*
361 * Free string representaion of events.
362 * Not needed an more.
363 */
364 dm_free(message_data->events.str);
365 message_data->events.field = i;
366 }
367 if (message_data->timeout.str) {
368 uint32_t secs = atoi(message_data->timeout.str);
369 dm_free(message_data->timeout.str);
370 message_data->timeout.secs = secs ? secs :
371 DM_EVENT_DEFAULT_TIMEOUT;
372 }
373
374 ret = 1;
375 }
376
377 dm_free(msg->data);
378 msg->data = NULL;
379 msg->size = 0;
380 return ret;
381 };
382
383 /* Global mutex to lock access to lists et al. See _global_mutex
384 above. */
_lock_mutex(void)385 static int _lock_mutex(void)
386 {
387 return pthread_mutex_lock(&_global_mutex);
388 }
389
_unlock_mutex(void)390 static int _unlock_mutex(void)
391 {
392 return pthread_mutex_unlock(&_global_mutex);
393 }
394
/*
 * Store our pid (with trailing newline) in the pidfile open on fd 'lf'.
 * Returns 1 on success, 0 on failure.
 *
 * The previous 8-byte buffer truncated 7-digit pids (pid_max may be
 * 4194304) and then wrote the truncated string without a newline; the
 * buffer is now large enough for any pid_t, truncation is treated as an
 * error, and the pid is printed with a matching format/argument type.
 */
static int _storepid(int lf)
{
	int len;
	char pid[32];

	if ((len = snprintf(pid, sizeof(pid), "%d\n", (int) getpid())) < 0 ||
	    len >= (int) sizeof(pid))
		return 0;

	if (write(lf, pid, (size_t) len) != len)
		return 0;

	fsync(lf);

	return 1;
}
414
415 /* Check, if a device exists. */
_fill_device_data(struct thread_status * ts)416 static int _fill_device_data(struct thread_status *ts)
417 {
418 struct dm_task *dmt;
419 struct dm_info dmi;
420
421 if (!ts->device.uuid)
422 return 0;
423
424 ts->device.name = NULL;
425 ts->device.major = ts->device.minor = 0;
426
427 dmt = dm_task_create(DM_DEVICE_INFO);
428 if (!dmt)
429 return 0;
430
431 dm_task_set_uuid(dmt, ts->device.uuid);
432 if (!dm_task_run(dmt))
433 goto fail;
434
435 ts->device.name = dm_strdup(dm_task_get_name(dmt));
436 if (!ts->device.name)
437 goto fail;
438
439 if (!dm_task_get_info(dmt, &dmi))
440 goto fail;
441
442 ts->device.major = dmi.major;
443 ts->device.minor = dmi.minor;
444
445 dm_task_destroy(dmt);
446 return 1;
447
448 fail:
449 dm_task_destroy(dmt);
450 dm_free(ts->device.name);
451 return 0;
452 }
453
454 /*
455 * Find an existing thread for a device.
456 *
457 * Mutex must be held when calling this.
458 */
_lookup_thread_status(struct message_data * data)459 static struct thread_status *_lookup_thread_status(struct message_data *data)
460 {
461 struct thread_status *thread;
462
463 dm_list_iterate_items(thread, &_thread_registry)
464 if (!strcmp(data->device_uuid, thread->device.uuid))
465 return thread;
466
467 return NULL;
468 }
469
/* Flush and shut down libdevmapper at exit. */
static void _exit_dm_lib(void)
{
	dm_lib_release();
	dm_lib_exit();
}
476
_exit_timeout(void * unused __attribute ((unused)))477 static void _exit_timeout(void *unused __attribute((unused)))
478 {
479 _timeout_running = 0;
480 pthread_mutex_unlock(&_timeout_mutex);
481 }
482
483 /* Wake up monitor threads every so often. */
_timeout_thread(void * unused __attribute ((unused)))484 static void *_timeout_thread(void *unused __attribute((unused)))
485 {
486 struct timespec timeout;
487 time_t curr_time;
488
489 timeout.tv_nsec = 0;
490 pthread_cleanup_push(_exit_timeout, NULL);
491 pthread_mutex_lock(&_timeout_mutex);
492
493 while (!dm_list_empty(&_timeout_registry)) {
494 struct thread_status *thread;
495
496 timeout.tv_sec = 0;
497 curr_time = time(NULL);
498
499 dm_list_iterate_items_gen(thread, &_timeout_registry, timeout_list) {
500 if (thread->next_time <= curr_time) {
501 thread->next_time = curr_time + thread->timeout;
502 pthread_kill(thread->thread, SIGALRM);
503 }
504
505 if (thread->next_time < timeout.tv_sec || !timeout.tv_sec)
506 timeout.tv_sec = thread->next_time;
507 }
508
509 pthread_cond_timedwait(&_timeout_cond, &_timeout_mutex,
510 &timeout);
511 }
512
513 pthread_cleanup_pop(1);
514
515 return NULL;
516 }
517
_register_for_timeout(struct thread_status * thread)518 static int _register_for_timeout(struct thread_status *thread)
519 {
520 int ret = 0;
521
522 pthread_mutex_lock(&_timeout_mutex);
523
524 thread->next_time = time(NULL) + thread->timeout;
525
526 if (dm_list_empty(&thread->timeout_list)) {
527 dm_list_add(&_timeout_registry, &thread->timeout_list);
528 if (_timeout_running)
529 pthread_cond_signal(&_timeout_cond);
530 }
531
532 if (!_timeout_running) {
533 pthread_t timeout_id;
534
535 if (!(ret = -_pthread_create_smallstack(&timeout_id, _timeout_thread, NULL)))
536 _timeout_running = 1;
537 }
538
539 pthread_mutex_unlock(&_timeout_mutex);
540
541 return ret;
542 }
543
_unregister_for_timeout(struct thread_status * thread)544 static void _unregister_for_timeout(struct thread_status *thread)
545 {
546 pthread_mutex_lock(&_timeout_mutex);
547 if (!dm_list_empty(&thread->timeout_list)) {
548 dm_list_del(&thread->timeout_list);
549 dm_list_init(&thread->timeout_list);
550 }
551 pthread_mutex_unlock(&_timeout_mutex);
552 }
553
_no_intr_log(int level,const char * file,int line,const char * f,...)554 static void _no_intr_log(int level, const char *file, int line,
555 const char *f, ...)
556 {
557 va_list ap;
558
559 if (errno == EINTR)
560 return;
561 if (level > _LOG_WARN)
562 return;
563
564 va_start(ap, f);
565
566 if (level < _LOG_WARN)
567 vfprintf(stderr, f, ap);
568 else
569 vprintf(f, ap);
570
571 va_end(ap);
572
573 if (level < _LOG_WARN)
574 fprintf(stderr, "\n");
575 else
576 fprintf(stdout, "\n");
577 }
578
_unblock_sigalrm(void)579 static sigset_t _unblock_sigalrm(void)
580 {
581 sigset_t set, old;
582
583 sigemptyset(&set);
584 sigaddset(&set, SIGALRM);
585 pthread_sigmask(SIG_UNBLOCK, &set, &old);
586 return old;
587 }
588
589 #define DM_WAIT_RETRY 0
590 #define DM_WAIT_INTR 1
591 #define DM_WAIT_FATAL 2
592
593 /* Wait on a device until an event occurs. */
_event_wait(struct thread_status * thread,struct dm_task ** task)594 static int _event_wait(struct thread_status *thread, struct dm_task **task)
595 {
596 sigset_t set;
597 int ret = DM_WAIT_RETRY;
598 struct dm_task *dmt;
599 struct dm_info info;
600
601 *task = 0;
602
603 if (!(dmt = dm_task_create(DM_DEVICE_WAITEVENT)))
604 return DM_WAIT_RETRY;
605
606 thread->current_task = dmt;
607
608 if (!dm_task_set_uuid(dmt, thread->device.uuid) ||
609 !dm_task_set_event_nr(dmt, thread->event_nr))
610 goto out;
611
612 /*
613 * This is so that you can break out of waiting on an event,
614 * either for a timeout event, or to cancel the thread.
615 */
616 set = _unblock_sigalrm();
617 dm_log_init(_no_intr_log);
618 errno = 0;
619 if (dm_task_run(dmt)) {
620 thread->current_events |= DM_EVENT_DEVICE_ERROR;
621 ret = DM_WAIT_INTR;
622
623 if ((ret = dm_task_get_info(dmt, &info)))
624 thread->event_nr = info.event_nr;
625 } else if (thread->events & DM_EVENT_TIMEOUT && errno == EINTR) {
626 thread->current_events |= DM_EVENT_TIMEOUT;
627 ret = DM_WAIT_INTR;
628 } else if (thread->status == DM_THREAD_SHUTDOWN && errno == EINTR) {
629 ret = DM_WAIT_FATAL;
630 } else {
631 syslog(LOG_NOTICE, "dm_task_run failed, errno = %d, %s",
632 errno, strerror(errno));
633 if (errno == ENXIO) {
634 syslog(LOG_ERR, "%s disappeared, detaching",
635 thread->device.name);
636 ret = DM_WAIT_FATAL;
637 }
638 }
639
640 pthread_sigmask(SIG_SETMASK, &set, NULL);
641 dm_log_init(NULL);
642
643 out:
644 if (ret == DM_WAIT_FATAL || ret == DM_WAIT_RETRY) {
645 dm_task_destroy(dmt);
646 thread->current_task = NULL;
647 } else
648 *task = dmt;
649
650 return ret;
651 }
652
653 /* Register a device with the DSO. */
_do_register_device(struct thread_status * thread)654 static int _do_register_device(struct thread_status *thread)
655 {
656 return thread->dso_data->register_device(thread->device.name,
657 thread->device.uuid,
658 thread->device.major,
659 thread->device.minor,
660 &(thread->dso_private));
661 }
662
663 /* Unregister a device with the DSO. */
_do_unregister_device(struct thread_status * thread)664 static int _do_unregister_device(struct thread_status *thread)
665 {
666 return thread->dso_data->unregister_device(thread->device.name,
667 thread->device.uuid,
668 thread->device.major,
669 thread->device.minor,
670 &(thread->dso_private));
671 }
672
673 /* Process an event in the DSO. */
_do_process_event(struct thread_status * thread,struct dm_task * task)674 static void _do_process_event(struct thread_status *thread, struct dm_task *task)
675 {
676 thread->dso_data->process_event(task, thread->current_events, &(thread->dso_private));
677 }
678
679 /* Thread cleanup handler to unregister device. */
_monitor_unregister(void * arg)680 static void _monitor_unregister(void *arg)
681 {
682 struct thread_status *thread = arg, *thread_iter;
683
684 if (!_do_unregister_device(thread))
685 syslog(LOG_ERR, "%s: %s unregister failed\n", __func__,
686 thread->device.name);
687 if (thread->current_task)
688 dm_task_destroy(thread->current_task);
689 thread->current_task = NULL;
690
691 _lock_mutex();
692 if (thread->events & DM_EVENT_TIMEOUT) {
693 /* _unregister_for_timeout locks another mutex, we
694 don't want to deadlock so we release our mutex for
695 a bit */
696 _unlock_mutex();
697 _unregister_for_timeout(thread);
698 _lock_mutex();
699 }
700 /* we may have been relinked to unused registry since we were
701 called, so check that */
702 dm_list_iterate_items(thread_iter, &_thread_registry_unused)
703 if (thread_iter == thread) {
704 thread->status = DM_THREAD_DONE;
705 _unlock_mutex();
706 return;
707 }
708 thread->status = DM_THREAD_DONE;
709 UNLINK_THREAD(thread);
710 LINK(thread, &_thread_registry_unused);
711 _unlock_mutex();
712 }
713
_get_device_status(struct thread_status * ts)714 static struct dm_task *_get_device_status(struct thread_status *ts)
715 {
716 struct dm_task *dmt = dm_task_create(DM_DEVICE_STATUS);
717
718 if (!dmt)
719 return NULL;
720
721 dm_task_set_uuid(dmt, ts->device.uuid);
722
723 if (!dm_task_run(dmt)) {
724 dm_task_destroy(dmt);
725 return NULL;
726 }
727
728 return dmt;
729 }
730
731 /* Device monitoring thread. */
_monitor_thread(void * arg)732 static void *_monitor_thread(void *arg)
733 {
734 struct thread_status *thread = arg;
735 int wait_error = 0;
736 struct dm_task *task;
737
738 pthread_setcanceltype(PTHREAD_CANCEL_DEFERRED, NULL);
739 pthread_cleanup_push(_monitor_unregister, thread);
740
741 /* Wait for do_process_request() to finish its task. */
742 _lock_mutex();
743 thread->status = DM_THREAD_RUNNING;
744 _unlock_mutex();
745
746 /* Loop forever awaiting/analyzing device events. */
747 while (1) {
748 thread->current_events = 0;
749
750 wait_error = _event_wait(thread, &task);
751 if (wait_error == DM_WAIT_RETRY)
752 continue;
753
754 if (wait_error == DM_WAIT_FATAL)
755 break;
756
757 /* Timeout occurred, task is not filled properly.
758 * We get device status here for processing it in DSO.
759 */
760 if (wait_error == DM_WAIT_INTR &&
761 thread->current_events & DM_EVENT_TIMEOUT) {
762 dm_task_destroy(task);
763 task = _get_device_status(thread);
764 /* FIXME: syslog fail here ? */
765 if (!(thread->current_task = task))
766 continue;
767 }
768
769 /*
770 * We know that wait succeeded and stored a
771 * pointer to dm_task with device status into task.
772 */
773
774 /*
775 * Check against filter.
776 *
777 * If there's current events delivered from _event_wait() AND
778 * the device got registered for those events AND
779 * those events haven't been processed yet, call
780 * the DSO's process_event() handler.
781 */
782 _lock_mutex();
783 if (thread->status == DM_THREAD_SHUTDOWN) {
784 _unlock_mutex();
785 break;
786 }
787 _unlock_mutex();
788
789 if (thread->events & thread->current_events) {
790 _lock_mutex();
791 thread->processing = 1;
792 _unlock_mutex();
793
794 _do_process_event(thread, task);
795 dm_task_destroy(task);
796 thread->current_task = NULL;
797
798 _lock_mutex();
799 thread->processing = 0;
800 _unlock_mutex();
801 } else {
802 dm_task_destroy(task);
803 thread->current_task = NULL;
804 }
805 }
806
807 pthread_cleanup_pop(1);
808
809 return NULL;
810 }
811
812 /* Create a device monitoring thread. */
_create_thread(struct thread_status * thread)813 static int _create_thread(struct thread_status *thread)
814 {
815 return _pthread_create_smallstack(&thread->thread, _monitor_thread, thread);
816 }
817
_terminate_thread(struct thread_status * thread)818 static int _terminate_thread(struct thread_status *thread)
819 {
820 return pthread_kill(thread->thread, SIGALRM);
821 }
822
823 /* DSO reference counting. Call with _global_mutex locked! */
_lib_get(struct dso_data * data)824 static void _lib_get(struct dso_data *data)
825 {
826 data->ref_count++;
827 }
828
_lib_put(struct dso_data * data)829 static void _lib_put(struct dso_data *data)
830 {
831 if (!--data->ref_count) {
832 dlclose(data->dso_handle);
833 UNLINK_DSO(data);
834 _free_dso_data(data);
835 }
836 }
837
838 /* Find DSO data. */
_lookup_dso(struct message_data * data)839 static struct dso_data *_lookup_dso(struct message_data *data)
840 {
841 struct dso_data *dso_data, *ret = NULL;
842
843 dm_list_iterate_items(dso_data, &_dso_registry)
844 if (!strcmp(data->dso_name, dso_data->dso_name)) {
845 _lib_get(dso_data);
846 ret = dso_data;
847 break;
848 }
849
850 return ret;
851 }
852
/* Resolve one symbol from a DSO; 1 on success, 0 when it is missing. */
static int _lookup_symbol(void *dl, void **symbol, const char *name)
{
	return (*symbol = dlsym(dl, name)) ? 1 : 0;
}
861
lookup_symbols(void * dl,struct dso_data * data)862 static int lookup_symbols(void *dl, struct dso_data *data)
863 {
864 return _lookup_symbol(dl, (void *) &data->process_event,
865 "process_event") &&
866 _lookup_symbol(dl, (void *) &data->register_device,
867 "register_device") &&
868 _lookup_symbol(dl, (void *) &data->unregister_device,
869 "unregister_device");
870 }
871
872 /* Load an application specific DSO. */
_load_dso(struct message_data * data)873 static struct dso_data *_load_dso(struct message_data *data)
874 {
875 void *dl;
876 struct dso_data *ret = NULL;
877
878 if (!(dl = dlopen(data->dso_name, RTLD_NOW))) {
879 const char *dlerr = dlerror();
880 syslog(LOG_ERR, "dmeventd %s dlopen failed: %s", data->dso_name,
881 dlerr);
882 data->msg->size =
883 dm_asprintf(&(data->msg->data), "%s %s dlopen failed: %s",
884 data->id, data->dso_name, dlerr);
885 return NULL;
886 }
887
888 if (!(ret = _alloc_dso_data(data))) {
889 dlclose(dl);
890 return NULL;
891 }
892
893 if (!(lookup_symbols(dl, ret))) {
894 _free_dso_data(ret);
895 dlclose(dl);
896 return NULL;
897 }
898
899 /*
900 * Keep handle to close the library once
901 * we've got no references to it any more.
902 */
903 ret->dso_handle = dl;
904 _lib_get(ret);
905
906 _lock_mutex();
907 LINK_DSO(ret);
908 _unlock_mutex();
909
910 return ret;
911 }
912
/* Daemon active check: answering at all proves the daemon is alive. */
static int _active(struct message_data *message_data)
{
	return 0;
}
918
919 /*
920 * Register for an event.
921 *
922 * Only one caller at a time here, because we use
923 * a FIFO and lock it against multiple accesses.
924 */
_register_for_event(struct message_data * message_data)925 static int _register_for_event(struct message_data *message_data)
926 {
927 int ret = 0;
928 struct thread_status *thread, *thread_new = NULL;
929 struct dso_data *dso_data;
930
931 if (!(dso_data = _lookup_dso(message_data)) &&
932 !(dso_data = _load_dso(message_data))) {
933 stack;
934 #ifdef ELIBACC
935 ret = -ELIBACC;
936 #else
937 ret = -ENODEV;
938 #endif
939 goto out;
940 }
941
942 /* Preallocate thread status struct to avoid deadlock. */
943 if (!(thread_new = _alloc_thread_status(message_data, dso_data))) {
944 stack;
945 ret = -ENOMEM;
946 goto out;
947 }
948
949 if (!_fill_device_data(thread_new)) {
950 stack;
951 ret = -ENODEV;
952 goto out;
953 }
954
955 _lock_mutex();
956
957 /* If creation of timeout thread fails (as it may), we fail
958 here completely. The client is responsible for either
959 retrying later or trying to register without timeout
960 events. However, if timeout thread cannot be started, it
961 usually means we are so starved on resources that we are
962 almost as good as dead already... */
963 if (thread_new->events & DM_EVENT_TIMEOUT) {
964 ret = -_register_for_timeout(thread_new);
965 if (ret) {
966 _unlock_mutex();
967 goto out;
968 }
969 }
970
971 if (!(thread = _lookup_thread_status(message_data))) {
972 _unlock_mutex();
973
974 if (!(ret = _do_register_device(thread_new)))
975 goto out;
976
977 thread = thread_new;
978 thread_new = NULL;
979
980 /* Try to create the monitoring thread for this device. */
981 _lock_mutex();
982 if ((ret = -_create_thread(thread))) {
983 _unlock_mutex();
984 _do_unregister_device(thread);
985 _free_thread_status(thread);
986 goto out;
987 } else
988 LINK_THREAD(thread);
989 }
990
991 /* Or event # into events bitfield. */
992 thread->events |= message_data->events.field;
993
994 _unlock_mutex();
995
996 out:
997 /*
998 * Deallocate thread status after releasing
999 * the lock in case we haven't used it.
1000 */
1001 if (thread_new)
1002 _free_thread_status(thread_new);
1003
1004 return ret;
1005 }
1006
1007 /*
1008 * Unregister for an event.
1009 *
1010 * Only one caller at a time here as with register_for_event().
1011 */
_unregister_for_event(struct message_data * message_data)1012 static int _unregister_for_event(struct message_data *message_data)
1013 {
1014 int ret = 0;
1015 struct thread_status *thread;
1016
1017 /*
1018 * Clear event in bitfield and deactivate
1019 * monitoring thread in case bitfield is 0.
1020 */
1021 _lock_mutex();
1022
1023 if (!(thread = _lookup_thread_status(message_data))) {
1024 _unlock_mutex();
1025 ret = -ENODEV;
1026 goto out;
1027 }
1028
1029 if (thread->status == DM_THREAD_DONE) {
1030 /* the thread has terminated while we were not
1031 watching */
1032 _unlock_mutex();
1033 return 0;
1034 }
1035
1036 thread->events &= ~message_data->events.field;
1037
1038 if (!(thread->events & DM_EVENT_TIMEOUT))
1039 _unregister_for_timeout(thread);
1040 /*
1041 * In case there's no events to monitor on this device ->
1042 * unlink and terminate its monitoring thread.
1043 */
1044 if (!thread->events) {
1045 UNLINK_THREAD(thread);
1046 LINK(thread, &_thread_registry_unused);
1047 }
1048 _unlock_mutex();
1049
1050 out:
1051 return ret;
1052 }
1053
1054 /*
1055 * Get registered device.
1056 *
1057 * Only one caller at a time here as with register_for_event().
1058 */
_registered_device(struct message_data * message_data,struct thread_status * thread)1059 static int _registered_device(struct message_data *message_data,
1060 struct thread_status *thread)
1061 {
1062 struct dm_event_daemon_message *msg = message_data->msg;
1063
1064 const char *fmt = "%s %s %s %u";
1065 const char *id = message_data->id;
1066 const char *dso = thread->dso_data->dso_name;
1067 const char *dev = thread->device.uuid;
1068 unsigned events = ((thread->status == DM_THREAD_RUNNING)
1069 && (thread->events)) ? thread->events : thread->
1070 events | DM_EVENT_REGISTRATION_PENDING;
1071
1072 if (msg->data)
1073 dm_free(msg->data);
1074
1075 msg->size = dm_asprintf(&(msg->data), fmt, id, dso, dev, events);
1076
1077 _unlock_mutex();
1078
1079 return 0;
1080 }
1081
_want_registered_device(char * dso_name,char * device_uuid,struct thread_status * thread)1082 static int _want_registered_device(char *dso_name, char *device_uuid,
1083 struct thread_status *thread)
1084 {
1085 /* If DSO names and device paths are equal. */
1086 if (dso_name && device_uuid)
1087 return !strcmp(dso_name, thread->dso_data->dso_name) &&
1088 !strcmp(device_uuid, thread->device.uuid) &&
1089 (thread->status == DM_THREAD_RUNNING ||
1090 (thread->events & DM_EVENT_REGISTRATION_PENDING));
1091
1092 /* If DSO names are equal. */
1093 if (dso_name)
1094 return !strcmp(dso_name, thread->dso_data->dso_name) &&
1095 (thread->status == DM_THREAD_RUNNING ||
1096 (thread->events & DM_EVENT_REGISTRATION_PENDING));
1097
1098 /* If device paths are equal. */
1099 if (device_uuid)
1100 return !strcmp(device_uuid, thread->device.uuid) &&
1101 (thread->status == DM_THREAD_RUNNING ||
1102 (thread->events & DM_EVENT_REGISTRATION_PENDING));
1103
1104 return 1;
1105 }
1106
/*
 * Find the first thread matching the dso_name/device_uuid filters in
 * message_data and answer the client with its registration data.
 *
 * With next == 0 the match itself is returned; with next != 0 the
 * following matching list entry is returned instead (lets clients walk
 * the registry one device at a time).
 *
 * Returns 0 on success (reply built by _registered_device()) or
 * -ENOENT when no (further) matching registration exists.
 */
static int _get_registered_dev(struct message_data *message_data, int next)
{
	struct thread_status *thread, *hit = NULL;

	_lock_mutex();

	/* Iterate list of threads checking if we want a particular one. */
	dm_list_iterate_items(thread, &_thread_registry)
		if (_want_registered_device(message_data->dso_name,
					    message_data->device_uuid,
					    thread)) {
			hit = thread;
			break;
		}

	/*
	 * If we got a registered device and want the next one ->
	 * fetch next conforming element off the list.
	 */
	if (hit && !next) {
		/* Mutex released before replying; hit itself is not freed
		 * by this thread, so the reference stays usable. */
		_unlock_mutex();
		return _registered_device(message_data, hit);
	}

	if (!hit)
		goto out;

	thread = hit;

	while (1) {
		if (dm_list_end(&_thread_registry, &thread->list))
			goto out;

		thread = dm_list_item(thread->list.n, struct thread_status);
		/* NOTE(review): only the DSO name is matched here, not the
		 * device UUID as in the first pass -- presumably intentional
		 * so "next" walks all devices of one DSO; confirm against
		 * the client-side iteration protocol. */
		if (_want_registered_device(message_data->dso_name, NULL, thread)) {
			hit = thread;
			break;
		}
	}

	_unlock_mutex();
	return _registered_device(message_data, hit);

      out:
	_unlock_mutex();

	return -ENOENT;
}
1155
/* Reply with the first registration matching the client's filters. */
static int _get_registered_device(struct message_data *message_data)
{
	return _get_registered_dev(message_data, 0);
}
1160
/* Reply with the registration following the one the client named. */
static int _get_next_registered_device(struct message_data *message_data)
{
	return _get_registered_dev(message_data, 1);
}
1165
_set_timeout(struct message_data * message_data)1166 static int _set_timeout(struct message_data *message_data)
1167 {
1168 struct thread_status *thread;
1169
1170 _lock_mutex();
1171 if ((thread = _lookup_thread_status(message_data)))
1172 thread->timeout = message_data->timeout.secs;
1173 _unlock_mutex();
1174
1175 return thread ? 0 : -ENODEV;
1176 }
1177
/*
 * Handle DM_EVENT_CMD_GET_TIMEOUT: reply with "<id> <seconds>" for the
 * thread monitoring the device named in message_data.
 * Returns 0 on success, -ENODEV when no such thread is registered.
 */
static int _get_timeout(struct message_data *message_data)
{
	struct thread_status *thread;
	struct dm_event_daemon_message *msg = message_data->msg;

	/* Drop the request payload; msg is reused for the reply. */
	if (msg->data)
		dm_free(msg->data);

	_lock_mutex();
	if ((thread = _lookup_thread_status(message_data))) {
		/* NOTE(review): dm_asprintf() returns -1 on failure, which
		 * would leave msg->size negative -- confirm the writer copes. */
		msg->size =
		    dm_asprintf(&(msg->data), "%s %" PRIu32, message_data->id,
				thread->timeout);
	} else {
		msg->data = NULL;
		msg->size = 0;
	}
	_unlock_mutex();

	return thread ? 0 : -ENODEV;
}
1199
1200 /* Initialize a fifos structure with path names. */
_init_fifos(struct dm_event_fifos * fifos)1201 static void _init_fifos(struct dm_event_fifos *fifos)
1202 {
1203 memset(fifos, 0, sizeof(*fifos));
1204
1205 fifos->client_path = DM_EVENT_FIFO_CLIENT;
1206 fifos->server_path = DM_EVENT_FIFO_SERVER;
1207 }
1208
1209 /* Open fifos used for client communication. */
_open_fifos(struct dm_event_fifos * fifos)1210 static int _open_fifos(struct dm_event_fifos *fifos)
1211 {
1212 /* Create fifos */
1213 if (((mkfifo(fifos->client_path, 0600) == -1) && errno != EEXIST) ||
1214 ((mkfifo(fifos->server_path, 0600) == -1) && errno != EEXIST)) {
1215 syslog(LOG_ERR, "%s: Failed to create a fifo.\n", __func__);
1216 stack;
1217 return -errno;
1218 }
1219
1220 struct stat st;
1221
1222 /* Warn about wrong permissions if applicable */
1223 if ((!stat(fifos->client_path, &st)) && (st.st_mode & 0777) != 0600)
1224 syslog(LOG_WARNING, "Fixing wrong permissions on %s",
1225 fifos->client_path);
1226
1227 if ((!stat(fifos->server_path, &st)) && (st.st_mode & 0777) != 0600)
1228 syslog(LOG_WARNING, "Fixing wrong permissions on %s",
1229 fifos->server_path);
1230
1231 /* If they were already there, make sure permissions are ok. */
1232 if (chmod(fifos->client_path, 0600)) {
1233 syslog(LOG_ERR, "Unable to set correct file permissions on %s",
1234 fifos->client_path);
1235 return -errno;
1236 }
1237
1238 if (chmod(fifos->server_path, 0600)) {
1239 syslog(LOG_ERR, "Unable to set correct file permissions on %s",
1240 fifos->server_path);
1241 return -errno;
1242 }
1243
1244 /* Need to open read+write or we will block or fail */
1245 if ((fifos->server = open(fifos->server_path, O_RDWR)) < 0) {
1246 stack;
1247 return -errno;
1248 }
1249
1250 /* Need to open read+write for select() to work. */
1251 if ((fifos->client = open(fifos->client_path, O_RDWR)) < 0) {
1252 stack;
1253 close(fifos->server);
1254 return -errno;
1255 }
1256
1257 return 0;
1258 }
1259
1260 /*
1261 * Read message from client making sure that data is available
1262 * and a complete message is read. Must not block indefinitely.
1263 */
_client_read(struct dm_event_fifos * fifos,struct dm_event_daemon_message * msg)1264 static int _client_read(struct dm_event_fifos *fifos,
1265 struct dm_event_daemon_message *msg)
1266 {
1267 struct timeval t;
1268 unsigned bytes = 0;
1269 int ret = 0;
1270 fd_set fds;
1271 int header = 1;
1272 size_t size = 2 * sizeof(uint32_t); /* status + size */
1273 char *buf = alloca(size);
1274
1275 msg->data = NULL;
1276
1277 errno = 0;
1278 while (bytes < size && errno != EOF) {
1279 /* Watch client read FIFO for input. */
1280 FD_ZERO(&fds);
1281 FD_SET(fifos->client, &fds);
1282 t.tv_sec = 1;
1283 t.tv_usec = 0;
1284 ret = select(fifos->client + 1, &fds, NULL, NULL, &t);
1285
1286 if (!ret && !bytes) /* nothing to read */
1287 return 0;
1288
1289 if (!ret) /* trying to finish read */
1290 continue;
1291
1292 if (ret < 0) /* error */
1293 return 0;
1294
1295 ret = read(fifos->client, buf + bytes, size - bytes);
1296 bytes += ret > 0 ? ret : 0;
1297 if (bytes == 2 * sizeof(uint32_t) && header) {
1298 msg->cmd = ntohl(*((uint32_t *) buf));
1299 msg->size = ntohl(*((uint32_t *) buf + 1));
1300 buf = msg->data = dm_malloc(msg->size);
1301 size = msg->size;
1302 bytes = 0;
1303 header = 0;
1304 }
1305 }
1306
1307 if (bytes != size) {
1308 if (msg->data)
1309 dm_free(msg->data);
1310 msg->data = NULL;
1311 msg->size = 0;
1312 }
1313
1314 return bytes == size;
1315 }
1316
1317 /*
1318 * Write a message to the client making sure that it is ready to write.
1319 */
_client_write(struct dm_event_fifos * fifos,struct dm_event_daemon_message * msg)1320 static int _client_write(struct dm_event_fifos *fifos,
1321 struct dm_event_daemon_message *msg)
1322 {
1323 unsigned bytes = 0;
1324 int ret = 0;
1325 fd_set fds;
1326
1327 size_t size = 2 * sizeof(uint32_t) + msg->size;
1328 char *buf = alloca(size);
1329
1330 *((uint32_t *)buf) = htonl(msg->cmd);
1331 *((uint32_t *)buf + 1) = htonl(msg->size);
1332 if (msg->data)
1333 memcpy(buf + 2 * sizeof(uint32_t), msg->data, msg->size);
1334
1335 errno = 0;
1336 while (bytes < size && errno != EIO) {
1337 do {
1338 /* Watch client write FIFO to be ready for output. */
1339 FD_ZERO(&fds);
1340 FD_SET(fifos->server, &fds);
1341 } while (select(fifos->server + 1, NULL, &fds, NULL, NULL) !=
1342 1);
1343
1344 ret = write(fifos->server, buf + bytes, size - bytes);
1345 bytes += ret > 0 ? ret : 0;
1346 }
1347
1348 return bytes == size;
1349 }
1350
1351 /*
1352 * Handle a client request.
1353 *
1354 * We put the request handling functions into
1355 * a list because of the growing number.
1356 */
_handle_request(struct dm_event_daemon_message * msg,struct message_data * message_data)1357 static int _handle_request(struct dm_event_daemon_message *msg,
1358 struct message_data *message_data)
1359 {
1360 static struct {
1361 unsigned int cmd;
1362 int (*f)(struct message_data *);
1363 } requests[] = {
1364 { DM_EVENT_CMD_REGISTER_FOR_EVENT, _register_for_event},
1365 { DM_EVENT_CMD_UNREGISTER_FOR_EVENT, _unregister_for_event},
1366 { DM_EVENT_CMD_GET_REGISTERED_DEVICE, _get_registered_device},
1367 { DM_EVENT_CMD_GET_NEXT_REGISTERED_DEVICE,
1368 _get_next_registered_device},
1369 { DM_EVENT_CMD_SET_TIMEOUT, _set_timeout},
1370 { DM_EVENT_CMD_GET_TIMEOUT, _get_timeout},
1371 { DM_EVENT_CMD_ACTIVE, _active},
1372 }, *req;
1373
1374 for (req = requests; req < requests + sizeof(requests); req++)
1375 if (req->cmd == msg->cmd)
1376 return req->f(message_data);
1377
1378 return -EINVAL;
1379 }
1380
/* Process a request passed from the communication thread. */
/*
 * Decode one client message, dispatch it, and turn msg into the reply
 * in place: msg->cmd becomes the (negative-errno) status and
 * msg->data/msg->size the answer payload.  Returns that status.
 */
static int _do_process_request(struct dm_event_daemon_message *msg)
{
	int ret;
	char *answer;
	/* Requests are processed one at a time (single communication
	 * thread), so static storage is not re-entered here. */
	static struct message_data message_data;

	/* Parse the message. */
	memset(&message_data, 0, sizeof(message_data));
	message_data.msg = msg;
	if (msg->cmd == DM_EVENT_CMD_HELLO) {
		/* Handshake: echo the client's id back followed by "HELLO". */
		ret = 0;
		answer = msg->data;
		if (answer) {
			msg->size = dm_asprintf(&(msg->data), "%s HELLO", answer);
			dm_free(answer);
		} else {
			msg->size = 0;
			msg->data = NULL;
		}
	} else if (msg->cmd != DM_EVENT_CMD_ACTIVE && !_parse_message(&message_data)) {
		stack;
		ret = -EINVAL;
	} else
		ret = _handle_request(msg, &message_data);

	/* The status code travels back to the client in the cmd field. */
	msg->cmd = ret;
	/* Handlers that produced no payload reply "<id> <strerror(status)>". */
	if (!msg->data)
		msg->size = dm_asprintf(&(msg->data), "%s %s", message_data.id, strerror(-ret));

	_free_message(&message_data);

	return ret;
}
1415
/* Only one caller at a time. */
/*
 * Service one full client transaction: read a request off the client
 * fifo, act on it, write the reply back on the server fifo, and free
 * the reply payload.
 */
static void _process_request(struct dm_event_fifos *fifos)
{
	struct dm_event_daemon_message msg;

	memset(&msg, 0, sizeof(msg));

	/*
	 * Read the request from the client (client_read, client_write
	 * give true on success and false on failure).  On failure there
	 * is nothing to answer.
	 */
	if (!_client_read(fifos, &msg))
		return;

	/* _do_process_request fills in msg (if memory allows for
	   data, otherwise just cmd and size = 0) */
	_do_process_request(&msg);

	if (!_client_write(fifos, &msg))
		stack;

	if (msg.data)
		dm_free(msg.data);
}
1440
/*
 * Reap monitoring threads parked on the unused registry.
 *
 * Each pass advances the head thread by at most one state:
 *   RUNNING  -> mark SHUTDOWN (and stop for this round),
 *   SHUTDOWN -> signal it to terminate, or re-link it into the active
 *               registry if events were re-registered meanwhile,
 *   DONE     -> join it and release its DSO reference and status.
 */
static void _cleanup_unused_threads(void)
{
	int ret;
	struct dm_list *l;
	struct thread_status *thread;

	_lock_mutex();
	while ((l = dm_list_first(&_thread_registry_unused))) {
		thread = dm_list_item(l, struct thread_status);
		if (thread->processing)
			break;	/* cleanup on the next round */

		if (thread->status == DM_THREAD_RUNNING) {
			thread->status = DM_THREAD_SHUTDOWN;
			break;
		}

		if (thread->status == DM_THREAD_SHUTDOWN) {
			if (!thread->events) {
				/* turn codes negative -- should we be returning this? */
				ret = _terminate_thread(thread);

				if (ret == ESRCH) {
					/* Thread already gone: ready to join. */
					thread->status = DM_THREAD_DONE;
				} else if (ret) {
					/* NOTE(review): ret is compared as a
					 * POSITIVE errno (ESRCH) above yet
					 * negated for strerror() here -- one
					 * of the two looks wrong; verify
					 * _terminate_thread()'s convention. */
					syslog(LOG_ERR,
					       "Unable to terminate thread: %s\n",
					       strerror(-ret));
					stack;
				}
				break;
			}

			/* Events re-registered: the thread was wrongly parked
			 * here, move it back onto the active registry. */
			dm_list_del(l);
			syslog(LOG_ERR,
			       "thread can't be on unused list unless !thread->events");
			thread->status = DM_THREAD_RUNNING;
			LINK_THREAD(thread);

			continue;
		}

		if (thread->status == DM_THREAD_DONE) {
			dm_list_del(l);
			pthread_join(thread->thread, NULL);
			_lib_put(thread->dso_data);
			_free_thread_status(thread);
		}
	}

	_unlock_mutex();
}
1493
/* SIGALRM handler: provide a cancellation point so a pthread_cancel()
 * issued against a thread blocked in a system call can take effect. */
static void _sig_alarm(int signum __attribute ((unused)))
{
	pthread_testcancel();
}
1498
1499 /* Init thread signal handling. */
_init_thread_signals(void)1500 static void _init_thread_signals(void)
1501 {
1502 sigset_t my_sigset;
1503 struct sigaction act;
1504
1505 memset(&act, 0, sizeof(act));
1506 act.sa_handler = _sig_alarm;
1507 sigaction(SIGALRM, &act, NULL);
1508 sigfillset(&my_sigset);
1509
1510 /* These are used for exiting */
1511 sigdelset(&my_sigset, SIGTERM);
1512 sigdelset(&my_sigset, SIGINT);
1513 sigdelset(&my_sigset, SIGHUP);
1514 sigdelset(&my_sigset, SIGQUIT);
1515
1516 pthread_sigmask(SIG_BLOCK, &my_sigset, NULL);
1517 }
1518
1519 /*
1520 * exit_handler
1521 * @sig
1522 *
1523 * Set the global variable which the process should
1524 * be watching to determine when to exit.
1525 */
_exit_handler(int sig __attribute ((unused)))1526 static void _exit_handler(int sig __attribute((unused)))
1527 {
1528 /*
1529 * We exit when '_exit_now' is set.
1530 * That is, when a signal has been received.
1531 *
1532 * We can not simply set '_exit_now' unless all
1533 * threads are done processing.
1534 */
1535 if (!_thread_registries_empty) {
1536 syslog(LOG_ERR, "There are still devices being monitored.");
1537 syslog(LOG_ERR, "Refusing to exit.");
1538 } else
1539 _exit_now = 1;
1540
1541 }
1542
_lock_pidfile(void)1543 static int _lock_pidfile(void)
1544 {
1545 int lf;
1546 char pidfile[] = DMEVENTD_PIDFILE;
1547
1548 if ((lf = open(pidfile, O_CREAT | O_RDWR, 0644)) < 0)
1549 exit(EXIT_OPEN_PID_FAILURE);
1550
1551 if (flock(lf, LOCK_EX | LOCK_NB) < 0)
1552 exit(EXIT_LOCKFILE_INUSE);
1553
1554 if (!_storepid(lf))
1555 exit(EXIT_FAILURE);
1556
1557 return 0;
1558 }
1559
1560 #ifdef linux
1561 /*
1562 * Protection against OOM killer if kernel supports it
1563 */
_set_oom_adj(int val)1564 static int _set_oom_adj(int val)
1565 {
1566 FILE *fp;
1567
1568 struct stat st;
1569
1570 if (stat(OOM_ADJ_FILE, &st) == -1) {
1571 if (errno == ENOENT)
1572 DEBUGLOG(OOM_ADJ_FILE " not found");
1573 else
1574 perror(OOM_ADJ_FILE ": stat failed");
1575 return 1;
1576 }
1577
1578 if (!(fp = fopen(OOM_ADJ_FILE, "w"))) {
1579 perror(OOM_ADJ_FILE ": fopen failed");
1580 return 0;
1581 }
1582
1583 fprintf(fp, "%i", val);
1584 if (dm_fclose(fp))
1585 perror(OOM_ADJ_FILE ": fclose failed");
1586
1587 return 1;
1588 }
1589 #endif
1590
/*
 * Fork into the background.
 *
 * Readiness handshake: the child sends the parent SIGTERM once its
 * fifos are open (see main()); the parent's _exit_handler sets
 * _exit_now and the parent exits 0.  If the child dies first, the
 * parent decodes and propagates the child's exit status instead.
 * The surviving child detaches fully: chdir("/"), close every
 * descriptor, re-open fds 0/1/2 on /dev/null, setsid().
 */
static void _daemonize(void)
{
	int child_status;
	int fd;
	pid_t pid;
	struct rlimit rlim;
	struct timeval tval;
	sigset_t my_sigset;

	/* Unblock all signals and arm SIGTERM for the handshake. */
	sigemptyset(&my_sigset);
	if (sigprocmask(SIG_SETMASK, &my_sigset, NULL) < 0) {
		fprintf(stderr, "Unable to restore signals.\n");
		exit(EXIT_FAILURE);
	}
	signal(SIGTERM, &_exit_handler);

	switch (pid = fork()) {
	case -1:
		perror("fork failed:");
		exit(EXIT_FAILURE);

	case 0:		/* Child */
		break;

	default:
		/* Wait for response from child */
		while (!waitpid(pid, &child_status, WNOHANG) && !_exit_now) {
			tval.tv_sec = 0;
			tval.tv_usec = 250000;	/* .25 sec */
			select(0, NULL, NULL, NULL, &tval);
		}

		if (_exit_now)	/* Child has signaled it is ok - we can exit now */
			exit(EXIT_SUCCESS);

		/* Problem with child. Determine what it is by exit code */
		switch (WEXITSTATUS(child_status)) {
		case EXIT_LOCKFILE_INUSE:
			fprintf(stderr, "Another dmeventd daemon is already running\n");
			break;
		case EXIT_DESC_CLOSE_FAILURE:
		case EXIT_DESC_OPEN_FAILURE:
		case EXIT_OPEN_PID_FAILURE:
		case EXIT_FIFO_FAILURE:
		case EXIT_CHDIR_FAILURE:
		default:
			fprintf(stderr, "Child exited with code %d\n", WEXITSTATUS(child_status));
			break;
		}

		exit(WEXITSTATUS(child_status));
	}

	/* Only the child reaches this point. */
	if (chdir("/"))
		exit(EXIT_CHDIR_FAILURE);

	/* Close every descriptor up to the (guessed) fd limit. */
	if (getrlimit(RLIMIT_NOFILE, &rlim) < 0)
		fd = 256;	/* just have to guess */
	else
		fd = rlim.rlim_cur;

	for (--fd; fd >= 0; fd--)
		close(fd);

	/* Re-occupy fds 0, 1 and 2 so later opens don't land there. */
	if ((open("/dev/null", O_RDONLY) < 0) ||
	    (open("/dev/null", O_WRONLY) < 0) ||
	    (open("/dev/null", O_WRONLY) < 0))
		exit(EXIT_DESC_OPEN_FAILURE);

	setsid();
}
1662
/* Print the command-line synopsis for prog to the given stream. */
static void usage(char *prog, FILE *file)
{
	fputs("Usage:\n", file);
	fprintf(file, "%s [Vhd]\n", prog);
	fputs("\n"
	      "   -V       Show version of dmeventd\n"
	      "   -h       Show this help information\n"
	      "   -d       Don't fork, run in the foreground\n"
	      "\n", file);
}
1673
/*
 * Daemon entry point: parse options, daemonize (unless -d), acquire
 * the pidfile lock, arm signal handling, open the client fifos, signal
 * the waiting parent, then serve requests until told to exit.
 */
int main(int argc, char *argv[])
{
	int ret;
	signed char opt;
	struct dm_event_fifos fifos;
	//struct sys_log logdata = {DAEMON_NAME, LOG_DAEMON};

	/* Reset getopt state so parsing is self-contained. */
	opterr = 0;
	optind = 0;

	while ((opt = getopt(argc, argv, "?hVd")) != EOF) {
		switch (opt) {
		case 'h':
			usage(argv[0], stdout);
			exit(0);
		case '?':
			/* NOTE(review): unknown options exit with status 0
			 * despite printing usage to stderr -- confirm. */
			usage(argv[0], stderr);
			exit(0);
		case 'd':
			/* -d: stay in the foreground for debugging. */
			_debug++;
			break;
		case 'V':
			printf("dmeventd version: %s\n", DM_LIB_VERSION);
			/* NOTE(review): -V exits with status 1; scripts may
			 * expect 0 -- confirm this is deliberate. */
			exit(1);
			break;
		}
	}

	if (!_debug)
		_daemonize();

	openlog("dmeventd", LOG_PID, LOG_DAEMON);

	_lock_pidfile();	/* exits if failure */

	/* Set the rest of the signals to cause '_exit_now' to be set */
	signal(SIGINT, &_exit_handler);
	signal(SIGHUP, &_exit_handler);
	signal(SIGQUIT, &_exit_handler);

#ifdef linux
	/* Shield the daemon from the kernel OOM killer where supported. */
	if (!_set_oom_adj(OOM_DISABLE) && !_set_oom_adj(OOM_ADJUST_MIN))
		syslog(LOG_ERR, "Failed to set oom_adj to protect against OOM killer");
#endif

	_init_thread_signals();

	//multilog_clear_logging();
	//multilog_add_type(std_syslog, &logdata);
	//multilog_init_verbose(std_syslog, _LOG_DEBUG);
	//multilog_async(1);

	_init_fifos(&fifos);

	pthread_mutex_init(&_global_mutex, NULL);

#ifdef MCL_CURRENT
	/* Lock all memory: the daemon must not be paged out while it
	 * handles device-mapper events. */
	if (mlockall(MCL_CURRENT | MCL_FUTURE) == -1)
		exit(EXIT_FAILURE);
#endif

	if ((ret = _open_fifos(&fifos)))
		exit(EXIT_FIFO_FAILURE);

	/* Signal parent, letting them know we are ready to go. */
	kill(getppid(), SIGTERM);
	syslog(LOG_NOTICE, "dmeventd ready for processing.");

	/* Main loop: serve one request, reap idle threads, refresh the
	 * "registries empty" flag consulted by _exit_handler(). */
	while (!_exit_now) {
		_process_request(&fifos);
		_cleanup_unused_threads();
		if (!dm_list_empty(&_thread_registry)
		    || !dm_list_empty(&_thread_registry_unused))
			_thread_registries_empty = 0;
		else
			_thread_registries_empty = 1;
	}

	_exit_dm_lib();

#ifdef MCL_CURRENT
	munlockall();
#endif
	pthread_mutex_destroy(&_global_mutex);

	syslog(LOG_NOTICE, "dmeventd shutting down.");
	closelog();

	exit(EXIT_SUCCESS);
}
1764