xref: /dragonfly/usr.sbin/nscd/nscd.c (revision 19fe1c42)
1 /*-
2  * Copyright (c) 2005 Michael Bushkov <bushman@rsu.ru>
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  *
26  * $FreeBSD: src/usr.sbin/nscd/nscd.c,v 1.7 2008/10/23 00:27:35 delphij Exp $
27  */
28 
29 #include <sys/types.h>
30 #include <sys/event.h>
31 #include <sys/socket.h>
32 #include <sys/time.h>
33 #include <sys/param.h>
34 #include <sys/un.h>
35 #include <assert.h>
36 #include <err.h>
37 #include <errno.h>
38 #include <fcntl.h>
39 #include <libutil.h>
40 #include <pthread.h>
41 #include <signal.h>
42 #include <stdio.h>
43 #include <stdlib.h>
44 #include <string.h>
45 #include <unistd.h>
46 
47 #include "agents/passwd.h"
48 #include "agents/group.h"
49 #include "agents/services.h"
50 #include "cachelib.h"
51 #include "config.h"
52 #include "debug.h"
53 #include "log.h"
54 #include "nscdcli.h"
55 #include "parser.h"
56 #include "pidfile.h"
57 #include "query.h"
58 #include "singletons.h"
59 
60 #ifndef CONFIG_PATH
61 #define CONFIG_PATH "/etc/nscd.conf"
62 #endif
63 #define DEFAULT_CONFIG_PATH	"nscd.conf"
64 
65 #define MAX_SOCKET_IO_SIZE	4096
66 
67 struct processing_thread_args {
68 	cache	the_cache;
69 	struct configuration	*the_configuration;
70 	struct runtime_env		*the_runtime_env;
71 };
72 
73 static void accept_connection(struct kevent *, struct runtime_env *,
74 	struct configuration *);
75 static void destroy_cache_(cache);
76 static void destroy_runtime_env(struct runtime_env *);
77 static cache init_cache_(struct configuration *);
78 static struct runtime_env *init_runtime_env(struct configuration *);
79 static void processing_loop(cache, struct runtime_env *,
80 	struct configuration *);
81 static void process_socket_event(struct kevent *, struct runtime_env *,
82 	struct configuration *);
83 static void process_timer_event(struct kevent *, struct runtime_env *,
84 	struct configuration *);
85 static void *processing_thread(void *);
86 static void usage(void);
87 
88 void get_time_func(struct timeval *);
89 
90 static void
91 usage(void)
92 {
93 	fprintf(stderr,
94 	    "usage: nscd [-dnst] [-i cachename] [-I cachename]\n");
95 	exit(1);
96 }
97 
98 static cache
99 init_cache_(struct configuration *config)
100 {
101 	struct cache_params params;
102 	cache retval;
103 
104 	struct configuration_entry *config_entry;
105 	size_t	size, i;
106 	int res;
107 
108 	TRACE_IN(init_cache_);
109 
110 	memset(&params, 0, sizeof(struct cache_params));
111 	params.get_time_func = get_time_func;
112 	retval = init_cache(&params);
113 
114 	size = configuration_get_entries_size(config);
115 	for (i = 0; i < size; ++i) {
116 		config_entry = configuration_get_entry(config, i);
117 		/*
118 		 * We register the common entries now; multipart entries
119 		 * will be registered automatically during queries.
120 		 */
121 		res = register_cache_entry(retval, (struct cache_entry_params *)
122 			&config_entry->positive_cache_params);
123 		config_entry->positive_cache_entry = find_cache_entry(retval,
124 			config_entry->positive_cache_params.entry_name);
125 		assert(config_entry->positive_cache_entry !=
126 			INVALID_CACHE_ENTRY);
127 
128 		res = register_cache_entry(retval, (struct cache_entry_params *)
129 			&config_entry->negative_cache_params);
130 		config_entry->negative_cache_entry = find_cache_entry(retval,
131 			config_entry->negative_cache_params.entry_name);
132 		assert(config_entry->negative_cache_entry !=
133 			INVALID_CACHE_ENTRY);
134 	}
135 
136 	LOG_MSG_2("cache", "cache was successfully initialized");
137 	TRACE_OUT(init_cache_);
138 	return (retval);
139 }
140 
141 static void
142 destroy_cache_(cache the_cache)
143 {
144 	TRACE_IN(destroy_cache_);
145 	destroy_cache(the_cache);
146 	TRACE_OUT(destroy_cache_);
147 }
148 
149 /*
150  * The socket and the kqueue are prepared here. We have one global queue for
151  * both socket and timer events.
152  */
153 static struct runtime_env *
154 init_runtime_env(struct configuration *config)
155 {
156 	int serv_addr_len;
157 	struct sockaddr_un serv_addr;
158 
159 	struct kevent eventlist;
160 	struct timespec timeout;
161 
162 	struct runtime_env *retval;
163 
164 	TRACE_IN(init_runtime_env);
165 	retval = (struct runtime_env *)calloc(1, sizeof(struct runtime_env));
166 	assert(retval != NULL);
167 
168 	retval->sockfd = socket(PF_LOCAL, SOCK_STREAM, 0);
169 
170 	if (config->force_unlink == 1)
171 		unlink(config->socket_path);
172 
173 	memset(&serv_addr, 0, sizeof(struct sockaddr_un));
174 	serv_addr.sun_family = PF_LOCAL;
175 	strlcpy(serv_addr.sun_path, config->socket_path,
176 		sizeof(serv_addr.sun_path));
177 	serv_addr_len = sizeof(serv_addr.sun_family) +
178 		strlen(serv_addr.sun_path) + 1;
179 
180 	if (bind(retval->sockfd, (struct sockaddr *)&serv_addr,
181 		serv_addr_len) == -1) {
182 		close(retval->sockfd);
183 		free(retval);
184 
185 		LOG_ERR_2("runtime environment", "can't bind socket to path: "
186 			"%s", config->socket_path);
187 		TRACE_OUT(init_runtime_env);
188 		return (NULL);
189 	}
190 	LOG_MSG_2("runtime environment", "using socket %s",
191 		config->socket_path);
192 
193 	/*
194 	 * Here we mark the socket as non-blocking and set its backlog to the
195 	 * maximum value.
196 	 */
197 	chmod(config->socket_path, config->socket_mode);
198 	listen(retval->sockfd, -1);
199 	fcntl(retval->sockfd, F_SETFL, O_NONBLOCK);
200 
201 	retval->queue = kqueue();
202 	assert(retval->queue != -1);
203 
204 	EV_SET(&eventlist, retval->sockfd, EVFILT_READ, EV_ADD | EV_ONESHOT,
205 		0, 0, 0);
206 	memset(&timeout, 0, sizeof(struct timespec));
207 	kevent(retval->queue, &eventlist, 1, NULL, 0, &timeout);
208 
209 	LOG_MSG_2("runtime environment", "successfully initialized");
210 	TRACE_OUT(init_runtime_env);
211 	return (retval);
212 }
213 
214 static void
215 destroy_runtime_env(struct runtime_env *env)
216 {
217 	TRACE_IN(destroy_runtime_env);
218 	close(env->queue);
219 	close(env->sockfd);
220 	free(env);
221 	TRACE_OUT(destroy_runtime_env);
222 }
223 
224 static void
225 accept_connection(struct kevent *event_data, struct runtime_env *env,
226 	struct configuration *config)
227 {
228 	struct kevent	eventlist[2];
229 	struct timespec	timeout;
230 	struct query_state	*qstate;
231 
232 	int	fd;
233 	int	res;
234 
235 	uid_t	euid;
236 	gid_t	egid;
237 
238 	TRACE_IN(accept_connection);
239 	fd = accept(event_data->ident, NULL, NULL);
240 	if (fd == -1) {
241 		LOG_ERR_2("accept_connection", "error %d during accept()",
242 		    errno);
243 		TRACE_OUT(accept_connection);
244 		return;
245 	}
246 
247 	if (getpeereid(fd, &euid, &egid) != 0) {
248 		LOG_ERR_2("accept_connection", "error %d during getpeereid()",
249 			errno);
		close(fd);
250 		TRACE_OUT(accept_connection);
251 		return;
252 	}
253 
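	/*
	 * The second argument is the initial kevent read watermark for the
	 * new connection: a single int, matching the NOTE_LOWAT value used
	 * when the read event is registered below.
	 */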
254 	qstate = init_query_state(fd, sizeof(int), euid, egid);
255 	if (qstate == NULL) {
256 		LOG_ERR_2("accept_connection", "can't init query_state");
		close(fd);
257 		TRACE_OUT(accept_connection);
258 		return;
259 	}
260 
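	/*
	 * Register two one-shot events for the new connection: an inactivity
	 * timer (EVFILT_TIMER data is in milliseconds) and a read event with
	 * a low watermark equal to the initial request size.
	 */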
261 	memset(&timeout, 0, sizeof(struct timespec));
262 	EV_SET(&eventlist[0], fd, EVFILT_TIMER, EV_ADD | EV_ONESHOT,
263 		0, qstate->timeout.tv_sec * 1000, qstate);
264 	EV_SET(&eventlist[1], fd, EVFILT_READ, EV_ADD | EV_ONESHOT,
265 		NOTE_LOWAT, qstate->kevent_watermark, qstate);
266 	res = kevent(env->queue, eventlist, 2, NULL, 0, &timeout);
267 	if (res < 0)
268 		LOG_ERR_2("accept_connection", "kevent error");
269 
270 	TRACE_OUT(accept_connection);
271 }
272 
273 static void
274 process_socket_event(struct kevent *event_data, struct runtime_env *env,
275 	struct configuration *config)
276 {
277 	struct kevent	eventlist[2];
278 	struct timeval	query_timeout;
279 	struct timespec	kevent_timeout;
280 	int	nevents;
281 	int	eof_res, res;
282 	ssize_t	io_res;
283 	struct query_state *qstate;
284 
285 	TRACE_IN(process_socket_event);
286 	eof_res = event_data->flags & EV_EOF ? 1 : 0;
287 	res = 0;
288 
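	/*
	 * First remove the pending inactivity timer for this connection; if
	 * kevent() reports ENOENT, the timer has already fired and
	 * process_timer_event() owns the connection.
	 */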
289 	memset(&kevent_timeout, 0, sizeof(struct timespec));
290 	EV_SET(&eventlist[0], event_data->ident, EVFILT_TIMER, EV_DELETE,
291 		0, 0, NULL);
292 	nevents = kevent(env->queue, eventlist, 1, NULL, 0, &kevent_timeout);
293 	if (nevents == -1) {
294 		if (errno == ENOENT) {
295 			/* the timer is already handling this event */
296 			TRACE_OUT(process_socket_event);
297 			return;
298 		} else {
299 			/* some other error happened */
300 			LOG_ERR_2("process_socket_event", "kevent error, errno"
301 				" is %d", errno);
302 			TRACE_OUT(process_socket_event);
303 			return;
304 		}
305 	}
306 	qstate = (struct query_state *)event_data->udata;
307 
308 	/*
309 	 * If the buffer that is to be sent/received is too large, we transfer
310 	 * it implicitly, by using the query_io_buffer_read and
311 	 * query_io_buffer_write functions of the query_state. These functions
312 	 * use a temporary buffer, which is later sent/received in parts.
313 	 * The code below implements buffer splitting/merging for send/receive
314 	 * operations. It also does the actual socket IO operations.
315 	 */
316 	if (((qstate->use_alternate_io == 0) &&
317 		(qstate->kevent_watermark <= event_data->data)) ||
318 		((qstate->use_alternate_io != 0) &&
319 		(qstate->io_buffer_watermark <= event_data->data))) {
320 		if (qstate->use_alternate_io != 0) {
321 			switch (qstate->io_buffer_filter) {
322 			case EVFILT_READ:
323 				io_res = query_socket_read(qstate,
324 					qstate->io_buffer_p,
325 					qstate->io_buffer_watermark);
326 				if (io_res < 0) {
327 					qstate->use_alternate_io = 0;
328 					qstate->process_func = NULL;
329 				} else {
330 					qstate->io_buffer_p += io_res;
331 					if (qstate->io_buffer_p ==
332 						qstate->io_buffer +
333 						qstate->io_buffer_size) {
334 						qstate->io_buffer_p =
335 						    qstate->io_buffer;
336 						qstate->use_alternate_io = 0;
337 					}
338 				}
339 			break;
340 			default:
341 			break;
342 			}
343 		}
344 
345 		if (qstate->use_alternate_io == 0) {
346 			do {
347 				res = qstate->process_func(qstate);
348 			} while ((qstate->kevent_watermark == 0) &&
349 					(qstate->process_func != NULL) &&
350 					(res == 0));
351 
352 			if (res != 0)
353 				qstate->process_func = NULL;
354 		}
355 
356 		if ((qstate->use_alternate_io != 0) &&
357 			(qstate->io_buffer_filter == EVFILT_WRITE)) {
358 			io_res = query_socket_write(qstate, qstate->io_buffer_p,
359 				qstate->io_buffer_watermark);
360 			if (io_res < 0) {
361 				qstate->use_alternate_io = 0;
362 				qstate->process_func = NULL;
363 			} else
364 				qstate->io_buffer_p += io_res;
365 		}
366 	} else {
367 		/* assuming that the socket was closed */
368 		qstate->process_func = NULL;
369 		qstate->use_alternate_io = 0;
370 	}
371 
372 	if (((qstate->process_func == NULL) &&
373 		(qstate->use_alternate_io == 0)) ||
374 		(eof_res != 0) || (res != 0)) {
375 		destroy_query_state(qstate);
376 		close(event_data->ident);
377 		TRACE_OUT(process_socket_event);
378 		return;
379 	}
380 
381 	/* recompute the remaining query_state lifetime (timeout minus elapsed time) */
382 	get_time_func(&query_timeout);
383 	query_timeout.tv_usec = 0;
384 	query_timeout.tv_sec -= qstate->creation_time.tv_sec;
385 	if (query_timeout.tv_sec > qstate->timeout.tv_sec)
386 		query_timeout.tv_sec = 0;
387 	else
388 		query_timeout.tv_sec = qstate->timeout.tv_sec -
389 			query_timeout.tv_sec;
390 
391 	if ((qstate->use_alternate_io != 0) && (qstate->io_buffer_p ==
392 		qstate->io_buffer + qstate->io_buffer_size))
393 		qstate->use_alternate_io = 0;
394 
395 	if (qstate->use_alternate_io == 0) {
396 		/*
397 		 * If we must send/receive a large block of data, we prepare
398 		 * the query_state's io_XXX fields. We also substitute its
399 		 * write_func and read_func with query_io_buffer_write and
400 		 * query_io_buffer_read, which allow us to implicitly
401 		 * send/receive this large buffer later (in subsequent calls
402 		 * to process_socket_event).
404 		 */
405 		if (qstate->kevent_watermark > MAX_SOCKET_IO_SIZE) {
406 			if (qstate->io_buffer != NULL)
407 				free(qstate->io_buffer);
408 
409 			qstate->io_buffer = (char *)calloc(1,
410 				qstate->kevent_watermark);
411 			assert(qstate->io_buffer != NULL);
412 
413 			qstate->io_buffer_p = qstate->io_buffer;
414 			qstate->io_buffer_size = qstate->kevent_watermark;
415 			qstate->io_buffer_filter = qstate->kevent_filter;
416 
417 			qstate->write_func = query_io_buffer_write;
418 			qstate->read_func = query_io_buffer_read;
419 
420 			if (qstate->kevent_filter == EVFILT_READ)
421 				qstate->use_alternate_io = 1;
422 
423 			qstate->io_buffer_watermark = MAX_SOCKET_IO_SIZE;
424 			EV_SET(&eventlist[1], event_data->ident,
425 				qstate->kevent_filter, EV_ADD | EV_ONESHOT,
426 				NOTE_LOWAT, MAX_SOCKET_IO_SIZE, qstate);
427 		} else {
428 			EV_SET(&eventlist[1], event_data->ident,
429 				qstate->kevent_filter, EV_ADD | EV_ONESHOT,
430 				NOTE_LOWAT, qstate->kevent_watermark, qstate);
431 		}
432 	} else {
433 		if (qstate->io_buffer + qstate->io_buffer_size -
434 			qstate->io_buffer_p <
435 			MAX_SOCKET_IO_SIZE) {
436 			qstate->io_buffer_watermark = qstate->io_buffer +
437 				qstate->io_buffer_size - qstate->io_buffer_p;
438 			EV_SET(&eventlist[1], event_data->ident,
439 				qstate->io_buffer_filter,
440 				EV_ADD | EV_ONESHOT, NOTE_LOWAT,
441 				qstate->io_buffer_watermark,
442 				qstate);
443 		} else {
444 			qstate->io_buffer_watermark = MAX_SOCKET_IO_SIZE;
445 			EV_SET(&eventlist[1], event_data->ident,
446 				qstate->io_buffer_filter, EV_ADD | EV_ONESHOT,
447 				NOTE_LOWAT, MAX_SOCKET_IO_SIZE, qstate);
448 		}
449 	}
450 	EV_SET(&eventlist[0], event_data->ident, EVFILT_TIMER,
451 		EV_ADD | EV_ONESHOT, 0, query_timeout.tv_sec * 1000, qstate);
452 	kevent(env->queue, eventlist, 2, NULL, 0, &kevent_timeout);
453 
454 	TRACE_OUT(process_socket_event);
455 }
456 
457 /*
458  * This routine is called if a timer event has been signaled in the kqueue.
459  * It just closes the socket and destroys the query_state.
460  */
461 static void
462 process_timer_event(struct kevent *event_data, struct runtime_env *env,
463 	struct configuration *config)
464 {
465 	struct query_state	*qstate;
466 
467 	TRACE_IN(process_timer_event);
468 	qstate = (struct query_state *)event_data->udata;
469 	destroy_query_state(qstate);
470 	close(event_data->ident);
471 	TRACE_OUT(process_timer_event);
472 }
473 
474 /*
475  * The processing loop is the basic processing routine that forms the body of
476  * each processing thread.
477  */
478 static void
479 processing_loop(cache the_cache, struct runtime_env *env,
480 	struct configuration *config)
481 {
482 	struct timespec timeout;
483 	const int eventlist_size = 1;
484 	struct kevent eventlist[eventlist_size];
485 	int nevents, i;
486 
487 	TRACE_MSG("=> processing_loop");
488 	memset(&timeout, 0, sizeof(struct timespec));
489 	memset(&eventlist, 0, sizeof(struct kevent) * eventlist_size);
490 
491 	for (;;) {
492 		nevents = kevent(env->queue, NULL, 0, eventlist,
493 			eventlist_size, NULL);
494 		/*
495 		 * we can only receive 1 event on success
496 		 */
497 		if (nevents == 1) {
498 			struct kevent *event_data;
499 			event_data = &eventlist[0];
500 
501 			if (event_data->ident == env->sockfd) {
502 				for (i = 0; i < event_data->data; ++i)
503 				    accept_connection(event_data, env, config);
504 
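				/*
				 * The listening socket was registered with
				 * EV_ONESHOT, so re-arm its read event after
				 * draining the pending connections.
				 */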
505 				EV_SET(eventlist, s_runtime_env->sockfd,
506 				    EVFILT_READ, EV_ADD | EV_ONESHOT,
507 				    0, 0, 0);
508 				memset(&timeout, 0,
509 				    sizeof(struct timespec));
510 				kevent(s_runtime_env->queue, eventlist,
511 				    1, NULL, 0, &timeout);
512 
513 			} else {
514 				switch (event_data->filter) {
515 				case EVFILT_READ:
516 				case EVFILT_WRITE:
517 					process_socket_event(event_data,
518 						env, config);
519 					break;
520 				case EVFILT_TIMER:
521 					process_timer_event(event_data,
522 						env, config);
523 					break;
524 				default:
525 					break;
526 				}
527 			}
528 		} else {
529 			/* this branch should currently never be executed */
530 		}
531 	}
532 
533 	TRACE_MSG("<= processing_loop");
534 }
535 
536 /*
537  * Wrapper around the processing loop function. It sets the thread signal mask
538  * to block SIGPIPE (which can be raised if a client misbehaves).
539  */
540 static void *
541 processing_thread(void *data)
542 {
543 	struct processing_thread_args	*args;
544 	sigset_t new;
545 
546 	TRACE_MSG("=> processing_thread");
547 	args = (struct processing_thread_args *)data;
548 
549 	sigemptyset(&new);
550 	sigaddset(&new, SIGPIPE);
551 	if (pthread_sigmask(SIG_BLOCK, &new, NULL) != 0)
552 		LOG_ERR_1("processing thread",
553 			"thread can't block the SIGPIPE signal");
554 
555 	processing_loop(args->the_cache, args->the_runtime_env,
556 		args->the_configuration);
557 	free(args);
558 	TRACE_MSG("<= processing_thread");
559 
560 	return (NULL);
561 }
562 
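/*
 * Returns the current monotonic time with one-second resolution. It is used
 * as the cache's time source and for computing query_state lifetimes, so it
 * is not affected by wall-clock adjustments.
 */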
563 void
564 get_time_func(struct timeval *time)
565 {
566 	struct timespec res;
567 	memset(&res, 0, sizeof(struct timespec));
568 	clock_gettime(CLOCK_MONOTONIC, &res);
569 
570 	time->tv_sec = res.tv_sec;
571 	time->tv_usec = 0;
572 }
573 
574 /*
575  * The idea of _nss_cache_cycle_prevention_function is that nsdispatch will
576  * search for this symbol in the executable. The symbol marks the executable
577  * as the caching daemon itself. So, if it exists, nsdispatch won't try to
578  * connect to the caching daemon and will just ignore the 'cache' source in
579  * nsswitch.conf. This prevents lookup cycles when the daemon performs
580  * requests on its own behalf.
581  */
582 void
583 _nss_cache_cycle_prevention_function(void)
584 {
585 }
586 
587 int
588 main(int argc, char *argv[])
589 {
590 	struct processing_thread_args *thread_args;
591 	pthread_t *threads;
592 
593 	struct pidfh *pidfile;
594 	pid_t pid;
595 
596 	char const *config_file;
597 	char const *error_str;
598 	int error_line;
599 	int i, res;
600 
601 	int trace_mode_enabled;
602 	int force_single_threaded;
603 	int do_not_daemonize;
604 	int clear_user_cache_entries, clear_all_cache_entries;
605 	char *user_config_entry_name, *global_config_entry_name;
606 	int show_statistics;
607 	int daemon_mode, interactive_mode;
608 
609 
610 	/* by default all debug messages are omitted */
611 	TRACE_OFF();
612 
613 	/* parsing command line arguments */
614 	trace_mode_enabled = 0;
615 	force_single_threaded = 0;
616 	do_not_daemonize = 0;
617 	clear_user_cache_entries = 0;
618 	clear_all_cache_entries = 0;
619 	show_statistics = 0;
620 	user_config_entry_name = NULL;
621 	global_config_entry_name = NULL;
622 	while ((res = getopt(argc, argv, "nstdi:I:")) != -1) {
623 		switch (res) {
624 		case 'n':
625 			do_not_daemonize = 1;
626 			break;
627 		case 's':
628 			force_single_threaded = 1;
629 			break;
630 		case 't':
631 			trace_mode_enabled = 1;
632 			break;
633 		case 'i':
634 			clear_user_cache_entries = 1;
635 			if (optarg != NULL)
636 				if (strcmp(optarg, "all") != 0)
637 					user_config_entry_name = strdup(optarg);
638 			break;
639 		case 'I':
640 			clear_all_cache_entries = 1;
641 			if (optarg != NULL)
642 				if (strcmp(optarg, "all") != 0)
643 					global_config_entry_name =
644 						strdup(optarg);
645 			break;
646 		case 'd':
647 			show_statistics = 1;
648 			break;
649 		case '?':
650 		default:
651 			usage();
652 			/* NOT REACHED */
653 		}
654 	}
655 
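	/*
	 * The daemon-mode and interactive-mode (one-shot client) option
	 * groups are mutually exclusive; the checks below enforce that.
	 */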
656 	daemon_mode = do_not_daemonize | force_single_threaded |
657 		trace_mode_enabled;
658 	interactive_mode = clear_user_cache_entries | clear_all_cache_entries |
659 		show_statistics;
660 
661 	if ((daemon_mode != 0) && (interactive_mode != 0)) {
662 		LOG_ERR_1("main", "daemon mode and interactive mode arguments "
663 			"can't be used together");
664 		usage();
665 	}
666 
667 	if (interactive_mode != 0) {
668 		FILE *pidfin = fopen(DEFAULT_PIDFILE_PATH, "r");
669 		char pidbuf[256];
670 
671 		struct nscd_connection_params connection_params;
672 		nscd_connection connection;
673 
674 		int result;
675 
676 		if (pidfin == NULL)
677 			errx(EXIT_FAILURE, "There is no daemon running.");
678 
679 		memset(pidbuf, 0, sizeof(pidbuf));
680 		fread(pidbuf, sizeof(pidbuf) - 1, 1, pidfin);
681 		if (ferror(pidfin) != 0)
682 			errx(EXIT_FAILURE, "Can't read from pidfile.");
683 		fclose(pidfin);
684 
685 
686 		if (sscanf(pidbuf, "%d", &pid) != 1)
687 			errx(EXIT_FAILURE, "Invalid pidfile.");
688 		LOG_MSG_1("main", "daemon PID is %d", pid);
689 
690 
691 		memset(&connection_params, 0,
692 			sizeof(struct nscd_connection_params));
693 		connection_params.socket_path = DEFAULT_SOCKET_PATH;
694 		connection = open_nscd_connection__(&connection_params);
695 		if (connection == INVALID_NSCD_CONNECTION)
696 			errx(EXIT_FAILURE, "Can't connect to the daemon.");
697 
698 		if (clear_user_cache_entries != 0) {
699 			result = nscd_transform__(connection,
700 				user_config_entry_name, TT_USER);
701 			if (result != 0)
702 				LOG_MSG_1("main",
703 					"user cache transformation failed");
704 			else
705 				LOG_MSG_1("main",
706 					"user cache transformation "
707 					"succeeded");
708 		}
709 
710 		if (clear_all_cache_entries != 0) {
711 			if (geteuid() != 0)
712 				errx(EXIT_FAILURE, "Only root can initiate "
713 					"global cache transformation.");
714 
715 			result = nscd_transform__(connection,
716 				global_config_entry_name, TT_ALL);
717 			if (result != 0)
718 				LOG_MSG_1("main",
719 					"global cache transformation "
720 					"failed");
721 			else
722 				LOG_MSG_1("main",
723 					"global cache transformation "
724 					"succeeded");
725 		}
726 
727 		close_nscd_connection__(connection);
728 
729 		free(user_config_entry_name);
730 		free(global_config_entry_name);
731 		return (EXIT_SUCCESS);
732 	}
733 
734 	pidfile = pidfile_open(DEFAULT_PIDFILE_PATH, 0644, &pid);
735 	if (pidfile == NULL) {
736 		if (errno == EEXIST)
737 			errx(EXIT_FAILURE, "Daemon already running, pid: %d.",
738 				pid);
739 		warn("Cannot open or create pidfile");
740 	}
741 
742 	if (trace_mode_enabled == 1)
743 		TRACE_ON();
744 
745 	/* blocking the main thread from receiving SIGPIPE signal */
746 	sigblock(sigmask(SIGPIPE));
747 
748 	/* daemonization */
749 	if (do_not_daemonize == 0) {
750 		res = daemon(0, trace_mode_enabled == 0 ? 0 : 1);
751 		if (res != 0) {
752 			LOG_ERR_1("main", "can't daemonize myself: %s",
753 				strerror(errno));
754 			pidfile_remove(pidfile);
755 			goto fin;
756 		} else
757 			LOG_MSG_1("main", "successfully daemonized");
758 	}
759 
760 	pidfile_write(pidfile);
761 
762 	s_agent_table = init_agent_table();
763 	register_agent(s_agent_table, init_passwd_agent());
764 	register_agent(s_agent_table, init_passwd_mp_agent());
765 	register_agent(s_agent_table, init_group_agent());
766 	register_agent(s_agent_table, init_group_mp_agent());
767 	register_agent(s_agent_table, init_services_agent());
768 	register_agent(s_agent_table, init_services_mp_agent());
769 	LOG_MSG_1("main", "request agents registered successfully");
770 
771 	/*
772 	 * Hosts agent can't work properly until we have access to the
773 	 * appropriate dtab structures, which are used in nsdispatch
774 	 * calls
775 	 *
776 	 register_agent(s_agent_table, init_hosts_agent());
777 	*/
778 
779 	/* configuration initialization */
780 	s_configuration = init_configuration();
781 	fill_configuration_defaults(s_configuration);
782 
783 	error_str = NULL;
784 	error_line = 0;
785 	config_file = CONFIG_PATH;
786 
787 	res = parse_config_file(s_configuration, config_file, &error_str,
788 		&error_line);
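	/*
	 * Fall back to DEFAULT_CONFIG_PATH in the current directory only if
	 * CONFIG_PATH could not be read at all; a parse error in CONFIG_PATH
	 * (error_str set) is reported below instead.
	 */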
789 	if ((res != 0) && (error_str == NULL)) {
790 		config_file = DEFAULT_CONFIG_PATH;
791 		res = parse_config_file(s_configuration, config_file,
792 			&error_str, &error_line);
793 	}
794 
795 	if (res != 0) {
796 		if (error_str != NULL) {
797 			LOG_ERR_1("main", "error in configuration file(%s, %d): %s\n",
798 				config_file, error_line, error_str);
799 		} else {
800 			LOG_ERR_1("main", "no configuration file found "
801 				"- was looking for %s and %s",
802 				CONFIG_PATH, DEFAULT_CONFIG_PATH);
803 		}
804 		destroy_configuration(s_configuration);
805 		return (-1);
806 	}
807 
808 	if (force_single_threaded == 1)
809 		s_configuration->threads_num = 1;
810 
811 	/* cache initialization */
812 	s_cache = init_cache_(s_configuration);
813 	if (s_cache == NULL) {
814 		LOG_ERR_1("main", "can't initialize the cache");
815 		destroy_configuration(s_configuration);
816 		return (-1);
817 	}
818 
819 	/* runtime environment initialization */
820 	s_runtime_env = init_runtime_env(s_configuration);
821 	if (s_runtime_env == NULL) {
822 		LOG_ERR_1("main", "can't initialize the runtime environment");
823 		destroy_configuration(s_configuration);
824 		destroy_cache_(s_cache);
825 		return (-1);
826 	}
827 
828 	if (s_configuration->threads_num > 1) {
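		/*
		 * Each worker thread receives its own heap-allocated argument
		 * block, which is freed by processing_thread().
		 */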
829 		threads = (pthread_t *)calloc(1, sizeof(pthread_t) *
830 			s_configuration->threads_num);
831 		for (i = 0; i < s_configuration->threads_num; ++i) {
832 			thread_args = (struct processing_thread_args *)malloc(
833 				sizeof(struct processing_thread_args));
834 			thread_args->the_cache = s_cache;
835 			thread_args->the_runtime_env = s_runtime_env;
836 			thread_args->the_configuration = s_configuration;
837 
838 			LOG_MSG_1("main", "thread #%d was successfully created",
839 				i);
840 			pthread_create(&threads[i], NULL, processing_thread,
841 				thread_args);
842 
843 			thread_args = NULL;
844 		}
845 
846 		for (i = 0; i < s_configuration->threads_num; ++i)
847 			pthread_join(threads[i], NULL);
848 	} else {
849 		LOG_MSG_1("main", "working in single-threaded mode");
850 		processing_loop(s_cache, s_runtime_env, s_configuration);
851 	}
852 
853 fin:
854 	/* runtime environment destruction */
855 	destroy_runtime_env(s_runtime_env);
856 
857 	/* cache destruction */
858 	destroy_cache_(s_cache);
859 
860 	/* configuration destruction */
861 	destroy_configuration(s_configuration);
862 
863 	/* agents table destruction */
864 	destroy_agent_table(s_agent_table);
865 
866 	pidfile_remove(pidfile);
867 	return (EXIT_SUCCESS);
868 }
869