xref: /dragonfly/usr.sbin/nscd/query.c (revision 9f7604d7)
1 /*-
2  * Copyright (c) 2005 Michael Bushkov <bushman@rsu.ru>
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  *
26  * $FreeBSD: src/usr.sbin/nscd/query.c,v 1.5 2008/10/12 00:44:27 delphij Exp $
27  */
28 
29 #include <sys/types.h>
30 #include <sys/socket.h>
31 #include <sys/time.h>
32 #include <sys/event.h>
33 #include <sys/uio.h>
34 #include <assert.h>
35 #include <errno.h>
36 #include <nsswitch.h>
37 #include <stdio.h>
38 #include <stdlib.h>
39 #include <string.h>
40 #include "config.h"
41 #include "debug.h"
42 #include "query.h"
43 #include "log.h"
44 #include "mp_ws_query.h"
45 #include "mp_rs_query.h"
46 #include "singletons.h"
47 
48 static const char negative_data[1] = { 0 };
49 
50 extern	void get_time_func(struct timeval *);
51 
52 static	void clear_config_entry(struct configuration_entry *);
53 static	void clear_config_entry_part(struct configuration_entry *,
54 	const char *, size_t);
55 
56 static	int on_query_startup(struct query_state *);
57 static	void on_query_destroy(struct query_state *);
58 
59 static	int on_read_request_read1(struct query_state *);
60 static	int on_read_request_read2(struct query_state *);
61 static	int on_read_request_process(struct query_state *);
62 static	int on_read_response_write1(struct query_state *);
63 static	int on_read_response_write2(struct query_state *);
64 
65 static	int on_rw_mapper(struct query_state *);
66 
67 static	int on_transform_request_read1(struct query_state *);
68 static	int on_transform_request_read2(struct query_state *);
69 static	int on_transform_request_process(struct query_state *);
70 static	int on_transform_response_write1(struct query_state *);
71 
72 static	int on_write_request_read1(struct query_state *);
73 static	int on_write_request_read2(struct query_state *);
74 static	int on_negative_write_request_process(struct query_state *);
75 static	int on_write_request_process(struct query_state *);
76 static	int on_write_response_write1(struct query_state *);
77 
78 /*
79  * Clears the specified configuration entry (clears the cache for positive
80  * and negative entries) and also for all multipart entries.
81  */
82 static void
83 clear_config_entry(struct configuration_entry *config_entry)
84 {
85 	size_t i;
86 
87 	TRACE_IN(clear_config_entry);
88 	configuration_lock_entry(config_entry, CELT_POSITIVE);
89 	if (config_entry->positive_cache_entry != NULL)
90 		transform_cache_entry(
91 			config_entry->positive_cache_entry,
92 			CTT_CLEAR);
93 	configuration_unlock_entry(config_entry, CELT_POSITIVE);
94 
95 	configuration_lock_entry(config_entry, CELT_NEGATIVE);
96 	if (config_entry->negative_cache_entry != NULL)
97 		transform_cache_entry(
98 			config_entry->negative_cache_entry,
99 			CTT_CLEAR);
100 	configuration_unlock_entry(config_entry, CELT_NEGATIVE);
101 
102 	configuration_lock_entry(config_entry, CELT_MULTIPART);
103 	for (i = 0; i < config_entry->mp_cache_entries_size; ++i)
104 		transform_cache_entry(
105 			config_entry->mp_cache_entries[i],
106 			CTT_CLEAR);
107 	configuration_unlock_entry(config_entry, CELT_MULTIPART);
108 
109 	TRACE_OUT(clear_config_entry);
110 }
111 
112 /*
113  * Clears the specified configuration entry by deleting only the elements
114  * that are owned by the user with the specified eid_str.
115  */
116 static void
117 clear_config_entry_part(struct configuration_entry *config_entry,
118 	const char *eid_str, size_t eid_str_length)
119 {
120 	cache_entry *start, *finish, *mp_entry;
121 	TRACE_IN(clear_config_entry_part);
122 	configuration_lock_entry(config_entry, CELT_POSITIVE);
123 	if (config_entry->positive_cache_entry != NULL)
124 		transform_cache_entry_part(
125 			config_entry->positive_cache_entry,
126 			CTT_CLEAR, eid_str, eid_str_length, KPPT_LEFT);
127 	configuration_unlock_entry(config_entry, CELT_POSITIVE);
128 
129 	configuration_lock_entry(config_entry, CELT_NEGATIVE);
130 	if (config_entry->negative_cache_entry != NULL)
131 		transform_cache_entry_part(
132 			config_entry->negative_cache_entry,
133 			CTT_CLEAR, eid_str, eid_str_length, KPPT_LEFT);
134 	configuration_unlock_entry(config_entry, CELT_NEGATIVE);
135 
136 	configuration_lock_entry(config_entry, CELT_MULTIPART);
137 	if (configuration_entry_find_mp_cache_entries(config_entry,
138 		eid_str, &start, &finish) == 0) {
139 		for (mp_entry = start; mp_entry != finish; ++mp_entry)
140 			transform_cache_entry(*mp_entry, CTT_CLEAR);
141 	}
142 	configuration_unlock_entry(config_entry, CELT_MULTIPART);
143 
144 	TRACE_OUT(clear_config_entry_part);
145 }
146 
147 /*
148  * This function is assigned to the query_state structure on its creation.
149  * Its main purpose is to receive credentials from the client.
150  */
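/*
 * A sketch of the startup handshake, as implied by the recvmsg() call below:
 * the client is expected to send a single int (the element/request type) as
 * ordinary data with an SCM_CREDS control message attached, so that the
 * kernel fills in the cmsgcred structure with the sender's credentials; the
 * handler below validates that control message and dispatches on the type.
 */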
151 static int
152 on_query_startup(struct query_state *qstate)
153 {
154 	struct msghdr	cred_hdr;
155 	struct iovec	iov;
156 	struct cmsgcred *cred;
157 	int elem_type;
158 
159 	struct {
160 		struct cmsghdr	hdr;
161 		char cred[CMSG_SPACE(sizeof(struct cmsgcred))];
162 	} cmsg;
163 
164 	TRACE_IN(on_query_startup);
165 	assert(qstate != NULL);
166 
167 	memset(&cred_hdr, 0, sizeof(struct msghdr));
168 	cred_hdr.msg_iov = &iov;
169 	cred_hdr.msg_iovlen = 1;
170 	cred_hdr.msg_control = (caddr_t)&cmsg;
171 	cred_hdr.msg_controllen = CMSG_LEN(sizeof(struct cmsgcred));
172 
173 	memset(&iov, 0, sizeof(struct iovec));
174 	iov.iov_base = &elem_type;
175 	iov.iov_len = sizeof(int);
176 
177 	if (recvmsg(qstate->sockfd, &cred_hdr, 0) == -1) {
178 		TRACE_OUT(on_query_startup);
179 		return (-1);
180 	}
181 
182 	if (cmsg.hdr.cmsg_len < CMSG_LEN(sizeof(struct cmsgcred))
183 		|| cmsg.hdr.cmsg_level != SOL_SOCKET
184 		|| cmsg.hdr.cmsg_type != SCM_CREDS) {
185 		TRACE_OUT(on_query_startup);
186 		return (-1);
187 	}
188 
189 	cred = (struct cmsgcred *)CMSG_DATA(&cmsg);
190 	qstate->uid = cred->cmcred_uid;
191 	qstate->gid = cred->cmcred_gid;
192 
193 #if defined(NS_NSCD_EID_CHECKING) || defined(NS_STRICT_NSCD_EID_CHECKING)
194 /*
195  * This check is probably a bit redundant, since the per-user cache is always
196  * separated by the euid/egid pair.
197  */
198 	if (check_query_eids(qstate) != 0) {
199 #ifdef NS_STRICT_NSCD_EID_CHECKING
200 		TRACE_OUT(on_query_startup);
201 		return (-1);
202 #else
203 		if ((elem_type != CET_READ_REQUEST) &&
204 			(elem_type != CET_MP_READ_SESSION_REQUEST) &&
205 			(elem_type != CET_WRITE_REQUEST) &&
206 			(elem_type != CET_MP_WRITE_SESSION_REQUEST)) {
207 			TRACE_OUT(on_query_startup);
208 			return (-1);
209 		}
210 #endif
211 	}
212 #endif
213 
214 	switch (elem_type) {
215 	case CET_WRITE_REQUEST:
216 		qstate->process_func = on_write_request_read1;
217 		break;
218 	case CET_READ_REQUEST:
219 		qstate->process_func = on_read_request_read1;
220 		break;
221 	case CET_TRANSFORM_REQUEST:
222 		qstate->process_func = on_transform_request_read1;
223 		break;
224 	case CET_MP_WRITE_SESSION_REQUEST:
225 		qstate->process_func = on_mp_write_session_request_read1;
226 		break;
227 	case CET_MP_READ_SESSION_REQUEST:
228 		qstate->process_func = on_mp_read_session_request_read1;
229 		break;
230 	default:
231 		TRACE_OUT(on_query_startup);
232 		return (-1);
233 	}
234 
235 	qstate->kevent_watermark = 0;
236 	TRACE_OUT(on_query_startup);
237 	return (0);
238 }
239 
240 /*
241  * on_rw_mapper is used to process multiple read/write requests during
242  * one connection session. It is never called at the beginning (on query_state
243  * creation), as it does not process multipart requests and does not
244  * receive credentials.
245  */
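/*
 * Rough request flow for one connection, pieced together from the handlers
 * in this file: on_query_startup consumes the credentials and the first
 * element type, the matching *_read1/*_read2/*_process/*_write* handlers run
 * next, and the read/write response writers finally point process_func back
 * at on_rw_mapper so that further read or write requests can follow on the
 * same socket.
 */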
246 static int
247 on_rw_mapper(struct query_state *qstate)
248 {
249 	ssize_t	result;
250 	int	elem_type;
251 
252 	TRACE_IN(on_rw_mapper);
253 	if (qstate->kevent_watermark == 0) {
254 		qstate->kevent_watermark = sizeof(int);
255 	} else {
256 		result = qstate->read_func(qstate, &elem_type, sizeof(int));
257 		if (result != sizeof(int)) {
258 			TRACE_OUT(on_rw_mapper);
259 			return (-1);
260 		}
261 
262 		switch (elem_type) {
263 		case CET_WRITE_REQUEST:
264 			qstate->kevent_watermark = sizeof(size_t);
265 			qstate->process_func = on_write_request_read1;
266 		break;
267 		case CET_READ_REQUEST:
268 			qstate->kevent_watermark = sizeof(size_t);
269 			qstate->process_func = on_read_request_read1;
270 		break;
271 		default:
272 			TRACE_OUT(on_rw_mapper);
273 			return (-1);
274 		break;
275 		}
276 	}
277 	TRACE_OUT(on_rw_mapper);
278 	return (0);
279 }
280 
281 /*
282  * The default query_destroy function
283  */
284 static void
285 on_query_destroy(struct query_state *qstate)
286 {
287 
288 	TRACE_IN(on_query_destroy);
289 	finalize_comm_element(&qstate->response);
290 	finalize_comm_element(&qstate->request);
291 	TRACE_OUT(on_query_destroy);
292 }
293 
294 /*
295  * The functions below are used to process write requests.
296  * - on_write_request_read1 and on_write_request_read2 read the request itself
297  * - on_write_request_process processes it (if the client requests to
298  *    cache the negative result, on_negative_write_request_process is used)
299  * - on_write_response_write1 sends the response
300  */
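/*
 * On-the-wire layout, as implied by the reads and writes in the handlers
 * below (the int element type has already been consumed by on_query_startup
 * or on_rw_mapper; all fields are in host byte order):
 *
 *	request:  size_t entry_length, size_t cache_key_size, size_t data_size,
 *		  then the entry name, cache key and data bytes
 *	response: int error_code
 *
 * A data_size of zero routes the request to on_negative_write_request_process,
 * which stores the shared negative_data marker instead of client-supplied data.
 */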
301 static int
302 on_write_request_read1(struct query_state *qstate)
303 {
304 	struct cache_write_request	*write_request;
305 	ssize_t	result;
306 
307 	TRACE_IN(on_write_request_read1);
308 	if (qstate->kevent_watermark == 0)
309 		qstate->kevent_watermark = sizeof(size_t) * 3;
310 	else {
311 		init_comm_element(&qstate->request, CET_WRITE_REQUEST);
312 		write_request = get_cache_write_request(&qstate->request);
313 
314 		result = qstate->read_func(qstate, &write_request->entry_length,
315 			sizeof(size_t));
316 		result += qstate->read_func(qstate,
317 			&write_request->cache_key_size, sizeof(size_t));
318 		result += qstate->read_func(qstate,
319 			&write_request->data_size, sizeof(size_t));
320 
321 		if (result != sizeof(size_t) * 3) {
322 			TRACE_OUT(on_write_request_read1);
323 			return (-1);
324 		}
325 
326 		if (BUFSIZE_INVALID(write_request->entry_length) ||
327 			BUFSIZE_INVALID(write_request->cache_key_size) ||
328 			(BUFSIZE_INVALID(write_request->data_size) &&
329 			(write_request->data_size != 0))) {
330 			TRACE_OUT(on_write_request_read1);
331 			return (-1);
332 		}
333 
334 		write_request->entry = (char *)calloc(1,
335 			write_request->entry_length + 1);
336 		assert(write_request->entry != NULL);
337 
338 		write_request->cache_key = (char *)calloc(1,
339 			write_request->cache_key_size +
340 			qstate->eid_str_length);
341 		assert(write_request->cache_key != NULL);
342 		memcpy(write_request->cache_key, qstate->eid_str,
343 			qstate->eid_str_length);
344 
345 		if (write_request->data_size != 0) {
346 			write_request->data = (char *)calloc(1,
347 				write_request->data_size);
348 			assert(write_request->data != NULL);
349 		}
350 
351 		qstate->kevent_watermark = write_request->entry_length +
352 			write_request->cache_key_size +
353 			write_request->data_size;
354 		qstate->process_func = on_write_request_read2;
355 	}
356 
357 	TRACE_OUT(on_write_request_read1);
358 	return (0);
359 }
360 
361 static int
362 on_write_request_read2(struct query_state *qstate)
363 {
364 	struct cache_write_request	*write_request;
365 	ssize_t	result;
366 
367 	TRACE_IN(on_write_request_read2);
368 	write_request = get_cache_write_request(&qstate->request);
369 
370 	result = qstate->read_func(qstate, write_request->entry,
371 		write_request->entry_length);
372 	result += qstate->read_func(qstate, write_request->cache_key +
373 		qstate->eid_str_length, write_request->cache_key_size);
374 	if (write_request->data_size != 0)
375 		result += qstate->read_func(qstate, write_request->data,
376 			write_request->data_size);
377 
378 	if (result != qstate->kevent_watermark) {
379 		TRACE_OUT(on_write_request_read2);
380 		return (-1);
381 	}
382 	write_request->cache_key_size += qstate->eid_str_length;
383 
384 	qstate->kevent_watermark = 0;
385 	if (write_request->data_size != 0)
386 		qstate->process_func = on_write_request_process;
387 	else
388 		qstate->process_func = on_negative_write_request_process;
389 	TRACE_OUT(on_write_request_read2);
390 	return (0);
391 }
392 
393 static int
394 on_write_request_process(struct query_state *qstate)
395 {
396 	struct cache_write_request	*write_request;
397 	struct cache_write_response	*write_response;
398 	cache_entry c_entry;
399 
400 	TRACE_IN(on_write_request_process);
401 	init_comm_element(&qstate->response, CET_WRITE_RESPONSE);
402 	write_response = get_cache_write_response(&qstate->response);
403 	write_request = get_cache_write_request(&qstate->request);
404 
405 	qstate->config_entry = configuration_find_entry(
406 		s_configuration, write_request->entry);
407 
408 	if (qstate->config_entry == NULL) {
409 		write_response->error_code = ENOENT;
410 
411 		LOG_ERR_2("write_request", "can't find configuration"
412 		    " entry '%s'. aborting request", write_request->entry);
413 		goto fin;
414 	}
415 
416 	if (qstate->config_entry->enabled == 0) {
417 		write_response->error_code = EACCES;
418 
419 		LOG_ERR_2("write_request",
420 			"configuration entry '%s' is disabled",
421 			write_request->entry);
422 		goto fin;
423 	}
424 
425 	if (qstate->config_entry->perform_actual_lookups != 0) {
426 		write_response->error_code = EOPNOTSUPP;
427 
428 		LOG_ERR_2("write_request",
429 			"entry '%s' performs lookups by itself: "
430 			"can't write to it", write_request->entry);
431 		goto fin;
432 	}
433 
434 	configuration_lock_rdlock(s_configuration);
435 	c_entry = find_cache_entry(s_cache,
436 		qstate->config_entry->positive_cache_params.entry_name);
437 	configuration_unlock(s_configuration);
438 	if (c_entry != NULL) {
439 		configuration_lock_entry(qstate->config_entry, CELT_POSITIVE);
440 		qstate->config_entry->positive_cache_entry = c_entry;
441 		write_response->error_code = cache_write(c_entry,
442 			write_request->cache_key,
443 			write_request->cache_key_size,
444 			write_request->data,
445 			write_request->data_size);
446 		configuration_unlock_entry(qstate->config_entry, CELT_POSITIVE);
447 
448 		if ((qstate->config_entry->common_query_timeout.tv_sec != 0) ||
449 		    (qstate->config_entry->common_query_timeout.tv_usec != 0))
450 			memcpy(&qstate->timeout,
451 				&qstate->config_entry->common_query_timeout,
452 				sizeof(struct timeval));
453 
454 	} else
455 		write_response->error_code = -1;
456 
457 fin:
458 	qstate->kevent_filter = EVFILT_WRITE;
459 	qstate->kevent_watermark = sizeof(int);
460 	qstate->process_func = on_write_response_write1;
461 
462 	TRACE_OUT(on_write_request_process);
463 	return (0);
464 }
465 
466 static int
467 on_negative_write_request_process(struct query_state *qstate)
468 {
469 	struct cache_write_request	*write_request;
470 	struct cache_write_response	*write_response;
471 	cache_entry c_entry;
472 
473 	TRACE_IN(on_negative_write_request_process);
474 	init_comm_element(&qstate->response, CET_WRITE_RESPONSE);
475 	write_response = get_cache_write_response(&qstate->response);
476 	write_request = get_cache_write_request(&qstate->request);
477 
478 	qstate->config_entry = configuration_find_entry(
479 		s_configuration, write_request->entry);
480 
481 	if (qstate->config_entry == NULL) {
482 		write_response->error_code = ENOENT;
483 
484 		LOG_ERR_2("negative_write_request",
485 			"can't find configuration"
486 			" entry '%s'. aborting request", write_request->entry);
487 		goto fin;
488 	}
489 
490 	if (qstate->config_entry->enabled == 0) {
491 		write_response->error_code = EACCES;
492 
493 		LOG_ERR_2("negative_write_request",
494 			"configuration entry '%s' is disabled",
495 			write_request->entry);
496 		goto fin;
497 	}
498 
499 	if (qstate->config_entry->perform_actual_lookups != 0) {
500 		write_response->error_code = EOPNOTSUPP;
501 
502 		LOG_ERR_2("negative_write_request",
503 			"entry '%s' performs lookups by itself: "
504 			"can't write to it", write_request->entry);
505 		goto fin;
506 	} else {
507 #ifdef NS_NSCD_EID_CHECKING
508 		if (check_query_eids(qstate) != 0) {
509 			write_response->error_code = EPERM;
510 			goto fin;
511 		}
512 #endif
513 	}
514 
515 	configuration_lock_rdlock(s_configuration);
516 	c_entry = find_cache_entry(s_cache,
517 		qstate->config_entry->negative_cache_params.entry_name);
518 	configuration_unlock(s_configuration);
519 	if (c_entry != NULL) {
520 		configuration_lock_entry(qstate->config_entry, CELT_NEGATIVE);
521 		qstate->config_entry->negative_cache_entry = c_entry;
522 		write_response->error_code = cache_write(c_entry,
523 			write_request->cache_key,
524 			write_request->cache_key_size,
525 			negative_data,
526 			sizeof(negative_data));
527 		configuration_unlock_entry(qstate->config_entry, CELT_NEGATIVE);
528 
529 		if ((qstate->config_entry->common_query_timeout.tv_sec != 0) ||
530 		    (qstate->config_entry->common_query_timeout.tv_usec != 0))
531 			memcpy(&qstate->timeout,
532 				&qstate->config_entry->common_query_timeout,
533 				sizeof(struct timeval));
534 	} else
535 		write_response->error_code = -1;
536 
537 fin:
538 	qstate->kevent_filter = EVFILT_WRITE;
539 	qstate->kevent_watermark = sizeof(int);
540 	qstate->process_func = on_write_response_write1;
541 
542 	TRACE_OUT(on_negative_write_request_process);
543 	return (0);
544 }
545 
546 static int
547 on_write_response_write1(struct query_state *qstate)
548 {
549 	struct cache_write_response	*write_response;
550 	ssize_t	result;
551 
552 	TRACE_IN(on_write_response_write1);
553 	write_response = get_cache_write_response(&qstate->response);
554 	result = qstate->write_func(qstate, &write_response->error_code,
555 		sizeof(int));
556 	if (result != sizeof(int)) {
557 		TRACE_OUT(on_write_response_write1);
558 		return (-1);
559 	}
560 
561 	finalize_comm_element(&qstate->request);
562 	finalize_comm_element(&qstate->response);
563 
564 	qstate->kevent_watermark = sizeof(int);
565 	qstate->kevent_filter = EVFILT_READ;
566 	qstate->process_func = on_rw_mapper;
567 
568 	TRACE_OUT(on_write_response_write1);
569 	return (0);
570 }
571 
572 /*
573  * The functions below are used to process read requests.
574  * - on_read_request_read1 and on_read_request_read2 read the request itself
575  * - on_read_request_process processes it
576  * - on_read_response_write1 and on_read_response_write2 send the response
577  */
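/*
 * On-the-wire layout, as implied by the handlers below:
 *
 *	request:  size_t entry_length, size_t cache_key_size,
 *		  then the entry name and cache key bytes
 *	response: int error_code, followed (only when error_code is 0) by
 *		  size_t data_size and the cached data bytes
 */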
578 static int
579 on_read_request_read1(struct query_state *qstate)
580 {
581 	struct cache_read_request *read_request;
582 	ssize_t	result;
583 
584 	TRACE_IN(on_read_request_read1);
585 	if (qstate->kevent_watermark == 0)
586 		qstate->kevent_watermark = sizeof(size_t) * 2;
587 	else {
588 		init_comm_element(&qstate->request, CET_READ_REQUEST);
589 		read_request = get_cache_read_request(&qstate->request);
590 
591 		result = qstate->read_func(qstate,
592 			&read_request->entry_length, sizeof(size_t));
593 		result += qstate->read_func(qstate,
594 			&read_request->cache_key_size, sizeof(size_t));
595 
596 		if (result != sizeof(size_t) * 2) {
597 			TRACE_OUT(on_read_request_read1);
598 			return (-1);
599 		}
600 
601 		if (BUFSIZE_INVALID(read_request->entry_length) ||
602 			BUFSIZE_INVALID(read_request->cache_key_size)) {
603 			TRACE_OUT(on_read_request_read1);
604 			return (-1);
605 		}
606 
607 		read_request->entry = (char *)calloc(1,
608 			read_request->entry_length + 1);
609 		assert(read_request->entry != NULL);
610 
611 		read_request->cache_key = (char *)calloc(1,
612 			read_request->cache_key_size +
613 			qstate->eid_str_length);
614 		assert(read_request->cache_key != NULL);
615 		memcpy(read_request->cache_key, qstate->eid_str,
616 			qstate->eid_str_length);
617 
618 		qstate->kevent_watermark = read_request->entry_length +
619 			read_request->cache_key_size;
620 		qstate->process_func = on_read_request_read2;
621 	}
622 
623 	TRACE_OUT(on_read_request_read1);
624 	return (0);
625 }
626 
627 static int
628 on_read_request_read2(struct query_state *qstate)
629 {
630 	struct cache_read_request	*read_request;
631 	ssize_t	result;
632 
633 	TRACE_IN(on_read_request_read2);
634 	read_request = get_cache_read_request(&qstate->request);
635 
636 	result = qstate->read_func(qstate, read_request->entry,
637 		read_request->entry_length);
638 	result += qstate->read_func(qstate,
639 		read_request->cache_key + qstate->eid_str_length,
640 		read_request->cache_key_size);
641 
642 	if (result != qstate->kevent_watermark) {
643 		TRACE_OUT(on_read_request_read2);
644 		return (-1);
645 	}
646 	read_request->cache_key_size += qstate->eid_str_length;
647 
648 	qstate->kevent_watermark = 0;
649 	qstate->process_func = on_read_request_process;
650 
651 	TRACE_OUT(on_read_request_read2);
652 	return (0);
653 }
654 
655 static int
656 on_read_request_process(struct query_state *qstate)
657 {
658 	struct cache_read_request *read_request;
659 	struct cache_read_response *read_response;
660 	cache_entry	c_entry, neg_c_entry;
661 
662 	struct agent	*lookup_agent;
663 	struct common_agent *c_agent;
664 	int res;
665 
666 	TRACE_IN(on_read_request_process);
667 	init_comm_element(&qstate->response, CET_READ_RESPONSE);
668 	read_response = get_cache_read_response(&qstate->response);
669 	read_request = get_cache_read_request(&qstate->request);
670 
671 	qstate->config_entry = configuration_find_entry(
672 		s_configuration, read_request->entry);
673 	if (qstate->config_entry == NULL) {
674 		read_response->error_code = ENOENT;
675 
676 		LOG_ERR_2("read_request",
677 			"can't find configuration "
678 			"entry '%s'. aborting request", read_request->entry);
679 		goto fin;
680 	}
681 
682 	if (qstate->config_entry->enabled == 0) {
683 		read_response->error_code = EACCES;
684 
685 		LOG_ERR_2("read_request",
686 			"configuration entry '%s' is disabled",
687 			read_request->entry);
688 		goto fin;
689 	}
690 
691 	/*
692 	 * if we perform lookups by ourselves, then we don't need to separate
693 	 * cache entries by euid and egid
694 	 */
695 	if (qstate->config_entry->perform_actual_lookups != 0)
696 		memset(read_request->cache_key, 0, qstate->eid_str_length);
697 	else {
698 #ifdef NS_NSCD_EID_CHECKING
699 		if (check_query_eids(qstate) != 0) {
700 			/* if the lookup is not self-performing, check the client's euid/egid */
701 			read_response->error_code = EPERM;
702 			goto fin;
703 		}
704 #endif
705 	}
706 
707 	configuration_lock_rdlock(s_configuration);
708 	c_entry = find_cache_entry(s_cache,
709 		qstate->config_entry->positive_cache_params.entry_name);
710 	neg_c_entry = find_cache_entry(s_cache,
711 		qstate->config_entry->negative_cache_params.entry_name);
712 	configuration_unlock(s_configuration);
713 	if ((c_entry != NULL) && (neg_c_entry != NULL)) {
714 		configuration_lock_entry(qstate->config_entry, CELT_POSITIVE);
715 		qstate->config_entry->positive_cache_entry = c_entry;
716 		read_response->error_code = cache_read(c_entry,
717 			read_request->cache_key,
718 			read_request->cache_key_size, NULL,
719 			&read_response->data_size);
720 
721 		if (read_response->error_code == -2) {
722 			read_response->data = (char *)malloc(
723 				read_response->data_size);
724 			assert(read_response->data != NULL);
725 			read_response->error_code = cache_read(c_entry,
726 				read_request->cache_key,
727 				read_request->cache_key_size,
728 				read_response->data,
729 				&read_response->data_size);
730 		}
731 		configuration_unlock_entry(qstate->config_entry, CELT_POSITIVE);
732 
733 		configuration_lock_entry(qstate->config_entry, CELT_NEGATIVE);
734 		qstate->config_entry->negative_cache_entry = neg_c_entry;
735 		if (read_response->error_code == -1) {
736 			read_response->error_code = cache_read(neg_c_entry,
737 				read_request->cache_key,
738 				read_request->cache_key_size, NULL,
739 				&read_response->data_size);
740 
741 			if (read_response->error_code == -2) {
742 				read_response->error_code = 0;
743 				read_response->data = NULL;
744 				read_response->data_size = 0;
745 			}
746 		}
747 		configuration_unlock_entry(qstate->config_entry, CELT_NEGATIVE);
748 
749 		if ((read_response->error_code == -1) &&
750 			(qstate->config_entry->perform_actual_lookups != 0)) {
751 			free(read_response->data);
752 			read_response->data = NULL;
753 			read_response->data_size = 0;
754 
755 			lookup_agent = find_agent(s_agent_table,
756 				read_request->entry, COMMON_AGENT);
757 
758 			if ((lookup_agent != NULL) &&
759 			(lookup_agent->type == COMMON_AGENT)) {
760 				c_agent = (struct common_agent *)lookup_agent;
761 				res = c_agent->lookup_func(
762 					read_request->cache_key +
763 						qstate->eid_str_length,
764 					read_request->cache_key_size -
765 						qstate->eid_str_length,
766 					&read_response->data,
767 					&read_response->data_size);
768 
769 				if (res == NS_SUCCESS) {
770 					read_response->error_code = 0;
771 					configuration_lock_entry(
772 						qstate->config_entry,
773 						CELT_POSITIVE);
774 					cache_write(c_entry,
775 						read_request->cache_key,
776 						read_request->cache_key_size,
777 						read_response->data,
778 						read_response->data_size);
779 					configuration_unlock_entry(
780 						qstate->config_entry,
781 						CELT_POSITIVE);
782 				} else if ((res == NS_NOTFOUND) ||
783 					  (res == NS_RETURN)) {
784 					configuration_lock_entry(
785 						  qstate->config_entry,
786 						  CELT_NEGATIVE);
787 					cache_write(neg_c_entry,
788 						read_request->cache_key,
789 						read_request->cache_key_size,
790 						negative_data,
791 						sizeof(negative_data));
792 					configuration_unlock_entry(
793 						  qstate->config_entry,
794 						  CELT_NEGATIVE);
795 
796 					read_response->error_code = 0;
797 					read_response->data = NULL;
798 					read_response->data_size = 0;
799 				}
800 			}
801 		}
802 
803 		if ((qstate->config_entry->common_query_timeout.tv_sec != 0) ||
804 		    (qstate->config_entry->common_query_timeout.tv_usec != 0))
805 			memcpy(&qstate->timeout,
806 				&qstate->config_entry->common_query_timeout,
807 				sizeof(struct timeval));
808 	} else
809 		read_response->error_code = -1;
810 
811 fin:
812 	qstate->kevent_filter = EVFILT_WRITE;
813 	if (read_response->error_code == 0)
814 		qstate->kevent_watermark = sizeof(int) + sizeof(size_t);
815 	else
816 		qstate->kevent_watermark = sizeof(int);
817 	qstate->process_func = on_read_response_write1;
818 
819 	TRACE_OUT(on_read_request_process);
820 	return (0);
821 }
822 
823 static int
824 on_read_response_write1(struct query_state *qstate)
825 {
826 	struct cache_read_response	*read_response;
827 	ssize_t	result;
828 
829 	TRACE_IN(on_read_response_write1);
830 	read_response = get_cache_read_response(&qstate->response);
831 
832 	result = qstate->write_func(qstate, &read_response->error_code,
833 		sizeof(int));
834 
835 	if (read_response->error_code == 0) {
836 		result += qstate->write_func(qstate, &read_response->data_size,
837 			sizeof(size_t));
838 		if (result != qstate->kevent_watermark) {
839 			TRACE_OUT(on_read_response_write1);
840 			return (-1);
841 		}
842 
843 		qstate->kevent_watermark = read_response->data_size;
844 		qstate->process_func = on_read_response_write2;
845 	} else {
846 		if (result != qstate->kevent_watermark) {
847 			TRACE_OUT(on_read_response_write1);
848 			return (-1);
849 		}
850 
851 		qstate->kevent_watermark = 0;
852 		qstate->process_func = NULL;
853 	}
854 
855 	TRACE_OUT(on_read_response_write1);
856 	return (0);
857 }
858 
859 static int
860 on_read_response_write2(struct query_state *qstate)
861 {
862 	struct cache_read_response	*read_response;
863 	ssize_t	result;
864 
865 	TRACE_IN(on_read_response_write2);
866 	read_response = get_cache_read_response(&qstate->response);
867 	if (read_response->data_size > 0) {
868 		result = qstate->write_func(qstate, read_response->data,
869 			read_response->data_size);
870 		if (result != qstate->kevent_watermark) {
871 			TRACE_OUT(on_read_response_write2);
872 			return (-1);
873 		}
874 	}
875 
876 	finalize_comm_element(&qstate->request);
877 	finalize_comm_element(&qstate->response);
878 
879 	qstate->kevent_watermark = sizeof(int);
880 	qstate->kevent_filter = EVFILT_READ;
881 	qstate->process_func = on_rw_mapper;
882 	TRACE_OUT(on_read_response_write2);
883 	return (0);
884 }
885 
886 /*
887  * The functions below are used to process transform requests.
888  * - on_transform_request_read1 and on_transform_request_read2 read the
889  *   request itself
890  * - on_transform_request_process processes it
891  * - on_transform_response_write1 sends the response
892  */
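/*
 * On-the-wire layout, as implied by the handlers below:
 *
 *	request:  size_t entry_length, int transformation_type (TT_USER or
 *		  TT_ALL), then the entry name if entry_length is non-zero
 *		  (a zero entry_length means "apply to every entry")
 *	response: int error_code
 */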
893 static int
894 on_transform_request_read1(struct query_state *qstate)
895 {
896 	struct cache_transform_request *transform_request;
897 	ssize_t	result;
898 
899 	TRACE_IN(on_transform_request_read1);
900 	if (qstate->kevent_watermark == 0)
901 		qstate->kevent_watermark = sizeof(size_t) + sizeof(int);
902 	else {
903 		init_comm_element(&qstate->request, CET_TRANSFORM_REQUEST);
904 		transform_request =
905 			get_cache_transform_request(&qstate->request);
906 
907 		result = qstate->read_func(qstate,
908 			&transform_request->entry_length, sizeof(size_t));
909 		result += qstate->read_func(qstate,
910 			&transform_request->transformation_type, sizeof(int));
911 
912 		if (result != sizeof(size_t) + sizeof(int)) {
913 			TRACE_OUT(on_transform_request_read1);
914 			return (-1);
915 		}
916 
917 		if ((transform_request->transformation_type != TT_USER) &&
918 		    (transform_request->transformation_type != TT_ALL)) {
919 			TRACE_OUT(on_transform_request_read1);
920 			return (-1);
921 		}
922 
923 		if (transform_request->entry_length != 0) {
924 			if (BUFSIZE_INVALID(transform_request->entry_length)) {
925 				TRACE_OUT(on_transform_request_read1);
926 				return (-1);
927 			}
928 
929 			transform_request->entry = (char *)calloc(1,
930 				transform_request->entry_length + 1);
931 			assert(transform_request->entry != NULL);
932 
933 			qstate->process_func = on_transform_request_read2;
934 		} else
935 			qstate->process_func = on_transform_request_process;
936 
937 		qstate->kevent_watermark = transform_request->entry_length;
938 	}
939 
940 	TRACE_OUT(on_transform_request_read1);
941 	return (0);
942 }
943 
944 static int
945 on_transform_request_read2(struct query_state *qstate)
946 {
947 	struct cache_transform_request	*transform_request;
948 	ssize_t	result;
949 
950 	TRACE_IN(on_transform_request_read2);
951 	transform_request = get_cache_transform_request(&qstate->request);
952 
953 	result = qstate->read_func(qstate, transform_request->entry,
954 		transform_request->entry_length);
955 
956 	if (result != qstate->kevent_watermark) {
957 		TRACE_OUT(on_transform_request_read2);
958 		return (-1);
959 	}
960 
961 	qstate->kevent_watermark = 0;
962 	qstate->process_func = on_transform_request_process;
963 
964 	TRACE_OUT(on_transform_request_read2);
965 	return (0);
966 }
967 
968 static int
969 on_transform_request_process(struct query_state *qstate)
970 {
971 	struct cache_transform_request *transform_request;
972 	struct cache_transform_response *transform_response;
973 	struct configuration_entry *config_entry;
974 	size_t	i, size;
975 
976 	TRACE_IN(on_transform_request_process);
977 	init_comm_element(&qstate->response, CET_TRANSFORM_RESPONSE);
978 	transform_response = get_cache_transform_response(&qstate->response);
979 	transform_request = get_cache_transform_request(&qstate->request);
980 
981 	switch (transform_request->transformation_type) {
982 	case TT_USER:
983 		if (transform_request->entry == NULL) {
984 			size = configuration_get_entries_size(s_configuration);
985 			for (i = 0; i < size; ++i) {
986 			    config_entry = configuration_get_entry(
987 				s_configuration, i);
988 
989 			    if (config_entry->perform_actual_lookups == 0)
990 				clear_config_entry_part(config_entry,
991 				    qstate->eid_str, qstate->eid_str_length);
992 			}
993 		} else {
994 			qstate->config_entry = configuration_find_entry(
995 				s_configuration, transform_request->entry);
996 
997 			if (qstate->config_entry == NULL) {
998 				LOG_ERR_2("transform_request",
999 					"can't find configuration"
1000 					" entry '%s'. aborting request",
1001 					transform_request->entry);
1002 				transform_response->error_code = -1;
1003 				goto fin;
1004 			}
1005 
1006 			if (qstate->config_entry->perform_actual_lookups != 0) {
1007 				LOG_ERR_2("transform_request",
1008 					"can't transform the cache entry %s"
1009 					", because it is used for actual lookups",
1010 					transform_request->entry);
1011 				transform_response->error_code = -1;
1012 				goto fin;
1013 			}
1014 
1015 			clear_config_entry_part(qstate->config_entry,
1016 				qstate->eid_str, qstate->eid_str_length);
1017 		}
1018 		break;
1019 	case TT_ALL:
1020 		if (qstate->euid != 0)
1021 			transform_response->error_code = -1;
1022 		else {
1023 			if (transform_request->entry == NULL) {
1024 				size = configuration_get_entries_size(
1025 					s_configuration);
1026 				for (i = 0; i < size; ++i) {
1027 				    clear_config_entry(
1028 					configuration_get_entry(
1029 						s_configuration, i));
1030 				}
1031 			} else {
1032 				qstate->config_entry = configuration_find_entry(
1033 					s_configuration,
1034 					transform_request->entry);
1035 
1036 				if (qstate->config_entry == NULL) {
1037 					LOG_ERR_2("transform_request",
1038 						"can't find configuration"
1039 						" entry '%s'. aborting request",
1040 						transform_request->entry);
1041 					transform_response->error_code = -1;
1042 					goto fin;
1043 				}
1044 
1045 				clear_config_entry(qstate->config_entry);
1046 			}
1047 		}
1048 		break;
1049 	default:
1050 		transform_response->error_code = -1;
1051 	}
1052 
1053 fin:
1054 	qstate->kevent_watermark = 0;
1055 	qstate->process_func = on_transform_response_write1;
1056 	TRACE_OUT(on_transform_request_process);
1057 	return (0);
1058 }
1059 
1060 static int
1061 on_transform_response_write1(struct query_state *qstate)
1062 {
1063 	struct cache_transform_response	*transform_response;
1064 	ssize_t	result;
1065 
1066 	TRACE_IN(on_transform_response_write1);
1067 	transform_response = get_cache_transform_response(&qstate->response);
1068 	result = qstate->write_func(qstate, &transform_response->error_code,
1069 		sizeof(int));
1070 	if (result != sizeof(int)) {
1071 		TRACE_OUT(on_transform_response_write1);
1072 		return (-1);
1073 	}
1074 
1075 	finalize_comm_element(&qstate->request);
1076 	finalize_comm_element(&qstate->response);
1077 
1078 	qstate->kevent_watermark = 0;
1079 	qstate->process_func = NULL;
1080 	TRACE_OUT(on_transform_response_write1);
1081 	return (0);
1082 }
1083 
1084 /*
1085  * Checks if the client's euid and egid do not differ from its uid and gid.
1086  * Returns 0 on success.
1087  */
1088 int
1089 check_query_eids(struct query_state *qstate)
1090 {
1091 
1092 	return ((qstate->uid != qstate->euid) || (qstate->gid != qstate->egid) ? -1 : 0);
1093 }
1094 
1095 /*
1096  * Uses the qstate fields to process an "alternate" read - when the buffer is
1097  * too large to be received during one socket read operation
1098  */
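/*
 * A note on the buffered I/O pair below, assuming (the setup is not part of
 * this file) that the caller switches read_func/write_func to these helpers
 * after allocating io_buffer: query_io_buffer_read() hands out slices of the
 * pre-filled buffer and restores the plain socket functions once it is
 * drained, while query_io_buffer_write() accumulates output and raises
 * use_alternate_io when the buffer becomes full.
 */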
1099 ssize_t
1100 query_io_buffer_read(struct query_state *qstate, void *buf, size_t nbytes)
1101 {
1102 	ssize_t	result;
1103 
1104 	TRACE_IN(query_io_buffer_read);
1105 	if ((qstate->io_buffer_size == 0) || (qstate->io_buffer == NULL))
1106 		return (-1);
1107 
1108 	if (nbytes < qstate->io_buffer + qstate->io_buffer_size -
1109 			qstate->io_buffer_p)
1110 		result = nbytes;
1111 	else
1112 		result = qstate->io_buffer + qstate->io_buffer_size -
1113 			qstate->io_buffer_p;
1114 
1115 	memcpy(buf, qstate->io_buffer_p, result);
1116 	qstate->io_buffer_p += result;
1117 
1118 	if (qstate->io_buffer_p == qstate->io_buffer + qstate->io_buffer_size) {
1119 		free(qstate->io_buffer);
1120 		qstate->io_buffer = NULL;
1121 
1122 		qstate->write_func = query_socket_write;
1123 		qstate->read_func = query_socket_read;
1124 	}
1125 
1126 	TRACE_OUT(query_io_buffer_read);
1127 	return (result);
1128 }
1129 
1130 /*
1131  * Uses the qstate fields to process an "alternate" write - when the buffer is
1132  * too large to be sent during one socket write operation
1133  */
1134 ssize_t
1135 query_io_buffer_write(struct query_state *qstate, const void *buf,
1136 	size_t nbytes)
1137 {
1138 	ssize_t	result;
1139 
1140 	TRACE_IN(query_io_buffer_write);
1141 	if ((qstate->io_buffer_size == 0) || (qstate->io_buffer == NULL))
1142 		return (-1);
1143 
1144 	if (nbytes < qstate->io_buffer + qstate->io_buffer_size -
1145 			qstate->io_buffer_p)
1146 		result = nbytes;
1147 	else
1148 		result = qstate->io_buffer + qstate->io_buffer_size -
1149 		qstate->io_buffer_p;
1150 
1151 	memcpy(qstate->io_buffer_p, buf, result);
1152 	qstate->io_buffer_p += result;
1153 
1154 	if (qstate->io_buffer_p == qstate->io_buffer + qstate->io_buffer_size) {
1155 		qstate->use_alternate_io = 1;
1156 		qstate->io_buffer_p = qstate->io_buffer;
1157 
1158 		qstate->write_func = query_socket_write;
1159 		qstate->read_func = query_socket_read;
1160 	}
1161 
1162 	TRACE_OUT(query_io_buffer_write);
1163 	return (result);
1164 }
1165 
1166 /*
1167  * The default "read" function, which reads data directly from the socket
1168  */
1169 ssize_t
1170 query_socket_read(struct query_state *qstate, void *buf, size_t nbytes)
1171 {
1172 	ssize_t	result;
1173 
1174 	TRACE_IN(query_socket_read);
1175 	if (qstate->socket_failed != 0) {
1176 		TRACE_OUT(query_socket_read);
1177 		return (-1);
1178 	}
1179 
1180 	result = read(qstate->sockfd, buf, nbytes);
1181 	if ((result == -1) || (result < nbytes))
1182 		qstate->socket_failed = 1;
1183 
1184 	TRACE_OUT(query_socket_read);
1185 	return (result);
1186 }
1187 
1188 /*
1189  * The default "write" function, which writes data directly to the socket
1190  */
1191 ssize_t
1192 query_socket_write(struct query_state *qstate, const void *buf, size_t nbytes)
1193 {
1194 	ssize_t	result;
1195 
1196 	TRACE_IN(query_socket_write);
1197 	if (qstate->socket_failed != 0) {
1198 		TRACE_OUT(query_socket_write);
1199 		return (-1);
1200 	}
1201 
1202 	result = write(qstate->sockfd, buf, nbytes);
1203 	if ((result == -1) || (result < nbytes))
1204 		qstate->socket_failed = 1;
1205 
1206 	TRACE_OUT(query_socket_write);
1207 	return (result);
1208 }
1209 
1210 /*
1211  * Initializes the query_state structure by filling it with the default values.
1212  */
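/*
 * A hypothetical usage sketch (the real accept loop lives outside this file):
 * the caller creates one query_state per connection and invokes process_func
 * on each kevent notification until it becomes NULL or returns -1.
 *
 *	qstate = init_query_state(fd, sizeof(int), euid, egid);
 *	...
 *	res = qstate->process_func(qstate);	(once per kevent notification)
 *	...
 *	destroy_query_state(qstate);
 */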
1213 struct query_state *
1214 init_query_state(int sockfd, size_t kevent_watermark, uid_t euid, gid_t egid)
1215 {
1216 	struct query_state	*retval;
1217 
1218 	TRACE_IN(init_query_state);
1219 	retval = (struct query_state *)calloc(1, sizeof(struct query_state));
1220 	assert(retval != NULL);
1221 
1222 	retval->sockfd = sockfd;
1223 	retval->kevent_filter = EVFILT_READ;
1224 	retval->kevent_watermark = kevent_watermark;
1225 
1226 	retval->euid = euid;
1227 	retval->egid = egid;
1228 	retval->uid = retval->gid = -1;
1229 
1230 	if (asprintf(&retval->eid_str, "%d_%d_", retval->euid,
1231 		retval->egid) == -1) {
1232 		free(retval);
1233 		return (NULL);
1234 	}
1235 	retval->eid_str_length = strlen(retval->eid_str);
1236 
1237 	init_comm_element(&retval->request, CET_UNDEFINED);
1238 	init_comm_element(&retval->response, CET_UNDEFINED);
1239 	retval->process_func = on_query_startup;
1240 	retval->destroy_func = on_query_destroy;
1241 
1242 	retval->write_func = query_socket_write;
1243 	retval->read_func = query_socket_read;
1244 
1245 	get_time_func(&retval->creation_time);
1246 	memcpy(&retval->timeout, &s_configuration->query_timeout,
1247 		sizeof(struct timeval));
1248 
1249 	TRACE_OUT(init_query_state);
1250 	return (retval);
1251 }
1252 
1253 void
1254 destroy_query_state(struct query_state *qstate)
1255 {
1256 
1257 	TRACE_IN(destroy_query_state);
1258 	if (qstate->eid_str != NULL)
1259 	    free(qstate->eid_str);
1260 
1261 	if (qstate->io_buffer != NULL)
1262 		free(qstate->io_buffer);
1263 
1264 	qstate->destroy_func(qstate);
1265 	free(qstate);
1266 	TRACE_OUT(destroy_query_state);
1267 }
1268