/*	$OpenBSD: kqueue.c,v 1.5 2002/07/10 14:41:31 art Exp $	*/

/*
 * Copyright 2000-2007 Niels Provos <provos@citi.umich.edu>
 * Copyright 2007-2012 Niels Provos and Nick Mathewson
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "event2/event-config.h"

#define _GNU_SOURCE

#include <sys/types.h>
#ifdef _EVENT_HAVE_SYS_TIME_H
#include <sys/time.h>
#endif
#include <sys/queue.h>
#include <sys/event.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <errno.h>
#ifdef _EVENT_HAVE_INTTYPES_H
#include <inttypes.h>
#endif

/* Some platforms apparently define the udata field of struct kevent as
 * intptr_t, whereas others define it as void*.  There doesn't seem to be an
 * easy way to tell them apart via autoconf, so we need to use OS macros. */
#if defined(_EVENT_HAVE_INTTYPES_H) && !defined(__OpenBSD__) && !defined(__FreeBSD__) && !defined(__darwin__) && !defined(__APPLE__)
#define PTR_TO_UDATA(x)	((intptr_t)(x))
#define INT_TO_UDATA(x) ((intptr_t)(x))
#else
#define PTR_TO_UDATA(x)	(x)
#define INT_TO_UDATA(x) ((void*)(x))
#endif

#include "event-internal.h"
#include "log-internal.h"
#include "evmap-internal.h"
#include "event2/thread.h"
#include "evthread-internal.h"
#include "changelist-internal.h"

#define NEVENT		64

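/* Per-event_base state for the kqueue backend: a staging array of changes
 * to hand to kevent(), the array that receives triggered events, and the
 * kqueue descriptor itself. */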
struct kqop {
	struct kevent *changes;
	int changes_size;

	struct kevent *events;
	int events_size;
	int kq;
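	/* The pid of the process that created this kqueue.  kqop_free() only
	 * closes the descriptor when it still matches getpid(), since kqueue
	 * descriptors are not inherited across fork(). */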
	pid_t pid;
};

static void kqop_free(struct kqop *kqop);

static void *kq_init(struct event_base *);
static int kq_sig_add(struct event_base *, int, short, short, void *);
static int kq_sig_del(struct event_base *, int, short, short, void *);
static int kq_dispatch(struct event_base *, struct timeval *);
static void kq_dealloc(struct event_base *);

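/* The kqueue backend.  fd changes are queued through the generic changelist
 * (event_changelist_add/del) and flushed to the kernel in a single kevent()
 * call from kq_dispatch(). */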
const struct eventop kqops = {
	"kqueue",
	kq_init,
	event_changelist_add,
	event_changelist_del,
	kq_dispatch,
	kq_dealloc,
	1 /* need reinit */,
	EV_FEATURE_ET|EV_FEATURE_O1|EV_FEATURE_FDS,
	EVENT_CHANGELIST_FDINFO_SIZE
};

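/* Signal backend used alongside the kqueue backend: signals are registered
 * with the kernel directly via EVFILT_SIGNAL instead of going through the
 * changelist, so add and delete take effect immediately. */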
static const struct eventop kqsigops = {
	"kqueue_signal",
	NULL,
	kq_sig_add,
	kq_sig_del,
	NULL,
	NULL,
	1 /* need reinit */,
	0,
	0
};

static void *
kq_init(struct event_base *base)
{
	int kq = -1;
	struct kqop *kqueueop = NULL;

	if (!(kqueueop = mm_calloc(1, sizeof(struct kqop))))
		return (NULL);

	/* Initialize the kernel queue */
	if ((kq = kqueue()) == -1) {
		event_warn("kqueue");
		goto err;
	}

	kqueueop->kq = kq;

	kqueueop->pid = getpid();

	/* Initialize fields */
	kqueueop->changes = mm_calloc(NEVENT, sizeof(struct kevent));
	if (kqueueop->changes == NULL)
		goto err;
	kqueueop->events = mm_calloc(NEVENT, sizeof(struct kevent));
	if (kqueueop->events == NULL)
		goto err;
	kqueueop->events_size = kqueueop->changes_size = NEVENT;

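	/* Handle signal events with the EVFILT_SIGNAL-based backend
	 * declared above. */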
	base->evsigsel = &kqsigops;

	return (kqueueop);
err:
	if (kqueueop)
		kqop_free(kqueueop);

	return (NULL);
}

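/* Magic value stored in kevent.udata for 'add' changes, so that errors
 * reported back by kevent() can be told apart from errors on 'delete'
 * changes (whose udata stays zero). */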
#define ADD_UDATA 0x30303

static void
kq_setup_kevent(struct kevent *out, evutil_socket_t fd, int filter, short change)
{
	memset(out, 0, sizeof(struct kevent));
	out->ident = fd;
	out->filter = filter;

	if (change & EV_CHANGE_ADD) {
		out->flags = EV_ADD;
		/* We set a magic number here so that we can tell 'add'
		 * errors from 'del' errors. */
		out->udata = INT_TO_UDATA(ADD_UDATA);
		if (change & EV_ET)
			out->flags |= EV_CLEAR;
#ifdef NOTE_EOF
		/* Make it behave like select() and poll() */
		if (filter == EVFILT_READ)
			out->fflags = NOTE_EOF;
#endif
	} else {
		EVUTIL_ASSERT(change & EV_CHANGE_DEL);
		out->flags = EV_DELETE;
	}
}

static int
kq_build_changes_list(const struct event_changelist *changelist,
    struct kqop *kqop)
{
	int i;
	int n_changes = 0;

	for (i = 0; i < changelist->n_changes; ++i) {
		struct event_change *in_ch = &changelist->changes[i];
		struct kevent *out_ch;
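		/* A single change may expand into two kevents (one for the
		 * read side, one for the write side), hence the "- 1" in the
		 * size check below. */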
		if (n_changes >= kqop->changes_size - 1) {
			int newsize = kqop->changes_size * 2;
			struct kevent *newchanges;

			newchanges = mm_realloc(kqop->changes,
			    newsize * sizeof(struct kevent));
			if (newchanges == NULL) {
				event_warn("%s: realloc", __func__);
				return (-1);
			}
			kqop->changes = newchanges;
			kqop->changes_size = newsize;
		}
		if (in_ch->read_change) {
			out_ch = &kqop->changes[n_changes++];
			kq_setup_kevent(out_ch, in_ch->fd, EVFILT_READ,
			    in_ch->read_change);
		}
		if (in_ch->write_change) {
			out_ch = &kqop->changes[n_changes++];
			kq_setup_kevent(out_ch, in_ch->fd, EVFILT_WRITE,
			    in_ch->write_change);
		}
	}
	return n_changes;
}

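/* Grow the array that receives triggered events from kevent() to hold
 * new_size entries.  Returns 0 on success, -1 if the reallocation fails. */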
static int
kq_grow_events(struct kqop *kqop, size_t new_size)
{
	struct kevent *newresult;

	newresult = mm_realloc(kqop->events,
	    new_size * sizeof(struct kevent));

	if (newresult) {
		kqop->events = newresult;
		kqop->events_size = new_size;
		return 0;
	} else {
		return -1;
	}
}

static int
kq_dispatch(struct event_base *base, struct timeval *tv)
{
	struct kqop *kqop = base->evbase;
	struct kevent *events = kqop->events;
	struct kevent *changes;
	struct timespec ts, *ts_p = NULL;
	int i, n_changes, res;

	if (tv != NULL) {
		TIMEVAL_TO_TIMESPEC(tv, &ts);
		ts_p = &ts;
	}

	/* Build "changes" from "base->changelist" */
	EVUTIL_ASSERT(kqop->changes);
	n_changes = kq_build_changes_list(&base->changelist, kqop);
	if (n_changes < 0)
		return -1;

	event_changelist_remove_all(&base->changelist, base);

	/* steal the changes array in case some broken code tries to call
	 * dispatch twice at once. */
	changes = kqop->changes;
	kqop->changes = NULL;

	/* Make sure that 'events' is at least as long as the list of changes:
	 * otherwise errors in the changes can get reported as a -1 return
	 * value from kevent() rather than as EV_ERROR events in the events
	 * array.
	 *
	 * (We could instead handle -1 return values from kevent() by
	 * retrying with a smaller changes array or a larger events array,
	 * but this approach seems less risky for now.)
	 */
	if (kqop->events_size < n_changes) {
		int new_size = kqop->events_size;
		do {
			new_size *= 2;
		} while (new_size < n_changes);

		kq_grow_events(kqop, new_size);
		events = kqop->events;
	}

	EVBASE_RELEASE_LOCK(base, th_base_lock);

	res = kevent(kqop->kq, changes, n_changes,
	    events, kqop->events_size, ts_p);

	EVBASE_ACQUIRE_LOCK(base, th_base_lock);

	EVUTIL_ASSERT(kqop->changes == NULL);
	kqop->changes = changes;

	if (res == -1) {
		if (errno != EINTR) {
			event_warn("kevent");
			return (-1);
		}

		return (0);
	}

	event_debug(("%s: kevent reports %d", __func__, res));

	for (i = 0; i < res; i++) {
		int which = 0;

		if (events[i].flags & EV_ERROR) {
			switch (events[i].data) {

			/* Can occur on delete if we are not currently
			 * watching any events on this fd.  That can
			 * happen when the fd was closed and another
			 * file was opened with that fd. */
			case ENOENT:
			/* Can occur for reasons not fully understood
			 * on FreeBSD. */
			case EINVAL:
				continue;

			/* Can occur on a delete if the fd is closed. */
			case EBADF:
				/* XXXX On NetBSD, we can also get EBADF if we
				 * try to add the write side of a pipe, but
				 * the read side has already been closed.
				 * Other BSDs call this situation 'EPIPE'. It
				 * would be good if we had a way to report
				 * this situation. */
				continue;
			/* These two can occur on an add if the fd was one side
			 * of a pipe, and the other side was closed. */
			case EPERM:
			case EPIPE:
				/* Report read events, if we're listening for
				 * them, so that the user can learn about any
				 * add errors.  (If the operation was a
				 * delete, then udata should be cleared.) */
				if (events[i].udata) {
					/* The operation was an add:
					 * report the error as a read. */
					which |= EV_READ;
					break;
				} else {
					/* The operation was a del:
					 * report nothing. */
					continue;
				}

			/* Other errors shouldn't occur. */
			default:
				errno = events[i].data;
				return (-1);
			}
		} else if (events[i].filter == EVFILT_READ) {
			which |= EV_READ;
		} else if (events[i].filter == EVFILT_WRITE) {
			which |= EV_WRITE;
		} else if (events[i].filter == EVFILT_SIGNAL) {
			which |= EV_SIGNAL;
		}

		if (!which)
			continue;

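		/* Hand the result to the evmap layer.  EV_ET is passed along
		 * because kqueue notifications can satisfy edge-triggered
		 * (EV_ET) registrations as well as level-triggered ones. */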
		if (events[i].filter == EVFILT_SIGNAL) {
			evmap_signal_active(base, events[i].ident, 1);
		} else {
			evmap_io_active(base, events[i].ident, which | EV_ET);
		}
	}

	if (res == kqop->events_size) {
		/* We used all the events space that we have. Maybe we should
		   make it bigger. */
		kq_grow_events(kqop, kqop->events_size * 2);
	}

	return (0);
}

static void
kqop_free(struct kqop *kqop)
{
	if (kqop->changes)
		mm_free(kqop->changes);
	if (kqop->events)
		mm_free(kqop->events);
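	/* Close the kqueue fd only if this process created it: kqueue
	 * descriptors are not inherited across fork(), so a forked child
	 * leaves it alone. */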
	if (kqop->kq >= 0 && kqop->pid == getpid())
		close(kqop->kq);
	memset(kqop, 0, sizeof(struct kqop));
	mm_free(kqop);
}

static void
kq_dealloc(struct event_base *base)
{
	struct kqop *kqop = base->evbase;
	evsig_dealloc(base);
	kqop_free(kqop);
}

/* signal handling */
static int
kq_sig_add(struct event_base *base, int nsignal, short old, short events, void *p)
{
	struct kqop *kqop = base->evbase;
	struct kevent kev;
	struct timespec timeout = { 0, 0 };
	(void)p;

	EVUTIL_ASSERT(nsignal >= 0 && nsignal < NSIG);

	memset(&kev, 0, sizeof(kev));
	kev.ident = nsignal;
	kev.filter = EVFILT_SIGNAL;
	kev.flags = EV_ADD;

	/* Be ready for the signal if it is sent any
	 * time between now and the next call to
	 * kq_dispatch. */
	if (kevent(kqop->kq, &kev, 1, NULL, 0, &timeout) == -1)
		return (-1);

	/* Backported from
	 * https://github.com/nmathewson/Libevent/commit/148458e0a1fd25e167aa2ef229d1c9a70b27c3e9 */
	/* We can set the handler for most signals to SIG_IGN and
	 * still have them reported to us in the queue.  However,
	 * if the handler for SIGCHLD is SIG_IGN, the system reaps
	 * zombie processes for us, and we don't get any notification.
	 * This appears to be the only signal with this quirk. */
	if (_evsig_set_handler(base, nsignal,
	                       nsignal == SIGCHLD ? SIG_DFL : SIG_IGN) == -1) {
		return (-1);
	}

	return (0);
}

static int
kq_sig_del(struct event_base *base, int nsignal, short old, short events, void *p)
{
	struct kqop *kqop = base->evbase;
	struct kevent kev;

	struct timespec timeout = { 0, 0 };
	(void)p;

	EVUTIL_ASSERT(nsignal >= 0 && nsignal < NSIG);

	memset(&kev, 0, sizeof(kev));
	kev.ident = nsignal;
	kev.filter = EVFILT_SIGNAL;
	kev.flags = EV_DELETE;

	/* Because we insert signal events
	 * immediately, we need to delete them
	 * immediately, too */
	if (kevent(kqop->kq, &kev, 1, NULL, 0, &timeout) == -1)
		return (-1);

	if (_evsig_restore_handler(base, nsignal) == -1)
		return (-1);

	return (0);
}