xref: /netbsd/external/bsd/ntp/dist/lib/isc/task.c (revision 9034ec65)
1 /*	$NetBSD: task.c,v 1.5 2020/05/25 20:47:20 christos Exp $	*/
2 
3 /*
4  * Copyright (C) 2004-2012  Internet Systems Consortium, Inc. ("ISC")
5  * Copyright (C) 1998-2003  Internet Software Consortium.
6  *
7  * Permission to use, copy, modify, and/or distribute this software for any
8  * purpose with or without fee is hereby granted, provided that the above
9  * copyright notice and this permission notice appear in all copies.
10  *
11  * THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
12  * REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
13  * AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
14  * INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
15  * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
16  * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 /* Id */
21 
22 /*! \file
23  * \author Principal Author: Bob Halley
24  */
25 
26 /*
27  * XXXRTH  Need to document the states a task can be in, and the rules
28  * for changing states.
29  */
30 
31 #include <config.h>
32 
33 #include <isc/condition.h>
34 #include <isc/event.h>
35 #include <isc/magic.h>
36 #include <isc/mem.h>
37 #include <isc/msgs.h>
38 #include <isc/platform.h>
39 #include <isc/string.h>
40 #include <isc/task.h>
41 #include <isc/thread.h>
42 #include <isc/util.h>
43 #include <isc/xml.h>
44 
45 #ifdef OPENSSL_LEAKS
46 #include <openssl/err.h>
47 #endif
48 
49 /*%
50  * For BIND9 internal applications:
51  * when built with threads we use multiple worker threads shared by the whole
52  * application.
53  * when built without threads we share a single global task manager and use
54  * an integrated event loop for socket, timer, and other generic task events.
55  * For generic library:
56  * we don't use either of them: an application can have multiple task managers
57  * whether or not it's threaded, and if the application is threaded each thread
58  * is expected to have a separate manager; no "worker threads" are shared by
59  * the application threads.
60  */
61 #ifdef BIND9
62 #ifdef ISC_PLATFORM_USETHREADS
63 #define USE_WORKER_THREADS
64 #else
65 #define USE_SHARED_MANAGER
66 #endif	/* ISC_PLATFORM_USETHREADS */
67 #endif	/* BIND9 */
68 
69 #include "task_p.h"
70 
71 #ifdef ISC_TASK_TRACE
72 #define XTRACE(m)		fprintf(stderr, "task %p thread %lu: %s\n", \
73 				       task, isc_thread_self(), (m))
74 #define XTTRACE(t, m)		fprintf(stderr, "task %p thread %lu: %s\n", \
75 				       (t), isc_thread_self(), (m))
76 #define XTHREADTRACE(m)		fprintf(stderr, "thread %lu: %s\n", \
77 				       isc_thread_self(), (m))
78 #else
79 #define XTRACE(m)
80 #define XTTRACE(t, m)
81 #define XTHREADTRACE(m)
82 #endif
83 
84 /***
85  *** Types.
86  ***/
87 
/*
 * Life-cycle states of a task.  Tasks move idle -> ready when an event
 * is posted, ready -> running when a dispatcher picks them up, and end
 * in the done state once shutdown processing has completed.
 */
typedef enum {
	task_state_idle,	/* no events pending, not on a run queue */
	task_state_ready,	/* has work; waiting on a manager run queue */
	task_state_running,	/* a dispatcher is executing its events */
	task_state_done		/* shut down; will never run again */
} task_state_t;
92 
#if defined(HAVE_LIBXML2) && defined(BIND9)
/*
 * Printable names for task_state_t, indexed by the enum value;
 * used only by the XML statistics rendering code.
 */
static const char *statenames[] = {
	"idle", "ready", "running", "done",
};
#endif
98 
99 #define TASK_MAGIC			ISC_MAGIC('T', 'A', 'S', 'K')
100 #define VALID_TASK(t)			ISC_MAGIC_VALID(t, TASK_MAGIC)
101 
102 typedef struct isc__task isc__task_t;
103 typedef struct isc__taskmgr isc__taskmgr_t;
104 
struct isc__task {
	/* Not locked. */
	isc_task_t			common;	/* generic header (magic, methods) */
	isc__taskmgr_t *		manager; /* owning manager; set at creation */
	isc_mutex_t			lock;
	/* Locked by task lock. */
	task_state_t			state;
	unsigned int			references; /* attach count; destroyed at 0 */
	isc_eventlist_t			events;	/* pending events, delivered FIFO */
	isc_eventlist_t			on_shutdown; /* shutdown events, posted LIFO */
	unsigned int			quantum; /* max events per dispatch slice */
	unsigned int			flags;	/* TASK_F_* bits */
	isc_stdtime_t			now;	/* time of this task's last dispatch */
	char				name[16]; /* diagnostic name, NUL-padded */
	void *				tag;	/* opaque user tag set with the name */
	/* Locked by task manager lock. */
	LINK(isc__task_t)		link;		/* on manager->tasks */
	LINK(isc__task_t)		ready_link;	/* on manager->ready_tasks */
	LINK(isc__task_t)		ready_priority_link; /* on ready_priority_tasks */
};
125 
126 #define TASK_F_SHUTTINGDOWN		0x01
127 #define TASK_F_PRIVILEGED		0x02
128 
129 #define TASK_SHUTTINGDOWN(t)		(((t)->flags & TASK_F_SHUTTINGDOWN) \
130 					 != 0)
131 
132 #define TASK_MANAGER_MAGIC		ISC_MAGIC('T', 'S', 'K', 'M')
133 #define VALID_MANAGER(m)		ISC_MAGIC_VALID(m, TASK_MANAGER_MAGIC)
134 
135 typedef ISC_LIST(isc__task_t)	isc__tasklist_t;
136 
struct isc__taskmgr {
	/* Not locked. */
	isc_taskmgr_t			common;	/* generic header (magic, methods) */
	isc_mem_t *			mctx;	/* memory context tasks come from */
	isc_mutex_t			lock;
#ifdef ISC_PLATFORM_USETHREADS
	unsigned int			workers; /* number of worker threads */
	isc_thread_t *			threads; /* worker thread handles */
#endif /* ISC_PLATFORM_USETHREADS */
	/* Locked by task manager lock. */
	unsigned int			default_quantum; /* used when a task asks for 0 */
	LIST(isc__task_t)		tasks;	/* every task owned by this manager */
	isc__tasklist_t			ready_tasks;	/* runnable tasks */
	isc__tasklist_t			ready_priority_tasks; /* runnable privileged tasks */
	isc_taskmgrmode_t		mode;	/* normal vs. privileged dispatching */
#ifdef ISC_PLATFORM_USETHREADS
	isc_condition_t			work_available;
	isc_condition_t			exclusive_granted;
	isc_condition_t			paused;
#endif /* ISC_PLATFORM_USETHREADS */
	unsigned int			tasks_running;
	isc_boolean_t			pause_requested;
	isc_boolean_t			exclusive_requested;
	isc_boolean_t			exiting;
#ifdef USE_SHARED_MANAGER
	unsigned int			refs;	/* shared-manager reference count */
#endif /* USE_SHARED_MANAGER */
};
165 
166 #define DEFAULT_TASKMGR_QUANTUM		10
167 #define DEFAULT_DEFAULT_QUANTUM		5
168 #define FINISHED(m)			((m)->exiting && EMPTY((m)->tasks))
169 
170 #ifdef USE_SHARED_MANAGER
171 static isc__taskmgr_t *taskmgr = NULL;
172 #endif /* USE_SHARED_MANAGER */
173 
174 /*%
175  * The following can be either static or public, depending on build environment.
176  */
177 
178 #ifdef BIND9
179 #define ISC_TASKFUNC_SCOPE
180 #else
181 #define ISC_TASKFUNC_SCOPE static
182 #endif
183 
184 ISC_TASKFUNC_SCOPE isc_result_t
185 isc__task_create(isc_taskmgr_t *manager0, unsigned int quantum,
186 		 isc_task_t **taskp);
187 ISC_TASKFUNC_SCOPE void
188 isc__task_attach(isc_task_t *source0, isc_task_t **targetp);
189 ISC_TASKFUNC_SCOPE void
190 isc__task_detach(isc_task_t **taskp);
191 ISC_TASKFUNC_SCOPE void
192 isc__task_send(isc_task_t *task0, isc_event_t **eventp);
193 ISC_TASKFUNC_SCOPE void
194 isc__task_sendanddetach(isc_task_t **taskp, isc_event_t **eventp);
195 ISC_TASKFUNC_SCOPE unsigned int
196 isc__task_purgerange(isc_task_t *task0, void *sender, isc_eventtype_t first,
197 		     isc_eventtype_t last, void *tag);
198 ISC_TASKFUNC_SCOPE unsigned int
199 isc__task_purge(isc_task_t *task, void *sender, isc_eventtype_t type,
200 		void *tag);
201 ISC_TASKFUNC_SCOPE isc_boolean_t
202 isc__task_purgeevent(isc_task_t *task0, isc_event_t *event);
203 ISC_TASKFUNC_SCOPE unsigned int
204 isc__task_unsendrange(isc_task_t *task, void *sender, isc_eventtype_t first,
205 		      isc_eventtype_t last, void *tag,
206 		      isc_eventlist_t *events);
207 ISC_TASKFUNC_SCOPE unsigned int
208 isc__task_unsend(isc_task_t *task, void *sender, isc_eventtype_t type,
209 		 void *tag, isc_eventlist_t *events);
210 ISC_TASKFUNC_SCOPE isc_result_t
211 isc__task_onshutdown(isc_task_t *task0, isc_taskaction_t action,
212 		     const void *arg);
213 ISC_TASKFUNC_SCOPE void
214 isc__task_shutdown(isc_task_t *task0);
215 ISC_TASKFUNC_SCOPE void
216 isc__task_destroy(isc_task_t **taskp);
217 ISC_TASKFUNC_SCOPE void
218 isc__task_setname(isc_task_t *task0, const char *name, void *tag);
219 ISC_TASKFUNC_SCOPE const char *
220 isc__task_getname(isc_task_t *task0);
221 ISC_TASKFUNC_SCOPE void *
222 isc__task_gettag(isc_task_t *task0);
223 ISC_TASKFUNC_SCOPE void
224 isc__task_getcurrenttime(isc_task_t *task0, isc_stdtime_t *t);
225 ISC_TASKFUNC_SCOPE isc_result_t
226 isc__taskmgr_create(isc_mem_t *mctx, unsigned int workers,
227 		    unsigned int default_quantum, isc_taskmgr_t **managerp);
228 ISC_TASKFUNC_SCOPE void
229 isc__taskmgr_destroy(isc_taskmgr_t **managerp);
230 ISC_TASKFUNC_SCOPE isc_result_t
231 isc__task_beginexclusive(isc_task_t *task);
232 ISC_TASKFUNC_SCOPE void
233 isc__task_endexclusive(isc_task_t *task0);
234 ISC_TASKFUNC_SCOPE void
235 isc__task_setprivilege(isc_task_t *task0, isc_boolean_t priv);
236 ISC_TASKFUNC_SCOPE isc_boolean_t
237 isc__task_privilege(isc_task_t *task0);
238 ISC_TASKFUNC_SCOPE void
239 isc__taskmgr_setmode(isc_taskmgr_t *manager0, isc_taskmgrmode_t mode);
240 ISC_TASKFUNC_SCOPE isc_taskmgrmode_t
241 isc__taskmgr_mode(isc_taskmgr_t *manager0);
242 
243 static inline isc_boolean_t
244 empty_readyq(isc__taskmgr_t *manager);
245 
246 static inline isc__task_t *
247 pop_readyq(isc__taskmgr_t *manager);
248 
249 static inline void
250 push_readyq(isc__taskmgr_t *manager, isc__task_t *task);
251 
/*
 * Method table installed into every task created here; the public
 * isc_task_*() wrappers dispatch through these pointers.
 */
static struct isc__taskmethods {
	isc_taskmethods_t methods;

	/*%
	 * The following are defined just for avoiding unused static functions.
	 */
#ifndef BIND9
	void *purgeevent, *unsendrange, *getname, *gettag, *getcurrenttime;
#endif
} taskmethods = {
	{
		isc__task_attach,
		isc__task_detach,
		isc__task_destroy,
		isc__task_send,
		isc__task_sendanddetach,
		isc__task_unsend,
		isc__task_onshutdown,
		isc__task_shutdown,
		isc__task_setname,
		isc__task_purge,
		isc__task_purgerange,
		isc__task_beginexclusive,
		isc__task_endexclusive,
		isc__task_setprivilege,
		isc__task_privilege
	}
#ifndef BIND9
	,
	/* Referenced here solely to suppress unused-function warnings. */
	(void *)isc__task_purgeevent, (void *)isc__task_unsendrange,
	(void *)isc__task_getname, (void *)isc__task_gettag,
	(void *)isc__task_getcurrenttime
#endif
};
286 
/*
 * Method table installed into managers created by isc__taskmgr_create().
 */
static isc_taskmgrmethods_t taskmgrmethods = {
	isc__taskmgr_destroy,
	isc__taskmgr_setmode,
	isc__taskmgr_mode,
	isc__task_create
};
293 
294 /***
295  *** Tasks.
296  ***/
297 
/*
 * Free all resources of a task whose last reference is gone and whose
 * shutdown processing has completed.  Caller must NOT hold any locks.
 */
static void
task_finished(isc__task_t *task) {
	isc__taskmgr_t *manager = task->manager;

	REQUIRE(EMPTY(task->events));
	REQUIRE(EMPTY(task->on_shutdown));
	REQUIRE(task->references == 0);
	REQUIRE(task->state == task_state_done);

	XTRACE("task_finished");

	LOCK(&manager->lock);
	UNLINK(manager->tasks, task, link);
#ifdef USE_WORKER_THREADS
	if (FINISHED(manager)) {
		/*
		 * All tasks have completed and the
		 * task manager is exiting.  Wake up
		 * any idle worker threads so they
		 * can exit.
		 */
		BROADCAST(&manager->work_available);
	}
#endif /* USE_WORKER_THREADS */
	UNLOCK(&manager->lock);

	/* Safe without the task lock: nothing else can reference us now. */
	DESTROYLOCK(&task->lock);
	task->common.impmagic = 0;
	task->common.magic = 0;
	isc_mem_put(manager->mctx, task, sizeof(*task));
}
329 
/*
 * Create a task with dispatch quantum 'quantum' (0 selects the
 * manager's default) and a single reference, returned via '*taskp'.
 * Returns ISC_R_SHUTTINGDOWN if the manager is already exiting.
 */
ISC_TASKFUNC_SCOPE isc_result_t
isc__task_create(isc_taskmgr_t *manager0, unsigned int quantum,
		 isc_task_t **taskp)
{
	isc__taskmgr_t *manager = (void*)manager0;
	isc__task_t *task;
	isc_boolean_t exiting;
	isc_result_t result;

	REQUIRE(VALID_MANAGER(manager));
	REQUIRE(taskp != NULL && *taskp == NULL);

	task = isc_mem_get(manager->mctx, sizeof(*task));
	if (task == NULL)
		return (ISC_R_NOMEMORY);
	XTRACE("isc_task_create");
	result = isc_mutex_init(&task->lock);
	if (result != ISC_R_SUCCESS) {
		isc_mem_put(manager->mctx, task, sizeof(*task));
		return (result);
	}
	/*
	 * Initialize all fields under both locks.  No other thread can
	 * see the task yet; the locking mainly keeps static analyzers
	 * satisfied (see inline note).
	 */
	LOCK(&manager->lock);
	LOCK(&task->lock);	/* helps coverity analysis noise ratio */
	task->manager = manager;
	task->state = task_state_idle;
	task->references = 1;
	INIT_LIST(task->events);
	INIT_LIST(task->on_shutdown);
	task->quantum = quantum;
	task->flags = 0;
	task->now = 0;
	memset(task->name, 0, sizeof(task->name));
	task->tag = NULL;
	INIT_LINK(task, link);
	INIT_LINK(task, ready_link);
	INIT_LINK(task, ready_priority_link);
	UNLOCK(&task->lock);
	UNLOCK(&manager->lock);

	/*
	 * Second pass under the manager lock: only link the task into
	 * the manager if the manager has not begun shutting down.
	 */
	exiting = ISC_FALSE;
	LOCK(&manager->lock);
	if (!manager->exiting) {
		if (task->quantum == 0)
			task->quantum = manager->default_quantum;
		APPEND(manager->tasks, task, link);
	} else
		exiting = ISC_TRUE;
	UNLOCK(&manager->lock);

	if (exiting) {
		/* Undo: the task was never visible to anyone else. */
		DESTROYLOCK(&task->lock);
		isc_mem_put(manager->mctx, task, sizeof(*task));
		return (ISC_R_SHUTTINGDOWN);
	}

	task->common.methods = (isc_taskmethods_t *)&taskmethods;
	task->common.magic = ISCAPI_TASK_MAGIC;
	task->common.impmagic = TASK_MAGIC;
	*taskp = (isc_task_t *)task;

	return (ISC_R_SUCCESS);
}
392 
393 ISC_TASKFUNC_SCOPE void
isc__task_attach(isc_task_t * source0,isc_task_t ** targetp)394 isc__task_attach(isc_task_t *source0, isc_task_t **targetp) {
395 	isc__task_t *source = (isc__task_t *)source0;
396 
397 	/*
398 	 * Attach *targetp to source.
399 	 */
400 
401 	REQUIRE(VALID_TASK(source));
402 	REQUIRE(targetp != NULL && *targetp == NULL);
403 
404 	XTTRACE(source, "isc_task_attach");
405 
406 	LOCK(&source->lock);
407 	source->references++;
408 	UNLOCK(&source->lock);
409 
410 	*targetp = (isc_task_t *)source;
411 }
412 
/*
 * Begin shutdown of 'task': set the shutting-down flag and move every
 * registered on-shutdown event (in LIFO order) onto the normal event
 * queue.  Returns ISC_TRUE if the task was idle, in which case the
 * caller must queue it (via task_ready()) so those events get run.
 *
 * Caller must be holding the task's lock.
 */
static inline isc_boolean_t
task_shutdown(isc__task_t *task) {
	isc_boolean_t was_idle = ISC_FALSE;
	isc_event_t *event, *prev;

	XTRACE("task_shutdown");

	if (! TASK_SHUTTINGDOWN(task)) {
		XTRACE(isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
				      ISC_MSG_SHUTTINGDOWN, "shutting down"));
		task->flags |= TASK_F_SHUTTINGDOWN;
		if (task->state == task_state_idle) {
			INSIST(EMPTY(task->events));
			task->state = task_state_ready;
			was_idle = ISC_TRUE;
		}
		INSIST(task->state == task_state_ready ||
		       task->state == task_state_running);

		/*
		 * Note that we post shutdown events LIFO.
		 */
		for (event = TAIL(task->on_shutdown);
		     event != NULL;
		     event = prev) {
			prev = PREV(event, ev_link);
			DEQUEUE(task->on_shutdown, event, ev_link);
			ENQUEUE(task->events, event, ev_link);
		}
	}

	return (was_idle);
}
450 
451 /*
452  * Moves a task onto the appropriate run queue.
453  *
454  * Caller must NOT hold manager lock.
455  */
static inline void
task_ready(isc__task_t *task) {
	isc__taskmgr_t *manager = task->manager;
#ifdef USE_WORKER_THREADS
	/* Check privilege before taking the manager lock. */
	isc_boolean_t has_privilege = isc__task_privilege((isc_task_t *) task);
#endif /* USE_WORKER_THREADS */

	REQUIRE(VALID_MANAGER(manager));
	REQUIRE(task->state == task_state_ready);

	XTRACE("task_ready");

	LOCK(&manager->lock);
	push_readyq(manager, task);
#ifdef USE_WORKER_THREADS
	/*
	 * Wake a worker.  In privileged mode only privileged tasks are
	 * runnable, so don't signal for an unprivileged task.
	 */
	if (manager->mode == isc_taskmgrmode_normal || has_privilege)
		SIGNAL(&manager->work_available);
#endif /* USE_WORKER_THREADS */
	UNLOCK(&manager->lock);
}
476 
477 static inline isc_boolean_t
task_detach(isc__task_t * task)478 task_detach(isc__task_t *task) {
479 
480 	/*
481 	 * Caller must be holding the task lock.
482 	 */
483 
484 	REQUIRE(task->references > 0);
485 
486 	XTRACE("detach");
487 
488 	task->references--;
489 	if (task->references == 0 && task->state == task_state_idle) {
490 		INSIST(EMPTY(task->events));
491 		/*
492 		 * There are no references to this task, and no
493 		 * pending events.  We could try to optimize and
494 		 * either initiate shutdown or clean up the task,
495 		 * depending on its state, but it's easier to just
496 		 * make the task ready and allow run() or the event
497 		 * loop to deal with shutting down and termination.
498 		 */
499 		task->state = task_state_ready;
500 		return (ISC_TRUE);
501 	}
502 
503 	return (ISC_FALSE);
504 }
505 
506 ISC_TASKFUNC_SCOPE void
isc__task_detach(isc_task_t ** taskp)507 isc__task_detach(isc_task_t **taskp) {
508 	isc__task_t *task;
509 	isc_boolean_t was_idle;
510 
511 	/*
512 	 * Detach *taskp from its task.
513 	 */
514 
515 	REQUIRE(taskp != NULL);
516 	task = (isc__task_t *)*taskp;
517 	REQUIRE(VALID_TASK(task));
518 
519 	XTRACE("isc_task_detach");
520 
521 	LOCK(&task->lock);
522 	was_idle = task_detach(task);
523 	UNLOCK(&task->lock);
524 
525 	if (was_idle)
526 		task_ready(task);
527 
528 	*taskp = NULL;
529 }
530 
531 static inline isc_boolean_t
task_send(isc__task_t * task,isc_event_t ** eventp)532 task_send(isc__task_t *task, isc_event_t **eventp) {
533 	isc_boolean_t was_idle = ISC_FALSE;
534 	isc_event_t *event;
535 
536 	/*
537 	 * Caller must be holding the task lock.
538 	 */
539 
540 	REQUIRE(eventp != NULL);
541 	event = *eventp;
542 	REQUIRE(event != NULL);
543 	REQUIRE(event->ev_type > 0);
544 	REQUIRE(task->state != task_state_done);
545 
546 	XTRACE("task_send");
547 
548 	if (task->state == task_state_idle) {
549 		was_idle = ISC_TRUE;
550 		INSIST(EMPTY(task->events));
551 		task->state = task_state_ready;
552 	}
553 	INSIST(task->state == task_state_ready ||
554 	       task->state == task_state_running);
555 	ENQUEUE(task->events, event, ev_link);
556 	*eventp = NULL;
557 
558 	return (was_idle);
559 }
560 
561 ISC_TASKFUNC_SCOPE void
isc__task_send(isc_task_t * task0,isc_event_t ** eventp)562 isc__task_send(isc_task_t *task0, isc_event_t **eventp) {
563 	isc__task_t *task = (isc__task_t *)task0;
564 	isc_boolean_t was_idle;
565 
566 	/*
567 	 * Send '*event' to 'task'.
568 	 */
569 
570 	REQUIRE(VALID_TASK(task));
571 
572 	XTRACE("isc_task_send");
573 
574 	/*
575 	 * We're trying hard to hold locks for as short a time as possible.
576 	 * We're also trying to hold as few locks as possible.  This is why
577 	 * some processing is deferred until after the lock is released.
578 	 */
579 	LOCK(&task->lock);
580 	was_idle = task_send(task, eventp);
581 	UNLOCK(&task->lock);
582 
583 	if (was_idle) {
584 		/*
585 		 * We need to add this task to the ready queue.
586 		 *
587 		 * We've waited until now to do it because making a task
588 		 * ready requires locking the manager.  If we tried to do
589 		 * this while holding the task lock, we could deadlock.
590 		 *
591 		 * We've changed the state to ready, so no one else will
592 		 * be trying to add this task to the ready queue.  The
593 		 * only way to leave the ready state is by executing the
594 		 * task.  It thus doesn't matter if events are added,
595 		 * removed, or a shutdown is started in the interval
596 		 * between the time we released the task lock, and the time
597 		 * we add the task to the ready queue.
598 		 */
599 		task_ready(task);
600 	}
601 }
602 
/*
 * Post '*eventp' to '*taskp' and drop the caller's reference, clearing
 * both pointers.  Equivalent to isc_task_send() followed by
 * isc_task_detach(), but takes the task lock only once.
 */
ISC_TASKFUNC_SCOPE void
isc__task_sendanddetach(isc_task_t **taskp, isc_event_t **eventp) {
	isc_boolean_t idle1, idle2;
	isc__task_t *task;

	REQUIRE(taskp != NULL);
	task = (isc__task_t *)*taskp;
	REQUIRE(VALID_TASK(task));

	XTRACE("isc_task_sendanddetach");

	LOCK(&task->lock);
	idle1 = task_send(task, eventp);	/* idle -> ready from the event? */
	idle2 = task_detach(task);	/* idle -> ready from the last detach? */
	UNLOCK(&task->lock);

	/*
	 * If idle1, then idle2 shouldn't be true as well since we're holding
	 * the task lock, and thus the task cannot switch from ready back to
	 * idle.
	 */
	INSIST(!(idle1 && idle2));

	if (idle1 || idle2)
		task_ready(task);

	*taskp = NULL;
}
636 
637 #define PURGE_OK(event)	(((event)->ev_attributes & ISC_EVENTATTR_NOPURGE) == 0)
638 
639 static unsigned int
dequeue_events(isc__task_t * task,void * sender,isc_eventtype_t first,isc_eventtype_t last,void * tag,isc_eventlist_t * events,isc_boolean_t purging)640 dequeue_events(isc__task_t *task, void *sender, isc_eventtype_t first,
641 	       isc_eventtype_t last, void *tag,
642 	       isc_eventlist_t *events, isc_boolean_t purging)
643 {
644 	isc_event_t *event, *next_event;
645 	unsigned int count = 0;
646 
647 	REQUIRE(VALID_TASK(task));
648 	REQUIRE(last >= first);
649 
650 	XTRACE("dequeue_events");
651 
652 	/*
653 	 * Events matching 'sender', whose type is >= first and <= last, and
654 	 * whose tag is 'tag' will be dequeued.  If 'purging', matching events
655 	 * which are marked as unpurgable will not be dequeued.
656 	 *
657 	 * sender == NULL means "any sender", and tag == NULL means "any tag".
658 	 */
659 
660 	LOCK(&task->lock);
661 
662 	for (event = HEAD(task->events); event != NULL; event = next_event) {
663 		next_event = NEXT(event, ev_link);
664 		if (event->ev_type >= first && event->ev_type <= last &&
665 		    (sender == NULL || event->ev_sender == sender) &&
666 		    (tag == NULL || event->ev_tag == tag) &&
667 		    (!purging || PURGE_OK(event))) {
668 			DEQUEUE(task->events, event, ev_link);
669 			ENQUEUE(*events, event, ev_link);
670 			count++;
671 		}
672 	}
673 
674 	UNLOCK(&task->lock);
675 
676 	return (count);
677 }
678 
679 ISC_TASKFUNC_SCOPE unsigned int
isc__task_purgerange(isc_task_t * task0,void * sender,isc_eventtype_t first,isc_eventtype_t last,void * tag)680 isc__task_purgerange(isc_task_t *task0, void *sender, isc_eventtype_t first,
681 		     isc_eventtype_t last, void *tag)
682 {
683 	isc__task_t *task = (isc__task_t *)task0;
684 	unsigned int count;
685 	isc_eventlist_t events;
686 	isc_event_t *event, *next_event;
687 
688 	/*
689 	 * Purge events from a task's event queue.
690 	 */
691 
692 	XTRACE("isc_task_purgerange");
693 
694 	ISC_LIST_INIT(events);
695 
696 	count = dequeue_events(task, sender, first, last, tag, &events,
697 			       ISC_TRUE);
698 
699 	for (event = HEAD(events); event != NULL; event = next_event) {
700 		next_event = NEXT(event, ev_link);
701 		isc_event_free(&event);
702 	}
703 
704 	/*
705 	 * Note that purging never changes the state of the task.
706 	 */
707 
708 	return (count);
709 }
710 
/*
 * Purge all events of exactly 'type' matching 'sender'/'tag' from
 * 'task''s queue; a single-type wrapper around isc__task_purgerange().
 * Returns the number of events purged.
 */
ISC_TASKFUNC_SCOPE unsigned int
isc__task_purge(isc_task_t *task, void *sender, isc_eventtype_t type,
		void *tag)
{
	XTRACE("isc_task_purge");

	return (isc__task_purgerange(task, sender, type, type, tag));
}
723 
/*
 * Purge 'event' from a task's event queue.  Returns ISC_TRUE if the
 * event was found, dequeued, and freed; ISC_FALSE otherwise (not in
 * the queue, or marked unpurgable).
 *
 * XXXRTH:  WARNING:  This method may be removed before beta.
 */
ISC_TASKFUNC_SCOPE isc_boolean_t
isc__task_purgeevent(isc_task_t *task0, isc_event_t *event) {
	isc__task_t *task = (isc__task_t *)task0;
	isc_event_t *curr_event, *next_event;

	REQUIRE(VALID_TASK(task));

	/*
	 * If 'event' is on the task's event queue, it will be purged,
	 * unless it is marked as unpurgeable.  'event' does not have to be
	 * on the task's event queue; in fact, it can even be an invalid
	 * pointer.  Purging only occurs if the event is actually on the task's
	 * event queue.
	 *
	 * Purging never changes the state of the task.
	 */

	LOCK(&task->lock);
	for (curr_event = HEAD(task->events);
	     curr_event != NULL;
	     curr_event = next_event) {
		next_event = NEXT(curr_event, ev_link);
		if (curr_event == event && PURGE_OK(event)) {
			DEQUEUE(task->events, curr_event, ev_link);
			break;
		}
	}
	UNLOCK(&task->lock);

	/*
	 * curr_event is non-NULL only if the loop broke out above,
	 * i.e. the event was found and was purgeable.
	 */
	if (curr_event == NULL)
		return (ISC_FALSE);

	/* Free with no locks held. */
	isc_event_free(&curr_event);

	return (ISC_TRUE);
}
766 
/*
 * Move events matching [first, last] / 'sender' / 'tag' from 'task''s
 * queue onto 'events' without freeing them; returns the count moved.
 * Unlike purging, events marked unpurgable are removed too.
 */
ISC_TASKFUNC_SCOPE unsigned int
isc__task_unsendrange(isc_task_t *task, void *sender, isc_eventtype_t first,
		      isc_eventtype_t last, void *tag,
		      isc_eventlist_t *events)
{
	XTRACE("isc_task_unsendrange");

	return (dequeue_events((isc__task_t *)task, sender, first,
			       last, tag, events, ISC_FALSE));
}
781 
/*
 * Move all events of exactly 'type' matching 'sender'/'tag' from
 * 'task''s queue onto 'events' without freeing them; a single-type
 * wrapper around dequeue_events().  Returns the count moved.
 */
ISC_TASKFUNC_SCOPE unsigned int
isc__task_unsend(isc_task_t *task, void *sender, isc_eventtype_t type,
		 void *tag, isc_eventlist_t *events)
{
	XTRACE("isc_task_unsend");

	return (dequeue_events((isc__task_t *)task, sender, type,
			       type, tag, events, ISC_FALSE));
}
795 
/*
 * Register a shutdown event with action 'action' and argument 'arg',
 * delivered when 'task' is shut down.  Returns ISC_R_SHUTTINGDOWN
 * (and frees the event) if shutdown has already started, or
 * ISC_R_NOMEMORY if the event cannot be allocated.
 */
ISC_TASKFUNC_SCOPE isc_result_t
isc__task_onshutdown(isc_task_t *task0, isc_taskaction_t action,
		     const void *arg)
{
	isc__task_t *task = (isc__task_t *)task0;
	isc_boolean_t disallowed = ISC_FALSE;
	isc_result_t result = ISC_R_SUCCESS;
	isc_event_t *event;

	REQUIRE(VALID_TASK(task));
	REQUIRE(action != NULL);

	event = isc_event_allocate(task->manager->mctx,
				   NULL,
				   ISC_TASKEVENT_SHUTDOWN,
				   action,
				   arg,
				   sizeof(*event));
	if (event == NULL)
		return (ISC_R_NOMEMORY);

	LOCK(&task->lock);
	if (TASK_SHUTTINGDOWN(task)) {
		disallowed = ISC_TRUE;
		result = ISC_R_SHUTTINGDOWN;
	} else
		ENQUEUE(task->on_shutdown, event, ev_link);
	UNLOCK(&task->lock);

	/*
	 * Free outside the lock; the size matches the allocation made
	 * by isc_event_allocate() above.
	 */
	if (disallowed)
		isc_mem_put(task->manager->mctx, event, sizeof(*event));

	return (result);
}
835 
836 ISC_TASKFUNC_SCOPE void
isc__task_shutdown(isc_task_t * task0)837 isc__task_shutdown(isc_task_t *task0) {
838 	isc__task_t *task = (isc__task_t *)task0;
839 	isc_boolean_t was_idle;
840 
841 	/*
842 	 * Shutdown 'task'.
843 	 */
844 
845 	REQUIRE(VALID_TASK(task));
846 
847 	LOCK(&task->lock);
848 	was_idle = task_shutdown(task);
849 	UNLOCK(&task->lock);
850 
851 	if (was_idle)
852 		task_ready(task);
853 }
854 
/*
 * Shut down and detach '*taskp' in one call, clearing the pointer.
 * The task itself is freed once all other references and pending
 * events have drained.
 */
ISC_TASKFUNC_SCOPE void
isc__task_destroy(isc_task_t **taskp) {

	REQUIRE(taskp != NULL);

	isc_task_shutdown(*taskp);
	isc_task_detach(taskp);
}
867 
/*
 * Set the diagnostic name and tag of 'task'.  The name is truncated to
 * 15 characters; the memset guarantees NUL termination and zero
 * padding of the 16-byte buffer.
 */
ISC_TASKFUNC_SCOPE void
isc__task_setname(isc_task_t *task0, const char *name, void *tag) {
	isc__task_t *task = (isc__task_t *)task0;

	REQUIRE(VALID_TASK(task));

	LOCK(&task->lock);
	memset(task->name, 0, sizeof(task->name));
	strncpy(task->name, name, sizeof(task->name) - 1);
	task->tag = tag;
	UNLOCK(&task->lock);
}
884 
885 ISC_TASKFUNC_SCOPE const char *
isc__task_getname(isc_task_t * task0)886 isc__task_getname(isc_task_t *task0) {
887 	isc__task_t *task = (isc__task_t *)task0;
888 
889 	REQUIRE(VALID_TASK(task));
890 
891 	return (task->name);
892 }
893 
894 ISC_TASKFUNC_SCOPE void *
isc__task_gettag(isc_task_t * task0)895 isc__task_gettag(isc_task_t *task0) {
896 	isc__task_t *task = (isc__task_t *)task0;
897 
898 	REQUIRE(VALID_TASK(task));
899 
900 	return (task->tag);
901 }
902 
903 ISC_TASKFUNC_SCOPE void
isc__task_getcurrenttime(isc_task_t * task0,isc_stdtime_t * t)904 isc__task_getcurrenttime(isc_task_t *task0, isc_stdtime_t *t) {
905 	isc__task_t *task = (isc__task_t *)task0;
906 
907 	REQUIRE(VALID_TASK(task));
908 	REQUIRE(t != NULL);
909 
910 	LOCK(&task->lock);
911 	*t = task->now;
912 	UNLOCK(&task->lock);
913 }
914 
915 /***
916  *** Task Manager.
917  ***/
918 
919 /*
920  * Return ISC_TRUE if the current ready list for the manager, which is
921  * either ready_tasks or the ready_priority_tasks, depending on whether
922  * the manager is currently in normal or privileged execution mode.
923  *
924  * Caller must hold the task manager lock.
925  */
926 static inline isc_boolean_t
empty_readyq(isc__taskmgr_t * manager)927 empty_readyq(isc__taskmgr_t *manager) {
928 	isc__tasklist_t queue;
929 
930 	if (manager->mode == isc_taskmgrmode_normal)
931 		queue = manager->ready_tasks;
932 	else
933 		queue = manager->ready_priority_tasks;
934 
935 	return (ISC_TF(EMPTY(queue)));
936 }
937 
938 /*
939  * Dequeue and return a pointer to the first task on the current ready
940  * list for the manager.
941  * If the task is privileged, dequeue it from the other ready list
942  * as well.
943  *
944  * Caller must hold the task manager lock.
945  */
static inline isc__task_t *
pop_readyq(isc__taskmgr_t *manager) {
	isc__task_t *task;

	/* Head of the queue selected by the current dispatch mode. */
	if (manager->mode == isc_taskmgrmode_normal)
		task = HEAD(manager->ready_tasks);
	else
		task = HEAD(manager->ready_priority_tasks);

	if (task != NULL) {
		/*
		 * A privileged task is on both queues (see
		 * push_readyq()), so it must be removed from both;
		 * the ready_link dequeue is valid in either mode.
		 */
		DEQUEUE(manager->ready_tasks, task, ready_link);
		if (ISC_LINK_LINKED(task, ready_priority_link))
			DEQUEUE(manager->ready_priority_tasks, task,
				ready_priority_link);
	}

	return (task);
}
964 
965 /*
966  * Push 'task' onto the ready_tasks queue.  If 'task' has the privilege
967  * flag set, then also push it onto the ready_priority_tasks queue.
968  *
969  * Caller must hold the task manager lock.
970  */
static inline void
push_readyq(isc__taskmgr_t *manager, isc__task_t *task) {
	ENQUEUE(manager->ready_tasks, task, ready_link);
	/* Privileged tasks additionally go onto the priority queue. */
	if ((task->flags & TASK_F_PRIVILEGED) != 0)
		ENQUEUE(manager->ready_priority_tasks, task,
			ready_priority_link);
}
978 
979 static void
/*
 * Worker loop: pull tasks from the manager's ready queue and run their
 * pending events.  In the threaded build each worker thread calls this
 * once and stays in it until the manager is shut down; in the
 * non-threaded build it is called repeatedly and returns after
 * dispatching at most DEFAULT_TASKMGR_QUANTUM events.  Called with the
 * manager lock NOT held; acquires and releases it internally.
 */
dispatch(isc__taskmgr_t *manager) {
	isc__task_t *task;
#ifndef USE_WORKER_THREADS
	/* Per-call event budget, and local lists collecting requeued
	 * tasks so they are not re-run within this same pass. */
	unsigned int total_dispatch_count = 0;
	isc__tasklist_t new_ready_tasks;
	isc__tasklist_t new_priority_tasks;
#endif /* USE_WORKER_THREADS */

	REQUIRE(VALID_MANAGER(manager));

	/*
	 * Again we're trying to hold the lock for as short a time as possible
	 * and to do as little locking and unlocking as possible.
	 *
	 * In both while loops, the appropriate lock must be held before the
	 * while body starts.  Code which acquired the lock at the top of
	 * the loop would be more readable, but would result in a lot of
	 * extra locking.  Compare:
	 *
	 * Straightforward:
	 *
	 *	LOCK();
	 *	...
	 *	UNLOCK();
	 *	while (expression) {
	 *		LOCK();
	 *		...
	 *		UNLOCK();
	 *
	 *	       	Unlocked part here...
	 *
	 *		LOCK();
	 *		...
	 *		UNLOCK();
	 *	}
	 *
	 * Note how if the loop continues we unlock and then immediately lock.
	 * For N iterations of the loop, this code does 2N+1 locks and 2N+1
	 * unlocks.  Also note that the lock is not held when the while
	 * condition is tested, which may or may not be important, depending
	 * on the expression.
	 *
	 * As written:
	 *
	 *	LOCK();
	 *	while (expression) {
	 *		...
	 *		UNLOCK();
	 *
	 *	       	Unlocked part here...
	 *
	 *		LOCK();
	 *		...
	 *	}
	 *	UNLOCK();
	 *
	 * For N iterations of the loop, this code does N+1 locks and N+1
	 * unlocks.  The while expression is always protected by the lock.
	 */

#ifndef USE_WORKER_THREADS
	ISC_LIST_INIT(new_ready_tasks);
	ISC_LIST_INIT(new_priority_tasks);
#endif
	LOCK(&manager->lock);

	while (!FINISHED(manager)) {
#ifdef USE_WORKER_THREADS
		/*
		 * For reasons similar to those given in the comment in
		 * isc_task_send() above, it is safe for us to dequeue
		 * the task while only holding the manager lock, and then
		 * change the task to running state while only holding the
		 * task lock.
		 *
		 * If a pause has been requested, don't do any work
		 * until it's been released.
		 */
		while ((empty_readyq(manager) || manager->pause_requested ||
			manager->exclusive_requested) && !FINISHED(manager))
		{
			XTHREADTRACE(isc_msgcat_get(isc_msgcat,
						    ISC_MSGSET_GENERAL,
						    ISC_MSG_WAIT, "wait"));
			WAIT(&manager->work_available, &manager->lock);
			XTHREADTRACE(isc_msgcat_get(isc_msgcat,
						    ISC_MSGSET_TASK,
						    ISC_MSG_AWAKE, "awake"));
		}
#else /* USE_WORKER_THREADS */
		/* Stop once the per-call budget is spent or nothing is
		 * ready to run. */
		if (total_dispatch_count >= DEFAULT_TASKMGR_QUANTUM ||
		    empty_readyq(manager))
			break;
#endif /* USE_WORKER_THREADS */
		XTHREADTRACE(isc_msgcat_get(isc_msgcat, ISC_MSGSET_TASK,
					    ISC_MSG_WORKING, "working"));

		task = pop_readyq(manager);
		if (task != NULL) {
			unsigned int dispatch_count = 0;
			isc_boolean_t done = ISC_FALSE;
			isc_boolean_t requeue = ISC_FALSE;
			isc_boolean_t finished = ISC_FALSE;
			isc_event_t *event;

			INSIST(VALID_TASK(task));

			/*
			 * Note we only unlock the manager lock if we actually
			 * have a task to do.  We must reacquire the manager
			 * lock before exiting the 'if (task != NULL)' block.
			 */
			manager->tasks_running++;
			UNLOCK(&manager->lock);

			LOCK(&task->lock);
			INSIST(task->state == task_state_ready);
			task->state = task_state_running;
			XTRACE(isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
					      ISC_MSG_RUNNING, "running"));
			isc_stdtime_get(&task->now);
			do {
				if (!EMPTY(task->events)) {
					event = HEAD(task->events);
					DEQUEUE(task->events, event, ev_link);

					/*
					 * Execute the event action.
					 */
					XTRACE(isc_msgcat_get(isc_msgcat,
							    ISC_MSGSET_TASK,
							    ISC_MSG_EXECUTE,
							    "execute action"));
					if (event->ev_action != NULL) {
						/*
						 * Drop the task lock around
						 * the callback so the action
						 * may itself send events or
						 * manipulate this task.
						 */
						UNLOCK(&task->lock);
						(event->ev_action)(
							(isc_task_t *)task,
							event);
						LOCK(&task->lock);
					}
					dispatch_count++;
#ifndef USE_WORKER_THREADS
					total_dispatch_count++;
#endif /* USE_WORKER_THREADS */
				}

				if (task->references == 0 &&
				    EMPTY(task->events) &&
				    !TASK_SHUTTINGDOWN(task)) {
					isc_boolean_t was_idle;

					/*
					 * There are no references and no
					 * pending events for this task,
					 * which means it will not become
					 * runnable again via an external
					 * action (such as sending an event
					 * or detaching).
					 *
					 * We initiate shutdown to prevent
					 * it from becoming a zombie.
					 *
					 * We do this here instead of in
					 * the "if EMPTY(task->events)" block
					 * below because:
					 *
					 *	If we post no shutdown events,
					 *	we want the task to finish.
					 *
					 *	If we did post shutdown events,
					 *	will still want the task's
					 *	quantum to be applied.
					 */
					was_idle = task_shutdown(task);
					INSIST(!was_idle);
				}

				if (EMPTY(task->events)) {
					/*
					 * Nothing else to do for this task
					 * right now.
					 */
					XTRACE(isc_msgcat_get(isc_msgcat,
							      ISC_MSGSET_TASK,
							      ISC_MSG_EMPTY,
							      "empty"));
					if (task->references == 0 &&
					    TASK_SHUTTINGDOWN(task)) {
						/*
						 * The task is done.
						 */
						XTRACE(isc_msgcat_get(
							       isc_msgcat,
							       ISC_MSGSET_TASK,
							       ISC_MSG_DONE,
							       "done"));
						finished = ISC_TRUE;
						task->state = task_state_done;
					} else
						task->state = task_state_idle;
					done = ISC_TRUE;
				} else if (dispatch_count >= task->quantum) {
					/*
					 * Our quantum has expired, but
					 * there is more work to be done.
					 * We'll requeue it to the ready
					 * queue later.
					 *
					 * We don't check quantum until
					 * dispatching at least one event,
					 * so the minimum quantum is one.
					 */
					XTRACE(isc_msgcat_get(isc_msgcat,
							      ISC_MSGSET_TASK,
							      ISC_MSG_QUANTUM,
							      "quantum"));
					task->state = task_state_ready;
					requeue = ISC_TRUE;
					done = ISC_TRUE;
				}
			} while (!done);
			UNLOCK(&task->lock);

			if (finished)
				task_finished(task);

			LOCK(&manager->lock);
			manager->tasks_running--;
#ifdef USE_WORKER_THREADS
			/*
			 * Wake waiters in isc__task_beginexclusive() or
			 * isc__taskmgr_pause() once the running count
			 * reaches the value they are waiting for.
			 */
			if (manager->exclusive_requested &&
			    manager->tasks_running == 1) {
				SIGNAL(&manager->exclusive_granted);
			} else if (manager->pause_requested &&
				   manager->tasks_running == 0) {
				SIGNAL(&manager->paused);
			}
#endif /* USE_WORKER_THREADS */
			if (requeue) {
				/*
				 * We know we're awake, so we don't have
				 * to wakeup any sleeping threads if the
				 * ready queue is empty before we requeue.
				 *
				 * A possible optimization if the queue is
				 * empty is to 'goto' the 'if (task != NULL)'
				 * block, avoiding the ENQUEUE of the task
				 * and the subsequent immediate DEQUEUE
				 * (since it is the only executable task).
				 * We don't do this because then we'd be
				 * skipping the exit_requested check.  The
				 * cost of ENQUEUE is low anyway, especially
				 * when you consider that we'd have to do
				 * an extra EMPTY check to see if we could
				 * do the optimization.  If the ready queue
				 * were usually nonempty, the 'optimization'
				 * might even hurt rather than help.
				 */
#ifdef USE_WORKER_THREADS
				push_readyq(manager, task);
#else
				ENQUEUE(new_ready_tasks, task, ready_link);
				if ((task->flags & TASK_F_PRIVILEGED) != 0)
					ENQUEUE(new_priority_tasks, task,
						ready_priority_link);
#endif
			}
		}

#ifdef USE_WORKER_THREADS
		/*
		 * If we are in privileged execution mode and there are no
		 * tasks remaining on the current ready queue, then
		 * we're stuck.  Automatically drop privileges at that
		 * point and continue with the regular ready queue.
		 */
		if (manager->tasks_running == 0 && empty_readyq(manager)) {
			manager->mode = isc_taskmgrmode_normal;
			if (!empty_readyq(manager))
				BROADCAST(&manager->work_available);
		}
#endif
	}

#ifndef USE_WORKER_THREADS
	/* Merge locally requeued tasks back into the shared queues. */
	ISC_LIST_APPENDLIST(manager->ready_tasks, new_ready_tasks, ready_link);
	ISC_LIST_APPENDLIST(manager->ready_priority_tasks, new_priority_tasks,
			    ready_priority_link);
	if (empty_readyq(manager))
		manager->mode = isc_taskmgrmode_normal;
#endif

	UNLOCK(&manager->lock);
}
1273 
1274 #ifdef USE_WORKER_THREADS
1275 static isc_threadresult_t
1276 #ifdef _WIN32
1277 WINAPI
1278 #endif
run(void * uap)1279 run(void *uap) {
1280 	isc__taskmgr_t *manager = uap;
1281 
1282 	XTHREADTRACE(isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
1283 				    ISC_MSG_STARTING, "starting"));
1284 
1285 	dispatch(manager);
1286 
1287 	XTHREADTRACE(isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
1288 				    ISC_MSG_EXITING, "exiting"));
1289 
1290 #ifdef OPENSSL_LEAKS
1291 	ERR_remove_state(0);
1292 #endif
1293 
1294 	return ((isc_threadresult_t)0);
1295 }
1296 #endif /* USE_WORKER_THREADS */
1297 
1298 static void
manager_free(isc__taskmgr_t * manager)1299 manager_free(isc__taskmgr_t *manager) {
1300 	isc_mem_t *mctx;
1301 
1302 	LOCK(&manager->lock);
1303 #ifdef USE_WORKER_THREADS
1304 	(void)isc_condition_destroy(&manager->exclusive_granted);
1305 	(void)isc_condition_destroy(&manager->work_available);
1306 	(void)isc_condition_destroy(&manager->paused);
1307 	isc_mem_free(manager->mctx, manager->threads);
1308 #endif /* USE_WORKER_THREADS */
1309 	manager->common.impmagic = 0;
1310 	manager->common.magic = 0;
1311 	mctx = manager->mctx;
1312 	UNLOCK(&manager->lock);
1313 	DESTROYLOCK(&manager->lock);
1314 	isc_mem_put(mctx, manager, sizeof(*manager));
1315 	isc_mem_detach(&mctx);
1316 
1317 #ifdef USE_SHARED_MANAGER
1318 	taskmgr = NULL;
1319 #endif	/* USE_SHARED_MANAGER */
1320 }
1321 
/*
 * Create a task manager.  'workers' is the number of worker threads to
 * start in the threaded build (unused otherwise); 'default_quantum' is
 * the per-task event budget used by tasks that do not set their own
 * (0 selects DEFAULT_DEFAULT_QUANTUM).  On success the new manager is
 * stored in '*managerp'.
 */
ISC_TASKFUNC_SCOPE isc_result_t
isc__taskmgr_create(isc_mem_t *mctx, unsigned int workers,
		    unsigned int default_quantum, isc_taskmgr_t **managerp)
{
	isc_result_t result;
	unsigned int i, started = 0;
	isc__taskmgr_t *manager;

	/*
	 * Create a new task manager.
	 */

	REQUIRE(workers > 0);
	REQUIRE(managerp != NULL && *managerp == NULL);

#ifndef USE_WORKER_THREADS
	UNUSED(i);
	UNUSED(started);
#endif

#ifdef USE_SHARED_MANAGER
	/* Shared-manager build: hand out the existing global manager,
	 * bumping its reference count, instead of creating a new one. */
	if (taskmgr != NULL) {
		if (taskmgr->refs == 0)
			return (ISC_R_SHUTTINGDOWN);
		taskmgr->refs++;
		*managerp = (isc_taskmgr_t *)taskmgr;
		return (ISC_R_SUCCESS);
	}
#endif /* USE_SHARED_MANAGER */

	manager = isc_mem_get(mctx, sizeof(*manager));
	if (manager == NULL)
		return (ISC_R_NOMEMORY);
	manager->common.methods = &taskmgrmethods;
	manager->common.impmagic = TASK_MANAGER_MAGIC;
	manager->common.magic = ISCAPI_TASKMGR_MAGIC;
	manager->mode = isc_taskmgrmode_normal;
	manager->mctx = NULL;
	result = isc_mutex_init(&manager->lock);
	if (result != ISC_R_SUCCESS)
		goto cleanup_mgr;
	LOCK(&manager->lock);

#ifdef USE_WORKER_THREADS
	manager->workers = 0;
	manager->threads = isc_mem_allocate(mctx,
					    workers * sizeof(isc_thread_t));
	if (manager->threads == NULL) {
		result = ISC_R_NOMEMORY;
		goto cleanup_lock;
	}
	if (isc_condition_init(&manager->work_available) != ISC_R_SUCCESS) {
		UNEXPECTED_ERROR(__FILE__, __LINE__,
				 "isc_condition_init() %s",
				 isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
						ISC_MSG_FAILED, "failed"));
		result = ISC_R_UNEXPECTED;
		goto cleanup_threads;
	}
	if (isc_condition_init(&manager->exclusive_granted) != ISC_R_SUCCESS) {
		UNEXPECTED_ERROR(__FILE__, __LINE__,
				 "isc_condition_init() %s",
				 isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
						ISC_MSG_FAILED, "failed"));
		result = ISC_R_UNEXPECTED;
		goto cleanup_workavailable;
	}
	if (isc_condition_init(&manager->paused) != ISC_R_SUCCESS) {
		UNEXPECTED_ERROR(__FILE__, __LINE__,
				 "isc_condition_init() %s",
				 isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
						ISC_MSG_FAILED, "failed"));
		result = ISC_R_UNEXPECTED;
		goto cleanup_exclusivegranted;
	}
#endif /* USE_WORKER_THREADS */
	if (default_quantum == 0)
		default_quantum = DEFAULT_DEFAULT_QUANTUM;
	manager->default_quantum = default_quantum;
	INIT_LIST(manager->tasks);
	INIT_LIST(manager->ready_tasks);
	INIT_LIST(manager->ready_priority_tasks);
	manager->tasks_running = 0;
	manager->exclusive_requested = ISC_FALSE;
	manager->pause_requested = ISC_FALSE;
	manager->exiting = ISC_FALSE;

	isc_mem_attach(mctx, &manager->mctx);

#ifdef USE_WORKER_THREADS
	/*
	 * Start workers.
	 */
	for (i = 0; i < workers; i++) {
		/* A thread that fails to start is simply not counted;
		 * overall success only requires one started worker. */
		if (isc_thread_create(run, manager,
				      &manager->threads[manager->workers]) ==
		    ISC_R_SUCCESS) {
			manager->workers++;
			started++;
		}
	}
	UNLOCK(&manager->lock);

	if (started == 0) {
		manager_free(manager);
		return (ISC_R_NOTHREADS);
	}
	isc_thread_setconcurrency(workers);
#endif /* USE_WORKER_THREADS */
#ifdef USE_SHARED_MANAGER
	manager->refs = 1;
	/*
	 * NOTE(review): this assumes USE_SHARED_MANAGER is never defined
	 * together with USE_WORKER_THREADS; were both defined, the lock
	 * would already have been released above, making this a double
	 * unlock (and with neither defined the lock is never released
	 * on the success path) -- confirm the build options guarantee
	 * exactly one branch unlocks.
	 */
	UNLOCK(&manager->lock);
	taskmgr = manager;
#endif /* USE_SHARED_MANAGER */

	*managerp = (isc_taskmgr_t *)manager;

	return (ISC_R_SUCCESS);

#ifdef USE_WORKER_THREADS
 cleanup_exclusivegranted:
	(void)isc_condition_destroy(&manager->exclusive_granted);
 cleanup_workavailable:
	(void)isc_condition_destroy(&manager->work_available);
 cleanup_threads:
	isc_mem_free(mctx, manager->threads);
 cleanup_lock:
	UNLOCK(&manager->lock);
	DESTROYLOCK(&manager->lock);
#endif
 cleanup_mgr:
	isc_mem_put(mctx, manager, sizeof(*manager));
	return (result);
}
1456 
/*
 * Destroy '*managerp': post shutdown events to every task, wait for
 * (threaded build) or perform (non-threaded build) the dispatch of all
 * resulting events, then free the manager itself.
 */
ISC_TASKFUNC_SCOPE void
isc__taskmgr_destroy(isc_taskmgr_t **managerp) {
	isc__taskmgr_t *manager;
	isc__task_t *task;
	unsigned int i;

	/*
	 * Destroy '*managerp'.
	 */

	REQUIRE(managerp != NULL);
	manager = (void*)(*managerp);
	REQUIRE(VALID_MANAGER(manager));

#ifndef USE_WORKER_THREADS
	UNUSED(i);
#endif /* USE_WORKER_THREADS */

#ifdef USE_SHARED_MANAGER
	/* Drop one reference; only the last one tears the manager down. */
	manager->refs--;
	if (manager->refs > 0) {
		*managerp = NULL;
		return;
	}
#endif

	XTHREADTRACE("isc_taskmgr_destroy");
	/*
	 * Only one non-worker thread may ever call this routine.
	 * If a worker thread wants to initiate shutdown of the
	 * task manager, it should ask some non-worker thread to call
	 * isc_taskmgr_destroy(), e.g. by signalling a condition variable
	 * that the startup thread is sleeping on.
	 */

	/*
	 * Unlike elsewhere, we're going to hold this lock a long time.
	 * We need to do so, because otherwise the list of tasks could
	 * change while we were traversing it.
	 *
	 * This is also the only function where we will hold both the
	 * task manager lock and a task lock at the same time.
	 */

	LOCK(&manager->lock);

	/*
	 * Make sure we only get called once.
	 */
	INSIST(!manager->exiting);
	manager->exiting = ISC_TRUE;

	/*
	 * If privileged mode was on, turn it off.
	 */
	manager->mode = isc_taskmgrmode_normal;

	/*
	 * Post shutdown event(s) to every task (if they haven't already been
	 * posted).
	 */
	for (task = HEAD(manager->tasks);
	     task != NULL;
	     task = NEXT(task, link)) {
		LOCK(&task->lock);
		/* task_shutdown() returns true when the task was idle
		 * and must now be queued to run its shutdown events. */
		if (task_shutdown(task))
			push_readyq(manager, task);
		UNLOCK(&task->lock);
	}
#ifdef USE_WORKER_THREADS
	/*
	 * Wake up any sleeping workers.  This ensures we get work done if
	 * there's work left to do, and if there are already no tasks left
	 * it will cause the workers to see manager->exiting.
	 */
	BROADCAST(&manager->work_available);
	UNLOCK(&manager->lock);

	/*
	 * Wait for all the worker threads to exit.
	 */
	for (i = 0; i < manager->workers; i++)
		(void)isc_thread_join(manager->threads[i], NULL);
#else /* USE_WORKER_THREADS */
	/*
	 * Dispatch the shutdown events.
	 */
	UNLOCK(&manager->lock);
	while (isc__taskmgr_ready((isc_taskmgr_t *)manager))
		(void)isc__taskmgr_dispatch((isc_taskmgr_t *)manager);
#ifdef BIND9
	if (!ISC_LIST_EMPTY(manager->tasks))
		isc_mem_printallactive(stderr);
#endif
	INSIST(ISC_LIST_EMPTY(manager->tasks));
#ifdef USE_SHARED_MANAGER
	taskmgr = NULL;
#endif
#endif /* USE_WORKER_THREADS */

	manager_free(manager);

	*managerp = NULL;
}
1561 
1562 ISC_TASKFUNC_SCOPE void
isc__taskmgr_setmode(isc_taskmgr_t * manager0,isc_taskmgrmode_t mode)1563 isc__taskmgr_setmode(isc_taskmgr_t *manager0, isc_taskmgrmode_t mode) {
1564 	isc__taskmgr_t *manager = (void*)manager0;
1565 
1566 	LOCK(&manager->lock);
1567 	manager->mode = mode;
1568 	UNLOCK(&manager->lock);
1569 }
1570 
1571 ISC_TASKFUNC_SCOPE isc_taskmgrmode_t
isc__taskmgr_mode(isc_taskmgr_t * manager0)1572 isc__taskmgr_mode(isc_taskmgr_t *manager0) {
1573 	isc__taskmgr_t *manager = (void*)manager0;
1574 	isc_taskmgrmode_t mode;
1575 	LOCK(&manager->lock);
1576 	mode = manager->mode;
1577 	UNLOCK(&manager->lock);
1578 	return (mode);
1579 }
1580 
1581 #ifndef USE_WORKER_THREADS
1582 isc_boolean_t
isc__taskmgr_ready(isc_taskmgr_t * manager0)1583 isc__taskmgr_ready(isc_taskmgr_t *manager0) {
1584 	isc__taskmgr_t *manager = (void*)manager0;
1585 	isc_boolean_t is_ready;
1586 
1587 #ifdef USE_SHARED_MANAGER
1588 	if (manager == NULL)
1589 		manager = taskmgr;
1590 #endif
1591 	if (manager == NULL)
1592 		return (ISC_FALSE);
1593 
1594 	LOCK(&manager->lock);
1595 	is_ready = !empty_readyq(manager);
1596 	UNLOCK(&manager->lock);
1597 
1598 	return (is_ready);
1599 }
1600 
1601 isc_result_t
isc__taskmgr_dispatch(isc_taskmgr_t * manager0)1602 isc__taskmgr_dispatch(isc_taskmgr_t *manager0) {
1603 	isc__taskmgr_t *manager = (void*)manager0;
1604 
1605 #ifdef USE_SHARED_MANAGER
1606 	if (manager == NULL)
1607 		manager = taskmgr;
1608 #endif
1609 	if (manager == NULL)
1610 		return (ISC_R_NOTFOUND);
1611 
1612 	dispatch(manager);
1613 
1614 	return (ISC_R_SUCCESS);
1615 }
1616 
1617 #else
1618 ISC_TASKFUNC_SCOPE void
isc__taskmgr_pause(isc_taskmgr_t * manager0)1619 isc__taskmgr_pause(isc_taskmgr_t *manager0) {
1620 	isc__taskmgr_t *manager = (void*)manager0;
1621 	LOCK(&manager->lock);
1622 	while (manager->tasks_running > 0) {
1623 		WAIT(&manager->paused, &manager->lock);
1624 	}
1625 	manager->pause_requested = ISC_TRUE;
1626 	UNLOCK(&manager->lock);
1627 }
1628 
1629 ISC_TASKFUNC_SCOPE void
isc__taskmgr_resume(isc_taskmgr_t * manager0)1630 isc__taskmgr_resume(isc_taskmgr_t *manager0) {
1631 	isc__taskmgr_t *manager = (void*)manager0;
1632 
1633 	LOCK(&manager->lock);
1634 	if (manager->pause_requested) {
1635 		manager->pause_requested = ISC_FALSE;
1636 		BROADCAST(&manager->work_available);
1637 	}
1638 	UNLOCK(&manager->lock);
1639 }
1640 #endif /* USE_WORKER_THREADS */
1641 
1642 ISC_TASKFUNC_SCOPE isc_result_t
isc__task_beginexclusive(isc_task_t * task0)1643 isc__task_beginexclusive(isc_task_t *task0) {
1644 #ifdef USE_WORKER_THREADS
1645 	isc__task_t *task = (isc__task_t *)task0;
1646 	isc__taskmgr_t *manager = task->manager;
1647 	REQUIRE(task->state == task_state_running);
1648 	LOCK(&manager->lock);
1649 	if (manager->exclusive_requested) {
1650 		UNLOCK(&manager->lock);
1651 		return (ISC_R_LOCKBUSY);
1652 	}
1653 	manager->exclusive_requested = ISC_TRUE;
1654 	while (manager->tasks_running > 1) {
1655 		WAIT(&manager->exclusive_granted, &manager->lock);
1656 	}
1657 	UNLOCK(&manager->lock);
1658 #else
1659 	UNUSED(task0);
1660 #endif
1661 	return (ISC_R_SUCCESS);
1662 }
1663 
1664 ISC_TASKFUNC_SCOPE void
isc__task_endexclusive(isc_task_t * task0)1665 isc__task_endexclusive(isc_task_t *task0) {
1666 #ifdef USE_WORKER_THREADS
1667 	isc__task_t *task = (isc__task_t *)task0;
1668 	isc__taskmgr_t *manager = task->manager;
1669 
1670 	REQUIRE(task->state == task_state_running);
1671 	LOCK(&manager->lock);
1672 	REQUIRE(manager->exclusive_requested);
1673 	manager->exclusive_requested = ISC_FALSE;
1674 	BROADCAST(&manager->work_available);
1675 	UNLOCK(&manager->lock);
1676 #else
1677 	UNUSED(task0);
1678 #endif
1679 }
1680 
/*
 * Add or remove the TASK_F_PRIVILEGED flag on 'task0', keeping the
 * manager's priority ready queue consistent: a task currently on the
 * ready queue is added to (or removed from) the priority queue to
 * match the new setting.
 */
ISC_TASKFUNC_SCOPE void
isc__task_setprivilege(isc_task_t *task0, isc_boolean_t priv) {
	isc__task_t *task = (isc__task_t *)task0;
	isc__taskmgr_t *manager = task->manager;
	isc_boolean_t oldpriv;

	LOCK(&task->lock);
	oldpriv = ISC_TF((task->flags & TASK_F_PRIVILEGED) != 0);
	if (priv)
		task->flags |= TASK_F_PRIVILEGED;
	else
		task->flags &= ~TASK_F_PRIVILEGED;
	UNLOCK(&task->lock);

	/* No change, so the queues are already consistent. */
	if (priv == oldpriv)
		return;

	/*
	 * NOTE(review): the flag is updated under task->lock but the queue
	 * under manager->lock, with an unlocked window in between; a
	 * concurrent setprivilege() on the same task could interleave here
	 * -- confirm callers serialize this.
	 */
	LOCK(&manager->lock);
	if (priv && ISC_LINK_LINKED(task, ready_link))
		ENQUEUE(manager->ready_priority_tasks, task,
			ready_priority_link);
	else if (!priv && ISC_LINK_LINKED(task, ready_priority_link))
		DEQUEUE(manager->ready_priority_tasks, task,
			ready_priority_link);
	UNLOCK(&manager->lock);
}
1707 
1708 ISC_TASKFUNC_SCOPE isc_boolean_t
isc__task_privilege(isc_task_t * task0)1709 isc__task_privilege(isc_task_t *task0) {
1710 	isc__task_t *task = (isc__task_t *)task0;
1711 	isc_boolean_t priv;
1712 
1713 	LOCK(&task->lock);
1714 	priv = ISC_TF((task->flags & TASK_F_PRIVILEGED) != 0);
1715 	UNLOCK(&task->lock);
1716 	return (priv);
1717 }
1718 
1719 #ifdef USE_SOCKETIMPREGISTER
1720 isc_result_t
isc__task_register()1721 isc__task_register() {
1722 	return (isc_task_register(isc__taskmgr_create));
1723 }
1724 #endif
1725 
1726 isc_boolean_t
isc_task_exiting(isc_task_t * t)1727 isc_task_exiting(isc_task_t *t) {
1728 	isc__task_t *task = (isc__task_t *)t;
1729 
1730 	REQUIRE(VALID_TASK(task));
1731 	return (TASK_SHUTTINGDOWN(task));
1732 }
1733 
1734 
1735 #if defined(HAVE_LIBXML2) && defined(BIND9)
/*
 * Render the task manager's state (thread model, counters, and a
 * per-task listing) as XML via 'writer'.  Holds the manager lock for
 * the duration, and each task's lock while rendering that task.
 *
 * NOTE(review): the xmlTextWriter* return codes are not checked, so a
 * writer error is silently ignored -- presumably acceptable for
 * statistics output; confirm.
 */
void
isc_taskmgr_renderxml(isc_taskmgr_t *mgr0, xmlTextWriterPtr writer) {
	isc__taskmgr_t *mgr = (isc__taskmgr_t *)mgr0;
	isc__task_t *task;

	LOCK(&mgr->lock);

	/*
	 * Write out the thread-model, and some details about each depending
	 * on which type is enabled.
	 */
	xmlTextWriterStartElement(writer, ISC_XMLCHAR "thread-model");
#ifdef ISC_PLATFORM_USETHREADS
	xmlTextWriterStartElement(writer, ISC_XMLCHAR "type");
	xmlTextWriterWriteString(writer, ISC_XMLCHAR "threaded");
	xmlTextWriterEndElement(writer); /* type */

	xmlTextWriterStartElement(writer, ISC_XMLCHAR "worker-threads");
	xmlTextWriterWriteFormatString(writer, "%d", mgr->workers);
	xmlTextWriterEndElement(writer); /* worker-threads */
#else /* ISC_PLATFORM_USETHREADS */
	xmlTextWriterStartElement(writer, ISC_XMLCHAR "type");
	xmlTextWriterWriteString(writer, ISC_XMLCHAR "non-threaded");
	xmlTextWriterEndElement(writer); /* type */

	xmlTextWriterStartElement(writer, ISC_XMLCHAR "references");
	xmlTextWriterWriteFormatString(writer, "%d", mgr->refs);
	xmlTextWriterEndElement(writer); /* references */
#endif /* ISC_PLATFORM_USETHREADS */

	xmlTextWriterStartElement(writer, ISC_XMLCHAR "default-quantum");
	xmlTextWriterWriteFormatString(writer, "%d", mgr->default_quantum);
	xmlTextWriterEndElement(writer); /* default-quantum */

	xmlTextWriterStartElement(writer, ISC_XMLCHAR "tasks-running");
	xmlTextWriterWriteFormatString(writer, "%d", mgr->tasks_running);
	xmlTextWriterEndElement(writer); /* tasks-running */

	xmlTextWriterEndElement(writer); /* thread-model */

	xmlTextWriterStartElement(writer, ISC_XMLCHAR "tasks");
	task = ISC_LIST_HEAD(mgr->tasks);
	while (task != NULL) {
		LOCK(&task->lock);
		xmlTextWriterStartElement(writer, ISC_XMLCHAR "task");

		/* Only tasks that were given a name get a <name> element. */
		if (task->name[0] != 0) {
			xmlTextWriterStartElement(writer, ISC_XMLCHAR "name");
			xmlTextWriterWriteFormatString(writer, "%s",
						       task->name);
			xmlTextWriterEndElement(writer); /* name */
		}

		xmlTextWriterStartElement(writer, ISC_XMLCHAR "references");
		xmlTextWriterWriteFormatString(writer, "%d", task->references);
		xmlTextWriterEndElement(writer); /* references */

		/* The task's address doubles as its unique id. */
		xmlTextWriterStartElement(writer, ISC_XMLCHAR "id");
		xmlTextWriterWriteFormatString(writer, "%p", task);
		xmlTextWriterEndElement(writer); /* id */

		xmlTextWriterStartElement(writer, ISC_XMLCHAR "state");
		xmlTextWriterWriteFormatString(writer, "%s",
					       statenames[task->state]);
		xmlTextWriterEndElement(writer); /* state */

		xmlTextWriterStartElement(writer, ISC_XMLCHAR "quantum");
		xmlTextWriterWriteFormatString(writer, "%d", task->quantum);
		xmlTextWriterEndElement(writer); /* quantum */

		xmlTextWriterEndElement(writer);

		UNLOCK(&task->lock);
		task = ISC_LIST_NEXT(task, link);
	}
	xmlTextWriterEndElement(writer); /* tasks */

	UNLOCK(&mgr->lock);
}
1815 #endif /* HAVE_LIBXML2 && BIND9 */
1816