xref: /minix/external/bsd/bind/dist/lib/isc/task.c (revision 00b67f09)
1 /*	$NetBSD: task.c,v 1.11 2014/12/10 04:37:59 christos Exp $	*/
2 
3 /*
4  * Copyright (C) 2004-2014  Internet Systems Consortium, Inc. ("ISC")
5  * Copyright (C) 1998-2003  Internet Software Consortium.
6  *
7  * Permission to use, copy, modify, and/or distribute this software for any
8  * purpose with or without fee is hereby granted, provided that the above
9  * copyright notice and this permission notice appear in all copies.
10  *
11  * THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
12  * REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
13  * AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
14  * INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
15  * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
16  * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 /* Id */
21 
22 /*! \file
23  * \author Principal Author: Bob Halley
24  */
25 
26 /*
27  * XXXRTH  Need to document the states a task can be in, and the rules
28  * for changing states.
29  */
30 
31 #include <config.h>
32 
33 #include <isc/app.h>
34 #include <isc/condition.h>
35 #include <isc/event.h>
36 #include <isc/json.h>
37 #include <isc/magic.h>
38 #include <isc/mem.h>
39 #include <isc/msgs.h>
40 #include <isc/once.h>
41 #include <isc/platform.h>
42 #include <isc/string.h>
43 #include <isc/task.h>
44 #include <isc/thread.h>
45 #include <isc/util.h>
46 #include <isc/xml.h>
47 
48 #ifdef OPENSSL_LEAKS
49 #include <openssl/err.h>
50 #endif
51 
52 /*%
53  * For BIND9 internal applications:
54  * when built with threads we use multiple worker threads shared by the whole
55  * application.
56  * when built without threads we share a single global task manager and use
57  * an integrated event loop for socket, timer, and other generic task events.
58  * For generic library:
59  * we don't use either of them: an application can have multiple task managers
60  * whether or not it's threaded, and if the application is threaded each thread
61  * is expected to have a separate manager; no "worker threads" are shared by
62  * the application threads.
63  */
64 #ifdef ISC_PLATFORM_USETHREADS
65 #define USE_WORKER_THREADS
66 #else
67 #define USE_SHARED_MANAGER
68 #endif	/* ISC_PLATFORM_USETHREADS */
69 
70 #include "task_p.h"
71 
72 #ifdef ISC_TASK_TRACE
73 #define XTRACE(m)		fprintf(stderr, "task %p thread %lu: %s\n", \
74 				       task, isc_thread_self(), (m))
75 #define XTTRACE(t, m)		fprintf(stderr, "task %p thread %lu: %s\n", \
76 				       (t), isc_thread_self(), (m))
77 #define XTHREADTRACE(m)		fprintf(stderr, "thread %lu: %s\n", \
78 				       isc_thread_self(), (m))
79 #else
80 #define XTRACE(m)
81 #define XTTRACE(t, m)
82 #define XTHREADTRACE(m)
83 #endif
84 
85 /***
86  *** Types.
87  ***/
88 
89 typedef enum {
90 	task_state_idle, task_state_ready, task_state_running,
91 	task_state_done
92 } task_state_t;
93 
94 #if defined(HAVE_LIBXML2) || defined(HAVE_JSON)
95 static const char *statenames[] = {
96 	"idle", "ready", "running", "done",
97 };
98 #endif
99 
100 #define TASK_MAGIC			ISC_MAGIC('T', 'A', 'S', 'K')
101 #define VALID_TASK(t)			ISC_MAGIC_VALID(t, TASK_MAGIC)
102 
103 typedef struct isc__task isc__task_t;
104 typedef struct isc__taskmgr isc__taskmgr_t;
105 
106 struct isc__task {
107 	/* Not locked. */
108 	isc_task_t			common;
109 	isc__taskmgr_t *		manager;
110 	isc_mutex_t			lock;
111 	/* Locked by task lock. */
112 	task_state_t			state;
113 	unsigned int			references;
114 	isc_eventlist_t			events;
115 	isc_eventlist_t			on_shutdown;
116 	unsigned int			nevents;
117 	unsigned int			quantum;
118 	unsigned int			flags;
119 	isc_stdtime_t			now;
120 	char				name[16];
121 	void *				tag;
122 	/* Locked by task manager lock. */
123 	LINK(isc__task_t)		link;
124 	LINK(isc__task_t)		ready_link;
125 	LINK(isc__task_t)		ready_priority_link;
126 };
127 
128 #define TASK_F_SHUTTINGDOWN		0x01
129 #define TASK_F_PRIVILEGED		0x02
130 
131 #define TASK_SHUTTINGDOWN(t)		(((t)->flags & TASK_F_SHUTTINGDOWN) \
132 					 != 0)
133 
134 #define TASK_MANAGER_MAGIC		ISC_MAGIC('T', 'S', 'K', 'M')
135 #define VALID_MANAGER(m)		ISC_MAGIC_VALID(m, TASK_MANAGER_MAGIC)
136 
137 typedef ISC_LIST(isc__task_t)	isc__tasklist_t;
138 
139 struct isc__taskmgr {
140 	/* Not locked. */
141 	isc_taskmgr_t			common;
142 	isc_mem_t *			mctx;
143 	isc_mutex_t			lock;
144 #ifdef ISC_PLATFORM_USETHREADS
145 	unsigned int			workers;
146 	isc_thread_t *			threads;
147 #endif /* ISC_PLATFORM_USETHREADS */
148 	/* Locked by task manager lock. */
149 	unsigned int			default_quantum;
150 	LIST(isc__task_t)		tasks;
151 	isc__tasklist_t			ready_tasks;
152 	isc__tasklist_t			ready_priority_tasks;
153 	isc_taskmgrmode_t		mode;
154 #ifdef ISC_PLATFORM_USETHREADS
155 	isc_condition_t			work_available;
156 	isc_condition_t			exclusive_granted;
157 	isc_condition_t			paused;
158 #endif /* ISC_PLATFORM_USETHREADS */
159 	unsigned int			tasks_running;
160 	unsigned int			tasks_ready;
161 	isc_boolean_t			pause_requested;
162 	isc_boolean_t			exclusive_requested;
163 	isc_boolean_t			exiting;
164 	isc__task_t			*excl;
165 #ifdef USE_SHARED_MANAGER
166 	unsigned int			refs;
167 #endif /* ISC_PLATFORM_USETHREADS */
168 };
169 
170 #define DEFAULT_TASKMGR_QUANTUM		10
171 #define DEFAULT_DEFAULT_QUANTUM		5
172 #define FINISHED(m)			((m)->exiting && EMPTY((m)->tasks))
173 
174 #ifdef USE_SHARED_MANAGER
175 static isc__taskmgr_t *taskmgr = NULL;
176 #endif /* USE_SHARED_MANAGER */
177 
178 /*%
179  * The following are intended for internal use (indicated by "isc__"
180  * prefix) but are not declared as static, allowing direct access from
181  * unit tests etc.
182  */
183 
184 isc_result_t
185 isc__task_create(isc_taskmgr_t *manager0, unsigned int quantum,
186 		 isc_task_t **taskp);
187 void
188 isc__task_attach(isc_task_t *source0, isc_task_t **targetp);
189 void
190 isc__task_detach(isc_task_t **taskp);
191 void
192 isc__task_send(isc_task_t *task0, isc_event_t **eventp);
193 void
194 isc__task_sendanddetach(isc_task_t **taskp, isc_event_t **eventp);
195 unsigned int
196 isc__task_purgerange(isc_task_t *task0, void *sender, isc_eventtype_t first,
197 		     isc_eventtype_t last, void *tag);
198 unsigned int
199 isc__task_purge(isc_task_t *task, void *sender, isc_eventtype_t type,
200 		void *tag);
201 isc_boolean_t
202 isc_task_purgeevent(isc_task_t *task0, isc_event_t *event);
203 unsigned int
204 isc__task_unsendrange(isc_task_t *task, void *sender, isc_eventtype_t first,
205 		      isc_eventtype_t last, void *tag,
206 		      isc_eventlist_t *events);
207 unsigned int
208 isc__task_unsend(isc_task_t *task, void *sender, isc_eventtype_t type,
209 		 void *tag, isc_eventlist_t *events);
210 isc_result_t
211 isc__task_onshutdown(isc_task_t *task0, isc_taskaction_t action,
212 		     void *arg);
213 void
214 isc__task_shutdown(isc_task_t *task0);
215 void
216 isc__task_destroy(isc_task_t **taskp);
217 void
218 isc__task_setname(isc_task_t *task0, const char *name, void *tag);
219 const char *
220 isc__task_getname(isc_task_t *task0);
221 void *
222 isc__task_gettag(isc_task_t *task0);
223 void
224 isc__task_getcurrenttime(isc_task_t *task0, isc_stdtime_t *t);
225 isc_result_t
226 isc__taskmgr_create(isc_mem_t *mctx, unsigned int workers,
227 		    unsigned int default_quantum, isc_taskmgr_t **managerp);
228 void
229 isc__taskmgr_destroy(isc_taskmgr_t **managerp);
230 void
231 isc_taskmgr_setexcltask(isc_taskmgr_t *mgr0, isc_task_t *task0);
232 isc_result_t
233 isc_taskmgr_excltask(isc_taskmgr_t *mgr0, isc_task_t **taskp);
234 isc_result_t
235 isc__task_beginexclusive(isc_task_t *task);
236 void
237 isc__task_endexclusive(isc_task_t *task0);
238 void
239 isc__task_setprivilege(isc_task_t *task0, isc_boolean_t priv);
240 isc_boolean_t
241 isc__task_privilege(isc_task_t *task0);
242 void
243 isc__taskmgr_setmode(isc_taskmgr_t *manager0, isc_taskmgrmode_t mode);
244 isc_taskmgrmode_t
245 isc__taskmgr_mode(isc_taskmgr_t *manager0);
246 
247 static inline isc_boolean_t
248 empty_readyq(isc__taskmgr_t *manager);
249 
250 static inline isc__task_t *
251 pop_readyq(isc__taskmgr_t *manager);
252 
253 static inline void
254 push_readyq(isc__taskmgr_t *manager, isc__task_t *task);
255 
256 static struct isc__taskmethods {
257 	isc_taskmethods_t methods;
258 
259 	/*%
260 	 * The following are defined just for avoiding unused static functions.
261 	 */
262 	void *purgeevent, *unsendrange, *getname, *gettag, *getcurrenttime;
263 } taskmethods = {
264 	{
265 		isc__task_attach,
266 		isc__task_detach,
267 		isc__task_destroy,
268 		isc__task_send,
269 		isc__task_sendanddetach,
270 		isc__task_unsend,
271 		isc__task_onshutdown,
272 		isc__task_shutdown,
273 		isc__task_setname,
274 		isc__task_purge,
275 		isc__task_purgerange,
276 		isc__task_beginexclusive,
277 		isc__task_endexclusive,
278 		isc__task_setprivilege,
279 		isc__task_privilege
280 	},
281 	(void *)isc_task_purgeevent,
282 	(void *)isc__task_unsendrange,
283 	(void *)isc__task_getname,
284 	(void *)isc__task_gettag,
285 	(void *)isc__task_getcurrenttime
286 };
287 
288 static isc_taskmgrmethods_t taskmgrmethods = {
289 	isc__taskmgr_destroy,
290 	isc__taskmgr_setmode,
291 	isc__taskmgr_mode,
292 	isc__task_create,
293 	isc_taskmgr_setexcltask,
294 	isc_taskmgr_excltask
295 };
296 
297 /***
298  *** Tasks.
299  ***/
300 
301 static void
task_finished(isc__task_t * task)302 task_finished(isc__task_t *task) {
303 	isc__taskmgr_t *manager = task->manager;
304 
305 	REQUIRE(EMPTY(task->events));
306 	REQUIRE(task->nevents == 0);
307 	REQUIRE(EMPTY(task->on_shutdown));
308 	REQUIRE(task->references == 0);
309 	REQUIRE(task->state == task_state_done);
310 
311 	XTRACE("task_finished");
312 
313 	LOCK(&manager->lock);
314 	UNLINK(manager->tasks, task, link);
315 #ifdef USE_WORKER_THREADS
316 	if (FINISHED(manager)) {
317 		/*
318 		 * All tasks have completed and the
319 		 * task manager is exiting.  Wake up
320 		 * any idle worker threads so they
321 		 * can exit.
322 		 */
323 		BROADCAST(&manager->work_available);
324 	}
325 #endif /* USE_WORKER_THREADS */
326 	UNLOCK(&manager->lock);
327 
328 	DESTROYLOCK(&task->lock);
329 	task->common.impmagic = 0;
330 	task->common.magic = 0;
331 	isc_mem_put(manager->mctx, task, sizeof(*task));
332 }
333 
334 isc_result_t
isc__task_create(isc_taskmgr_t * manager0,unsigned int quantum,isc_task_t ** taskp)335 isc__task_create(isc_taskmgr_t *manager0, unsigned int quantum,
336 		 isc_task_t **taskp)
337 {
338 	isc__taskmgr_t *manager = (isc__taskmgr_t *)manager0;
339 	isc__task_t *task;
340 	isc_boolean_t exiting;
341 	isc_result_t result;
342 
343 	REQUIRE(VALID_MANAGER(manager));
344 	REQUIRE(taskp != NULL && *taskp == NULL);
345 
346 	task = isc_mem_get(manager->mctx, sizeof(*task));
347 	if (task == NULL)
348 		return (ISC_R_NOMEMORY);
349 	XTRACE("isc_task_create");
350 	task->manager = manager;
351 	result = isc_mutex_init(&task->lock);
352 	if (result != ISC_R_SUCCESS) {
353 		isc_mem_put(manager->mctx, task, sizeof(*task));
354 		return (result);
355 	}
356 	task->state = task_state_idle;
357 	task->references = 1;
358 	INIT_LIST(task->events);
359 	INIT_LIST(task->on_shutdown);
360 	task->nevents = 0;
361 	task->quantum = quantum;
362 	task->flags = 0;
363 	task->now = 0;
364 	memset(task->name, 0, sizeof(task->name));
365 	task->tag = NULL;
366 	INIT_LINK(task, link);
367 	INIT_LINK(task, ready_link);
368 	INIT_LINK(task, ready_priority_link);
369 
370 	exiting = ISC_FALSE;
371 	LOCK(&manager->lock);
372 	if (!manager->exiting) {
373 		if (task->quantum == 0)
374 			task->quantum = manager->default_quantum;
375 		APPEND(manager->tasks, task, link);
376 	} else
377 		exiting = ISC_TRUE;
378 	UNLOCK(&manager->lock);
379 
380 	if (exiting) {
381 		DESTROYLOCK(&task->lock);
382 		isc_mem_put(manager->mctx, task, sizeof(*task));
383 		return (ISC_R_SHUTTINGDOWN);
384 	}
385 
386 	task->common.methods = (isc_taskmethods_t *)&taskmethods;
387 	task->common.magic = ISCAPI_TASK_MAGIC;
388 	task->common.impmagic = TASK_MAGIC;
389 	*taskp = (isc_task_t *)task;
390 
391 	return (ISC_R_SUCCESS);
392 }
393 
394 void
isc__task_attach(isc_task_t * source0,isc_task_t ** targetp)395 isc__task_attach(isc_task_t *source0, isc_task_t **targetp) {
396 	isc__task_t *source = (isc__task_t *)source0;
397 
398 	/*
399 	 * Attach *targetp to source.
400 	 */
401 
402 	REQUIRE(VALID_TASK(source));
403 	REQUIRE(targetp != NULL && *targetp == NULL);
404 
405 	XTTRACE(source, "isc_task_attach");
406 
407 	LOCK(&source->lock);
408 	source->references++;
409 	UNLOCK(&source->lock);
410 
411 	*targetp = (isc_task_t *)source;
412 }
413 
414 static inline isc_boolean_t
task_shutdown(isc__task_t * task)415 task_shutdown(isc__task_t *task) {
416 	isc_boolean_t was_idle = ISC_FALSE;
417 	isc_event_t *event, *prev;
418 
419 	/*
420 	 * Caller must be holding the task's lock.
421 	 */
422 
423 	XTRACE("task_shutdown");
424 
425 	if (! TASK_SHUTTINGDOWN(task)) {
426 		XTRACE(isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
427 				      ISC_MSG_SHUTTINGDOWN, "shutting down"));
428 		task->flags |= TASK_F_SHUTTINGDOWN;
429 		if (task->state == task_state_idle) {
430 			INSIST(EMPTY(task->events));
431 			task->state = task_state_ready;
432 			was_idle = ISC_TRUE;
433 		}
434 		INSIST(task->state == task_state_ready ||
435 		       task->state == task_state_running);
436 
437 		/*
438 		 * Note that we post shutdown events LIFO.
439 		 */
440 		for (event = TAIL(task->on_shutdown);
441 		     event != NULL;
442 		     event = prev) {
443 			prev = PREV(event, ev_link);
444 			DEQUEUE(task->on_shutdown, event, ev_link);
445 			ENQUEUE(task->events, event, ev_link);
446 			task->nevents++;
447 		}
448 	}
449 
450 	return (was_idle);
451 }
452 
453 /*
454  * Moves a task onto the appropriate run queue.
455  *
456  * Caller must NOT hold manager lock.
457  */
458 static inline void
task_ready(isc__task_t * task)459 task_ready(isc__task_t *task) {
460 	isc__taskmgr_t *manager = task->manager;
461 #ifdef USE_WORKER_THREADS
462 	isc_boolean_t has_privilege = isc__task_privilege((isc_task_t *) task);
463 #endif /* USE_WORKER_THREADS */
464 
465 	REQUIRE(VALID_MANAGER(manager));
466 	REQUIRE(task->state == task_state_ready);
467 
468 	XTRACE("task_ready");
469 
470 	LOCK(&manager->lock);
471 	push_readyq(manager, task);
472 #ifdef USE_WORKER_THREADS
473 	if (manager->mode == isc_taskmgrmode_normal || has_privilege)
474 		SIGNAL(&manager->work_available);
475 #endif /* USE_WORKER_THREADS */
476 	UNLOCK(&manager->lock);
477 }
478 
479 static inline isc_boolean_t
task_detach(isc__task_t * task)480 task_detach(isc__task_t *task) {
481 
482 	/*
483 	 * Caller must be holding the task lock.
484 	 */
485 
486 	REQUIRE(task->references > 0);
487 
488 	XTRACE("detach");
489 
490 	task->references--;
491 	if (task->references == 0 && task->state == task_state_idle) {
492 		INSIST(EMPTY(task->events));
493 		/*
494 		 * There are no references to this task, and no
495 		 * pending events.  We could try to optimize and
496 		 * either initiate shutdown or clean up the task,
497 		 * depending on its state, but it's easier to just
498 		 * make the task ready and allow run() or the event
499 		 * loop to deal with shutting down and termination.
500 		 */
501 		task->state = task_state_ready;
502 		return (ISC_TRUE);
503 	}
504 
505 	return (ISC_FALSE);
506 }
507 
508 void
isc__task_detach(isc_task_t ** taskp)509 isc__task_detach(isc_task_t **taskp) {
510 	isc__task_t *task;
511 	isc_boolean_t was_idle;
512 
513 	/*
514 	 * Detach *taskp from its task.
515 	 */
516 
517 	REQUIRE(taskp != NULL);
518 	task = (isc__task_t *)*taskp;
519 	REQUIRE(VALID_TASK(task));
520 
521 	XTRACE("isc_task_detach");
522 
523 	LOCK(&task->lock);
524 	was_idle = task_detach(task);
525 	UNLOCK(&task->lock);
526 
527 	if (was_idle)
528 		task_ready(task);
529 
530 	*taskp = NULL;
531 }
532 
533 static inline isc_boolean_t
task_send(isc__task_t * task,isc_event_t ** eventp)534 task_send(isc__task_t *task, isc_event_t **eventp) {
535 	isc_boolean_t was_idle = ISC_FALSE;
536 	isc_event_t *event;
537 
538 	/*
539 	 * Caller must be holding the task lock.
540 	 */
541 
542 	REQUIRE(eventp != NULL);
543 	event = *eventp;
544 	REQUIRE(event != NULL);
545 	REQUIRE(event->ev_type > 0);
546 	REQUIRE(task->state != task_state_done);
547 
548 	XTRACE("task_send");
549 
550 	if (task->state == task_state_idle) {
551 		was_idle = ISC_TRUE;
552 		INSIST(EMPTY(task->events));
553 		task->state = task_state_ready;
554 	}
555 	INSIST(task->state == task_state_ready ||
556 	       task->state == task_state_running);
557 	ENQUEUE(task->events, event, ev_link);
558 	task->nevents++;
559 	*eventp = NULL;
560 
561 	return (was_idle);
562 }
563 
564 void
isc__task_send(isc_task_t * task0,isc_event_t ** eventp)565 isc__task_send(isc_task_t *task0, isc_event_t **eventp) {
566 	isc__task_t *task = (isc__task_t *)task0;
567 	isc_boolean_t was_idle;
568 
569 	/*
570 	 * Send '*event' to 'task'.
571 	 */
572 
573 	REQUIRE(VALID_TASK(task));
574 
575 	XTRACE("isc_task_send");
576 
577 	/*
578 	 * We're trying hard to hold locks for as short a time as possible.
579 	 * We're also trying to hold as few locks as possible.  This is why
580 	 * some processing is deferred until after the lock is released.
581 	 */
582 	LOCK(&task->lock);
583 	was_idle = task_send(task, eventp);
584 	UNLOCK(&task->lock);
585 
586 	if (was_idle) {
587 		/*
588 		 * We need to add this task to the ready queue.
589 		 *
590 		 * We've waited until now to do it because making a task
591 		 * ready requires locking the manager.  If we tried to do
592 		 * this while holding the task lock, we could deadlock.
593 		 *
594 		 * We've changed the state to ready, so no one else will
595 		 * be trying to add this task to the ready queue.  The
596 		 * only way to leave the ready state is by executing the
597 		 * task.  It thus doesn't matter if events are added,
598 		 * removed, or a shutdown is started in the interval
599 		 * between the time we released the task lock, and the time
600 		 * we add the task to the ready queue.
601 		 */
602 		task_ready(task);
603 	}
604 }
605 
606 void
isc__task_sendanddetach(isc_task_t ** taskp,isc_event_t ** eventp)607 isc__task_sendanddetach(isc_task_t **taskp, isc_event_t **eventp) {
608 	isc_boolean_t idle1, idle2;
609 	isc__task_t *task;
610 
611 	/*
612 	 * Send '*event' to '*taskp' and then detach '*taskp' from its
613 	 * task.
614 	 */
615 
616 	REQUIRE(taskp != NULL);
617 	task = (isc__task_t *)*taskp;
618 	REQUIRE(VALID_TASK(task));
619 
620 	XTRACE("isc_task_sendanddetach");
621 
622 	LOCK(&task->lock);
623 	idle1 = task_send(task, eventp);
624 	idle2 = task_detach(task);
625 	UNLOCK(&task->lock);
626 
627 	/*
628 	 * If idle1, then idle2 shouldn't be true as well since we're holding
629 	 * the task lock, and thus the task cannot switch from ready back to
630 	 * idle.
631 	 */
632 	INSIST(!(idle1 && idle2));
633 
634 	if (idle1 || idle2)
635 		task_ready(task);
636 
637 	*taskp = NULL;
638 }
639 
640 #define PURGE_OK(event)	(((event)->ev_attributes & ISC_EVENTATTR_NOPURGE) == 0)
641 
642 static unsigned int
dequeue_events(isc__task_t * task,void * sender,isc_eventtype_t first,isc_eventtype_t last,void * tag,isc_eventlist_t * events,isc_boolean_t purging)643 dequeue_events(isc__task_t *task, void *sender, isc_eventtype_t first,
644 	       isc_eventtype_t last, void *tag,
645 	       isc_eventlist_t *events, isc_boolean_t purging)
646 {
647 	isc_event_t *event, *next_event;
648 	unsigned int count = 0;
649 
650 	REQUIRE(VALID_TASK(task));
651 	REQUIRE(last >= first);
652 
653 	XTRACE("dequeue_events");
654 
655 	/*
656 	 * Events matching 'sender', whose type is >= first and <= last, and
657 	 * whose tag is 'tag' will be dequeued.  If 'purging', matching events
658 	 * which are marked as unpurgable will not be dequeued.
659 	 *
660 	 * sender == NULL means "any sender", and tag == NULL means "any tag".
661 	 */
662 
663 	LOCK(&task->lock);
664 
665 	for (event = HEAD(task->events); event != NULL; event = next_event) {
666 		next_event = NEXT(event, ev_link);
667 		if (event->ev_type >= first && event->ev_type <= last &&
668 		    (sender == NULL || event->ev_sender == sender) &&
669 		    (tag == NULL || event->ev_tag == tag) &&
670 		    (!purging || PURGE_OK(event))) {
671 			DEQUEUE(task->events, event, ev_link);
672 			task->nevents--;
673 			ENQUEUE(*events, event, ev_link);
674 			count++;
675 		}
676 	}
677 
678 	UNLOCK(&task->lock);
679 
680 	return (count);
681 }
682 
683 unsigned int
isc__task_purgerange(isc_task_t * task0,void * sender,isc_eventtype_t first,isc_eventtype_t last,void * tag)684 isc__task_purgerange(isc_task_t *task0, void *sender, isc_eventtype_t first,
685 		     isc_eventtype_t last, void *tag)
686 {
687 	isc__task_t *task = (isc__task_t *)task0;
688 	unsigned int count;
689 	isc_eventlist_t events;
690 	isc_event_t *event, *next_event;
691 
692 	/*
693 	 * Purge events from a task's event queue.
694 	 */
695 
696 	XTRACE("isc_task_purgerange");
697 
698 	ISC_LIST_INIT(events);
699 
700 	count = dequeue_events(task, sender, first, last, tag, &events,
701 			       ISC_TRUE);
702 
703 	for (event = HEAD(events); event != NULL; event = next_event) {
704 		next_event = NEXT(event, ev_link);
705 		isc_event_free(&event);
706 	}
707 
708 	/*
709 	 * Note that purging never changes the state of the task.
710 	 */
711 
712 	return (count);
713 }
714 
715 unsigned int
isc__task_purge(isc_task_t * task,void * sender,isc_eventtype_t type,void * tag)716 isc__task_purge(isc_task_t *task, void *sender, isc_eventtype_t type,
717 		void *tag)
718 {
719 	/*
720 	 * Purge events from a task's event queue.
721 	 */
722 
723 	XTRACE("isc_task_purge");
724 
725 	return (isc__task_purgerange(task, sender, type, type, tag));
726 }
727 
728 isc_boolean_t
isc_task_purgeevent(isc_task_t * task0,isc_event_t * event)729 isc_task_purgeevent(isc_task_t *task0, isc_event_t *event) {
730 	isc__task_t *task = (isc__task_t *)task0;
731 	isc_event_t *curr_event, *next_event;
732 
733 	/*
734 	 * Purge 'event' from a task's event queue.
735 	 *
736 	 * XXXRTH:  WARNING:  This method may be removed before beta.
737 	 */
738 
739 	REQUIRE(VALID_TASK(task));
740 
741 	/*
742 	 * If 'event' is on the task's event queue, it will be purged,
743 	 * unless it is marked as unpurgeable.  'event' does not have to be
744 	 * on the task's event queue; in fact, it can even be an invalid
745 	 * pointer.  Purging only occurs if the event is actually on the task's
746 	 * event queue.
747 	 *
748 	 * Purging never changes the state of the task.
749 	 */
750 
751 	LOCK(&task->lock);
752 	for (curr_event = HEAD(task->events);
753 	     curr_event != NULL;
754 	     curr_event = next_event) {
755 		next_event = NEXT(curr_event, ev_link);
756 		if (curr_event == event && PURGE_OK(event)) {
757 			DEQUEUE(task->events, curr_event, ev_link);
758 			task->nevents--;
759 			break;
760 		}
761 	}
762 	UNLOCK(&task->lock);
763 
764 	if (curr_event == NULL)
765 		return (ISC_FALSE);
766 
767 	isc_event_free(&curr_event);
768 
769 	return (ISC_TRUE);
770 }
771 
772 unsigned int
isc__task_unsendrange(isc_task_t * task,void * sender,isc_eventtype_t first,isc_eventtype_t last,void * tag,isc_eventlist_t * events)773 isc__task_unsendrange(isc_task_t *task, void *sender, isc_eventtype_t first,
774 		      isc_eventtype_t last, void *tag,
775 		      isc_eventlist_t *events)
776 {
777 	/*
778 	 * Remove events from a task's event queue.
779 	 */
780 
781 	XTRACE("isc_task_unsendrange");
782 
783 	return (dequeue_events((isc__task_t *)task, sender, first,
784 			       last, tag, events, ISC_FALSE));
785 }
786 
787 unsigned int
isc__task_unsend(isc_task_t * task,void * sender,isc_eventtype_t type,void * tag,isc_eventlist_t * events)788 isc__task_unsend(isc_task_t *task, void *sender, isc_eventtype_t type,
789 		 void *tag, isc_eventlist_t *events)
790 {
791 	/*
792 	 * Remove events from a task's event queue.
793 	 */
794 
795 	XTRACE("isc_task_unsend");
796 
797 	return (dequeue_events((isc__task_t *)task, sender, type,
798 			       type, tag, events, ISC_FALSE));
799 }
800 
801 isc_result_t
isc__task_onshutdown(isc_task_t * task0,isc_taskaction_t action,void * arg)802 isc__task_onshutdown(isc_task_t *task0, isc_taskaction_t action,
803 		     void *arg)
804 {
805 	isc__task_t *task = (isc__task_t *)task0;
806 	isc_boolean_t disallowed = ISC_FALSE;
807 	isc_result_t result = ISC_R_SUCCESS;
808 	isc_event_t *event;
809 
810 	/*
811 	 * Send a shutdown event with action 'action' and argument 'arg' when
812 	 * 'task' is shutdown.
813 	 */
814 
815 	REQUIRE(VALID_TASK(task));
816 	REQUIRE(action != NULL);
817 
818 	event = isc_event_allocate(task->manager->mctx,
819 				   NULL,
820 				   ISC_TASKEVENT_SHUTDOWN,
821 				   action,
822 				   arg,
823 				   sizeof(*event));
824 	if (event == NULL)
825 		return (ISC_R_NOMEMORY);
826 
827 	LOCK(&task->lock);
828 	if (TASK_SHUTTINGDOWN(task)) {
829 		disallowed = ISC_TRUE;
830 		result = ISC_R_SHUTTINGDOWN;
831 	} else
832 		ENQUEUE(task->on_shutdown, event, ev_link);
833 	UNLOCK(&task->lock);
834 
835 	if (disallowed)
836 		isc_mem_put(task->manager->mctx, event, sizeof(*event));
837 
838 	return (result);
839 }
840 
841 void
isc__task_shutdown(isc_task_t * task0)842 isc__task_shutdown(isc_task_t *task0) {
843 	isc__task_t *task = (isc__task_t *)task0;
844 	isc_boolean_t was_idle;
845 
846 	/*
847 	 * Shutdown 'task'.
848 	 */
849 
850 	REQUIRE(VALID_TASK(task));
851 
852 	LOCK(&task->lock);
853 	was_idle = task_shutdown(task);
854 	UNLOCK(&task->lock);
855 
856 	if (was_idle)
857 		task_ready(task);
858 }
859 
860 void
isc__task_destroy(isc_task_t ** taskp)861 isc__task_destroy(isc_task_t **taskp) {
862 
863 	/*
864 	 * Destroy '*taskp'.
865 	 */
866 
867 	REQUIRE(taskp != NULL);
868 
869 	isc_task_shutdown(*taskp);
870 	isc_task_detach(taskp);
871 }
872 
873 void
isc__task_setname(isc_task_t * task0,const char * name,void * tag)874 isc__task_setname(isc_task_t *task0, const char *name, void *tag) {
875 	isc__task_t *task = (isc__task_t *)task0;
876 
877 	/*
878 	 * Name 'task'.
879 	 */
880 
881 	REQUIRE(VALID_TASK(task));
882 
883 	LOCK(&task->lock);
884 	memset(task->name, 0, sizeof(task->name));
885 	strncpy(task->name, name, sizeof(task->name) - 1);
886 	task->tag = tag;
887 	UNLOCK(&task->lock);
888 }
889 
890 const char *
isc__task_getname(isc_task_t * task0)891 isc__task_getname(isc_task_t *task0) {
892 	isc__task_t *task = (isc__task_t *)task0;
893 
894 	REQUIRE(VALID_TASK(task));
895 
896 	return (task->name);
897 }
898 
899 void *
isc__task_gettag(isc_task_t * task0)900 isc__task_gettag(isc_task_t *task0) {
901 	isc__task_t *task = (isc__task_t *)task0;
902 
903 	REQUIRE(VALID_TASK(task));
904 
905 	return (task->tag);
906 }
907 
908 void
isc__task_getcurrenttime(isc_task_t * task0,isc_stdtime_t * t)909 isc__task_getcurrenttime(isc_task_t *task0, isc_stdtime_t *t) {
910 	isc__task_t *task = (isc__task_t *)task0;
911 
912 	REQUIRE(VALID_TASK(task));
913 	REQUIRE(t != NULL);
914 
915 	LOCK(&task->lock);
916 	*t = task->now;
917 	UNLOCK(&task->lock);
918 }
919 
920 /***
921  *** Task Manager.
922  ***/
923 
924 /*
925  * Return ISC_TRUE if the current ready list for the manager, which is
926  * either ready_tasks or the ready_priority_tasks, depending on whether
927  * the manager is currently in normal or privileged execution mode.
928  *
929  * Caller must hold the task manager lock.
930  */
931 static inline isc_boolean_t
empty_readyq(isc__taskmgr_t * manager)932 empty_readyq(isc__taskmgr_t *manager) {
933 	isc__tasklist_t queue;
934 
935 	if (manager->mode == isc_taskmgrmode_normal)
936 		queue = manager->ready_tasks;
937 	else
938 		queue = manager->ready_priority_tasks;
939 
940 	return (ISC_TF(EMPTY(queue)));
941 }
942 
943 /*
944  * Dequeue and return a pointer to the first task on the current ready
945  * list for the manager.
946  * If the task is privileged, dequeue it from the other ready list
947  * as well.
948  *
949  * Caller must hold the task manager lock.
950  */
951 static inline isc__task_t *
pop_readyq(isc__taskmgr_t * manager)952 pop_readyq(isc__taskmgr_t *manager) {
953 	isc__task_t *task;
954 
955 	if (manager->mode == isc_taskmgrmode_normal)
956 		task = HEAD(manager->ready_tasks);
957 	else
958 		task = HEAD(manager->ready_priority_tasks);
959 
960 	if (task != NULL) {
961 		DEQUEUE(manager->ready_tasks, task, ready_link);
962 		if (ISC_LINK_LINKED(task, ready_priority_link))
963 			DEQUEUE(manager->ready_priority_tasks, task,
964 				ready_priority_link);
965 	}
966 
967 	return (task);
968 }
969 
970 /*
971  * Push 'task' onto the ready_tasks queue.  If 'task' has the privilege
972  * flag set, then also push it onto the ready_priority_tasks queue.
973  *
974  * Caller must hold the task manager lock.
975  */
976 static inline void
push_readyq(isc__taskmgr_t * manager,isc__task_t * task)977 push_readyq(isc__taskmgr_t *manager, isc__task_t *task) {
978 	ENQUEUE(manager->ready_tasks, task, ready_link);
979 	if ((task->flags & TASK_F_PRIVILEGED) != 0)
980 		ENQUEUE(manager->ready_priority_tasks, task,
981 			ready_priority_link);
982 	manager->tasks_ready++;
983 }
984 
985 static void
dispatch(isc__taskmgr_t * manager)986 dispatch(isc__taskmgr_t *manager) {
987 	isc__task_t *task;
988 #ifndef USE_WORKER_THREADS
989 	unsigned int total_dispatch_count = 0;
990 	isc__tasklist_t new_ready_tasks;
991 	isc__tasklist_t new_priority_tasks;
992 	unsigned int tasks_ready = 0;
993 #endif /* USE_WORKER_THREADS */
994 
995 	REQUIRE(VALID_MANAGER(manager));
996 
997 	/*
998 	 * Again we're trying to hold the lock for as short a time as possible
999 	 * and to do as little locking and unlocking as possible.
1000 	 *
1001 	 * In both while loops, the appropriate lock must be held before the
1002 	 * while body starts.  Code which acquired the lock at the top of
1003 	 * the loop would be more readable, but would result in a lot of
1004 	 * extra locking.  Compare:
1005 	 *
1006 	 * Straightforward:
1007 	 *
1008 	 *	LOCK();
1009 	 *	...
1010 	 *	UNLOCK();
1011 	 *	while (expression) {
1012 	 *		LOCK();
1013 	 *		...
1014 	 *		UNLOCK();
1015 	 *
1016 	 *	       	Unlocked part here...
1017 	 *
1018 	 *		LOCK();
1019 	 *		...
1020 	 *		UNLOCK();
1021 	 *	}
1022 	 *
1023 	 * Note how if the loop continues we unlock and then immediately lock.
1024 	 * For N iterations of the loop, this code does 2N+1 locks and 2N+1
1025 	 * unlocks.  Also note that the lock is not held when the while
1026 	 * condition is tested, which may or may not be important, depending
1027 	 * on the expression.
1028 	 *
1029 	 * As written:
1030 	 *
1031 	 *	LOCK();
1032 	 *	while (expression) {
1033 	 *		...
1034 	 *		UNLOCK();
1035 	 *
1036 	 *	       	Unlocked part here...
1037 	 *
1038 	 *		LOCK();
1039 	 *		...
1040 	 *	}
1041 	 *	UNLOCK();
1042 	 *
1043 	 * For N iterations of the loop, this code does N+1 locks and N+1
1044 	 * unlocks.  The while expression is always protected by the lock.
1045 	 */
1046 
1047 #ifndef USE_WORKER_THREADS
1048 	ISC_LIST_INIT(new_ready_tasks);
1049 	ISC_LIST_INIT(new_priority_tasks);
1050 #endif
1051 	LOCK(&manager->lock);
1052 
1053 	while (!FINISHED(manager)) {
1054 #ifdef USE_WORKER_THREADS
1055 		/*
1056 		 * For reasons similar to those given in the comment in
1057 		 * isc_task_send() above, it is safe for us to dequeue
1058 		 * the task while only holding the manager lock, and then
1059 		 * change the task to running state while only holding the
1060 		 * task lock.
1061 		 *
1062 		 * If a pause has been requested, don't do any work
1063 		 * until it's been released.
1064 		 */
1065 		while ((empty_readyq(manager) || manager->pause_requested ||
1066 			manager->exclusive_requested) && !FINISHED(manager))
1067 		{
1068 			XTHREADTRACE(isc_msgcat_get(isc_msgcat,
1069 						    ISC_MSGSET_GENERAL,
1070 						    ISC_MSG_WAIT, "wait"));
1071 			WAIT(&manager->work_available, &manager->lock);
1072 			XTHREADTRACE(isc_msgcat_get(isc_msgcat,
1073 						    ISC_MSGSET_TASK,
1074 						    ISC_MSG_AWAKE, "awake"));
1075 		}
1076 #else /* USE_WORKER_THREADS */
1077 		if (total_dispatch_count >= DEFAULT_TASKMGR_QUANTUM ||
1078 		    empty_readyq(manager))
1079 			break;
1080 #endif /* USE_WORKER_THREADS */
1081 		XTHREADTRACE(isc_msgcat_get(isc_msgcat, ISC_MSGSET_TASK,
1082 					    ISC_MSG_WORKING, "working"));
1083 
1084 		task = pop_readyq(manager);
1085 		if (task != NULL) {
1086 			unsigned int dispatch_count = 0;
1087 			isc_boolean_t done = ISC_FALSE;
1088 			isc_boolean_t requeue = ISC_FALSE;
1089 			isc_boolean_t finished = ISC_FALSE;
1090 			isc_event_t *event;
1091 
1092 			INSIST(VALID_TASK(task));
1093 
1094 			/*
1095 			 * Note we only unlock the manager lock if we actually
1096 			 * have a task to do.  We must reacquire the manager
1097 			 * lock before exiting the 'if (task != NULL)' block.
1098 			 */
1099 			manager->tasks_ready--;
1100 			manager->tasks_running++;
1101 			UNLOCK(&manager->lock);
1102 
1103 			LOCK(&task->lock);
1104 			INSIST(task->state == task_state_ready);
1105 			task->state = task_state_running;
1106 			XTRACE(isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
1107 					      ISC_MSG_RUNNING, "running"));
1108 			isc_stdtime_get(&task->now);
1109 			do {
1110 				if (!EMPTY(task->events)) {
1111 					event = HEAD(task->events);
1112 					DEQUEUE(task->events, event, ev_link);
1113 					task->nevents--;
1114 
1115 					/*
1116 					 * Execute the event action.
1117 					 */
1118 					XTRACE(isc_msgcat_get(isc_msgcat,
1119 							    ISC_MSGSET_TASK,
1120 							    ISC_MSG_EXECUTE,
1121 							    "execute action"));
1122 					if (event->ev_action != NULL) {
1123 						UNLOCK(&task->lock);
1124 						(event->ev_action)(
1125 							(isc_task_t *)task,
1126 							event);
1127 						LOCK(&task->lock);
1128 					}
1129 					dispatch_count++;
1130 #ifndef USE_WORKER_THREADS
1131 					total_dispatch_count++;
1132 #endif /* USE_WORKER_THREADS */
1133 				}
1134 
1135 				if (task->references == 0 &&
1136 				    EMPTY(task->events) &&
1137 				    !TASK_SHUTTINGDOWN(task)) {
1138 					isc_boolean_t was_idle;
1139 
1140 					/*
1141 					 * There are no references and no
1142 					 * pending events for this task,
1143 					 * which means it will not become
1144 					 * runnable again via an external
1145 					 * action (such as sending an event
1146 					 * or detaching).
1147 					 *
1148 					 * We initiate shutdown to prevent
1149 					 * it from becoming a zombie.
1150 					 *
1151 					 * We do this here instead of in
1152 					 * the "if EMPTY(task->events)" block
1153 					 * below because:
1154 					 *
1155 					 *	If we post no shutdown events,
1156 					 *	we want the task to finish.
1157 					 *
1158 					 *	If we did post shutdown events,
1159 					 *	will still want the task's
1160 					 *	quantum to be applied.
1161 					 */
1162 					was_idle = task_shutdown(task);
1163 					INSIST(!was_idle);
1164 				}
1165 
1166 				if (EMPTY(task->events)) {
1167 					/*
1168 					 * Nothing else to do for this task
1169 					 * right now.
1170 					 */
1171 					XTRACE(isc_msgcat_get(isc_msgcat,
1172 							      ISC_MSGSET_TASK,
1173 							      ISC_MSG_EMPTY,
1174 							      "empty"));
1175 					if (task->references == 0 &&
1176 					    TASK_SHUTTINGDOWN(task)) {
1177 						/*
1178 						 * The task is done.
1179 						 */
1180 						XTRACE(isc_msgcat_get(
1181 							       isc_msgcat,
1182 							       ISC_MSGSET_TASK,
1183 							       ISC_MSG_DONE,
1184 							       "done"));
1185 						finished = ISC_TRUE;
1186 						task->state = task_state_done;
1187 					} else
1188 						task->state = task_state_idle;
1189 					done = ISC_TRUE;
1190 				} else if (dispatch_count >= task->quantum) {
1191 					/*
1192 					 * Our quantum has expired, but
1193 					 * there is more work to be done.
1194 					 * We'll requeue it to the ready
1195 					 * queue later.
1196 					 *
1197 					 * We don't check quantum until
1198 					 * dispatching at least one event,
1199 					 * so the minimum quantum is one.
1200 					 */
1201 					XTRACE(isc_msgcat_get(isc_msgcat,
1202 							      ISC_MSGSET_TASK,
1203 							      ISC_MSG_QUANTUM,
1204 							      "quantum"));
1205 					task->state = task_state_ready;
1206 					requeue = ISC_TRUE;
1207 					done = ISC_TRUE;
1208 				}
1209 			} while (!done);
1210 			UNLOCK(&task->lock);
1211 
1212 			if (finished)
1213 				task_finished(task);
1214 
1215 			LOCK(&manager->lock);
1216 			manager->tasks_running--;
1217 #ifdef USE_WORKER_THREADS
1218 			if (manager->exclusive_requested &&
1219 			    manager->tasks_running == 1) {
1220 				SIGNAL(&manager->exclusive_granted);
1221 			} else if (manager->pause_requested &&
1222 				   manager->tasks_running == 0) {
1223 				SIGNAL(&manager->paused);
1224 			}
1225 #endif /* USE_WORKER_THREADS */
1226 			if (requeue) {
1227 				/*
1228 				 * We know we're awake, so we don't have
1229 				 * to wakeup any sleeping threads if the
1230 				 * ready queue is empty before we requeue.
1231 				 *
1232 				 * A possible optimization if the queue is
1233 				 * empty is to 'goto' the 'if (task != NULL)'
1234 				 * block, avoiding the ENQUEUE of the task
1235 				 * and the subsequent immediate DEQUEUE
1236 				 * (since it is the only executable task).
1237 				 * We don't do this because then we'd be
1238 				 * skipping the exit_requested check.  The
1239 				 * cost of ENQUEUE is low anyway, especially
1240 				 * when you consider that we'd have to do
1241 				 * an extra EMPTY check to see if we could
1242 				 * do the optimization.  If the ready queue
1243 				 * were usually nonempty, the 'optimization'
1244 				 * might even hurt rather than help.
1245 				 */
1246 #ifdef USE_WORKER_THREADS
1247 				push_readyq(manager, task);
1248 #else
1249 				ENQUEUE(new_ready_tasks, task, ready_link);
1250 				if ((task->flags & TASK_F_PRIVILEGED) != 0)
1251 					ENQUEUE(new_priority_tasks, task,
1252 						ready_priority_link);
1253 				tasks_ready++;
1254 #endif
1255 			}
1256 		}
1257 
1258 #ifdef USE_WORKER_THREADS
1259 		/*
1260 		 * If we are in privileged execution mode and there are no
1261 		 * tasks remaining on the current ready queue, then
1262 		 * we're stuck.  Automatically drop privileges at that
1263 		 * point and continue with the regular ready queue.
1264 		 */
1265 		if (manager->tasks_running == 0 && empty_readyq(manager)) {
1266 			manager->mode = isc_taskmgrmode_normal;
1267 			if (!empty_readyq(manager))
1268 				BROADCAST(&manager->work_available);
1269 		}
1270 #endif
1271 	}
1272 
1273 #ifndef USE_WORKER_THREADS
1274 	ISC_LIST_APPENDLIST(manager->ready_tasks, new_ready_tasks, ready_link);
1275 	ISC_LIST_APPENDLIST(manager->ready_priority_tasks, new_priority_tasks,
1276 			    ready_priority_link);
1277 	manager->tasks_ready += tasks_ready;
1278 	if (empty_readyq(manager))
1279 		manager->mode = isc_taskmgrmode_normal;
1280 #endif
1281 
1282 	UNLOCK(&manager->lock);
1283 }
1284 
1285 #ifdef USE_WORKER_THREADS
1286 static isc_threadresult_t
1287 #ifdef _WIN32
1288 WINAPI
1289 #endif
run(void * uap)1290 run(void *uap) {
1291 	isc__taskmgr_t *manager = uap;
1292 
1293 	XTHREADTRACE(isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
1294 				    ISC_MSG_STARTING, "starting"));
1295 
1296 	dispatch(manager);
1297 
1298 	XTHREADTRACE(isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
1299 				    ISC_MSG_EXITING, "exiting"));
1300 
1301 #ifdef OPENSSL_LEAKS
1302 	ERR_remove_state(0);
1303 #endif
1304 
1305 	return ((isc_threadresult_t)0);
1306 }
1307 #endif /* USE_WORKER_THREADS */
1308 
1309 static void
manager_free(isc__taskmgr_t * manager)1310 manager_free(isc__taskmgr_t *manager) {
1311 	isc_mem_t *mctx;
1312 
1313 #ifdef USE_WORKER_THREADS
1314 	(void)isc_condition_destroy(&manager->exclusive_granted);
1315 	(void)isc_condition_destroy(&manager->work_available);
1316 	(void)isc_condition_destroy(&manager->paused);
1317 	isc_mem_free(manager->mctx, manager->threads);
1318 #endif /* USE_WORKER_THREADS */
1319 	DESTROYLOCK(&manager->lock);
1320 	manager->common.impmagic = 0;
1321 	manager->common.magic = 0;
1322 	mctx = manager->mctx;
1323 	isc_mem_put(mctx, manager, sizeof(*manager));
1324 	isc_mem_detach(&mctx);
1325 
1326 #ifdef USE_SHARED_MANAGER
1327 	taskmgr = NULL;
1328 #endif	/* USE_SHARED_MANAGER */
1329 }
1330 
1331 isc_result_t
isc__taskmgr_create(isc_mem_t * mctx,unsigned int workers,unsigned int default_quantum,isc_taskmgr_t ** managerp)1332 isc__taskmgr_create(isc_mem_t *mctx, unsigned int workers,
1333 		    unsigned int default_quantum, isc_taskmgr_t **managerp)
1334 {
1335 	isc_result_t result;
1336 	unsigned int i, started = 0;
1337 	isc__taskmgr_t *manager;
1338 
1339 	/*
1340 	 * Create a new task manager.
1341 	 */
1342 
1343 	REQUIRE(workers > 0);
1344 	REQUIRE(managerp != NULL && *managerp == NULL);
1345 
1346 #ifndef USE_WORKER_THREADS
1347 	UNUSED(i);
1348 	UNUSED(started);
1349 #endif
1350 
1351 #ifdef USE_SHARED_MANAGER
1352 	if (taskmgr != NULL) {
1353 		if (taskmgr->refs == 0)
1354 			return (ISC_R_SHUTTINGDOWN);
1355 		taskmgr->refs++;
1356 		*managerp = (isc_taskmgr_t *)taskmgr;
1357 		return (ISC_R_SUCCESS);
1358 	}
1359 #endif /* USE_SHARED_MANAGER */
1360 
1361 	manager = isc_mem_get(mctx, sizeof(*manager));
1362 	if (manager == NULL)
1363 		return (ISC_R_NOMEMORY);
1364 	manager->common.methods = &taskmgrmethods;
1365 	manager->common.impmagic = TASK_MANAGER_MAGIC;
1366 	manager->common.magic = ISCAPI_TASKMGR_MAGIC;
1367 	manager->mode = isc_taskmgrmode_normal;
1368 	manager->mctx = NULL;
1369 	result = isc_mutex_init(&manager->lock);
1370 	if (result != ISC_R_SUCCESS)
1371 		goto cleanup_mgr;
1372 
1373 #ifdef USE_WORKER_THREADS
1374 	manager->workers = 0;
1375 	manager->threads = isc_mem_allocate(mctx,
1376 					    workers * sizeof(isc_thread_t));
1377 	if (manager->threads == NULL) {
1378 		result = ISC_R_NOMEMORY;
1379 		goto cleanup_lock;
1380 	}
1381 	if (isc_condition_init(&manager->work_available) != ISC_R_SUCCESS) {
1382 		UNEXPECTED_ERROR(__FILE__, __LINE__,
1383 				 "isc_condition_init() %s",
1384 				 isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
1385 						ISC_MSG_FAILED, "failed"));
1386 		result = ISC_R_UNEXPECTED;
1387 		goto cleanup_threads;
1388 	}
1389 	if (isc_condition_init(&manager->exclusive_granted) != ISC_R_SUCCESS) {
1390 		UNEXPECTED_ERROR(__FILE__, __LINE__,
1391 				 "isc_condition_init() %s",
1392 				 isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
1393 						ISC_MSG_FAILED, "failed"));
1394 		result = ISC_R_UNEXPECTED;
1395 		goto cleanup_workavailable;
1396 	}
1397 	if (isc_condition_init(&manager->paused) != ISC_R_SUCCESS) {
1398 		UNEXPECTED_ERROR(__FILE__, __LINE__,
1399 				 "isc_condition_init() %s",
1400 				 isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
1401 						ISC_MSG_FAILED, "failed"));
1402 		result = ISC_R_UNEXPECTED;
1403 		goto cleanup_exclusivegranted;
1404 	}
1405 #endif /* USE_WORKER_THREADS */
1406 	if (default_quantum == 0)
1407 		default_quantum = DEFAULT_DEFAULT_QUANTUM;
1408 	manager->default_quantum = default_quantum;
1409 	INIT_LIST(manager->tasks);
1410 	INIT_LIST(manager->ready_tasks);
1411 	INIT_LIST(manager->ready_priority_tasks);
1412 	manager->tasks_running = 0;
1413 	manager->tasks_ready = 0;
1414 	manager->exclusive_requested = ISC_FALSE;
1415 	manager->pause_requested = ISC_FALSE;
1416 	manager->exiting = ISC_FALSE;
1417 	manager->excl = NULL;
1418 
1419 	isc_mem_attach(mctx, &manager->mctx);
1420 
1421 #ifdef USE_WORKER_THREADS
1422 	LOCK(&manager->lock);
1423 	/*
1424 	 * Start workers.
1425 	 */
1426 	for (i = 0; i < workers; i++) {
1427 		if (isc_thread_create(run, manager,
1428 				      &manager->threads[manager->workers]) ==
1429 		    ISC_R_SUCCESS) {
1430 			manager->workers++;
1431 			started++;
1432 		}
1433 	}
1434 	UNLOCK(&manager->lock);
1435 
1436 	if (started == 0) {
1437 		manager_free(manager);
1438 		return (ISC_R_NOTHREADS);
1439 	}
1440 	isc_thread_setconcurrency(workers);
1441 #endif /* USE_WORKER_THREADS */
1442 #ifdef USE_SHARED_MANAGER
1443 	manager->refs = 1;
1444 	taskmgr = manager;
1445 #endif /* USE_SHARED_MANAGER */
1446 
1447 	*managerp = (isc_taskmgr_t *)manager;
1448 
1449 	return (ISC_R_SUCCESS);
1450 
1451 #ifdef USE_WORKER_THREADS
1452  cleanup_exclusivegranted:
1453 	(void)isc_condition_destroy(&manager->exclusive_granted);
1454  cleanup_workavailable:
1455 	(void)isc_condition_destroy(&manager->work_available);
1456  cleanup_threads:
1457 	isc_mem_free(mctx, manager->threads);
1458  cleanup_lock:
1459 	DESTROYLOCK(&manager->lock);
1460 #endif
1461  cleanup_mgr:
1462 	isc_mem_put(mctx, manager, sizeof(*manager));
1463 	return (result);
1464 }
1465 
1466 void
isc__taskmgr_destroy(isc_taskmgr_t ** managerp)1467 isc__taskmgr_destroy(isc_taskmgr_t **managerp) {
1468 	isc__taskmgr_t *manager;
1469 	isc__task_t *task;
1470 	unsigned int i;
1471 
1472 	/*
1473 	 * Destroy '*managerp'.
1474 	 */
1475 
1476 	REQUIRE(managerp != NULL);
1477 	manager = (isc__taskmgr_t *)*managerp;
1478 	REQUIRE(VALID_MANAGER(manager));
1479 
1480 #ifndef USE_WORKER_THREADS
1481 	UNUSED(i);
1482 #endif /* USE_WORKER_THREADS */
1483 
1484 #ifdef USE_SHARED_MANAGER
1485 	manager->refs--;
1486 	if (manager->refs > 0) {
1487 		*managerp = NULL;
1488 		return;
1489 	}
1490 #endif
1491 
1492 	XTHREADTRACE("isc_taskmgr_destroy");
1493 	/*
1494 	 * Only one non-worker thread may ever call this routine.
1495 	 * If a worker thread wants to initiate shutdown of the
1496 	 * task manager, it should ask some non-worker thread to call
1497 	 * isc_taskmgr_destroy(), e.g. by signalling a condition variable
1498 	 * that the startup thread is sleeping on.
1499 	 */
1500 
1501 	/*
1502 	 * Detach the exclusive task before acquiring the manager lock
1503 	 */
1504 	if (manager->excl != NULL)
1505 		isc__task_detach((isc_task_t **) &manager->excl);
1506 
1507 	/*
1508 	 * Unlike elsewhere, we're going to hold this lock a long time.
1509 	 * We need to do so, because otherwise the list of tasks could
1510 	 * change while we were traversing it.
1511 	 *
1512 	 * This is also the only function where we will hold both the
1513 	 * task manager lock and a task lock at the same time.
1514 	 */
1515 
1516 	LOCK(&manager->lock);
1517 
1518 	/*
1519 	 * Make sure we only get called once.
1520 	 */
1521 	INSIST(!manager->exiting);
1522 	manager->exiting = ISC_TRUE;
1523 
1524 	/*
1525 	 * If privileged mode was on, turn it off.
1526 	 */
1527 	manager->mode = isc_taskmgrmode_normal;
1528 
1529 	/*
1530 	 * Post shutdown event(s) to every task (if they haven't already been
1531 	 * posted).
1532 	 */
1533 	for (task = HEAD(manager->tasks);
1534 	     task != NULL;
1535 	     task = NEXT(task, link)) {
1536 		LOCK(&task->lock);
1537 		if (task_shutdown(task))
1538 			push_readyq(manager, task);
1539 		UNLOCK(&task->lock);
1540 	}
1541 #ifdef USE_WORKER_THREADS
1542 	/*
1543 	 * Wake up any sleeping workers.  This ensures we get work done if
1544 	 * there's work left to do, and if there are already no tasks left
1545 	 * it will cause the workers to see manager->exiting.
1546 	 */
1547 	BROADCAST(&manager->work_available);
1548 	UNLOCK(&manager->lock);
1549 
1550 	/*
1551 	 * Wait for all the worker threads to exit.
1552 	 */
1553 	for (i = 0; i < manager->workers; i++)
1554 		(void)isc_thread_join(manager->threads[i], NULL);
1555 #else /* USE_WORKER_THREADS */
1556 	/*
1557 	 * Dispatch the shutdown events.
1558 	 */
1559 	UNLOCK(&manager->lock);
1560 	while (isc__taskmgr_ready((isc_taskmgr_t *)manager))
1561 		(void)isc__taskmgr_dispatch((isc_taskmgr_t *)manager);
1562 	if (!ISC_LIST_EMPTY(manager->tasks))
1563 		isc_mem_printallactive(stderr);
1564 	INSIST(ISC_LIST_EMPTY(manager->tasks));
1565 #ifdef USE_SHARED_MANAGER
1566 	taskmgr = NULL;
1567 #endif
1568 #endif /* USE_WORKER_THREADS */
1569 
1570 	manager_free(manager);
1571 
1572 	*managerp = NULL;
1573 }
1574 
1575 void
isc__taskmgr_setmode(isc_taskmgr_t * manager0,isc_taskmgrmode_t mode)1576 isc__taskmgr_setmode(isc_taskmgr_t *manager0, isc_taskmgrmode_t mode) {
1577 	isc__taskmgr_t *manager = (isc__taskmgr_t *)manager0;
1578 
1579 	LOCK(&manager->lock);
1580 	manager->mode = mode;
1581 	UNLOCK(&manager->lock);
1582 }
1583 
1584 isc_taskmgrmode_t
isc__taskmgr_mode(isc_taskmgr_t * manager0)1585 isc__taskmgr_mode(isc_taskmgr_t *manager0) {
1586 	isc__taskmgr_t *manager = (isc__taskmgr_t *)manager0;
1587 	isc_taskmgrmode_t mode;
1588 	LOCK(&manager->lock);
1589 	mode = manager->mode;
1590 	UNLOCK(&manager->lock);
1591 	return (mode);
1592 }
1593 
1594 #ifndef USE_WORKER_THREADS
1595 isc_boolean_t
isc__taskmgr_ready(isc_taskmgr_t * manager0)1596 isc__taskmgr_ready(isc_taskmgr_t *manager0) {
1597 	isc__taskmgr_t *manager = (isc__taskmgr_t *)manager0;
1598 	isc_boolean_t is_ready;
1599 
1600 #ifdef USE_SHARED_MANAGER
1601 	if (manager == NULL)
1602 		manager = taskmgr;
1603 #endif
1604 	if (manager == NULL)
1605 		return (ISC_FALSE);
1606 
1607 	LOCK(&manager->lock);
1608 	is_ready = !empty_readyq(manager);
1609 	UNLOCK(&manager->lock);
1610 
1611 	return (is_ready);
1612 }
1613 
1614 isc_result_t
isc__taskmgr_dispatch(isc_taskmgr_t * manager0)1615 isc__taskmgr_dispatch(isc_taskmgr_t *manager0) {
1616 	isc__taskmgr_t *manager = (isc__taskmgr_t *)manager0;
1617 
1618 #ifdef USE_SHARED_MANAGER
1619 	if (manager == NULL)
1620 		manager = taskmgr;
1621 #endif
1622 	if (manager == NULL)
1623 		return (ISC_R_NOTFOUND);
1624 
1625 	dispatch(manager);
1626 
1627 	return (ISC_R_SUCCESS);
1628 }
1629 
1630 #else
1631 void
isc__taskmgr_pause(isc_taskmgr_t * manager0)1632 isc__taskmgr_pause(isc_taskmgr_t *manager0) {
1633 	isc__taskmgr_t *manager = (isc__taskmgr_t *)manager0;
1634 	LOCK(&manager->lock);
1635 	while (manager->tasks_running > 0) {
1636 		WAIT(&manager->paused, &manager->lock);
1637 	}
1638 	manager->pause_requested = ISC_TRUE;
1639 	UNLOCK(&manager->lock);
1640 }
1641 
1642 void
isc__taskmgr_resume(isc_taskmgr_t * manager0)1643 isc__taskmgr_resume(isc_taskmgr_t *manager0) {
1644 	isc__taskmgr_t *manager = (isc__taskmgr_t *)manager0;
1645 
1646 	LOCK(&manager->lock);
1647 	if (manager->pause_requested) {
1648 		manager->pause_requested = ISC_FALSE;
1649 		BROADCAST(&manager->work_available);
1650 	}
1651 	UNLOCK(&manager->lock);
1652 }
1653 #endif /* USE_WORKER_THREADS */
1654 
1655 void
isc_taskmgr_setexcltask(isc_taskmgr_t * mgr0,isc_task_t * task0)1656 isc_taskmgr_setexcltask(isc_taskmgr_t *mgr0, isc_task_t *task0) {
1657 	isc__taskmgr_t *mgr = (isc__taskmgr_t *) mgr0;
1658 	isc__task_t *task = (isc__task_t *) task0;
1659 
1660 	REQUIRE(VALID_MANAGER(mgr));
1661 	REQUIRE(VALID_TASK(task));
1662 	if (mgr->excl != NULL)
1663 		isc__task_detach((isc_task_t **) &mgr->excl);
1664 	isc__task_attach(task0, (isc_task_t **) &mgr->excl);
1665 }
1666 
1667 isc_result_t
isc_taskmgr_excltask(isc_taskmgr_t * mgr0,isc_task_t ** taskp)1668 isc_taskmgr_excltask(isc_taskmgr_t *mgr0, isc_task_t **taskp) {
1669 	isc__taskmgr_t *mgr = (isc__taskmgr_t *) mgr0;
1670 
1671 	REQUIRE(VALID_MANAGER(mgr));
1672 	REQUIRE(taskp != NULL && *taskp == NULL);
1673 
1674 	if (mgr->excl == NULL)
1675 		return (ISC_R_NOTFOUND);
1676 
1677 	isc__task_attach((isc_task_t *) mgr->excl, taskp);
1678 	return (ISC_R_SUCCESS);
1679 }
1680 
1681 isc_result_t
isc__task_beginexclusive(isc_task_t * task0)1682 isc__task_beginexclusive(isc_task_t *task0) {
1683 #ifdef USE_WORKER_THREADS
1684 	isc__task_t *task = (isc__task_t *)task0;
1685 	isc__taskmgr_t *manager = task->manager;
1686 
1687 	REQUIRE(task->state == task_state_running);
1688 	/* XXX: Require task == manager->excl? */
1689 
1690 	LOCK(&manager->lock);
1691 	if (manager->exclusive_requested) {
1692 		UNLOCK(&manager->lock);
1693 		return (ISC_R_LOCKBUSY);
1694 	}
1695 	manager->exclusive_requested = ISC_TRUE;
1696 	while (manager->tasks_running > 1) {
1697 		WAIT(&manager->exclusive_granted, &manager->lock);
1698 	}
1699 	UNLOCK(&manager->lock);
1700 #else
1701 	UNUSED(task0);
1702 #endif
1703 	return (ISC_R_SUCCESS);
1704 }
1705 
1706 void
isc__task_endexclusive(isc_task_t * task0)1707 isc__task_endexclusive(isc_task_t *task0) {
1708 #ifdef USE_WORKER_THREADS
1709 	isc__task_t *task = (isc__task_t *)task0;
1710 	isc__taskmgr_t *manager = task->manager;
1711 
1712 	REQUIRE(task->state == task_state_running);
1713 	LOCK(&manager->lock);
1714 	REQUIRE(manager->exclusive_requested);
1715 	manager->exclusive_requested = ISC_FALSE;
1716 	BROADCAST(&manager->work_available);
1717 	UNLOCK(&manager->lock);
1718 #else
1719 	UNUSED(task0);
1720 #endif
1721 }
1722 
1723 void
isc__task_setprivilege(isc_task_t * task0,isc_boolean_t priv)1724 isc__task_setprivilege(isc_task_t *task0, isc_boolean_t priv) {
1725 	isc__task_t *task = (isc__task_t *)task0;
1726 	isc__taskmgr_t *manager = task->manager;
1727 	isc_boolean_t oldpriv;
1728 
1729 	LOCK(&task->lock);
1730 	oldpriv = ISC_TF((task->flags & TASK_F_PRIVILEGED) != 0);
1731 	if (priv)
1732 		task->flags |= TASK_F_PRIVILEGED;
1733 	else
1734 		task->flags &= ~TASK_F_PRIVILEGED;
1735 	UNLOCK(&task->lock);
1736 
1737 	if (priv == oldpriv)
1738 		return;
1739 
1740 	LOCK(&manager->lock);
1741 	if (priv && ISC_LINK_LINKED(task, ready_link))
1742 		ENQUEUE(manager->ready_priority_tasks, task,
1743 			ready_priority_link);
1744 	else if (!priv && ISC_LINK_LINKED(task, ready_priority_link))
1745 		DEQUEUE(manager->ready_priority_tasks, task,
1746 			ready_priority_link);
1747 	UNLOCK(&manager->lock);
1748 }
1749 
1750 isc_boolean_t
isc__task_privilege(isc_task_t * task0)1751 isc__task_privilege(isc_task_t *task0) {
1752 	isc__task_t *task = (isc__task_t *)task0;
1753 	isc_boolean_t priv;
1754 
1755 	LOCK(&task->lock);
1756 	priv = ISC_TF((task->flags & TASK_F_PRIVILEGED) != 0);
1757 	UNLOCK(&task->lock);
1758 	return (priv);
1759 }
1760 
1761 isc_result_t
isc__task_register(void)1762 isc__task_register(void) {
1763 	return (isc_task_register(isc__taskmgr_create));
1764 }
1765 
1766 isc_boolean_t
isc_task_exiting(isc_task_t * t)1767 isc_task_exiting(isc_task_t *t) {
1768 	isc__task_t *task = (isc__task_t *)t;
1769 
1770 	REQUIRE(VALID_TASK(task));
1771 	return (TASK_SHUTTINGDOWN(task));
1772 }
1773 
1774 
1775 #ifdef HAVE_LIBXML2
1776 #define TRY0(a) do { xmlrc = (a); if (xmlrc < 0) goto error; } while(/*CONSTCOND*/0)
1777 int
isc_taskmgr_renderxml(isc_taskmgr_t * mgr0,xmlTextWriterPtr writer)1778 isc_taskmgr_renderxml(isc_taskmgr_t *mgr0, xmlTextWriterPtr writer) {
1779 	isc__taskmgr_t *mgr = (isc__taskmgr_t *)mgr0;
1780 	isc__task_t *task = NULL;
1781 	int xmlrc;
1782 
1783 	LOCK(&mgr->lock);
1784 
1785 	/*
1786 	 * Write out the thread-model, and some details about each depending
1787 	 * on which type is enabled.
1788 	 */
1789 	TRY0(xmlTextWriterStartElement(writer, ISC_XMLCHAR "thread-model"));
1790 #ifdef ISC_PLATFORM_USETHREADS
1791 	TRY0(xmlTextWriterStartElement(writer, ISC_XMLCHAR "type"));
1792 	TRY0(xmlTextWriterWriteString(writer, ISC_XMLCHAR "threaded"));
1793 	TRY0(xmlTextWriterEndElement(writer)); /* type */
1794 
1795 	TRY0(xmlTextWriterStartElement(writer, ISC_XMLCHAR "worker-threads"));
1796 	TRY0(xmlTextWriterWriteFormatString(writer, "%d", mgr->workers));
1797 	TRY0(xmlTextWriterEndElement(writer)); /* worker-threads */
1798 #else /* ISC_PLATFORM_USETHREADS */
1799 	TRY0(xmlTextWriterStartElement(writer, ISC_XMLCHAR "type"));
1800 	TRY0(xmlTextWriterWriteString(writer, ISC_XMLCHAR "non-threaded"));
1801 	TRY0(xmlTextWriterEndElement(writer)); /* type */
1802 
1803 	TRY0(xmlTextWriterStartElement(writer, ISC_XMLCHAR "references"));
1804 	TRY0(xmlTextWriterWriteFormatString(writer, "%d", mgr->refs));
1805 	TRY0(xmlTextWriterEndElement(writer)); /* references */
1806 #endif /* ISC_PLATFORM_USETHREADS */
1807 
1808 	TRY0(xmlTextWriterStartElement(writer, ISC_XMLCHAR "default-quantum"));
1809 	TRY0(xmlTextWriterWriteFormatString(writer, "%d",
1810 					    mgr->default_quantum));
1811 	TRY0(xmlTextWriterEndElement(writer)); /* default-quantum */
1812 
1813 	TRY0(xmlTextWriterStartElement(writer, ISC_XMLCHAR "tasks-running"));
1814 	TRY0(xmlTextWriterWriteFormatString(writer, "%d", mgr->tasks_running));
1815 	TRY0(xmlTextWriterEndElement(writer)); /* tasks-running */
1816 
1817 	TRY0(xmlTextWriterStartElement(writer, ISC_XMLCHAR "tasks-ready"));
1818 	TRY0(xmlTextWriterWriteFormatString(writer, "%d", mgr->tasks_ready));
1819 	TRY0(xmlTextWriterEndElement(writer)); /* tasks-ready */
1820 
1821 	TRY0(xmlTextWriterEndElement(writer)); /* thread-model */
1822 
1823 	TRY0(xmlTextWriterStartElement(writer, ISC_XMLCHAR "tasks"));
1824 	task = ISC_LIST_HEAD(mgr->tasks);
1825 	while (task != NULL) {
1826 		LOCK(&task->lock);
1827 		TRY0(xmlTextWriterStartElement(writer, ISC_XMLCHAR "task"));
1828 
1829 		if (task->name[0] != 0) {
1830 			TRY0(xmlTextWriterStartElement(writer,
1831 						       ISC_XMLCHAR "name"));
1832 			TRY0(xmlTextWriterWriteFormatString(writer, "%s",
1833 						       task->name));
1834 			TRY0(xmlTextWriterEndElement(writer)); /* name */
1835 		}
1836 
1837 		TRY0(xmlTextWriterStartElement(writer,
1838 					       ISC_XMLCHAR "references"));
1839 		TRY0(xmlTextWriterWriteFormatString(writer, "%d",
1840 						    task->references));
1841 		TRY0(xmlTextWriterEndElement(writer)); /* references */
1842 
1843 		TRY0(xmlTextWriterStartElement(writer, ISC_XMLCHAR "id"));
1844 		TRY0(xmlTextWriterWriteFormatString(writer, "%p", task));
1845 		TRY0(xmlTextWriterEndElement(writer)); /* id */
1846 
1847 		TRY0(xmlTextWriterStartElement(writer, ISC_XMLCHAR "state"));
1848 		TRY0(xmlTextWriterWriteFormatString(writer, "%s",
1849 					       statenames[task->state]));
1850 		TRY0(xmlTextWriterEndElement(writer)); /* state */
1851 
1852 		TRY0(xmlTextWriterStartElement(writer, ISC_XMLCHAR "quantum"));
1853 		TRY0(xmlTextWriterWriteFormatString(writer, "%d",
1854 						    task->quantum));
1855 		TRY0(xmlTextWriterEndElement(writer)); /* quantum */
1856 
1857 		TRY0(xmlTextWriterStartElement(writer, ISC_XMLCHAR "events"));
1858 		TRY0(xmlTextWriterWriteFormatString(writer, "%d",
1859 						    task->nevents));
1860 		TRY0(xmlTextWriterEndElement(writer)); /* events */
1861 
1862 		TRY0(xmlTextWriterEndElement(writer));
1863 
1864 		UNLOCK(&task->lock);
1865 		task = ISC_LIST_NEXT(task, link);
1866 	}
1867 	TRY0(xmlTextWriterEndElement(writer)); /* tasks */
1868 
1869  error:
1870 	if (task != NULL)
1871 		UNLOCK(&task->lock);
1872 	UNLOCK(&mgr->lock);
1873 
1874 	return (xmlrc);
1875 }
1876 #endif /* HAVE_LIBXML2 */
1877 
1878 #ifdef HAVE_JSON
1879 #define CHECKMEM(m) do { \
1880 	if (m == NULL) { \
1881 		result = ISC_R_NOMEMORY;\
1882 		goto error;\
1883 	} \
1884 } while(/*CONSTCOND*/0)
1885 
1886 isc_result_t
isc_taskmgr_renderjson(isc_taskmgr_t * mgr0,json_object * tasks)1887 isc_taskmgr_renderjson(isc_taskmgr_t *mgr0, json_object *tasks) {
1888 	isc_result_t result = ISC_R_SUCCESS;
1889 	isc__taskmgr_t *mgr = (isc__taskmgr_t *)mgr0;
1890 	isc__task_t *task = NULL;
1891 	json_object *obj = NULL, *array = NULL, *taskobj = NULL;
1892 
1893 	LOCK(&mgr->lock);
1894 
1895 	/*
1896 	 * Write out the thread-model, and some details about each depending
1897 	 * on which type is enabled.
1898 	 */
1899 #ifdef ISC_PLATFORM_USETHREADS
1900 	obj = json_object_new_string("threaded");
1901 	CHECKMEM(obj);
1902 	json_object_object_add(tasks, "thread-model", obj);
1903 
1904 	obj = json_object_new_int(mgr->workers);
1905 	CHECKMEM(obj);
1906 	json_object_object_add(tasks, "worker-threads", obj);
1907 #else /* ISC_PLATFORM_USETHREADS */
1908 	obj = json_object_new_string("non-threaded");
1909 	CHECKMEM(obj);
1910 	json_object_object_add(tasks, "thread-model", obj);
1911 
1912 	obj = json_object_new_int(mgr->refs);
1913 	CHECKMEM(obj);
1914 	json_object_object_add(tasks, "references", obj);
1915 #endif /* ISC_PLATFORM_USETHREADS */
1916 
1917 	obj = json_object_new_int(mgr->default_quantum);
1918 	CHECKMEM(obj);
1919 	json_object_object_add(tasks, "default-quantum", obj);
1920 
1921 	obj = json_object_new_int(mgr->tasks_running);
1922 	CHECKMEM(obj);
1923 	json_object_object_add(tasks, "tasks-running", obj);
1924 
1925 	obj = json_object_new_int(mgr->tasks_ready);
1926 	CHECKMEM(obj);
1927 	json_object_object_add(tasks, "tasks-ready", obj);
1928 
1929 	array = json_object_new_array();
1930 	CHECKMEM(array);
1931 
1932 	for (task = ISC_LIST_HEAD(mgr->tasks);
1933 	     task != NULL;
1934 	     task = ISC_LIST_NEXT(task, link))
1935 	{
1936 		char buf[255];
1937 
1938 		LOCK(&task->lock);
1939 
1940 		taskobj = json_object_new_object();
1941 		CHECKMEM(taskobj);
1942 		json_object_array_add(array, taskobj);
1943 
1944 		sprintf(buf, "%p", task);
1945 		obj = json_object_new_string(buf);
1946 		CHECKMEM(obj);
1947 		json_object_object_add(taskobj, "id", obj);
1948 
1949 		if (task->name[0] != 0) {
1950 			obj = json_object_new_string(task->name);
1951 			CHECKMEM(obj);
1952 			json_object_object_add(taskobj, "name", obj);
1953 		}
1954 
1955 		obj = json_object_new_int(task->references);
1956 		CHECKMEM(obj);
1957 		json_object_object_add(taskobj, "references", obj);
1958 
1959 		obj = json_object_new_string(statenames[task->state]);
1960 		CHECKMEM(obj);
1961 		json_object_object_add(taskobj, "state", obj);
1962 
1963 		obj = json_object_new_int(task->quantum);
1964 		CHECKMEM(obj);
1965 		json_object_object_add(taskobj, "quantum", obj);
1966 
1967 		obj = json_object_new_int(task->nevents);
1968 		CHECKMEM(obj);
1969 		json_object_object_add(taskobj, "events", obj);
1970 
1971 		UNLOCK(&task->lock);
1972 	}
1973 
1974 	json_object_object_add(tasks, "tasks", array);
1975 	array = NULL;
1976 	result = ISC_R_SUCCESS;
1977 
1978  error:
1979 	if (array != NULL)
1980 		json_object_put(array);
1981 
1982 	if (task != NULL)
1983 		UNLOCK(&task->lock);
1984 	UNLOCK(&mgr->lock);
1985 
1986 	return (result);
1987 }
1988 #endif
1989 
1990 
1991 static isc_mutex_t createlock;
1992 static isc_once_t once = ISC_ONCE_INIT;
1993 static isc_taskmgrcreatefunc_t taskmgr_createfunc = NULL;
1994 
1995 static void
initialize(void)1996 initialize(void) {
1997 	RUNTIME_CHECK(isc_mutex_init(&createlock) == ISC_R_SUCCESS);
1998 }
1999 
2000 isc_result_t
isc_task_register(isc_taskmgrcreatefunc_t createfunc)2001 isc_task_register(isc_taskmgrcreatefunc_t createfunc) {
2002 	isc_result_t result = ISC_R_SUCCESS;
2003 
2004 	RUNTIME_CHECK(isc_once_do(&once, initialize) == ISC_R_SUCCESS);
2005 
2006 	LOCK(&createlock);
2007 	if (taskmgr_createfunc == NULL)
2008 		taskmgr_createfunc = createfunc;
2009 	else
2010 		result = ISC_R_EXISTS;
2011 	UNLOCK(&createlock);
2012 
2013 	return (result);
2014 }
2015 
2016 isc_result_t
isc_taskmgr_createinctx(isc_mem_t * mctx,isc_appctx_t * actx,unsigned int workers,unsigned int default_quantum,isc_taskmgr_t ** managerp)2017 isc_taskmgr_createinctx(isc_mem_t *mctx, isc_appctx_t *actx,
2018 			unsigned int workers, unsigned int default_quantum,
2019 			isc_taskmgr_t **managerp)
2020 {
2021 	isc_result_t result;
2022 
2023 	LOCK(&createlock);
2024 
2025 	REQUIRE(taskmgr_createfunc != NULL);
2026 	result = (*taskmgr_createfunc)(mctx, workers, default_quantum,
2027 				       managerp);
2028 
2029 	UNLOCK(&createlock);
2030 
2031 	if (result == ISC_R_SUCCESS)
2032 		isc_appctx_settaskmgr(actx, *managerp);
2033 
2034 	return (result);
2035 }
2036 
2037 isc_result_t
isc_taskmgr_create(isc_mem_t * mctx,unsigned int workers,unsigned int default_quantum,isc_taskmgr_t ** managerp)2038 isc_taskmgr_create(isc_mem_t *mctx, unsigned int workers,
2039 		   unsigned int default_quantum, isc_taskmgr_t **managerp)
2040 {
2041 	isc_result_t result;
2042 
2043 	if (isc_bind9)
2044 		return (isc__taskmgr_create(mctx, workers,
2045 					    default_quantum, managerp));
2046 	LOCK(&createlock);
2047 
2048 	REQUIRE(taskmgr_createfunc != NULL);
2049 	result = (*taskmgr_createfunc)(mctx, workers, default_quantum,
2050 				       managerp);
2051 
2052 	UNLOCK(&createlock);
2053 
2054 	return (result);
2055 }
2056 
2057 void
isc_taskmgr_destroy(isc_taskmgr_t ** managerp)2058 isc_taskmgr_destroy(isc_taskmgr_t **managerp) {
2059 	REQUIRE(managerp != NULL && ISCAPI_TASKMGR_VALID(*managerp));
2060 
2061 	if (isc_bind9)
2062 		isc__taskmgr_destroy(managerp);
2063 	else
2064 		(*managerp)->methods->destroy(managerp);
2065 
2066 	ENSURE(*managerp == NULL);
2067 }
2068 
2069 void
isc_taskmgr_setmode(isc_taskmgr_t * manager,isc_taskmgrmode_t mode)2070 isc_taskmgr_setmode(isc_taskmgr_t *manager, isc_taskmgrmode_t mode) {
2071 	REQUIRE(ISCAPI_TASKMGR_VALID(manager));
2072 
2073 	if (isc_bind9)
2074 		isc__taskmgr_setmode(manager, mode);
2075 	else
2076 		manager->methods->setmode(manager, mode);
2077 }
2078 
2079 isc_taskmgrmode_t
isc_taskmgr_mode(isc_taskmgr_t * manager)2080 isc_taskmgr_mode(isc_taskmgr_t *manager) {
2081 	REQUIRE(ISCAPI_TASKMGR_VALID(manager));
2082 
2083 	if (isc_bind9)
2084 		return (isc__taskmgr_mode(manager));
2085 
2086 	return (manager->methods->mode(manager));
2087 }
2088 
2089 isc_result_t
isc_task_create(isc_taskmgr_t * manager,unsigned int quantum,isc_task_t ** taskp)2090 isc_task_create(isc_taskmgr_t *manager, unsigned int quantum,
2091 		isc_task_t **taskp)
2092 {
2093 	REQUIRE(ISCAPI_TASKMGR_VALID(manager));
2094 	REQUIRE(taskp != NULL && *taskp == NULL);
2095 
2096 	if (isc_bind9)
2097 		return (isc__task_create(manager, quantum, taskp));
2098 
2099 	return (manager->methods->taskcreate(manager, quantum, taskp));
2100 }
2101 
2102 void
isc_task_attach(isc_task_t * source,isc_task_t ** targetp)2103 isc_task_attach(isc_task_t *source, isc_task_t **targetp) {
2104 	REQUIRE(ISCAPI_TASK_VALID(source));
2105 	REQUIRE(targetp != NULL && *targetp == NULL);
2106 
2107 	if (isc_bind9)
2108 		isc__task_attach(source, targetp);
2109 	else
2110 		source->methods->attach(source, targetp);
2111 
2112 	ENSURE(*targetp == source);
2113 }
2114 
2115 void
isc_task_detach(isc_task_t ** taskp)2116 isc_task_detach(isc_task_t **taskp) {
2117 	REQUIRE(taskp != NULL && ISCAPI_TASK_VALID(*taskp));
2118 
2119 	if (isc_bind9)
2120 		isc__task_detach(taskp);
2121 	else
2122 		(*taskp)->methods->detach(taskp);
2123 
2124 	ENSURE(*taskp == NULL);
2125 }
2126 
2127 void
isc_task_send(isc_task_t * task,isc_event_t ** eventp)2128 isc_task_send(isc_task_t *task, isc_event_t **eventp) {
2129 	REQUIRE(ISCAPI_TASK_VALID(task));
2130 	REQUIRE(eventp != NULL && *eventp != NULL);
2131 
2132 	if (isc_bind9)
2133 		isc__task_send(task, eventp);
2134 	else {
2135 		task->methods->send(task, eventp);
2136 		ENSURE(*eventp == NULL);
2137 	}
2138 }
2139 
2140 void
isc_task_sendanddetach(isc_task_t ** taskp,isc_event_t ** eventp)2141 isc_task_sendanddetach(isc_task_t **taskp, isc_event_t **eventp) {
2142 	REQUIRE(taskp != NULL && ISCAPI_TASK_VALID(*taskp));
2143 	REQUIRE(eventp != NULL && *eventp != NULL);
2144 
2145 	if (isc_bind9)
2146 		isc__task_sendanddetach(taskp, eventp);
2147 	else {
2148 		(*taskp)->methods->sendanddetach(taskp, eventp);
2149 		ENSURE(*eventp == NULL);
2150 	}
2151 
2152 	ENSURE(*taskp == NULL);
2153 }
2154 
2155 unsigned int
isc_task_unsend(isc_task_t * task,void * sender,isc_eventtype_t type,void * tag,isc_eventlist_t * events)2156 isc_task_unsend(isc_task_t *task, void *sender, isc_eventtype_t type,
2157 		void *tag, isc_eventlist_t *events)
2158 {
2159 	REQUIRE(ISCAPI_TASK_VALID(task));
2160 
2161 	if (isc_bind9)
2162 		return (isc__task_unsend(task, sender, type, tag, events));
2163 
2164 	return (task->methods->unsend(task, sender, type, tag, events));
2165 }
2166 
2167 isc_result_t
isc_task_onshutdown(isc_task_t * task,isc_taskaction_t action,void * arg)2168 isc_task_onshutdown(isc_task_t *task, isc_taskaction_t action, void *arg)
2169 {
2170 	REQUIRE(ISCAPI_TASK_VALID(task));
2171 
2172 	if (isc_bind9)
2173 		return (isc__task_onshutdown(task, action, arg));
2174 
2175 	return (task->methods->onshutdown(task, action, arg));
2176 }
2177 
2178 void
isc_task_shutdown(isc_task_t * task)2179 isc_task_shutdown(isc_task_t *task) {
2180 	REQUIRE(ISCAPI_TASK_VALID(task));
2181 
2182 	if (isc_bind9)
2183 		isc__task_shutdown(task);
2184 	else
2185 		task->methods->shutdown(task);
2186 }
2187 
2188 void
isc_task_destroy(isc_task_t ** taskp)2189 isc_task_destroy(isc_task_t **taskp) {
2190 	if (!isc_bind9)
2191 		return;
2192 
2193 	isc__task_destroy(taskp);
2194 }
2195 
2196 void
isc_task_setname(isc_task_t * task,const char * name,void * tag)2197 isc_task_setname(isc_task_t *task, const char *name, void *tag) {
2198 	REQUIRE(ISCAPI_TASK_VALID(task));
2199 
2200 	if (isc_bind9)
2201 		isc__task_setname(task, name, tag);
2202 	else
2203 		task->methods->setname(task, name, tag);
2204 }
2205 
2206 unsigned int
isc_task_purge(isc_task_t * task,void * sender,isc_eventtype_t type,void * tag)2207 isc_task_purge(isc_task_t *task, void *sender, isc_eventtype_t type, void *tag)
2208 {
2209 	REQUIRE(ISCAPI_TASK_VALID(task));
2210 
2211 	if (isc_bind9)
2212 		return (isc__task_purge(task, sender, type, tag));
2213 
2214 	return (task->methods->purgeevents(task, sender, type, tag));
2215 }
2216 
2217 isc_result_t
isc_task_beginexclusive(isc_task_t * task)2218 isc_task_beginexclusive(isc_task_t *task) {
2219 	REQUIRE(ISCAPI_TASK_VALID(task));
2220 
2221 	if (isc_bind9)
2222 		return (isc__task_beginexclusive(task));
2223 
2224 	return (task->methods->beginexclusive(task));
2225 }
2226 
2227 void
isc_task_endexclusive(isc_task_t * task)2228 isc_task_endexclusive(isc_task_t *task) {
2229 	REQUIRE(ISCAPI_TASK_VALID(task));
2230 
2231 	if (isc_bind9)
2232 		isc__task_endexclusive(task);
2233 	else
2234 		task->methods->endexclusive(task);
2235 }
2236 
2237 void
isc_task_setprivilege(isc_task_t * task,isc_boolean_t priv)2238 isc_task_setprivilege(isc_task_t *task, isc_boolean_t priv) {
2239 	REQUIRE(ISCAPI_TASK_VALID(task));
2240 
2241 	if (isc_bind9)
2242 		isc__task_setprivilege(task, priv);
2243 	else
2244 		task->methods->setprivilege(task, priv);
2245 }
2246 
2247 isc_boolean_t
isc_task_privilege(isc_task_t * task)2248 isc_task_privilege(isc_task_t *task) {
2249 	REQUIRE(ISCAPI_TASK_VALID(task));
2250 
2251 	if (isc_bind9)
2252 		return (isc__task_privilege(task));
2253 
2254 	return (task->methods->privilege(task));
2255 }
2256 
2257 void
isc_task_getcurrenttime(isc_task_t * task,isc_stdtime_t * t)2258 isc_task_getcurrenttime(isc_task_t *task, isc_stdtime_t *t) {
2259 	if (!isc_bind9)
2260 		return;
2261 
2262 	isc__task_getcurrenttime(task, t);
2263 }
2264 
2265 /*%
2266  * This is necessary for libisc's internal timer implementation.  Other
2267  * implementation might skip implementing this.
2268  */
2269 unsigned int
isc_task_purgerange(isc_task_t * task,void * sender,isc_eventtype_t first,isc_eventtype_t last,void * tag)2270 isc_task_purgerange(isc_task_t *task, void *sender, isc_eventtype_t first,
2271 		    isc_eventtype_t last, void *tag)
2272 {
2273 	REQUIRE(ISCAPI_TASK_VALID(task));
2274 
2275 	if (isc_bind9)
2276 		return (isc__task_purgerange(task, sender, first, last, tag));
2277 
2278 	return (task->methods->purgerange(task, sender, first, last, tag));
2279 }
2280