xref: /openbsd/usr.bin/dig/lib/isc/task.c (revision 1fb015a8)
/*
 * Copyright (C) Internet Systems Consortium, Inc. ("ISC")
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
 * REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
 * AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
 * INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
 * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
 * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*! \file
 * \author Principal Author: Bob Halley
 */

/*
 * XXXRTH  Need to document the states a task can be in, and the rules
 * for changing states.  (A hedged sketch follows the task_state_t
 * enum below.)
 */

#include <stdlib.h>
#include <string.h>
#include <time.h>

#include <isc/event.h>
#include <isc/task.h>
#include <isc/util.h>

#include "task_p.h"

/***
 *** Types.
 ***/

typedef enum {
	task_state_idle, task_state_ready, task_state_running,
	task_state_done
} task_state_t;

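/*
 * The XXXRTH note above asks for documentation of task states.  A
 * hedged sketch of the transitions, as they can be read off this file
 * (not an authoritative specification):
 *
 *	idle    -> ready	task_send(), task_shutdown() or
 *				task_detach() on an idle task
 *	ready   -> running	dispatch() pops the task off the
 *				ready queue
 *	running -> idle		event queue drained and the task is
 *				not shutting down
 *	running -> ready	quantum expired with events pending
 *	running -> done		no references left and
 *				TASK_F_SHUTTINGDOWN is set;
 *				task_finished() then frees the task
 *
 * "done" is terminal, and only dispatch() moves a task out of "ready".
 */
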
struct isc_task {
	/* Not locked. */
	isc_taskmgr_t *			manager;
	/* Locked by task lock. */
	task_state_t			state;
	unsigned int			references;
	isc_eventlist_t			events;
	isc_eventlist_t			on_shutdown;
	unsigned int			nevents;
	unsigned int			quantum;
	unsigned int			flags;
	time_t				now;
	char				name[16];
	void *				tag;
	/* Locked by task manager lock. */
	LINK(isc_task_t)		link;
	LINK(isc_task_t)		ready_link;
	LINK(isc_task_t)		ready_priority_link;
};

#define TASK_F_SHUTTINGDOWN		0x01
#define TASK_F_PRIVILEGED		0x02

#define TASK_SHUTTINGDOWN(t)		(((t)->flags & TASK_F_SHUTTINGDOWN) \
					 != 0)

typedef ISC_LIST(isc_task_t)	isc_tasklist_t;

struct isc_taskmgr {
	/* Not locked. */
	/* Locked by task manager lock. */
	unsigned int			default_quantum;
	LIST(isc_task_t)		tasks;
	isc_tasklist_t			ready_tasks;
	isc_tasklist_t			ready_priority_tasks;
	isc_taskmgrmode_t		mode;
	unsigned int			tasks_running;
	unsigned int			tasks_ready;
	int				pause_requested;
	int				exclusive_requested;
	int				exiting;

	/*
	 * Multiple threads can read/write 'excl' at the same time, so we need
	 * to protect the access.  We can't use 'lock' since isc_task_detach()
	 * will try to acquire it.
	 */
	isc_task_t			*excl;
	unsigned int			refs;
};

#define DEFAULT_TASKMGR_QUANTUM		10
#define DEFAULT_DEFAULT_QUANTUM		5
#define FINISHED(m)			((m)->exiting && EMPTY((m)->tasks))

static isc_taskmgr_t *taskmgr = NULL;

static inline int
empty_readyq(isc_taskmgr_t *manager);

static inline isc_task_t *
pop_readyq(isc_taskmgr_t *manager);

static inline void
push_readyq(isc_taskmgr_t *manager, isc_task_t *task);

/***
 *** Tasks.
 ***/

static void
task_finished(isc_task_t *task) {
	isc_taskmgr_t *manager = task->manager;

	REQUIRE(EMPTY(task->events));
	REQUIRE(task->nevents == 0);
	REQUIRE(EMPTY(task->on_shutdown));
	REQUIRE(task->references == 0);
	REQUIRE(task->state == task_state_done);

	UNLINK(manager->tasks, task, link);

	free(task);
}

isc_result_t
isc_task_create(isc_taskmgr_t *manager, unsigned int quantum,
		isc_task_t **taskp)
{
	isc_task_t *task;
	int exiting;

	REQUIRE(taskp != NULL && *taskp == NULL);

	task = malloc(sizeof(*task));
	if (task == NULL)
		return (ISC_R_NOMEMORY);
	task->manager = manager;
	task->state = task_state_idle;
	task->references = 1;
	INIT_LIST(task->events);
	INIT_LIST(task->on_shutdown);
	task->nevents = 0;
	task->quantum = quantum;
	task->flags = 0;
	task->now = 0;
	memset(task->name, 0, sizeof(task->name));
	task->tag = NULL;
	INIT_LINK(task, link);
	INIT_LINK(task, ready_link);
	INIT_LINK(task, ready_priority_link);

	exiting = 0;
	if (!manager->exiting) {
		if (task->quantum == 0)
			task->quantum = manager->default_quantum;
		APPEND(manager->tasks, task, link);
	} else
		exiting = 1;

	if (exiting) {
		free(task);
		return (ISC_R_SHUTTINGDOWN);
	}

	*taskp = (isc_task_t *)task;
	return (ISC_R_SUCCESS);
}

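/*
 * A minimal usage sketch (hypothetical caller code; 'manager' is
 * assumed to come from isc_taskmgr_create()).  Passing a quantum of 0
 * selects the manager's default_quantum:
 *
 *	isc_task_t *task = NULL;
 *	isc_result_t result;
 *
 *	result = isc_task_create(manager, 0, &task);
 *	if (result != ISC_R_SUCCESS)
 *		return (result);
 *	...
 *	isc_task_detach(&task);
 *
 * The task starts with one reference; isc_task_detach() drops it, and
 * the run loop then shuts the task down and frees it.
 */
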
void
isc_task_attach(isc_task_t *source0, isc_task_t **targetp) {
	isc_task_t *source = (isc_task_t *)source0;

	/*
	 * Attach *targetp to source.
	 */

	REQUIRE(targetp != NULL && *targetp == NULL);

	source->references++;

	*targetp = (isc_task_t *)source;
}

static inline int
task_shutdown(isc_task_t *task) {
	int was_idle = 0;
	isc_event_t *event, *prev;

	/*
	 * Caller must be holding the task's lock.
	 */

	if (!TASK_SHUTTINGDOWN(task)) {
		task->flags |= TASK_F_SHUTTINGDOWN;
		if (task->state == task_state_idle) {
			INSIST(EMPTY(task->events));
			task->state = task_state_ready;
			was_idle = 1;
		}
		INSIST(task->state == task_state_ready ||
		       task->state == task_state_running);

		/*
		 * Note that we post shutdown events LIFO.
		 */
		for (event = TAIL(task->on_shutdown);
		     event != NULL;
		     event = prev) {
			prev = PREV(event, ev_link);
			DEQUEUE(task->on_shutdown, event, ev_link);
			ENQUEUE(task->events, event, ev_link);
			task->nevents++;
		}
	}

	return (was_idle);
}

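/*
 * An illustration of the LIFO posting above (the event names are
 * hypothetical): if shutdown events A, B and C were placed on
 * 'on_shutdown' in that order, the loop walks from the tail, so they
 * are enqueued on 'events' as C, B, A, and the most recently
 * registered shutdown action runs first.
 */
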
/*
 * Moves a task onto the appropriate run queue.
 *
 * Caller must NOT hold manager lock.
 */
static inline void
task_ready(isc_task_t *task) {
	isc_taskmgr_t *manager = task->manager;

	REQUIRE(task->state == task_state_ready);

	push_readyq(manager, task);
}

static inline int
task_detach(isc_task_t *task) {

	/*
	 * Caller must be holding the task lock.
	 */

	REQUIRE(task->references > 0);

	task->references--;
	if (task->references == 0 && task->state == task_state_idle) {
		INSIST(EMPTY(task->events));
		/*
		 * There are no references to this task, and no
		 * pending events.  We could try to optimize and
		 * either initiate shutdown or clean up the task,
		 * depending on its state, but it's easier to just
		 * make the task ready and allow run() or the event
		 * loop to deal with shutting down and termination.
		 */
		task->state = task_state_ready;
		return (1);
	}

	return (0);
}

void
isc_task_detach(isc_task_t **taskp) {
	isc_task_t *task;
	int was_idle;

	/*
	 * Detach *taskp from its task.
	 */

	REQUIRE(taskp != NULL);
	task = (isc_task_t *)*taskp;

	was_idle = task_detach(task);

	if (was_idle)
		task_ready(task);

	*taskp = NULL;
}

static inline int
task_send(isc_task_t *task, isc_event_t **eventp) {
	int was_idle = 0;
	isc_event_t *event;

	/*
	 * Caller must be holding the task lock.
	 */

	REQUIRE(eventp != NULL);
	event = *eventp;
	REQUIRE(event != NULL);
	REQUIRE(event->ev_type > 0);
	REQUIRE(task->state != task_state_done);
	REQUIRE(!ISC_LINK_LINKED(event, ev_ratelink));

	if (task->state == task_state_idle) {
		was_idle = 1;
		INSIST(EMPTY(task->events));
		task->state = task_state_ready;
	}
	INSIST(task->state == task_state_ready ||
	       task->state == task_state_running);
	ENQUEUE(task->events, event, ev_link);
	task->nevents++;
	*eventp = NULL;

	return (was_idle);
}

void
isc_task_send(isc_task_t *task, isc_event_t **eventp) {
	int was_idle;

	/*
	 * Send '*event' to 'task'.
	 */

	/*
	 * We're trying hard to hold locks for as short a time as possible.
	 * We're also trying to hold as few locks as possible.  This is why
	 * some processing is deferred until after the lock is released.
	 */
	was_idle = task_send(task, eventp);

	if (was_idle) {
		/*
		 * We need to add this task to the ready queue.
		 *
		 * We've waited until now to do it because making a task
		 * ready requires locking the manager.  If we tried to do
		 * this while holding the task lock, we could deadlock.
		 *
		 * We've changed the state to ready, so no one else will
		 * be trying to add this task to the ready queue.  The
		 * only way to leave the ready state is by executing the
		 * task.  It thus doesn't matter if events are added,
		 * removed, or a shutdown is started in the interval
		 * between the time we released the task lock, and the time
		 * we add the task to the ready queue.
		 */
		task_ready(task);
	}
}

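/*
 * A hedged sending sketch.  It assumes an event allocator along the
 * lines of isc_event_allocate() from <isc/event.h>; MY_EVENT_TYPE,
 * my_action and my_arg are hypothetical, and the allocator's exact
 * signature in this trimmed tree may differ:
 *
 *	isc_event_t *event;
 *
 *	event = isc_event_allocate(sender, MY_EVENT_TYPE, my_action,
 *				   my_arg, sizeof(*event));
 *	if (event == NULL)
 *		return (ISC_R_NOMEMORY);
 *	isc_task_send(task, &event);
 *
 * On return, 'event' has been set to NULL: the task owns the event,
 * and by convention the receiving action frees it with
 * isc_event_free().
 */
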
void
isc_task_sendanddetach(isc_task_t **taskp, isc_event_t **eventp) {
	int idle1, idle2;
	isc_task_t *task;

	/*
	 * Send '*event' to '*taskp' and then detach '*taskp' from its
	 * task.
	 */

	REQUIRE(taskp != NULL);
	task = (isc_task_t *)*taskp;

	idle1 = task_send(task, eventp);
	idle2 = task_detach(task);

	/*
	 * If idle1, then idle2 shouldn't be true as well since we're holding
	 * the task lock, and thus the task cannot switch from ready back to
	 * idle.
	 */
	INSIST(!(idle1 && idle2));

	if (idle1 || idle2)
		task_ready(task);

	*taskp = NULL;
}

#define PURGE_OK(event)	(((event)->ev_attributes & ISC_EVENTATTR_NOPURGE) == 0)

static unsigned int
dequeue_events(isc_task_t *task, void *sender, isc_eventtype_t first,
	       isc_eventtype_t last, void *tag,
	       isc_eventlist_t *events, int purging)
{
	isc_event_t *event, *next_event;
	unsigned int count = 0;

	REQUIRE(last >= first);

	/*
	 * Events matching 'sender', whose type is >= first and <= last, and
	 * whose tag is 'tag' will be dequeued.  If 'purging', matching events
	 * which are marked as unpurgable will not be dequeued.
	 *
	 * sender == NULL means "any sender", and tag == NULL means "any tag".
	 */

	for (event = HEAD(task->events); event != NULL; event = next_event) {
		next_event = NEXT(event, ev_link);
		if (event->ev_type >= first && event->ev_type <= last &&
		    (sender == NULL || event->ev_sender == sender) &&
		    (tag == NULL || event->ev_tag == tag) &&
		    (!purging || PURGE_OK(event))) {
			DEQUEUE(task->events, event, ev_link);
			task->nevents--;
			ENQUEUE(*events, event, ev_link);
			count++;
		}
	}

	return (count);
}

unsigned int
isc_task_purgerange(isc_task_t *task, void *sender, isc_eventtype_t first,
		    isc_eventtype_t last, void *tag)
{
	unsigned int count;
	isc_eventlist_t events;
	isc_event_t *event, *next_event;

	/*
	 * Purge events from a task's event queue.
	 */

	ISC_LIST_INIT(events);

	count = dequeue_events(task, sender, first, last, tag, &events, 1);

	for (event = HEAD(events); event != NULL; event = next_event) {
		next_event = NEXT(event, ev_link);
		ISC_LIST_UNLINK(events, event, ev_link);
		isc_event_free(&event);
	}

	/*
	 * Note that purging never changes the state of the task.
	 */

	return (count);
}

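/*
 * A hedged purge sketch (FIRST and LAST are hypothetical event-type
 * bounds): drop every purgeable event in that range from 'sender',
 * regardless of tag:
 *
 *	unsigned int n;
 *
 *	n = isc_task_purgerange(task, sender, FIRST, LAST, NULL);
 *
 * Events marked ISC_EVENTATTR_NOPURGE survive the purge, and the
 * task's state is never changed by it.
 */
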
void
isc_task_setname(isc_task_t *task, const char *name, void *tag) {
	/*
	 * Name 'task'.
	 */

	strlcpy(task->name, name, sizeof(task->name));
	task->tag = tag;
}

/***
 *** Task Manager.
 ***/

/*
 * Return 1 if the current ready list for the manager is empty.  The
 * current list is either ready_tasks or ready_priority_tasks,
 * depending on whether the manager is currently in normal or
 * privileged execution mode.
 *
 * Caller must hold the task manager lock.
 */
static inline int
empty_readyq(isc_taskmgr_t *manager) {
	isc_tasklist_t queue;

	if (manager->mode == isc_taskmgrmode_normal)
		queue = manager->ready_tasks;
	else
		queue = manager->ready_priority_tasks;

	return (EMPTY(queue));
}

/*
 * Dequeue and return a pointer to the first task on the current ready
 * list for the manager.
 * If the task is privileged, dequeue it from the other ready list
 * as well.
 *
 * Caller must hold the task manager lock.
 */
static inline isc_task_t *
pop_readyq(isc_taskmgr_t *manager) {
	isc_task_t *task;

	if (manager->mode == isc_taskmgrmode_normal)
		task = HEAD(manager->ready_tasks);
	else
		task = HEAD(manager->ready_priority_tasks);

	if (task != NULL) {
		DEQUEUE(manager->ready_tasks, task, ready_link);
		if (ISC_LINK_LINKED(task, ready_priority_link))
			DEQUEUE(manager->ready_priority_tasks, task,
				ready_priority_link);
	}

	return (task);
}

/*
 * Push 'task' onto the ready_tasks queue.  If 'task' has the privilege
 * flag set, then also push it onto the ready_priority_tasks queue.
 *
 * Caller must hold the task manager lock.
 */
static inline void
push_readyq(isc_taskmgr_t *manager, isc_task_t *task) {
	ENQUEUE(manager->ready_tasks, task, ready_link);
	if ((task->flags & TASK_F_PRIVILEGED) != 0)
		ENQUEUE(manager->ready_priority_tasks, task,
			ready_priority_link);
	manager->tasks_ready++;
}

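/*
 * Queue invariant, as a sketch derived from push_readyq() and
 * pop_readyq(): a task with TASK_F_PRIVILEGED set is linked on BOTH
 * ready_tasks and ready_priority_tasks.  Whichever list the manager's
 * mode selects, pop_readyq() unlinks the task from both, so one push
 * can never yield two dispatches.
 */
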
static void
dispatch(isc_taskmgr_t *manager) {
	isc_task_t *task;
	unsigned int total_dispatch_count = 0;
	isc_tasklist_t new_ready_tasks;
	isc_tasklist_t new_priority_tasks;
	unsigned int tasks_ready = 0;

	ISC_LIST_INIT(new_ready_tasks);
	ISC_LIST_INIT(new_priority_tasks);

	while (!FINISHED(manager)) {
		if (total_dispatch_count >= DEFAULT_TASKMGR_QUANTUM ||
		    empty_readyq(manager))
			break;

		task = pop_readyq(manager);
		if (task != NULL) {
			unsigned int dispatch_count = 0;
			int done = 0;
			int requeue = 0;
			int finished = 0;
			isc_event_t *event;

			/*
			 * Note we only unlock the manager lock if we actually
			 * have a task to do.  We must reacquire the manager
			 * lock before exiting the 'if (task != NULL)' block.
			 */
			manager->tasks_ready--;
			manager->tasks_running++;

			INSIST(task->state == task_state_ready);
			task->state = task_state_running;
			time(&task->now);
			do {
				if (!EMPTY(task->events)) {
					event = HEAD(task->events);
					DEQUEUE(task->events, event, ev_link);
					task->nevents--;

					/*
					 * Execute the event action.
					 */
					if (event->ev_action != NULL) {
						(event->ev_action)(
							(isc_task_t *)task,
							event);
					}
					dispatch_count++;
					total_dispatch_count++;
				}

				if (task->references == 0 &&
				    EMPTY(task->events) &&
				    !TASK_SHUTTINGDOWN(task)) {
					int was_idle;

					/*
					 * There are no references and no
					 * pending events for this task,
					 * which means it will not become
					 * runnable again via an external
					 * action (such as sending an event
					 * or detaching).
					 *
					 * We initiate shutdown to prevent
					 * it from becoming a zombie.
					 *
					 * We do this here instead of in
					 * the "if EMPTY(task->events)" block
					 * below because:
					 *
					 *	If we post no shutdown events,
					 *	we want the task to finish.
					 *
					 *	If we did post shutdown events,
					 *	we still want the task's
					 *	quantum to be applied.
					 */
					was_idle = task_shutdown(task);
					INSIST(!was_idle);
				}

				if (EMPTY(task->events)) {
					/*
					 * Nothing else to do for this task
					 * right now.
					 */
					if (task->references == 0 &&
					    TASK_SHUTTINGDOWN(task)) {
						/*
						 * The task is done.
						 */
						finished = 1;
						task->state = task_state_done;
					} else
						task->state = task_state_idle;
					done = 1;
				} else if (dispatch_count >= task->quantum) {
					/*
					 * Our quantum has expired, but
					 * there is more work to be done.
					 * We'll requeue it to the ready
					 * queue later.
					 *
					 * We don't check quantum until
					 * dispatching at least one event,
					 * so the minimum quantum is one.
					 */
					task->state = task_state_ready;
					requeue = 1;
					done = 1;
				}
			} while (!done);

			if (finished)
				task_finished(task);

			manager->tasks_running--;
			if (requeue) {
				/*
				 * We know we're awake, so we don't have
				 * to wakeup any sleeping threads if the
				 * ready queue is empty before we requeue.
				 *
				 * A possible optimization if the queue is
				 * empty is to 'goto' the 'if (task != NULL)'
				 * block, avoiding the ENQUEUE of the task
				 * and the subsequent immediate DEQUEUE
				 * (since it is the only executable task).
				 * We don't do this because then we'd be
				 * skipping the exit_requested check.  The
				 * cost of ENQUEUE is low anyway, especially
				 * when you consider that we'd have to do
				 * an extra EMPTY check to see if we could
				 * do the optimization.  If the ready queue
				 * were usually nonempty, the 'optimization'
				 * might even hurt rather than help.
				 */
				ENQUEUE(new_ready_tasks, task, ready_link);
				if ((task->flags & TASK_F_PRIVILEGED) != 0)
					ENQUEUE(new_priority_tasks, task,
						ready_priority_link);
				tasks_ready++;
			}
		}
	}

	ISC_LIST_APPENDLIST(manager->ready_tasks, new_ready_tasks, ready_link);
	ISC_LIST_APPENDLIST(manager->ready_priority_tasks, new_priority_tasks,
			    ready_priority_link);
	manager->tasks_ready += tasks_ready;
	if (empty_readyq(manager))
		manager->mode = isc_taskmgrmode_normal;
}

static void
manager_free(isc_taskmgr_t *manager) {
	free(manager);
	taskmgr = NULL;
}

isc_result_t
isc_taskmgr_create(unsigned int workers,
		   unsigned int default_quantum, isc_taskmgr_t **managerp)
{
	unsigned int i, started = 0;
	isc_taskmgr_t *manager;

	/*
	 * Create a new task manager.
	 */

	REQUIRE(workers > 0);
	REQUIRE(managerp != NULL && *managerp == NULL);

	UNUSED(i);
	UNUSED(started);

	if (taskmgr != NULL) {
		if (taskmgr->refs == 0)
			return (ISC_R_SHUTTINGDOWN);
		taskmgr->refs++;
		*managerp = (isc_taskmgr_t *)taskmgr;
		return (ISC_R_SUCCESS);
	}

	manager = malloc(sizeof(*manager));
	if (manager == NULL)
		return (ISC_R_NOMEMORY);
	manager->mode = isc_taskmgrmode_normal;

	if (default_quantum == 0)
		default_quantum = DEFAULT_DEFAULT_QUANTUM;
	manager->default_quantum = default_quantum;
	INIT_LIST(manager->tasks);
	INIT_LIST(manager->ready_tasks);
	INIT_LIST(manager->ready_priority_tasks);
	manager->tasks_running = 0;
	manager->tasks_ready = 0;
	manager->exclusive_requested = 0;
	manager->pause_requested = 0;
	manager->exiting = 0;
	manager->excl = NULL;

	manager->refs = 1;
	taskmgr = manager;

	*managerp = (isc_taskmgr_t *)manager;

	return (ISC_R_SUCCESS);
}

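/*
 * A minimal setup sketch (hypothetical caller code).  'workers' must
 * be positive but is otherwise unused here: this trimmed, thread-less
 * version only runs tasks when the caller invokes
 * isc_taskmgr_dispatch():
 *
 *	isc_taskmgr_t *manager = NULL;
 *	isc_result_t result;
 *
 *	result = isc_taskmgr_create(1, 0, &manager);
 *	if (result != ISC_R_SUCCESS)
 *		return (result);
 *	... create tasks, send events, dispatch ...
 *	isc_taskmgr_destroy(&manager);
 *
 * A second isc_taskmgr_create() call while the global 'taskmgr' is
 * alive just takes another reference to it.
 */
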
void
isc_taskmgr_destroy(isc_taskmgr_t **managerp) {
	isc_taskmgr_t *manager;
	isc_task_t *task;
	unsigned int i;

	/*
	 * Destroy '*managerp'.
	 */

	REQUIRE(managerp != NULL);
	manager = (isc_taskmgr_t *)*managerp;

	UNUSED(i);

	manager->refs--;
	if (manager->refs > 0) {
		*managerp = NULL;
		return;
	}

	/*
	 * Only one non-worker thread may ever call this routine.
	 * If a worker thread wants to initiate shutdown of the
	 * task manager, it should ask some non-worker thread to call
	 * isc_taskmgr_destroy(), e.g. by signalling a condition variable
	 * that the startup thread is sleeping on.
	 */

	/*
	 * Detach the exclusive task before acquiring the manager lock.
	 */
	if (manager->excl != NULL)
		isc_task_detach((isc_task_t **)&manager->excl);

	/*
	 * Make sure we only get called once.
	 */
	INSIST(!manager->exiting);
	manager->exiting = 1;

	/*
	 * If privileged mode was on, turn it off.
	 */
	manager->mode = isc_taskmgrmode_normal;

	/*
	 * Post shutdown event(s) to every task (if they haven't already been
	 * posted).
	 */
	for (task = HEAD(manager->tasks);
	     task != NULL;
	     task = NEXT(task, link)) {
		if (task_shutdown(task))
			push_readyq(manager, task);
	}

	/*
	 * Dispatch the shutdown events.
	 */
	while (isc_taskmgr_ready((isc_taskmgr_t *)manager))
		(void)isc_taskmgr_dispatch((isc_taskmgr_t *)manager);
	INSIST(ISC_LIST_EMPTY(manager->tasks));
	taskmgr = NULL;

	manager_free(manager);

	*managerp = NULL;
}

int
isc_taskmgr_ready(isc_taskmgr_t *manager) {
	int is_ready;

	if (manager == NULL)
		manager = taskmgr;
	if (manager == NULL)
		return (0);

	is_ready = !empty_readyq(manager);

	return (is_ready);
}

isc_result_t
isc_taskmgr_dispatch(isc_taskmgr_t *manager) {
	if (manager == NULL)
		manager = taskmgr;
	if (manager == NULL)
		return (ISC_R_NOTFOUND);

	dispatch(manager);

	return (ISC_R_SUCCESS);
}
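
/*
 * A hedged sketch of how a caller drains the manager, mirroring the
 * loop isc_taskmgr_destroy() uses above:
 *
 *	while (isc_taskmgr_ready(manager))
 *		(void)isc_taskmgr_dispatch(manager);
 *
 * Passing NULL for 'manager' selects the global 'taskmgr', if any.
 */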