/*-
 * Copyright (c) 2000 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	$FreeBSD: src/sys/kern/subr_taskqueue.c,v 1.1.2.3 2003/09/10 00:40:39 ken Exp $
 *	$DragonFly: src/sys/kern/subr_taskqueue.c,v 1.9 2005/10/13 00:02:22 dillon Exp $
 */

#include <sys/param.h>
#include <sys/queue.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/taskqueue.h>
#include <sys/interrupt.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/kthread.h>
#include <sys/thread2.h>

#include <machine/ipl.h>

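/*
 * Generic kernel task queues.  A task queue collects pending "struct task"
 * entries and hands them to a consumer (a software interrupt or a kernel
 * thread) for deferred execution.  A minimal usage sketch, assuming the
 * TASK_INIT() initializer from <sys/taskqueue.h>; my_task, my_task_func
 * and my_softc are hypothetical names, not part of this file:
 *
 *	static void my_task_func(void *context, int pending);
 *	static struct task my_task;
 *
 *	TASK_INIT(&my_task, 0, my_task_func, my_softc);
 *	taskqueue_enqueue(taskqueue_swi, &my_task);
 *
 * my_task_func() later runs from the queue's consumer with the supplied
 * context pointer and the number of enqueues coalesced while pending.
 */
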
MALLOC_DEFINE(M_TASKQUEUE, "taskqueue", "Task Queues");

static STAILQ_HEAD(taskqueue_list, taskqueue) taskqueue_queues;

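/*
 * A task queue: an entry on the global taskqueue_queues list, the queue
 * of pending tasks (kept in priority order, highest priority first), an
 * optional consumer notification hook (tq_enqueue/tq_context), and a
 * flag marking a queue that is being torn down.
 */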
struct taskqueue {
	STAILQ_ENTRY(taskqueue)	tq_link;
	STAILQ_HEAD(, task)	tq_queue;
	const char		*tq_name;
	taskqueue_enqueue_fn	tq_enqueue;
	void			*tq_context;
	int			tq_draining;
};

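/*
 * Create a task queue.  The enqueue callback, if non-NULL, is invoked
 * with 'context' each time a task is newly queued so the consumer can be
 * notified.  Returns NULL if the allocation fails.
 */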
struct taskqueue *
taskqueue_create(const char *name, int mflags,
		 taskqueue_enqueue_fn enqueue, void *context)
{
	struct taskqueue *queue;
	static int once = 1;

	queue = malloc(sizeof(struct taskqueue), M_TASKQUEUE, mflags);
	if (!queue)
		return 0;
	STAILQ_INIT(&queue->tq_queue);
	queue->tq_name = name;
	queue->tq_enqueue = enqueue;
	queue->tq_context = context;
	queue->tq_draining = 0;

	crit_enter();
	if (once) {
		STAILQ_INIT(&taskqueue_queues);
		once = 0;
	}
	STAILQ_INSERT_TAIL(&taskqueue_queues, queue, tq_link);
	crit_exit();

	return queue;
}

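/*
 * Free a task queue: mark it draining so no new tasks can be enqueued,
 * run any tasks still pending, then unlink the queue and release it.
 */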
void
taskqueue_free(struct taskqueue *queue)
{
	crit_enter();
	queue->tq_draining = 1;
	crit_exit();

	taskqueue_run(queue);

	crit_enter();
	STAILQ_REMOVE(&taskqueue_queues, queue, taskqueue, tq_link);
	crit_exit();

	free(queue, M_TASKQUEUE);
}

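/*
 * Look up a task queue by name.  Returns NULL if no queue with that name
 * exists.
 */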
struct taskqueue *
taskqueue_find(const char *name)
{
	struct taskqueue *queue;

	crit_enter();
	STAILQ_FOREACH(queue, &taskqueue_queues, tq_link) {
		if (!strcmp(queue->tq_name, name)) {
			crit_exit();
			return queue;
		}
	}
	crit_exit();
	return 0;
}

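/*
 * Enqueue a task.  If the task is already pending, only its pending
 * count is bumped; otherwise it is inserted in priority order (higher
 * ta_priority runs first) and the queue's consumer is notified through
 * tq_enqueue.  Returns EPIPE if the queue is being drained.
 */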
int
taskqueue_enqueue(struct taskqueue *queue, struct task *task)
{
	struct task *ins;
	struct task *prev;

	crit_enter();

	/*
	 * Don't allow new tasks on a queue which is being freed.
	 */
	if (queue->tq_draining) {
		crit_exit();
		return EPIPE;
	}

	/*
	 * Count multiple enqueues.
	 */
	if (task->ta_pending) {
		task->ta_pending++;
		crit_exit();
		return 0;
	}

	/*
	 * Optimise the case when all tasks have the same priority.
	 */
	prev = STAILQ_LAST(&queue->tq_queue, task, ta_link);
	if (!prev || prev->ta_priority >= task->ta_priority) {
		STAILQ_INSERT_TAIL(&queue->tq_queue, task, ta_link);
	} else {
		prev = 0;
		for (ins = STAILQ_FIRST(&queue->tq_queue); ins;
		     prev = ins, ins = STAILQ_NEXT(ins, ta_link))
			if (ins->ta_priority < task->ta_priority)
				break;

		if (prev)
			STAILQ_INSERT_AFTER(&queue->tq_queue, prev, task, ta_link);
		else
			STAILQ_INSERT_HEAD(&queue->tq_queue, task, ta_link);
	}

	task->ta_pending = 1;
	if (queue->tq_enqueue)
		queue->tq_enqueue(queue->tq_context);

	crit_exit();

	return 0;
}

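/*
 * Run all tasks currently on the queue.  Each task is unlinked and its
 * pending count cleared inside a critical section before its handler is
 * called, so a task may re-enqueue itself from its own handler.
 */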
void
taskqueue_run(struct taskqueue *queue)
{
	struct task *task;
	int pending;

	crit_enter();
	while (STAILQ_FIRST(&queue->tq_queue)) {
		/*
		 * Carefully remove the first task from the queue and
		 * zero its pending count.
		 */
		task = STAILQ_FIRST(&queue->tq_queue);
		STAILQ_REMOVE_HEAD(&queue->tq_queue, ta_link);
		pending = task->ta_pending;
		task->ta_pending = 0;
		crit_exit();

		task->ta_func(task->ta_context, pending);

		crit_enter();
	}
	crit_exit();
}

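/*
 * The "swi" task queue is serviced from a software interrupt: enqueueing
 * schedules the taskqueue soft interrupt, whose handler drains the queue.
 */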
static void
taskqueue_swi_enqueue(void *context)
{
	setsofttq();
}

static void
taskqueue_swi_run(void *arg, void *frame)
{
	taskqueue_run(taskqueue_swi);
}

TASKQUEUE_DEFINE(swi, taskqueue_swi_enqueue, 0,
	 register_swi(SWI_TQ, taskqueue_swi_run, NULL, "swi_taskq", NULL));

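/*
 * The "thread" task queues are serviced by per-cpu kernel threads, which
 * sleep while their queue is empty and are woken by the enqueue hook.
 */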
static void
taskqueue_kthread(void *arg)
{
	for (;;) {
		taskqueue_run(taskqueue_thread[mycpuid]);
		crit_enter();
		if (STAILQ_EMPTY(&taskqueue_thread[mycpuid]->tq_queue))
			tsleep(taskqueue_thread[mycpuid], 0, "tqthr", 0);
		crit_exit();
	}
}

static void
taskqueue_thread_enqueue(void *context)
{
	wakeup(taskqueue_thread[mycpuid]);
}

struct taskqueue *taskqueue_thread[MAXCPU];
static struct thread *taskqueue_thread_td[MAXCPU];

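/*
 * Create one thread-backed task queue and one worker thread per cpu at
 * configure time.
 */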
static void
taskqueue_init(void)
{
	int cpu;

	for (cpu = 0; cpu < ncpus; cpu++) {
		taskqueue_thread[cpu] = taskqueue_create("thread", M_INTWAIT,
		    taskqueue_thread_enqueue, NULL);
		kthread_create(taskqueue_kthread, NULL,
		    &taskqueue_thread_td[cpu], "taskqueue");
	}
}

SYSINIT(taskqueueinit, SI_SUB_CONFIGURE, SI_ORDER_SECOND, taskqueue_init, NULL);
243