/*-
 * Copyright (c) 2000 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	$FreeBSD: src/sys/kern/subr_taskqueue.c,v 1.1.2.3 2003/09/10 00:40:39 ken Exp $
 *	$DragonFly: src/sys/kern/subr_taskqueue.c,v 1.13 2008/06/07 11:44:04 mneumann Exp $
 */

#include <sys/param.h>
#include <sys/queue.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/taskqueue.h>
#include <sys/interrupt.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/kthread.h>
#include <sys/thread2.h>

MALLOC_DEFINE(M_TASKQUEUE, "taskqueue", "Task Queues");

/*
 * Global list of all task queues in the system, protected by critical
 * sections.
 */
static STAILQ_HEAD(taskqueue_list, taskqueue) taskqueue_queues;

struct taskqueue {
	STAILQ_ENTRY(taskqueue)	tq_link;	/* taskqueue_queues linkage */
	STAILQ_HEAD(, task)	tq_queue;	/* pending tasks, priority order */
	const char		*tq_name;
	taskqueue_enqueue_fn	tq_enqueue;	/* notify consumer of new work */
	void			*tq_context;	/* argument for tq_enqueue */
	int			tq_draining;	/* non-zero while being freed */
};

/*
 * Create a task queue.  The mflags are passed to kmalloc(); the enqueue
 * function is called with the supplied context whenever a task first
 * becomes pending, so the consumer can arrange for the queue to run.
 */
struct taskqueue *
taskqueue_create(const char *name, int mflags,
		 taskqueue_enqueue_fn enqueue, void *context)
{
	struct taskqueue *queue;
	static int once = 1;

	queue = kmalloc(sizeof(struct taskqueue), M_TASKQUEUE, mflags);
	if (queue == NULL)
		return NULL;
	STAILQ_INIT(&queue->tq_queue);
	queue->tq_name = name;
	queue->tq_enqueue = enqueue;
	queue->tq_context = context;
	queue->tq_draining = 0;

	crit_enter();
	if (once) {
		STAILQ_INIT(&taskqueue_queues);
		once = 0;
	}
	STAILQ_INSERT_TAIL(&taskqueue_queues, queue, tq_link);
	crit_exit();

	return queue;
}
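
/*
 * Usage sketch: a consumer would typically pair taskqueue_create() with
 * its own wakeup mechanism.  The names mydev_enqueue() and "sc" below
 * are hypothetical placeholders:
 *
 *	struct taskqueue *tq;
 *
 *	tq = taskqueue_create("mydev", M_WAITOK, mydev_enqueue, sc);
 *	if (tq == NULL)
 *		return ENOMEM;
 */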

/*
 * Free a task queue.  Any tasks still pending are run to completion
 * before the queue itself is unlinked and released; new enqueues are
 * refused once draining starts.
 */
void
taskqueue_free(struct taskqueue *queue)
{
	crit_enter();
	queue->tq_draining = 1;
	crit_exit();

	taskqueue_run(queue);

	crit_enter();
	STAILQ_REMOVE(&taskqueue_queues, queue, taskqueue, tq_link);
	crit_exit();

	kfree(queue, M_TASKQUEUE);
}

/*
 * Look up a task queue by name.  Returns NULL if no queue with that
 * name has been created.
 */
struct taskqueue *
taskqueue_find(const char *name)
{
	struct taskqueue *queue;

	crit_enter();
	STAILQ_FOREACH(queue, &taskqueue_queues, tq_link) {
		if (strcmp(queue->tq_name, name) == 0) {
			crit_exit();
			return queue;
		}
	}
	crit_exit();
	return NULL;
}

/*
 * NOTE!  If using the per-cpu taskqueues ``taskqueue_thread[mycpuid]'',
 * be sure NOT TO SHARE the ``task'' between CPUs.  TASKS ARE NOT LOCKED.
 * So either use a throwaway task which will only be enqueued once, or
 * use one task per CPU!  A usage sketch follows this function.
 */
int
taskqueue_enqueue(struct taskqueue *queue, struct task *task)
{
	struct task *ins;
	struct task *prev;

	crit_enter();

	/*
	 * Don't allow new tasks on a queue which is being freed.
	 */
	if (queue->tq_draining) {
		crit_exit();
		return EPIPE;
	}

	/*
	 * Count multiple enqueues.
	 */
	if (task->ta_pending) {
		task->ta_pending++;
		crit_exit();
		return 0;
	}

	/*
	 * Optimise the case when all tasks have the same priority.
	 */
	prev = STAILQ_LAST(&queue->tq_queue, task, ta_link);
	if (prev == NULL || prev->ta_priority >= task->ta_priority) {
		STAILQ_INSERT_TAIL(&queue->tq_queue, task, ta_link);
	} else {
		/*
		 * Otherwise walk the queue and insert before the first
		 * task of strictly lower priority.
		 */
		prev = NULL;
		for (ins = STAILQ_FIRST(&queue->tq_queue); ins;
		     prev = ins, ins = STAILQ_NEXT(ins, ta_link))
			if (ins->ta_priority < task->ta_priority)
				break;

		if (prev)
			STAILQ_INSERT_AFTER(&queue->tq_queue, prev, task, ta_link);
		else
			STAILQ_INSERT_HEAD(&queue->tq_queue, task, ta_link);
	}

	task->ta_pending = 1;
	if (queue->tq_enqueue)
		queue->tq_enqueue(queue->tq_context);

	crit_exit();

	return 0;
}
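
/*
 * Usage sketch for the above: a task is initialized once and may then
 * be enqueued repeatedly; enqueues that arrive while it is still
 * pending are coalesced into the pending count.  mydev_task_func() and
 * "sc" are hypothetical placeholder names:
 *
 *	struct task my_task;
 *
 *	TASK_INIT(&my_task, 0, mydev_task_func, sc);
 *	taskqueue_enqueue(taskqueue_swi, &my_task);
 */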

void
taskqueue_run(struct taskqueue *queue)
{
	struct task *task;
	int pending;

	crit_enter();
	while (STAILQ_FIRST(&queue->tq_queue)) {
		/*
		 * Carefully remove the first task from the queue and
		 * zero its pending count.
		 */
		task = STAILQ_FIRST(&queue->tq_queue);
		STAILQ_REMOVE_HEAD(&queue->tq_queue, ta_link);
		pending = task->ta_pending;
		task->ta_pending = 0;
		crit_exit();

		/*
		 * Run the task outside the critical section; it is
		 * free to re-enqueue itself from here.
		 */
		task->ta_func(task->ta_context, pending);

		crit_enter();
	}
	crit_exit();
}
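
/*
 * Handler sketch: the second argument a task function receives is the
 * pending count, i.e. how many taskqueue_enqueue() calls were coalesced
 * since the task last ran.  mydev_task_func() and struct mydev_softc
 * are hypothetical:
 *
 *	static void
 *	mydev_task_func(void *context, int pending)
 *	{
 *		struct mydev_softc *sc = context;
 *
 *		... process up to "pending" queued events for sc ...
 *	}
 */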

/*
 * The ``swi'' taskqueue is run from a software interrupt.  Enqueueing
 * work simply requests the software interrupt; the handler drains the
 * queue.
 */
static void
taskqueue_swi_enqueue(void *context)
{
	setsofttq();
}

static void
taskqueue_swi_run(void *arg, void *frame)
{
	taskqueue_run(taskqueue_swi);
}

TASKQUEUE_DEFINE(swi, taskqueue_swi_enqueue, NULL,
	register_swi(SWI_TQ, taskqueue_swi_run, NULL, "swi_taskq", NULL));
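
/*
 * Sketch of defining a private queue with the same macros (from
 * <sys/taskqueue.h>): TASKQUEUE_DECLARE() would go in a shared header,
 * and the final argument is initialization code run when the queue is
 * created.  mydev_tq_enqueue() and mydev_tq_init() are hypothetical:
 *
 *	TASKQUEUE_DECLARE(mydev);
 *	TASKQUEUE_DEFINE(mydev, mydev_tq_enqueue, NULL, mydev_tq_init());
 */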

/*
 * Per-cpu taskqueue threads: each cpu's service thread drains its own
 * queue, then sleeps until taskqueue_thread_enqueue() wakes it up again.
 */
static void
taskqueue_kthread(void *arg)
{
	for (;;) {
		taskqueue_run(taskqueue_thread[mycpuid]);
		crit_enter();
		if (STAILQ_EMPTY(&taskqueue_thread[mycpuid]->tq_queue))
			tsleep(taskqueue_thread[mycpuid], 0, "tqthr", 0);
		crit_exit();
	}
}

static void
taskqueue_thread_enqueue(void *context)
{
	wakeup(taskqueue_thread[mycpuid]);
}

struct taskqueue *taskqueue_thread[MAXCPU];
static struct thread *taskqueue_thread_td[MAXCPU];

/*
 * Create one taskqueue and one service thread per cpu.
 */
static void
taskqueue_init(void)
{
	int cpu;

	for (cpu = 0; cpu < ncpus; cpu++) {
		taskqueue_thread[cpu] = taskqueue_create("thread", M_INTWAIT,
		    taskqueue_thread_enqueue, NULL);
		lwkt_create(taskqueue_kthread, NULL,
		    &taskqueue_thread_td[cpu], NULL,
		    0, cpu, "taskqueue %d", cpu);
	}
}

SYSINIT(taskqueueinit, SI_SUB_CONFIGURE, SI_ORDER_SECOND, taskqueue_init, NULL);
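
/*
 * Per-cpu usage sketch: per the NOTE above taskqueue_enqueue(), a task
 * enqueued on a per-cpu thread queue must not be shared between cpus,
 * so keep one task per cpu.  mydev_task_func() and "sc" are
 * hypothetical:
 *
 *	static struct task mydev_task[MAXCPU];
 *
 *	TASK_INIT(&mydev_task[mycpuid], 0, mydev_task_func, sc);
 *	taskqueue_enqueue(taskqueue_thread[mycpuid], &mydev_task[mycpuid]);
 */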