xref: /dragonfly/sys/kern/subr_taskqueue.c (revision cae2835b)
/*-
 * Copyright (c) 2000 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/kern/subr_taskqueue.c,v 1.69 2012/08/28 13:35:37 jhb Exp $
 */

#include <sys/param.h>
#include <sys/queue.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/taskqueue.h>
#include <sys/interrupt.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/kthread.h>
#include <sys/thread2.h>
#include <sys/spinlock.h>
#include <sys/spinlock2.h>
#include <sys/serialize.h>
#include <sys/proc.h>

MALLOC_DEFINE(M_TASKQUEUE, "taskqueue", "Task Queues");

static STAILQ_HEAD(taskqueue_list, taskqueue) taskqueue_queues;
static struct lock	taskqueue_queues_lock;

struct taskqueue {
	STAILQ_ENTRY(taskqueue)	tq_link;
	STAILQ_HEAD(, task)	tq_queue;
	const char		*tq_name;
	/* NOTE: tq must be locked before calling tq_enqueue */
	taskqueue_enqueue_fn	tq_enqueue;
	void			*tq_context;

	struct task		*tq_running;
	struct spinlock		tq_lock;
	struct thread		**tq_threads;
	int			tq_tcount;
	int			tq_flags;
	int			tq_callouts;
};

#define	TQ_FLAGS_ACTIVE		(1 << 0)
#define	TQ_FLAGS_BLOCKED	(1 << 1)
#define	TQ_FLAGS_PENDING	(1 << 2)

#define	DT_CALLOUT_ARMED	(1 << 0)

void
_timeout_task_init(struct taskqueue *queue, struct timeout_task *timeout_task,
    int priority, task_fn_t func, void *context)
{

	TASK_INIT(&timeout_task->t, priority, func, context);
	callout_init(&timeout_task->c); /* XXX use callout_init_mp() */
	timeout_task->q = queue;
	timeout_task->f = 0;
}

static void taskqueue_run(struct taskqueue *queue, int lock_held);

static __inline void
TQ_LOCK_INIT(struct taskqueue *tq)
{
	spin_init(&tq->tq_lock, "tqlock");
}

static __inline void
TQ_LOCK_UNINIT(struct taskqueue *tq)
{
	spin_uninit(&tq->tq_lock);
}

static __inline void
TQ_LOCK(struct taskqueue *tq)
{
	spin_lock(&tq->tq_lock);
}

static __inline void
TQ_UNLOCK(struct taskqueue *tq)
{
	spin_unlock(&tq->tq_lock);
}

static __inline void
TQ_SLEEP(struct taskqueue *tq, void *ident, const char *wmesg)
{
	ssleep(ident, &tq->tq_lock, 0, wmesg, 0);
}

struct taskqueue *
taskqueue_create(const char *name, int mflags,
		 taskqueue_enqueue_fn enqueue, void *context)
{
	struct taskqueue *queue;

	queue = kmalloc(sizeof(*queue), M_TASKQUEUE, mflags | M_ZERO);
	if (!queue)
		return NULL;
	STAILQ_INIT(&queue->tq_queue);
	queue->tq_name = name;
	queue->tq_enqueue = enqueue;
	queue->tq_context = context;
	queue->tq_flags |= TQ_FLAGS_ACTIVE;
	TQ_LOCK_INIT(queue);

	lockmgr(&taskqueue_queues_lock, LK_EXCLUSIVE);
	STAILQ_INSERT_TAIL(&taskqueue_queues, queue, tq_link);
	lockmgr(&taskqueue_queues_lock, LK_RELEASE);

	return queue;
}

/* NOTE: tq must be locked */
static void
taskqueue_terminate(struct thread **pp, struct taskqueue *tq)
{
	while (tq->tq_tcount > 0) {
		/* Unlock spinlock before wakeup() */
		TQ_UNLOCK(tq);
		wakeup(tq);
		TQ_LOCK(tq);
		TQ_SLEEP(tq, pp, "taskqueue_terminate");
	}
}

void
taskqueue_free(struct taskqueue *queue)
{
	TQ_LOCK(queue);
	queue->tq_flags &= ~TQ_FLAGS_ACTIVE;
	taskqueue_run(queue, 1);
	taskqueue_terminate(queue->tq_threads, queue);
	TQ_UNLOCK(queue);

	lockmgr(&taskqueue_queues_lock, LK_EXCLUSIVE);
	STAILQ_REMOVE(&taskqueue_queues, queue, taskqueue, tq_link);
	lockmgr(&taskqueue_queues_lock, LK_RELEASE);

	TQ_LOCK_UNINIT(queue);

	kfree(queue, M_TASKQUEUE);
}

struct taskqueue *
taskqueue_find(const char *name)
{
	struct taskqueue *queue;

	lockmgr(&taskqueue_queues_lock, LK_EXCLUSIVE);
	STAILQ_FOREACH(queue, &taskqueue_queues, tq_link) {
		if (!strcmp(queue->tq_name, name)) {
			lockmgr(&taskqueue_queues_lock, LK_RELEASE);
			return queue;
		}
	}
	lockmgr(&taskqueue_queues_lock, LK_RELEASE);
	return NULL;
}

/*
 * NOTE!  If using the per-cpu taskqueues ``taskqueue_thread[mycpuid]'',
 * be sure NOT TO SHARE the ``task'' between CPUs.  TASKS ARE NOT LOCKED.
 * So either use a throwaway task which will only be enqueued once, or
 * use one task per CPU!
 */
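/*
 * For illustration only, a minimal sketch of the one-task-per-cpu
 * pattern described above.  The softc layout and the handler name
 * "mydev_task_fn" are hypothetical, not part of this file:
 *
 *	struct mydev_softc {
 *		struct task sc_task[MAXCPU];	(one private task per cpu)
 *	};
 *
 *	TASK_INIT(&sc->sc_task[mycpuid], 0, mydev_task_fn, sc);
 *	taskqueue_enqueue(taskqueue_thread[mycpuid],
 *			  &sc->sc_task[mycpuid]);
 */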
static int
taskqueue_enqueue_locked(struct taskqueue *queue, struct task *task)
{
	struct task *ins;
	struct task *prev;

	/*
	 * Don't allow new tasks on a queue which is being freed.
	 */
	if ((queue->tq_flags & TQ_FLAGS_ACTIVE) == 0)
		return EPIPE;

	/*
	 * Count multiple enqueues.
	 */
	if (task->ta_pending) {
		task->ta_pending++;
		return 0;
	}

	/*
	 * Optimise the case when all tasks have the same priority.
	 */
	prev = STAILQ_LAST(&queue->tq_queue, task, ta_link);
	if (!prev || prev->ta_priority >= task->ta_priority) {
		STAILQ_INSERT_TAIL(&queue->tq_queue, task, ta_link);
	} else {
		prev = NULL;
		for (ins = STAILQ_FIRST(&queue->tq_queue); ins;
		     prev = ins, ins = STAILQ_NEXT(ins, ta_link))
			if (ins->ta_priority < task->ta_priority)
				break;

		if (prev)
			STAILQ_INSERT_AFTER(&queue->tq_queue, prev, task, ta_link);
		else
			STAILQ_INSERT_HEAD(&queue->tq_queue, task, ta_link);
	}

	task->ta_pending = 1;
	if ((queue->tq_flags & TQ_FLAGS_BLOCKED) == 0) {
		if (queue->tq_enqueue)
			queue->tq_enqueue(queue->tq_context);
	} else {
		queue->tq_flags |= TQ_FLAGS_PENDING;
	}

	return 0;
}

int
taskqueue_enqueue(struct taskqueue *queue, struct task *task)
{
	int res;

	TQ_LOCK(queue);
	res = taskqueue_enqueue_locked(queue, task);
	TQ_UNLOCK(queue);

	return (res);
}

static void
taskqueue_timeout_func(void *arg)
{
	struct taskqueue *queue;
	struct timeout_task *timeout_task;

	timeout_task = arg;
	queue = timeout_task->q;

	TQ_LOCK(queue);
	KASSERT((timeout_task->f & DT_CALLOUT_ARMED) != 0, ("Stray timeout"));
	timeout_task->f &= ~DT_CALLOUT_ARMED;
	queue->tq_callouts--;
	taskqueue_enqueue_locked(timeout_task->q, &timeout_task->t);
	TQ_UNLOCK(queue);
}

int
taskqueue_enqueue_timeout(struct taskqueue *queue,
    struct timeout_task *timeout_task, int ticks)
{
	int res;

	TQ_LOCK(queue);
	KASSERT(timeout_task->q == NULL || timeout_task->q == queue,
		("Migrated queue"));
	timeout_task->q = queue;
	res = timeout_task->t.ta_pending;
	if (ticks == 0) {
		taskqueue_enqueue_locked(queue, &timeout_task->t);
		TQ_UNLOCK(queue);
	} else {
		if ((timeout_task->f & DT_CALLOUT_ARMED) != 0) {
			res++;
		} else {
			queue->tq_callouts++;
			timeout_task->f |= DT_CALLOUT_ARMED;
		}
		TQ_UNLOCK(queue);
		callout_reset(&timeout_task->c, ticks, taskqueue_timeout_func,
			      timeout_task);
	}
	return (res);
}

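/*
 * For illustration only, a sketch of arming a deferred task one second
 * out.  "mydev_timeout_fn" and "sc" are hypothetical names, and the
 * timeout_task is assumed to live at least as long as the pending
 * callout (e.g. embedded in a softc, not on the stack):
 *
 *	struct timeout_task tt;
 *
 *	_timeout_task_init(taskqueue_thread[mycpuid], &tt, 0,
 *			   mydev_timeout_fn, sc);
 *	taskqueue_enqueue_timeout(taskqueue_thread[mycpuid], &tt, hz);
 */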
void
taskqueue_block(struct taskqueue *queue)
{
	TQ_LOCK(queue);
	queue->tq_flags |= TQ_FLAGS_BLOCKED;
	TQ_UNLOCK(queue);
}

void
taskqueue_unblock(struct taskqueue *queue)
{
	TQ_LOCK(queue);
	queue->tq_flags &= ~TQ_FLAGS_BLOCKED;
	if (queue->tq_flags & TQ_FLAGS_PENDING) {
		queue->tq_flags &= ~TQ_FLAGS_PENDING;
		if (queue->tq_enqueue)
			queue->tq_enqueue(queue->tq_context);
	}
	TQ_UNLOCK(queue);
}

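/*
 * For illustration only, an assumed usage sketch: while a queue is
 * blocked, enqueues only mark TQ_FLAGS_PENDING, and the tq_enqueue
 * hook fires once on unblock ("tq", "t1" and "t2" are hypothetical):
 *
 *	taskqueue_block(tq);
 *	taskqueue_enqueue(tq, &t1);
 *	taskqueue_enqueue(tq, &t2);
 *	taskqueue_unblock(tq);
 */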
static void
taskqueue_run(struct taskqueue *queue, int lock_held)
{
	struct task *task;
	int pending;

	if (lock_held == 0)
		TQ_LOCK(queue);
	while (STAILQ_FIRST(&queue->tq_queue)) {
		/*
		 * Carefully remove the first task from the queue and
		 * zero its pending count.
		 */
		task = STAILQ_FIRST(&queue->tq_queue);
		STAILQ_REMOVE_HEAD(&queue->tq_queue, ta_link);
		pending = task->ta_pending;
		task->ta_pending = 0;
		queue->tq_running = task;

		/* The task function runs without the queue lock held */
		TQ_UNLOCK(queue);
		task->ta_func(task->ta_context, pending);
		queue->tq_running = NULL;
		wakeup(task);
		TQ_LOCK(queue);
	}
	if (lock_held == 0)
		TQ_UNLOCK(queue);
}

static int
taskqueue_cancel_locked(struct taskqueue *queue, struct task *task,
    u_int *pendp)
{

	if (task->ta_pending > 0)
		STAILQ_REMOVE(&queue->tq_queue, task, task, ta_link);
	if (pendp != NULL)
		*pendp = task->ta_pending;
	task->ta_pending = 0;
	return (task == queue->tq_running ? EBUSY : 0);
}

int
taskqueue_cancel(struct taskqueue *queue, struct task *task, u_int *pendp)
{
	int error;

	TQ_LOCK(queue);
	error = taskqueue_cancel_locked(queue, task, pendp);
	TQ_UNLOCK(queue);

	return (error);
}

int
taskqueue_cancel_timeout(struct taskqueue *queue,
			 struct timeout_task *timeout_task, u_int *pendp)
{
	u_int pending, pending1;
	int error;

	pending = !!callout_stop(&timeout_task->c);
	TQ_LOCK(queue);
	error = taskqueue_cancel_locked(queue, &timeout_task->t, &pending1);
	if ((timeout_task->f & DT_CALLOUT_ARMED) != 0) {
		timeout_task->f &= ~DT_CALLOUT_ARMED;
		queue->tq_callouts--;
	}
	TQ_UNLOCK(queue);

	if (pendp != NULL)
		*pendp = pending + pending1;
	return (error);
}

void
taskqueue_drain(struct taskqueue *queue, struct task *task)
{
	TQ_LOCK(queue);
	while (task->ta_pending != 0 || task == queue->tq_running)
		TQ_SLEEP(queue, task, "-");
	TQ_UNLOCK(queue);
}

void
taskqueue_drain_timeout(struct taskqueue *queue,
    struct timeout_task *timeout_task)
{

	callout_stop_sync(&timeout_task->c);
	taskqueue_drain(queue, &timeout_task->t);
}

static void
taskqueue_swi_enqueue(void *context)
{
	setsofttq();
}

static void
taskqueue_swi_run(void *arg, void *frame)
{
	taskqueue_run(taskqueue_swi, 0);
}

static void
taskqueue_swi_mp_run(void *arg, void *frame)
{
	taskqueue_run(taskqueue_swi_mp, 0);
}

int
taskqueue_start_threads(struct taskqueue **tqp, int count, int pri, int ncpu,
			const char *fmt, ...)
{
	__va_list ap;
	struct thread *td;
	struct taskqueue *tq;
	int i, error, cpu;
	char ktname[MAXCOMLEN];

	if (count <= 0)
		return EINVAL;

	tq = *tqp;
	cpu = ncpu;

	__va_start(ap, fmt);
	kvsnprintf(ktname, MAXCOMLEN, fmt, ap);
	__va_end(ap);

	tq->tq_threads = kmalloc(sizeof(struct thread *) * count, M_TASKQUEUE,
	    M_WAITOK | M_ZERO);

	for (i = 0; i < count; i++) {
		/*
		 * If no specific cpu was specified and more than one thread
		 * is to be created, we distribute the threads amongst all
		 * cpus.
		 */
		if ((ncpu <= -1) && (count > 1))
			cpu = i % ncpus;

		if (count == 1) {
			error = lwkt_create(taskqueue_thread_loop, tqp,
					    &tq->tq_threads[i], NULL,
					    TDF_NOSTART, cpu,
					    "%s", ktname);
		} else {
			error = lwkt_create(taskqueue_thread_loop, tqp,
					    &tq->tq_threads[i], NULL,
					    TDF_NOSTART, cpu,
					    "%s_%d", ktname, i);
		}
		if (error) {
			kprintf("%s: lwkt_create(%s): error %d\n", __func__,
			    ktname, error);
			tq->tq_threads[i] = NULL;
		} else {
			td = tq->tq_threads[i];
			lwkt_setpri_initial(td, pri);
			lwkt_schedule(td);
			tq->tq_tcount++;
		}
	}

	return 0;
}

void
taskqueue_thread_loop(void *arg)
{
	struct taskqueue **tqp, *tq;

	tqp = arg;
	tq = *tqp;
	TQ_LOCK(tq);
	while ((tq->tq_flags & TQ_FLAGS_ACTIVE) != 0) {
		taskqueue_run(tq, 1);
		TQ_SLEEP(tq, tq, "tqthr");
	}

	/* rendezvous with thread that asked us to terminate */
	tq->tq_tcount--;
	TQ_UNLOCK(tq);
	wakeup_one(tq->tq_threads);
	lwkt_exit();
}

/* NOTE: tq must be locked */
void
taskqueue_thread_enqueue(void *context)
{
	struct taskqueue **tqp, *tq;

	tqp = context;
	tq = *tqp;

	/* Unlock spinlock before wakeup_one() */
	TQ_UNLOCK(tq);
	wakeup_one(tq);
	TQ_LOCK(tq);
}

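/*
 * For illustration only, an assumed sketch of creating a private
 * taskqueue serviced by one kernel thread on any cpu, mirroring the
 * pattern used by taskqueue_init() below; "mytq" is a hypothetical
 * name.  Note the context passed to taskqueue_create() must be the
 * address of the queue pointer, which taskqueue_thread_enqueue() and
 * taskqueue_thread_loop() dereference:
 *
 *	struct taskqueue *mytq;
 *
 *	mytq = taskqueue_create("mytq", M_WAITOK,
 *				taskqueue_thread_enqueue, &mytq);
 *	taskqueue_start_threads(&mytq, 1, TDPRI_KERN_DAEMON, -1, "mytq");
 *	...
 *	taskqueue_free(mytq);
 */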
TASKQUEUE_DEFINE(swi, taskqueue_swi_enqueue, 0,
	 register_swi(SWI_TQ, taskqueue_swi_run, NULL, "swi_taskq", NULL, -1));
/*
 * XXX: possibly use a different SWI_TQ_MP or so.
 * related: sys/interrupt.h
 * related: platform/XXX/isa/ipl_funcs.c
 */
TASKQUEUE_DEFINE(swi_mp, taskqueue_swi_enqueue, 0,
    register_swi_mp(SWI_TQ, taskqueue_swi_mp_run, NULL, "swi_mp_taskq", NULL,
		    -1));

struct taskqueue *taskqueue_thread[MAXCPU];

static void
taskqueue_init(void)
{
	int cpu;

	lockinit(&taskqueue_queues_lock, "tqqueues", 0, 0);
	STAILQ_INIT(&taskqueue_queues);

	for (cpu = 0; cpu < ncpus; cpu++) {
		taskqueue_thread[cpu] = taskqueue_create("thread", M_INTWAIT,
		    taskqueue_thread_enqueue, &taskqueue_thread[cpu]);
		taskqueue_start_threads(&taskqueue_thread[cpu], 1,
		    TDPRI_KERN_DAEMON, cpu, "taskq_cpu %d", cpu);
	}
}

SYSINIT(taskqueueinit, SI_SUB_PRE_DRIVERS, SI_ORDER_ANY, taskqueue_init, NULL);