/* xref: /dragonfly/sys/kern/subr_gtaskqueue.c (revision 5694b1af) */
/*-
 * Copyright (c) 2000 Doug Rabson
 * Copyright (c) 2014 Jeff Roberson
 * Copyright (c) 2016 Matthew Macy
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/cpumask.h>
#include <sys/kernel.h>
#include <sys/libkern.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/gtaskqueue.h>
#include <sys/unistd.h>
#include <machine/stdarg.h>

static MALLOC_DEFINE(M_GTASKQUEUE, "gtaskqueue", "Group Task Queues");
static void	gtaskqueue_thread_enqueue(void *);
static void	gtaskqueue_thread_loop(void *arg);
static int	task_is_running(struct gtaskqueue *queue, struct gtask *gtask);
static void	gtaskqueue_drain_locked(struct gtaskqueue *queue, struct gtask *gtask);

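/*
 * System-wide "softirq" group: one queue per CPU, with queues assigned
 * to consecutive CPUs (stride 1).
 */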
TASKQGROUP_DEFINE(softirq, ncpus, 1);

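/*
 * Per-worker record of the task currently being executed, linked on
 * the queue's tq_active list so that cancel/drain can find it while
 * the queue lock is dropped around the task function.
 */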
struct gtaskqueue_busy {
	struct gtask		*tb_running;
	u_int			 tb_seq;
	LIST_ENTRY(gtaskqueue_busy) tb_link;
};

typedef void (*gtaskqueue_enqueue_fn)(void *context);

struct gtaskqueue {
	STAILQ_HEAD(, gtask)	tq_queue;
	LIST_HEAD(, gtaskqueue_busy) tq_active;
	u_int			tq_seq;
	int			tq_callouts;
	struct lock		tq_lock;
	gtaskqueue_enqueue_fn	tq_enqueue;
	void			*tq_context;
	const char		*tq_name;
	struct thread		**tq_threads;
	int			tq_tcount;
	int			tq_flags;
#if 0
	taskqueue_callback_fn	tq_callbacks[TASKQUEUE_NUM_CALLBACKS];
	void			*tq_cb_contexts[TASKQUEUE_NUM_CALLBACKS];
#endif
};

#define	TQ_FLAGS_ACTIVE		(1 << 0)
#define	TQ_FLAGS_BLOCKED	(1 << 1)
#define	TQ_FLAGS_UNLOCKED_ENQUEUE	(1 << 2)

#define	DT_CALLOUT_ARMED	(1 << 0)

#define	TQ_LOCK(tq)		lockmgr(&(tq)->tq_lock, LK_EXCLUSIVE)
#define	TQ_ASSERT_LOCKED(tq)	KKASSERT(lockstatus(&(tq)->tq_lock, NULL) != 0)
#define	TQ_UNLOCK(tq)		lockmgr(&(tq)->tq_lock, LK_RELEASE)
#define	TQ_ASSERT_UNLOCKED(tq)	KKASSERT(lockstatus(&(tq)->tq_lock, NULL) == 0)

#ifdef INVARIANTS
static void
gtask_dump(struct gtask *gtask)
{
	kprintf("gtask: %p ta_flags=%x ta_priority=%d ta_func=%p "
		"ta_context=%p\n",
		gtask, gtask->ta_flags, gtask->ta_priority,
		gtask->ta_func, gtask->ta_context);
}
#endif

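/*
 * Sleep on 'p'; lksleep() atomically releases tq_lock around the sleep
 * and reacquires it before returning.
 */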
static __inline int
TQ_SLEEP(struct gtaskqueue *tq, void *p, const char *wm)
{
	return (lksleep(p, &tq->tq_lock, 0, wm, 0));
}

static struct gtaskqueue *
_gtaskqueue_create(const char *name, int mflags,
		 taskqueue_enqueue_fn enqueue, void *context,
		 int lkflags, const char *mtxname __unused)
{
	struct gtaskqueue *queue;

	queue = kmalloc(sizeof(struct gtaskqueue),
			M_GTASKQUEUE, mflags | M_ZERO);
	if (!queue) {
		kprintf("_gtaskqueue_create: kmalloc failed %08x\n", mflags);
		return (NULL);
	}

	STAILQ_INIT(&queue->tq_queue);
	LIST_INIT(&queue->tq_active);
	queue->tq_enqueue = enqueue;
	queue->tq_context = context;
	queue->tq_name = name ? name : "taskqueue";
	queue->tq_flags |= TQ_FLAGS_ACTIVE;
	if (enqueue == gtaskqueue_thread_enqueue)
		queue->tq_flags |= TQ_FLAGS_UNLOCKED_ENQUEUE;
	lockinit(&queue->tq_lock, queue->tq_name, 0, 0);

	return (queue);
}

/*
 * Signal a taskqueue thread to terminate.
 */
static void
gtaskqueue_terminate(struct thread **pp, struct gtaskqueue *tq)
{

	while (tq->tq_tcount > 0 || tq->tq_callouts > 0) {
		wakeup(tq);
		TQ_SLEEP(tq, pp, "gtq_destroy");
	}
}

static void __unused
gtaskqueue_free(struct gtaskqueue *queue)
{

	TQ_LOCK(queue);
	queue->tq_flags &= ~TQ_FLAGS_ACTIVE;
	gtaskqueue_terminate(queue->tq_threads, queue);
	KASSERT(LIST_EMPTY(&queue->tq_active), ("Tasks still running?"));
	KASSERT(queue->tq_callouts == 0, ("Armed timeout tasks"));
	lockuninit(&queue->tq_lock);
	kfree(queue->tq_threads, M_GTASKQUEUE);
	/*kfree(queue->tq_name, M_GTASKQUEUE);*/
	kfree(queue, M_GTASKQUEUE);
}

/*
 * Prevent the grouptask from being enqueued and wait for any queued
 * or currently running instance to complete.
 */
void
grouptask_block(struct grouptask *grouptask)
{
	struct gtaskqueue *queue = grouptask->gt_taskqueue;
	struct gtask *gtask = &grouptask->gt_task;

#ifdef INVARIANTS
	if (queue == NULL) {
		gtask_dump(gtask);
		panic("queue == NULL");
	}
#endif
	TQ_LOCK(queue);
	gtask->ta_flags |= TASK_NOENQUEUE;
	gtaskqueue_drain_locked(queue, gtask);
	TQ_UNLOCK(queue);
}

void
grouptask_unblock(struct grouptask *grouptask)
{
	struct gtaskqueue *queue = grouptask->gt_taskqueue;
	struct gtask *gtask = &grouptask->gt_task;

#ifdef INVARIANTS
	if (queue == NULL) {
		gtask_dump(gtask);
		panic("queue == NULL");
	}
#endif
	TQ_LOCK(queue);
	gtask->ta_flags &= ~TASK_NOENQUEUE;
	TQ_UNLOCK(queue);
}
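
/*
 * Illustrative usage (hypothetical driver code, not part of this file):
 * quiesce a grouptask around a reconfiguration.  Assumes 'gt' was
 * previously attached with taskqgroup_attach().
 */
#if 0
static void
example_reconfigure(struct grouptask *gt)
{
	grouptask_block(gt);	/* no new runs; wait for current one */
	/* ... modify state that the task handler reads ... */
	grouptask_unblock(gt);	/* allow enqueueing again */
}
#endif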

int
grouptaskqueue_enqueue(struct gtaskqueue *queue, struct gtask *gtask)
{
#ifdef INVARIANTS
	if (queue == NULL) {
		gtask_dump(gtask);
		panic("queue == NULL");
	}
#endif
	TQ_LOCK(queue);
	if (gtask->ta_flags & TASK_ENQUEUED) {
		TQ_UNLOCK(queue);
		return (0);
	}
	if (gtask->ta_flags & TASK_NOENQUEUE) {
		TQ_UNLOCK(queue);
		return (EAGAIN);
	}
	STAILQ_INSERT_TAIL(&queue->tq_queue, gtask, ta_link);
	gtask->ta_flags |= TASK_ENQUEUED;
	TQ_UNLOCK(queue);
	if ((queue->tq_flags & TQ_FLAGS_BLOCKED) == 0)
		queue->tq_enqueue(queue->tq_context);
	return (0);
}
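
/*
 * Illustrative usage (hypothetical, not part of this file): initialize
 * a gtask once with GTASK_INIT() and enqueue it on demand.  A task that
 * is already queued is not queued twice, and EAGAIN is returned while
 * the task is blocked.
 */
#if 0
static struct gtask example_task;

static void
example_handler(void *arg)
{
	/* runs in a taskqueue thread, may block */
}

static void
example_init(void)
{
	GTASK_INIT(&example_task, 0, 0, example_handler, NULL);
}

static void
example_kick(struct gtaskqueue *tq)
{
	if (grouptaskqueue_enqueue(tq, &example_task) == EAGAIN)
		kprintf("example: task is currently blocked\n");
}
#endif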

static void
gtaskqueue_task_nop_fn(void *context)
{
}

/*
 * Block until all currently queued tasks in this taskqueue
 * have begun execution.  Tasks queued during execution of
 * this function are ignored.
 */
static void
gtaskqueue_drain_tq_queue(struct gtaskqueue *queue)
{
	struct gtask t_barrier;

	if (STAILQ_EMPTY(&queue->tq_queue))
		return;

	/*
	 * Enqueue our barrier after all current tasks, but with
	 * the highest priority so that newly queued tasks cannot
	 * pass it.  Because of the high priority, we can not use
	 * taskqueue_enqueue_locked directly (which drops the lock
	 * anyway) so just insert it at tail while we have the
	 * queue lock.
	 */
	GTASK_INIT(&t_barrier, 0, USHRT_MAX, gtaskqueue_task_nop_fn, &t_barrier);
	STAILQ_INSERT_TAIL(&queue->tq_queue, &t_barrier, ta_link);
	t_barrier.ta_flags |= TASK_ENQUEUED;

	/*
	 * Once the barrier has executed, all previously queued tasks
	 * have completed or are currently executing.
	 */
	while (t_barrier.ta_flags & TASK_ENQUEUED)
		TQ_SLEEP(queue, &t_barrier, "gtq_qdrain");
}

/*
 * Block until all currently executing tasks for this taskqueue
 * complete.  Tasks that begin execution during the execution
 * of this function are ignored.
 */
static void
gtaskqueue_drain_tq_active(struct gtaskqueue *queue)
{
	struct gtaskqueue_busy *tb;
	u_int seq;

	if (LIST_EMPTY(&queue->tq_active))
		return;

	/* Block gtaskqueue_terminate(). */
	queue->tq_callouts++;

	/* Wait for any active task with sequence from the past. */
	seq = queue->tq_seq;
restart:
	LIST_FOREACH(tb, &queue->tq_active, tb_link) {
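		/*
		 * The signed difference makes the comparison safe across
		 * tq_seq wraparound: it selects entries whose sequence
		 * number predates our snapshot.
		 */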
		if ((int)(tb->tb_seq - seq) <= 0) {
			TQ_SLEEP(queue, tb->tb_running, "gtq_adrain");
			goto restart;
		}
	}

	/* Release gtaskqueue_terminate(). */
	queue->tq_callouts--;
	if ((queue->tq_flags & TQ_FLAGS_ACTIVE) == 0)
		wakeup_one(queue->tq_threads);
}

void
gtaskqueue_block(struct gtaskqueue *queue)
{

	TQ_LOCK(queue);
	queue->tq_flags |= TQ_FLAGS_BLOCKED;
	TQ_UNLOCK(queue);
}

void
gtaskqueue_unblock(struct gtaskqueue *queue)
{

	TQ_LOCK(queue);
	queue->tq_flags &= ~TQ_FLAGS_BLOCKED;
	if (!STAILQ_EMPTY(&queue->tq_queue))
		queue->tq_enqueue(queue->tq_context);
	TQ_UNLOCK(queue);
}

static void
gtaskqueue_run_locked(struct gtaskqueue *queue)
{
	struct gtaskqueue_busy tb;
	struct gtask *gtask;
#if 0
	struct epoch_tracker et;
	bool in_net_epoch;
#endif

	KASSERT(queue != NULL, ("tq is NULL"));
	TQ_ASSERT_LOCKED(queue);
	tb.tb_running = NULL;
	LIST_INSERT_HEAD(&queue->tq_active, &tb, tb_link);
#if 0
	in_net_epoch = false;
#endif

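	/*
	 * The on-stack busy record stays on tq_active for the duration
	 * of the loop so that task_is_running() and the drain functions
	 * can see us while tq_lock is dropped around ta_func().
	 */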
	while ((gtask = STAILQ_FIRST(&queue->tq_queue)) != NULL) {
		STAILQ_REMOVE_HEAD(&queue->tq_queue, ta_link);
		gtask->ta_flags &= ~TASK_ENQUEUED;
		tb.tb_running = gtask;
		tb.tb_seq = ++queue->tq_seq;
		TQ_UNLOCK(queue);

		KASSERT(gtask->ta_func != NULL, ("task->ta_func is NULL"));
#if 0
		if (!in_net_epoch && TASK_IS_NET(gtask)) {
			in_net_epoch = true;
			NET_EPOCH_ENTER(et);
		} else if (in_net_epoch && !TASK_IS_NET(gtask)) {
			NET_EPOCH_EXIT(et);
			in_net_epoch = false;
		}
#endif
		gtask->ta_func(gtask->ta_context);

		TQ_LOCK(queue);
		wakeup(gtask);
	}
#if 0
	if (in_net_epoch)
		NET_EPOCH_EXIT(et);
#endif
	LIST_REMOVE(&tb, tb_link);
}

static int
task_is_running(struct gtaskqueue *queue, struct gtask *gtask)
{
	struct gtaskqueue_busy *tb;

	TQ_ASSERT_LOCKED(queue);
	LIST_FOREACH(tb, &queue->tq_active, tb_link) {
		if (tb->tb_running == gtask)
			return (1);
	}
	return (0);
}

static int
gtaskqueue_cancel_locked(struct gtaskqueue *queue, struct gtask *gtask)
{

	if (gtask->ta_flags & TASK_ENQUEUED)
		STAILQ_REMOVE(&queue->tq_queue, gtask, gtask, ta_link);
	gtask->ta_flags &= ~TASK_ENQUEUED;
	return (task_is_running(queue, gtask) ? EBUSY : 0);
}

int
gtaskqueue_cancel(struct gtaskqueue *queue, struct gtask *gtask)
{
	int error;

	TQ_LOCK(queue);
	error = gtaskqueue_cancel_locked(queue, gtask);
	TQ_UNLOCK(queue);

	return (error);
}

static void
gtaskqueue_drain_locked(struct gtaskqueue *queue, struct gtask *gtask)
{
	while ((gtask->ta_flags & TASK_ENQUEUED) || task_is_running(queue, gtask))
		TQ_SLEEP(queue, gtask, "gtq_drain");
}

void
gtaskqueue_drain(struct gtaskqueue *queue, struct gtask *gtask)
{
	TQ_LOCK(queue);
	gtaskqueue_drain_locked(queue, gtask);
	TQ_UNLOCK(queue);
}

void
gtaskqueue_drain_all(struct gtaskqueue *queue)
{

	TQ_LOCK(queue);
	gtaskqueue_drain_tq_queue(queue);
	gtaskqueue_drain_tq_active(queue);
	TQ_UNLOCK(queue);
}

static int __printflike(4, 0)
_gtaskqueue_start_threads(struct gtaskqueue **tqp, int count, int pri,
			  const char *name, __va_list ap)
{
	char ktname[MAXCOMLEN + 1];
	struct thread *td;
	struct gtaskqueue *tq;
	int i, error;

	if (count <= 0)
		return (EINVAL);

	kvsnprintf(ktname, sizeof(ktname), name, ap);
	tq = *tqp;

	tq->tq_threads = kmalloc(sizeof(struct thread *) * count,
				 M_GTASKQUEUE, M_WAITOK | M_ZERO);

	for (i = 0; i < count; i++) {
		int cpu = i % ncpus;
		if (count == 1) {
			error = lwkt_create(gtaskqueue_thread_loop, tqp,
					    &tq->tq_threads[i], NULL,
					    TDF_NOSTART, cpu,
					    "%s", ktname);
		} else {
			error = lwkt_create(gtaskqueue_thread_loop, tqp,
					    &tq->tq_threads[i], NULL,
					    TDF_NOSTART, cpu,
					    "%s_%d", ktname, i);
		}
		if (error) {
			/* should be ok to continue, taskqueue_free will dtrt */
			kprintf("%s: lwkt_create(%s): error %d\n",
				__func__, ktname, error);
			tq->tq_threads[i] = NULL;		/* paranoid */
		} else
			tq->tq_tcount++;
	}
	for (i = 0; i < count; i++) {
		if (tq->tq_threads[i] == NULL)
			continue;
		td = tq->tq_threads[i];
		lwkt_setpri_initial(td, pri);
		lwkt_schedule(td);
	}

	return (0);
}

static int __printflike(4, 5)
gtaskqueue_start_threads(struct gtaskqueue **tqp, int count, int pri,
			 const char *name, ...)
{
	__va_list ap;
	int error;

	__va_start(ap, name);
	error = _gtaskqueue_start_threads(tqp, count, pri, name, ap);
	__va_end(ap);
	return (error);
}

#if 0
static inline void
gtaskqueue_run_callback(struct gtaskqueue *tq,
    enum taskqueue_callback_type cb_type)
{
	taskqueue_callback_fn tq_callback;

	TQ_ASSERT_UNLOCKED(tq);
	tq_callback = tq->tq_callbacks[cb_type];
	if (tq_callback != NULL)
		tq_callback(tq->tq_cb_contexts[cb_type]);
}
#endif

static void
gtaskqueue_thread_loop(void *arg)
{
	struct gtaskqueue **tqp, *tq;

	tqp = arg;
	tq = *tqp;
#if 0
	gtaskqueue_run_callback(tq, TASKQUEUE_CALLBACK_TYPE_INIT);
#endif
	TQ_LOCK(tq);
	while ((tq->tq_flags & TQ_FLAGS_ACTIVE) != 0) {
		/* XXX ? */
		gtaskqueue_run_locked(tq);
		/*
		 * Because taskqueue_run() can drop tq_mutex, we need to
		 * check if the TQ_FLAGS_ACTIVE flag wasn't removed in the
		 * meantime, which means we missed a wakeup.
		 */
		if ((tq->tq_flags & TQ_FLAGS_ACTIVE) == 0)
			break;
		TQ_SLEEP(tq, tq, "-");
	}
	gtaskqueue_run_locked(tq);
	/*
	 * This thread is on its way out, so just drop the lock temporarily
	 * in order to call the shutdown callback.  This allows the callback
	 * to look at the taskqueue, even just before it dies.
	 */
#if 0
	TQ_UNLOCK(tq);
	gtaskqueue_run_callback(tq, TASKQUEUE_CALLBACK_TYPE_SHUTDOWN);
	TQ_LOCK(tq);
#endif

	/* rendezvous with thread that asked us to terminate */
	tq->tq_tcount--;
	wakeup_one(tq->tq_threads);
	TQ_UNLOCK(tq);
	lwkt_exit();
}

static void
gtaskqueue_thread_enqueue(void *context)
{
	struct gtaskqueue **tqp, *tq;

	tqp = context;
	tq = *tqp;
	wakeup_one(tq);
}

/*
 * NOTE: FreeBSD uses MTX_SPIN locks, which doesn't make a whole lot
 *	 of sense (over-use of spin-locks in general). In DFly we
 *	 want to use blockable locks for almost everything.
 */
static struct gtaskqueue *
gtaskqueue_create_fast(const char *name, int mflags,
		       taskqueue_enqueue_fn enqueue, void *context)
{
	return _gtaskqueue_create(name, mflags, enqueue, context,
				  0, "fast_taskqueue");
}

struct taskqgroup_cpu {
	LIST_HEAD(, grouptask) tgc_tasks;
	struct gtaskqueue *tgc_taskq;
	int		tgc_cnt;
	int		tgc_cpu;
};

struct taskqgroup {
	struct taskqgroup_cpu tqg_queue[MAXCPU];
	struct lock	tqg_lock;
	const char *	tqg_name;
	int		tqg_cnt;
};

struct taskq_bind_task {
	struct gtask bt_task;
	int	bt_cpuid;
};

static void
taskqgroup_cpu_create(struct taskqgroup *qgroup, int idx, int cpu)
{
	struct taskqgroup_cpu *qcpu;

	qcpu = &qgroup->tqg_queue[idx];
	LIST_INIT(&qcpu->tgc_tasks);
	qcpu->tgc_taskq = gtaskqueue_create_fast(NULL, M_WAITOK,
						 gtaskqueue_thread_enqueue,
						 &qcpu->tgc_taskq);
	gtaskqueue_start_threads(&qcpu->tgc_taskq, 1, TDPRI_KERN_DAEMON,
				 "%s_%d", qgroup->tqg_name, idx);
	qcpu->tgc_cpu = cpu;
}

/*
 * Find the taskq with the fewest tasks that does not already service
 * this uniq identifier.
 */
static int
taskqgroup_find(struct taskqgroup *qgroup, void *uniq)
{
	struct grouptask *n;
	int i, idx, mincnt;
	int strict;

	KKASSERT(lockstatus(&qgroup->tqg_lock, NULL) != 0);
	KASSERT(qgroup->tqg_cnt != 0,
	    ("qgroup %s has no queues", qgroup->tqg_name));

	/*
	 * Two passes: first scan for a queue with the least tasks that
	 * does not already service this uniq id.  If that fails simply find
	 * the queue with the least total tasks.
	 */
	for (idx = -1, mincnt = INT_MAX, strict = 1; mincnt == INT_MAX;
	    strict = 0) {
		for (i = 0; i < qgroup->tqg_cnt; i++) {
			if (qgroup->tqg_queue[i].tgc_cnt > mincnt)
				continue;
			if (strict) {
				LIST_FOREACH(n, &qgroup->tqg_queue[i].tgc_tasks,
				    gt_list)
					if (n->gt_uniq == uniq)
						break;
				if (n != NULL)
					continue;
			}
			mincnt = qgroup->tqg_queue[i].tgc_cnt;
			idx = i;
		}
	}
	if (idx == -1)
		panic("%s: failed to pick a qid.", __func__);

	return (idx);
}

void
taskqgroup_attach(struct taskqgroup *qgroup, struct grouptask *gtask,
    void *uniq, device_t dev, struct resource *irq, const char *name)
{
	int cpu, qid, error;

	KASSERT(qgroup->tqg_cnt > 0,
	    ("qgroup %s has no queues", qgroup->tqg_name));

	gtask->gt_uniq = uniq;
	ksnprintf(gtask->gt_name, GROUPTASK_NAMELEN, "%s", name ? name : "grouptask");
	gtask->gt_dev = dev;
	gtask->gt_irq = irq;
	gtask->gt_cpu = -1;
	lockmgr(&qgroup->tqg_lock, LK_EXCLUSIVE);
	qid = taskqgroup_find(qgroup, uniq);
	qgroup->tqg_queue[qid].tgc_cnt++;
	LIST_INSERT_HEAD(&qgroup->tqg_queue[qid].tgc_tasks, gtask, gt_list);
	gtask->gt_taskqueue = qgroup->tqg_queue[qid].tgc_taskq;
	if (dev != NULL && irq != NULL) {
		cpu = qgroup->tqg_queue[qid].tgc_cpu;
		gtask->gt_cpu = cpu;
		lockmgr(&qgroup->tqg_lock, LK_RELEASE);
#if 0
		/*
		 * XXX FreeBSD created a mess by separating out the cpu
		 * binding from bus_setup_intr().  Punt for now.
		 */
		error = bus_bind_intr(dev, irq, cpu);
#endif
		error = 0;

		if (error)
			kprintf("%s: binding interrupt failed for %s: %d\n",
			    __func__, gtask->gt_name, error);
	} else {
		lockmgr(&qgroup->tqg_lock, LK_RELEASE);
	}
}
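
/*
 * Illustrative usage (hypothetical driver code, not part of this file):
 * attach a per-interrupt grouptask to the system softirq group.
 * Assumes GROUPTASK_INIT() and the qgroup_softirq handle created by
 * TASKQGROUP_DEFINE() above, both from <sys/gtaskqueue.h>.
 */
#if 0
static struct grouptask example_gt;

static void
example_rx_intr_task(void *arg)
{
	/* deferred per-interrupt work */
}

static void
example_attach(device_t dev, struct resource *irq, void *softc)
{
	GROUPTASK_INIT(&example_gt, 0, example_rx_intr_task, softc);
	taskqgroup_attach(qgroup_softirq, &example_gt, softc,
	    dev, irq, "example_rx");
}
#endif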

int
taskqgroup_attach_cpu(struct taskqgroup *qgroup, struct grouptask *gtask,
    void *uniq, int cpu, device_t dev, struct resource *irq, const char *name)
{
	int i, qid, error;

	gtask->gt_uniq = uniq;
	ksnprintf(gtask->gt_name, GROUPTASK_NAMELEN, "%s", name ? name : "grouptask");
	gtask->gt_dev = dev;
	gtask->gt_irq = irq;
	gtask->gt_cpu = cpu;
	lockmgr(&qgroup->tqg_lock, LK_EXCLUSIVE);
	for (i = 0, qid = -1; i < qgroup->tqg_cnt; i++) {
		if (qgroup->tqg_queue[i].tgc_cpu == cpu) {
			qid = i;
			break;
		}
	}
	if (qid == -1) {
		lockmgr(&qgroup->tqg_lock, LK_RELEASE);
		kprintf("%s: qid not found for %s cpu=%d\n",
			__func__, gtask->gt_name, cpu);
		return (EINVAL);
	}
	qgroup->tqg_queue[qid].tgc_cnt++;
	LIST_INSERT_HEAD(&qgroup->tqg_queue[qid].tgc_tasks, gtask, gt_list);
	gtask->gt_taskqueue = qgroup->tqg_queue[qid].tgc_taskq;
	cpu = qgroup->tqg_queue[qid].tgc_cpu;
	lockmgr(&qgroup->tqg_lock, LK_RELEASE);

	if (dev != NULL && irq != NULL) {
#if 0
		/*
		 * XXX FreeBSD created a mess by separating out the cpu
		 * binding from bus_setup_intr().  Punt for now.
		 */
		error = bus_bind_intr(dev, irq, cpu);
#endif
		error = 0;

		if (error) {
			kprintf("%s: binding interrupt failed for %s: %d\n",
			    __func__, gtask->gt_name, error);
		}
	}
	return (0);
}

void
taskqgroup_detach(struct taskqgroup *qgroup, struct grouptask *gtask)
{
	int i;

	grouptask_block(gtask);
	lockmgr(&qgroup->tqg_lock, LK_EXCLUSIVE);
	for (i = 0; i < qgroup->tqg_cnt; i++)
		if (qgroup->tqg_queue[i].tgc_taskq == gtask->gt_taskqueue)
			break;
	if (i == qgroup->tqg_cnt)
		panic("%s: task %s not in group", __func__, gtask->gt_name);
	qgroup->tqg_queue[i].tgc_cnt--;
	LIST_REMOVE(gtask, gt_list);
	lockmgr(&qgroup->tqg_lock, LK_RELEASE);
	gtask->gt_taskqueue = NULL;
	gtask->gt_task.ta_flags &= ~TASK_NOENQUEUE;
}

static void
taskqgroup_binder(void *ctx)
{
	struct taskq_bind_task *gtask;

	gtask = ctx;
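	/*
	 * This runs on the taskqueue thread itself, so migrating the
	 * current LWKT thread pins that queue's worker to the target CPU.
	 */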
	lwkt_migratecpu(gtask->bt_cpuid);
	kfree(gtask, M_DEVBUF);
}

void
taskqgroup_bind(struct taskqgroup *qgroup)
{
	struct taskq_bind_task *gtask;
	int i;

	/*
	 * Bind taskqueue threads to specific CPUs, if they have been assigned
	 * one.
	 */
	if (qgroup->tqg_cnt == 1)
		return;

	for (i = 0; i < qgroup->tqg_cnt; i++) {
		gtask = kmalloc(sizeof(*gtask), M_DEVBUF, M_WAITOK);
		GTASK_INIT(&gtask->bt_task, 0, 0, taskqgroup_binder, gtask);
		gtask->bt_cpuid = qgroup->tqg_queue[i].tgc_cpu;
		grouptaskqueue_enqueue(qgroup->tqg_queue[i].tgc_taskq,
				       &gtask->bt_task);
	}
}

struct taskqgroup *
taskqgroup_create(const char *name, int cnt, int stride)
{
	struct taskqgroup *qgroup;
	int cpu, i, j;

	qgroup = kmalloc(sizeof(*qgroup), M_GTASKQUEUE, M_WAITOK | M_ZERO);
	lockinit(&qgroup->tqg_lock, "taskqgroup", 0, 0);
	qgroup->tqg_name = name;
	qgroup->tqg_cnt = cnt;

	for (cpu = i = 0; i < cnt; i++) {
		taskqgroup_cpu_create(qgroup, i, cpu);
		for (j = 0; j < stride; j++)
			cpu = (cpu + 1) % ncpus;
	}
	return (qgroup);
}
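
/*
 * Illustrative usage (hypothetical, not part of this file): create a
 * private group with one queue per CPU, stride 1, then pin the worker
 * threads to their assigned CPUs.
 */
#if 0
static struct taskqgroup *example_tqg;

static void
example_create_group(void)
{
	example_tqg = taskqgroup_create("example", ncpus, 1);
	taskqgroup_bind(example_tqg);
}
#endif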

void
taskqgroup_destroy(struct taskqgroup *qgroup)
{
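	/* XXX: no teardown implemented; the group is never freed. */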
}

void
taskqgroup_drain_all(struct taskqgroup *tqg)
{
	struct gtaskqueue *q;

	for (int i = 0; i < ncpus; i++) {
		q = tqg->tqg_queue[i].tgc_taskq;
		if (q == NULL)
			continue;
		gtaskqueue_drain_all(q);
	}
}