/*-
 * Copyright (c) 2000 Doug Rabson
 * Copyright (c) 2014 Jeff Roberson
 * Copyright (c) 2016 Matthew Macy
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/cpuset.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/libkern.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/epoch.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/gtaskqueue.h>
#include <sys/unistd.h>
#include <machine/stdarg.h>

static MALLOC_DEFINE(M_GTASKQUEUE, "gtaskqueue", "Group Task Queues");
static void	gtaskqueue_thread_enqueue(void *);
static void	gtaskqueue_thread_loop(void *arg);
static int	task_is_running(struct gtaskqueue *queue, struct gtask *gtask);
static void	gtaskqueue_drain_locked(struct gtaskqueue *queue, struct gtask *gtask);

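/*
 * The TASKQGROUP_DEFINE() macro below is expected to instantiate the
 * "softirq" taskqgroup during boot, with the same (name, cnt, stride)
 * meaning as taskqgroup_create(): mp_ncpus per-CPU queues, stepping
 * through the CPU set with a stride of 1.  See sys/gtaskqueue.h for the
 * macro's exact expansion.
 */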
TASKQGROUP_DEFINE(softirq, mp_ncpus, 1);

struct gtaskqueue_busy {
	struct gtask		*tb_running;
	u_int			 tb_seq;
	LIST_ENTRY(gtaskqueue_busy) tb_link;
};

typedef void (*gtaskqueue_enqueue_fn)(void *context);

struct gtaskqueue {
	STAILQ_HEAD(, gtask)	tq_queue;
	LIST_HEAD(, gtaskqueue_busy) tq_active;
	u_int			tq_seq;
	int			tq_callouts;
	struct mtx_padalign	tq_mutex;
	gtaskqueue_enqueue_fn	tq_enqueue;
	void			*tq_context;
	char			*tq_name;
	struct thread		**tq_threads;
	int			tq_tcount;
	int			tq_spin;
	int			tq_flags;
	taskqueue_callback_fn	tq_callbacks[TASKQUEUE_NUM_CALLBACKS];
	void			*tq_cb_contexts[TASKQUEUE_NUM_CALLBACKS];
};

#define	TQ_FLAGS_ACTIVE		(1 << 0)
#define	TQ_FLAGS_BLOCKED	(1 << 1)
#define	TQ_FLAGS_UNLOCKED_ENQUEUE	(1 << 2)

#define	DT_CALLOUT_ARMED	(1 << 0)

#define	TQ_LOCK(tq)							\
	do {								\
		if ((tq)->tq_spin)					\
			mtx_lock_spin(&(tq)->tq_mutex);			\
		else							\
			mtx_lock(&(tq)->tq_mutex);			\
	} while (0)
#define	TQ_ASSERT_LOCKED(tq)	mtx_assert(&(tq)->tq_mutex, MA_OWNED)

#define	TQ_UNLOCK(tq)							\
	do {								\
		if ((tq)->tq_spin)					\
			mtx_unlock_spin(&(tq)->tq_mutex);		\
		else							\
			mtx_unlock(&(tq)->tq_mutex);			\
	} while (0)
#define	TQ_ASSERT_UNLOCKED(tq)	mtx_assert(&(tq)->tq_mutex, MA_NOTOWNED)

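/*
 * tq_spin selects between a spin mutex and a regular sleep mutex at queue
 * creation time; TQ_LOCK(), TQ_UNLOCK() and TQ_SLEEP() all dispatch on it
 * so the same queue code works in both contexts.
 */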
#ifdef INVARIANTS
static void
gtask_dump(struct gtask *gtask)
{
	printf("gtask: %p ta_flags=%x ta_priority=%d ta_func=%p ta_context=%p\n",
	       gtask, gtask->ta_flags, gtask->ta_priority, gtask->ta_func, gtask->ta_context);
}
#endif

static __inline int
TQ_SLEEP(struct gtaskqueue *tq, void *p, const char *wm)
{
	if (tq->tq_spin)
		return (msleep_spin(p, (struct mtx *)&tq->tq_mutex, wm, 0));
	return (msleep(p, &tq->tq_mutex, 0, wm, 0));
}

static struct gtaskqueue *
_gtaskqueue_create(const char *name, int mflags,
		 taskqueue_enqueue_fn enqueue, void *context,
		 int mtxflags, const char *mtxname __unused)
{
	struct gtaskqueue *queue;
	char *tq_name;

	tq_name = malloc(TASKQUEUE_NAMELEN, M_GTASKQUEUE, mflags | M_ZERO);
	if (!tq_name)
		return (NULL);

	snprintf(tq_name, TASKQUEUE_NAMELEN, "%s", (name) ? name : "taskqueue");

	queue = malloc(sizeof(struct gtaskqueue), M_GTASKQUEUE, mflags | M_ZERO);
	if (!queue) {
		free(tq_name, M_GTASKQUEUE);
		return (NULL);
	}

	STAILQ_INIT(&queue->tq_queue);
	LIST_INIT(&queue->tq_active);
	queue->tq_enqueue = enqueue;
	queue->tq_context = context;
	queue->tq_name = tq_name;
	queue->tq_spin = (mtxflags & MTX_SPIN) != 0;
	queue->tq_flags |= TQ_FLAGS_ACTIVE;
	if (enqueue == gtaskqueue_thread_enqueue)
		queue->tq_flags |= TQ_FLAGS_UNLOCKED_ENQUEUE;
	mtx_init(&queue->tq_mutex, tq_name, NULL, mtxflags);

	return (queue);
}

/*
 * Signal a taskqueue thread to terminate.
 */
static void
gtaskqueue_terminate(struct thread **pp, struct gtaskqueue *tq)
{

	while (tq->tq_tcount > 0 || tq->tq_callouts > 0) {
		wakeup(tq);
		TQ_SLEEP(tq, pp, "gtq_destroy");
	}
}

static void __unused
gtaskqueue_free(struct gtaskqueue *queue)
{

	TQ_LOCK(queue);
	queue->tq_flags &= ~TQ_FLAGS_ACTIVE;
	gtaskqueue_terminate(queue->tq_threads, queue);
	KASSERT(LIST_EMPTY(&queue->tq_active), ("Tasks still running?"));
	KASSERT(queue->tq_callouts == 0, ("Armed timeout tasks"));
	mtx_destroy(&queue->tq_mutex);
	free(queue->tq_threads, M_GTASKQUEUE);
	free(queue->tq_name, M_GTASKQUEUE);
	free(queue, M_GTASKQUEUE);
}

/*
 * Prevent the grouptask from being enqueued again and wait for any
 * queued or currently running instance of it to complete.
 */
void
grouptask_block(struct grouptask *grouptask)
{
	struct gtaskqueue *queue = grouptask->gt_taskqueue;
	struct gtask *gtask = &grouptask->gt_task;

#ifdef INVARIANTS
	if (queue == NULL) {
		gtask_dump(gtask);
		panic("queue == NULL");
	}
#endif
	TQ_LOCK(queue);
	gtask->ta_flags |= TASK_NOENQUEUE;
	gtaskqueue_drain_locked(queue, gtask);
	TQ_UNLOCK(queue);
}

void
grouptask_unblock(struct grouptask *grouptask)
{
	struct gtaskqueue *queue = grouptask->gt_taskqueue;
	struct gtask *gtask = &grouptask->gt_task;

#ifdef INVARIANTS
	if (queue == NULL) {
		gtask_dump(gtask);
		panic("queue == NULL");
	}
#endif
	TQ_LOCK(queue);
	gtask->ta_flags &= ~TASK_NOENQUEUE;
	TQ_UNLOCK(queue);
}

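/*
 * Example (illustrative sketch only; "sc" and its members are
 * hypothetical): a driver can use grouptask_block()/grouptask_unblock()
 * to fence its handler around a reconfiguration:
 *
 *	grouptask_block(&sc->sc_rx_task);	// no new runs, drain current
 *	reconfigure_rx_ring(sc);		// safe: handler cannot run
 *	grouptask_unblock(&sc->sc_rx_task);	// allow enqueues again
 *
 * While blocked, grouptaskqueue_enqueue() on the task returns EAGAIN.
 */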
int
grouptaskqueue_enqueue(struct gtaskqueue *queue, struct gtask *gtask)
{
#ifdef INVARIANTS
	if (queue == NULL) {
		gtask_dump(gtask);
		panic("queue == NULL");
	}
#endif
	TQ_LOCK(queue);
	if (gtask->ta_flags & TASK_ENQUEUED) {
		TQ_UNLOCK(queue);
		return (0);
	}
	if (gtask->ta_flags & TASK_NOENQUEUE) {
		TQ_UNLOCK(queue);
		return (EAGAIN);
	}
	STAILQ_INSERT_TAIL(&queue->tq_queue, gtask, ta_link);
	gtask->ta_flags |= TASK_ENQUEUED;
	TQ_UNLOCK(queue);
	if ((queue->tq_flags & TQ_FLAGS_BLOCKED) == 0)
		queue->tq_enqueue(queue->tq_context);
	return (0);
}

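/*
 * Example (illustrative sketch only; my_handler and my_softc are
 * hypothetical): enqueueing a task declared with GTASK_INIT(), as done
 * for t_barrier below.  Enqueueing an already-queued task is a no-op.
 *
 *	static struct gtask my_task;
 *
 *	GTASK_INIT(&my_task, 0, 0, my_handler, my_softc);
 *	grouptaskqueue_enqueue(tq, &my_task);
 *
 * Group tasks attached with taskqgroup_attach() are normally enqueued
 * through their grouptask, typically via the GROUPTASK_ENQUEUE()
 * convenience macro from sys/gtaskqueue.h.
 */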
static void
gtaskqueue_task_nop_fn(void *context)
{
}

/*
 * Block until all currently queued tasks in this taskqueue
 * have begun execution.  Tasks queued during execution of
 * this function are ignored.
 */
static void
gtaskqueue_drain_tq_queue(struct gtaskqueue *queue)
{
	struct gtask t_barrier;

	if (STAILQ_EMPTY(&queue->tq_queue))
		return;

	/*
	 * Enqueue our barrier after all current tasks, but with
	 * the highest priority so that newly queued tasks cannot
	 * pass it.  Because of the high priority, we cannot use
	 * taskqueue_enqueue_locked directly (which drops the lock
	 * anyway), so just insert it at the tail while we hold the
	 * queue lock.
	 */
	GTASK_INIT(&t_barrier, 0, USHRT_MAX, gtaskqueue_task_nop_fn, &t_barrier);
	STAILQ_INSERT_TAIL(&queue->tq_queue, &t_barrier, ta_link);
	t_barrier.ta_flags |= TASK_ENQUEUED;

	/*
	 * Once the barrier has executed, all previously queued tasks
	 * have completed or are currently executing.
	 */
	while (t_barrier.ta_flags & TASK_ENQUEUED)
		TQ_SLEEP(queue, &t_barrier, "gtq_qdrain");
}

/*
 * Block until all currently executing tasks for this taskqueue
 * complete.  Tasks that begin execution during the execution
 * of this function are ignored.
 */
static void
gtaskqueue_drain_tq_active(struct gtaskqueue *queue)
{
	struct gtaskqueue_busy *tb;
	u_int seq;

	if (LIST_EMPTY(&queue->tq_active))
		return;

	/* Block gtaskqueue_terminate(). */
	queue->tq_callouts++;

	/* Wait for any active task with a sequence number from the past. */
	seq = queue->tq_seq;
restart:
	LIST_FOREACH(tb, &queue->tq_active, tb_link) {
		/* Signed subtraction makes the comparison wraparound-safe. */
		if ((int)(tb->tb_seq - seq) <= 0) {
			TQ_SLEEP(queue, tb->tb_running, "gtq_adrain");
			goto restart;
		}
	}

	/* Release gtaskqueue_terminate(). */
	queue->tq_callouts--;
	if ((queue->tq_flags & TQ_FLAGS_ACTIVE) == 0)
		wakeup_one(queue->tq_threads);
}

void
gtaskqueue_block(struct gtaskqueue *queue)
{

	TQ_LOCK(queue);
	queue->tq_flags |= TQ_FLAGS_BLOCKED;
	TQ_UNLOCK(queue);
}

void
gtaskqueue_unblock(struct gtaskqueue *queue)
{

	TQ_LOCK(queue);
	queue->tq_flags &= ~TQ_FLAGS_BLOCKED;
	if (!STAILQ_EMPTY(&queue->tq_queue))
		queue->tq_enqueue(queue->tq_context);
	TQ_UNLOCK(queue);
}

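/*
 * Example (illustrative sketch only; tq, task_a and task_b are
 * hypothetical): gtaskqueue_block() defers dispatch rather than
 * preventing enqueue, so a caller can batch several enqueues and have
 * the queue's threads kicked once:
 *
 *	gtaskqueue_block(tq);
 *	grouptaskqueue_enqueue(tq, &task_a);
 *	grouptaskqueue_enqueue(tq, &task_b);
 *	gtaskqueue_unblock(tq);		// calls tq_enqueue if work is pending
 */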
static void
gtaskqueue_run_locked(struct gtaskqueue *queue)
{
	struct epoch_tracker et;
	struct gtaskqueue_busy tb;
	struct gtask *gtask;
	bool in_net_epoch;

	KASSERT(queue != NULL, ("tq is NULL"));
	TQ_ASSERT_LOCKED(queue);
	tb.tb_running = NULL;
	LIST_INSERT_HEAD(&queue->tq_active, &tb, tb_link);
	in_net_epoch = false;

	while ((gtask = STAILQ_FIRST(&queue->tq_queue)) != NULL) {
		STAILQ_REMOVE_HEAD(&queue->tq_queue, ta_link);
		gtask->ta_flags &= ~TASK_ENQUEUED;
		tb.tb_running = gtask;
		tb.tb_seq = ++queue->tq_seq;
		TQ_UNLOCK(queue);

		KASSERT(gtask->ta_func != NULL, ("task->ta_func is NULL"));
		if (!in_net_epoch && TASK_IS_NET(gtask)) {
			in_net_epoch = true;
			NET_EPOCH_ENTER(et);
		} else if (in_net_epoch && !TASK_IS_NET(gtask)) {
			NET_EPOCH_EXIT(et);
			in_net_epoch = false;
		}
		gtask->ta_func(gtask->ta_context);

		TQ_LOCK(queue);
		wakeup(gtask);
	}
	if (in_net_epoch)
		NET_EPOCH_EXIT(et);
	LIST_REMOVE(&tb, tb_link);
}

static int
task_is_running(struct gtaskqueue *queue, struct gtask *gtask)
{
	struct gtaskqueue_busy *tb;

	TQ_ASSERT_LOCKED(queue);
	LIST_FOREACH(tb, &queue->tq_active, tb_link) {
		if (tb->tb_running == gtask)
			return (1);
	}
	return (0);
}

static int
gtaskqueue_cancel_locked(struct gtaskqueue *queue, struct gtask *gtask)
{

	if (gtask->ta_flags & TASK_ENQUEUED)
		STAILQ_REMOVE(&queue->tq_queue, gtask, gtask, ta_link);
	gtask->ta_flags &= ~TASK_ENQUEUED;
	return (task_is_running(queue, gtask) ? EBUSY : 0);
}

int
gtaskqueue_cancel(struct gtaskqueue *queue, struct gtask *gtask)
{
	int error;

	TQ_LOCK(queue);
	error = gtaskqueue_cancel_locked(queue, gtask);
	TQ_UNLOCK(queue);

	return (error);
}

static void
gtaskqueue_drain_locked(struct gtaskqueue *queue, struct gtask *gtask)
{
	while ((gtask->ta_flags & TASK_ENQUEUED) || task_is_running(queue, gtask))
		TQ_SLEEP(queue, gtask, "gtq_drain");
}

void
gtaskqueue_drain(struct gtaskqueue *queue, struct gtask *gtask)
{

	if (!queue->tq_spin)
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, __func__);

	TQ_LOCK(queue);
	gtaskqueue_drain_locked(queue, gtask);
	TQ_UNLOCK(queue);
}

void
gtaskqueue_drain_all(struct gtaskqueue *queue)
{

	if (!queue->tq_spin)
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, __func__);

	TQ_LOCK(queue);
	gtaskqueue_drain_tq_queue(queue);
	gtaskqueue_drain_tq_active(queue);
	TQ_UNLOCK(queue);
}

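/*
 * Example (illustrative sketch only; sc, sc_task and free_context() are
 * hypothetical): the usual teardown pattern is to cancel a pending task
 * and, if it was already running (EBUSY), drain it before freeing its
 * context:
 *
 *	if (gtaskqueue_cancel(tq, &sc->sc_task) != 0)
 *		gtaskqueue_drain(tq, &sc->sc_task);
 *	free_context(sc);
 */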
static int
_gtaskqueue_start_threads(struct gtaskqueue **tqp, int count, int pri,
    cpuset_t *mask, const char *name, va_list ap)
{
	char ktname[MAXCOMLEN + 1];
	struct thread *td;
	struct gtaskqueue *tq;
	int i, error;

	if (count <= 0)
		return (EINVAL);

	vsnprintf(ktname, sizeof(ktname), name, ap);
	tq = *tqp;

	tq->tq_threads = malloc(sizeof(struct thread *) * count, M_GTASKQUEUE,
	    M_NOWAIT | M_ZERO);
	if (tq->tq_threads == NULL) {
		printf("%s: no memory for %s threads\n", __func__, ktname);
		return (ENOMEM);
	}

	for (i = 0; i < count; i++) {
		if (count == 1)
			error = kthread_add(gtaskqueue_thread_loop, tqp, NULL,
			    &tq->tq_threads[i], RFSTOPPED, 0, "%s", ktname);
		else
			error = kthread_add(gtaskqueue_thread_loop, tqp, NULL,
			    &tq->tq_threads[i], RFSTOPPED, 0,
			    "%s_%d", ktname, i);
		if (error) {
			/* should be ok to continue, taskqueue_free will dtrt */
			printf("%s: kthread_add(%s): error %d\n", __func__,
			    ktname, error);
			tq->tq_threads[i] = NULL;		/* paranoid */
		} else
			tq->tq_tcount++;
	}
	for (i = 0; i < count; i++) {
		if (tq->tq_threads[i] == NULL)
			continue;
		td = tq->tq_threads[i];
		if (mask) {
			error = cpuset_setthread(td->td_tid, mask);
			/*
			 * Failing to pin is rarely an actual fatal error;
			 * it'll just affect performance.
			 */
			if (error)
				printf("%s: curthread=%llu: can't pin; "
				    "error=%d\n",
				    __func__,
				    (unsigned long long) td->td_tid,
				    error);
		}
		thread_lock(td);
		sched_prio(td, pri);
		sched_add(td, SRQ_BORING);
	}

	return (0);
}

static int
gtaskqueue_start_threads(struct gtaskqueue **tqp, int count, int pri,
    const char *name, ...)
{
	va_list ap;
	int error;

	va_start(ap, name);
	error = _gtaskqueue_start_threads(tqp, count, pri, NULL, name, ap);
	va_end(ap);
	return (error);
}

static inline void
gtaskqueue_run_callback(struct gtaskqueue *tq,
    enum taskqueue_callback_type cb_type)
{
	taskqueue_callback_fn tq_callback;

	TQ_ASSERT_UNLOCKED(tq);
	tq_callback = tq->tq_callbacks[cb_type];
	if (tq_callback != NULL)
		tq_callback(tq->tq_cb_contexts[cb_type]);
}

static void
gtaskqueue_thread_loop(void *arg)
{
	struct gtaskqueue **tqp, *tq;

	tqp = arg;
	tq = *tqp;
	gtaskqueue_run_callback(tq, TASKQUEUE_CALLBACK_TYPE_INIT);
	TQ_LOCK(tq);
	while ((tq->tq_flags & TQ_FLAGS_ACTIVE) != 0) {
		/* XXX ? */
		gtaskqueue_run_locked(tq);
		/*
		 * Because taskqueue_run() can drop tq_mutex, we need to
		 * check whether the TQ_FLAGS_ACTIVE flag was cleared in the
		 * meantime, which would mean we missed a wakeup.
		 */
		if ((tq->tq_flags & TQ_FLAGS_ACTIVE) == 0)
			break;
		TQ_SLEEP(tq, tq, "-");
	}
	gtaskqueue_run_locked(tq);
	/*
	 * This thread is on its way out, so just drop the lock temporarily
	 * in order to call the shutdown callback.  This allows the callback
	 * to look at the taskqueue, even just before it dies.
	 */
	TQ_UNLOCK(tq);
	gtaskqueue_run_callback(tq, TASKQUEUE_CALLBACK_TYPE_SHUTDOWN);
	TQ_LOCK(tq);

	/* rendezvous with thread that asked us to terminate */
	tq->tq_tcount--;
	wakeup_one(tq->tq_threads);
	TQ_UNLOCK(tq);
	kthread_exit();
}

static void
gtaskqueue_thread_enqueue(void *context)
{
	struct gtaskqueue **tqp, *tq;

	tqp = context;
	tq = *tqp;
	wakeup_any(tq);
}

static struct gtaskqueue *
gtaskqueue_create_fast(const char *name, int mflags,
		 taskqueue_enqueue_fn enqueue, void *context)
{
	return _gtaskqueue_create(name, mflags, enqueue, context,
			MTX_SPIN, "fast_taskqueue");
}

struct taskqgroup_cpu {
	LIST_HEAD(, grouptask) tgc_tasks;
	struct gtaskqueue *tgc_taskq;
	int		tgc_cnt;
	int		tgc_cpu;
};

struct taskqgroup {
	struct taskqgroup_cpu tqg_queue[MAXCPU];
	struct mtx	tqg_lock;
	const char *	tqg_name;
	int		tqg_cnt;
};

struct taskq_bind_task {
	struct gtask bt_task;
	int	bt_cpuid;
};

static void
taskqgroup_cpu_create(struct taskqgroup *qgroup, int idx, int cpu)
{
	struct taskqgroup_cpu *qcpu;

	qcpu = &qgroup->tqg_queue[idx];
	LIST_INIT(&qcpu->tgc_tasks);
	qcpu->tgc_taskq = gtaskqueue_create_fast(NULL, M_WAITOK,
	    gtaskqueue_thread_enqueue, &qcpu->tgc_taskq);
	gtaskqueue_start_threads(&qcpu->tgc_taskq, 1, PI_SOFT,
	    "%s_%d", qgroup->tqg_name, idx);
	qcpu->tgc_cpu = cpu;
}

/*
 * Find the queue with the fewest tasks that is not already servicing
 * this uniq identifier; if none qualifies, fall back to the queue with
 * the fewest tasks overall.
 */
static int
taskqgroup_find(struct taskqgroup *qgroup, void *uniq)
{
	struct grouptask *n;
	int i, idx, mincnt;
	int strict;

	mtx_assert(&qgroup->tqg_lock, MA_OWNED);
	KASSERT(qgroup->tqg_cnt != 0,
	    ("qgroup %s has no queues", qgroup->tqg_name));

	/*
	 * Two passes: first scan for a queue with the least tasks that
	 * does not already service this uniq id.  If that fails simply find
	 * the queue with the least total tasks.
	 */
	for (idx = -1, mincnt = INT_MAX, strict = 1; mincnt == INT_MAX;
	    strict = 0) {
		for (i = 0; i < qgroup->tqg_cnt; i++) {
			if (qgroup->tqg_queue[i].tgc_cnt > mincnt)
				continue;
			if (strict) {
				LIST_FOREACH(n, &qgroup->tqg_queue[i].tgc_tasks,
				    gt_list)
					if (n->gt_uniq == uniq)
						break;
				if (n != NULL)
					continue;
			}
			mincnt = qgroup->tqg_queue[i].tgc_cnt;
			idx = i;
		}
	}
	if (idx == -1)
		panic("%s: failed to pick a qid.", __func__);

	return (idx);
}

void
taskqgroup_attach(struct taskqgroup *qgroup, struct grouptask *gtask,
    void *uniq, device_t dev, struct resource *irq, const char *name)
{
	int cpu, qid, error;

	KASSERT(qgroup->tqg_cnt > 0,
	    ("qgroup %s has no queues", qgroup->tqg_name));

	gtask->gt_uniq = uniq;
	snprintf(gtask->gt_name, GROUPTASK_NAMELEN, "%s", name ? name : "grouptask");
	gtask->gt_dev = dev;
	gtask->gt_irq = irq;
	gtask->gt_cpu = -1;
	mtx_lock(&qgroup->tqg_lock);
	qid = taskqgroup_find(qgroup, uniq);
	qgroup->tqg_queue[qid].tgc_cnt++;
	LIST_INSERT_HEAD(&qgroup->tqg_queue[qid].tgc_tasks, gtask, gt_list);
	gtask->gt_taskqueue = qgroup->tqg_queue[qid].tgc_taskq;
	if (dev != NULL && irq != NULL) {
		cpu = qgroup->tqg_queue[qid].tgc_cpu;
		gtask->gt_cpu = cpu;
		mtx_unlock(&qgroup->tqg_lock);
		error = bus_bind_intr(dev, irq, cpu);
		if (error)
			printf("%s: binding interrupt failed for %s: %d\n",
			    __func__, gtask->gt_name, error);
	} else
		mtx_unlock(&qgroup->tqg_lock);
}

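/*
 * Example (illustrative sketch only; my_deferred_fn and sc are
 * hypothetical): attaching a handler to the "softirq" group defined at
 * the top of this file.  GROUPTASK_INIT() and the qgroup_softirq pointer
 * are assumed to come from sys/gtaskqueue.h and the TASKQGROUP_DEFINE()
 * expansion, respectively.
 *
 *	GROUPTASK_INIT(&sc->sc_gtask, 0, my_deferred_fn, sc);
 *	taskqgroup_attach(qgroup_softirq, &sc->sc_gtask, sc,
 *	    NULL, NULL, "my_deferred");
 *
 * Passing a NULL dev/irq pair skips the interrupt-to-CPU binding branch
 * above; detach with taskqgroup_detach() before freeing sc.
 */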
int
taskqgroup_attach_cpu(struct taskqgroup *qgroup, struct grouptask *gtask,
    void *uniq, int cpu, device_t dev, struct resource *irq, const char *name)
{
	int i, qid, error;

	gtask->gt_uniq = uniq;
	snprintf(gtask->gt_name, GROUPTASK_NAMELEN, "%s", name ? name : "grouptask");
	gtask->gt_dev = dev;
	gtask->gt_irq = irq;
	gtask->gt_cpu = cpu;
	mtx_lock(&qgroup->tqg_lock);
	for (i = 0, qid = -1; i < qgroup->tqg_cnt; i++)
		if (qgroup->tqg_queue[i].tgc_cpu == cpu) {
			qid = i;
			break;
		}
	if (qid == -1) {
		mtx_unlock(&qgroup->tqg_lock);
		printf("%s: qid not found for %s cpu=%d\n", __func__, gtask->gt_name, cpu);
		return (EINVAL);
	}
	qgroup->tqg_queue[qid].tgc_cnt++;
	LIST_INSERT_HEAD(&qgroup->tqg_queue[qid].tgc_tasks, gtask, gt_list);
	gtask->gt_taskqueue = qgroup->tqg_queue[qid].tgc_taskq;
	cpu = qgroup->tqg_queue[qid].tgc_cpu;
	mtx_unlock(&qgroup->tqg_lock);

	if (dev != NULL && irq != NULL) {
		error = bus_bind_intr(dev, irq, cpu);
		if (error)
			printf("%s: binding interrupt failed for %s: %d\n",
			    __func__, gtask->gt_name, error);
	}
	return (0);
}

void
taskqgroup_detach(struct taskqgroup *qgroup, struct grouptask *gtask)
{
	int i;

	grouptask_block(gtask);
	mtx_lock(&qgroup->tqg_lock);
	for (i = 0; i < qgroup->tqg_cnt; i++)
		if (qgroup->tqg_queue[i].tgc_taskq == gtask->gt_taskqueue)
			break;
	if (i == qgroup->tqg_cnt)
		panic("%s: task %s not in group", __func__, gtask->gt_name);
	qgroup->tqg_queue[i].tgc_cnt--;
	LIST_REMOVE(gtask, gt_list);
	mtx_unlock(&qgroup->tqg_lock);
	gtask->gt_taskqueue = NULL;
	gtask->gt_task.ta_flags &= ~TASK_NOENQUEUE;
}

static void
taskqgroup_binder(void *ctx)
{
	struct taskq_bind_task *gtask;
	cpuset_t mask;
	int error;

	gtask = ctx;
	CPU_ZERO(&mask);
	CPU_SET(gtask->bt_cpuid, &mask);
	error = cpuset_setthread(curthread->td_tid, &mask);
	thread_lock(curthread);
	sched_bind(curthread, gtask->bt_cpuid);
	thread_unlock(curthread);

	if (error)
		printf("%s: binding curthread failed: %d\n", __func__, error);
	free(gtask, M_DEVBUF);
}

void
taskqgroup_bind(struct taskqgroup *qgroup)
{
	struct taskq_bind_task *gtask;
	int i;

	/*
	 * Bind taskqueue threads to specific CPUs, if they have been assigned
	 * one.
	 */
	if (qgroup->tqg_cnt == 1)
		return;

	for (i = 0; i < qgroup->tqg_cnt; i++) {
		gtask = malloc(sizeof(*gtask), M_DEVBUF, M_WAITOK);
		GTASK_INIT(&gtask->bt_task, 0, 0, taskqgroup_binder, gtask);
		gtask->bt_cpuid = qgroup->tqg_queue[i].tgc_cpu;
		grouptaskqueue_enqueue(qgroup->tqg_queue[i].tgc_taskq,
		    &gtask->bt_task);
	}
}

struct taskqgroup *
taskqgroup_create(const char *name, int cnt, int stride)
{
	struct taskqgroup *qgroup;
	int cpu, i, j;

	qgroup = malloc(sizeof(*qgroup), M_GTASKQUEUE, M_WAITOK | M_ZERO);
	mtx_init(&qgroup->tqg_lock, "taskqgroup", NULL, MTX_DEF);
	qgroup->tqg_name = name;
	qgroup->tqg_cnt = cnt;

	for (cpu = i = 0; i < cnt; i++) {
		taskqgroup_cpu_create(qgroup, i, cpu);
		for (j = 0; j < stride; j++)
			cpu = CPU_NEXT(cpu);
	}
	return (qgroup);
}

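/*
 * Example (illustrative sketch only; "mygrp" and sc are hypothetical):
 * creating a private group with one queue per CPU and stride 1, then
 * attaching a task to it instead of the shared softirq group:
 *
 *	struct taskqgroup *tqg;
 *
 *	tqg = taskqgroup_create("mygrp", mp_ncpus, 1);
 *	taskqgroup_attach(tqg, &sc->sc_gtask, sc, NULL, NULL, "mygrp_task");
 */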
void
taskqgroup_destroy(struct taskqgroup *qgroup)
{
}

void
taskqgroup_drain_all(struct taskqgroup *tqg)
{
	struct gtaskqueue *q;

	for (int i = 0; i < mp_ncpus; i++) {
		q = tqg->tqg_queue[i].tgc_taskq;
		if (q == NULL)
			continue;
		gtaskqueue_drain_all(q);
	}
}