/*-
 * Copyright (c) 2000 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/kern/subr_taskqueue.c,v 1.69 2012/08/28 13:35:37 jhb Exp $
 */

#include <sys/param.h>
#include <sys/queue.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/taskqueue.h>
#include <sys/interrupt.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/kthread.h>
#include <sys/spinlock.h>
#include <sys/spinlock2.h>
#include <sys/serialize.h>
#include <sys/proc.h>

MALLOC_DEFINE(M_TASKQUEUE, "taskqueue", "Task Queues");

static STAILQ_HEAD(taskqueue_list, taskqueue) taskqueue_queues;
static struct lock taskqueue_queues_lock;
static struct spinlock taskqueue_queues_spin;

struct taskqueue {
	STAILQ_ENTRY(taskqueue) tq_link;
	STAILQ_HEAD(, task) tq_queue;
	const char *tq_name;
	/* NOTE: tq must be locked before calling tq_enqueue */
	taskqueue_enqueue_fn tq_enqueue;
	void *tq_context;

	struct task *tq_running;
	struct spinlock tq_lock;
	struct thread **tq_threads;
	int tq_tcount;
	int tq_flags;
	int tq_callouts;
};

#define	TQ_FLAGS_ACTIVE		(1 << 0)
#define	TQ_FLAGS_BLOCKED	(1 << 1)
#define	TQ_FLAGS_PENDING	(1 << 2)

#define	DT_CALLOUT_ARMED	(1 << 0)

void
_timeout_task_init(struct taskqueue *queue, struct timeout_task *timeout_task,
    int priority, task_fn_t func, void *context)
{

	TASK_INIT(&timeout_task->t, priority, func, context);
	callout_init_mp(&timeout_task->c);
	timeout_task->t.ta_queue = queue;
	timeout_task->f = 0;
}

static void taskqueue_run(struct taskqueue *queue, int lock_held);

static __inline void
TQ_LOCK_INIT(struct taskqueue *tq)
{
	spin_init(&tq->tq_lock, "tqlock");
}

static __inline void
TQ_LOCK_UNINIT(struct taskqueue *tq)
{
	spin_uninit(&tq->tq_lock);
}

static __inline void
TQ_LOCK(struct taskqueue *tq)
{
	spin_lock(&tq->tq_lock);
}

static __inline void
TQ_UNLOCK(struct taskqueue *tq)
{
	spin_unlock(&tq->tq_lock);
}

static __inline void
TQ_SLEEP(struct taskqueue *tq, void *ident, const char *wmesg)
{
	ssleep(ident, &tq->tq_lock, 0, wmesg, 0);
}

struct taskqueue *
taskqueue_create(const char *name, int mflags,
    taskqueue_enqueue_fn enqueue, void *context)
{
	struct taskqueue *queue;

	queue = kmalloc(sizeof(*queue), M_TASKQUEUE, mflags | M_ZERO);
	if (!queue)
		return NULL;
	STAILQ_INIT(&queue->tq_queue);
	queue->tq_name = name;
	queue->tq_enqueue = enqueue;
	queue->tq_context = context;
	queue->tq_flags |= TQ_FLAGS_ACTIVE;
	TQ_LOCK_INIT(queue);

	lockmgr(&taskqueue_queues_lock, LK_EXCLUSIVE);
	STAILQ_INSERT_TAIL(&taskqueue_queues, queue, tq_link);
	lockmgr(&taskqueue_queues_lock, LK_RELEASE);

	return queue;
}

/* NOTE: tq must be locked */
static void
taskqueue_terminate(struct thread **pp, struct taskqueue *tq)
{
	while (tq->tq_tcount > 0) {
		/* Unlock spinlock before wakeup() */
		TQ_UNLOCK(tq);
		wakeup(tq);
		TQ_LOCK(tq);
		TQ_SLEEP(tq, pp, "taskqueue_terminate");
	}
}

void
taskqueue_free(struct taskqueue *queue)
{
	TQ_LOCK(queue);
	queue->tq_flags &= ~TQ_FLAGS_ACTIVE;
	taskqueue_run(queue, 1);
	taskqueue_terminate(queue->tq_threads, queue);
	TQ_UNLOCK(queue);

	lockmgr(&taskqueue_queues_lock, LK_EXCLUSIVE);
	STAILQ_REMOVE(&taskqueue_queues, queue, taskqueue, tq_link);
	lockmgr(&taskqueue_queues_lock, LK_RELEASE);

	TQ_LOCK_UNINIT(queue);

	kfree(queue, M_TASKQUEUE);
}

struct taskqueue *
taskqueue_find(const char *name)
{
	struct taskqueue *queue;

	lockmgr(&taskqueue_queues_lock, LK_EXCLUSIVE);
	STAILQ_FOREACH(queue, &taskqueue_queues, tq_link) {
		if (!strcmp(queue->tq_name, name)) {
			lockmgr(&taskqueue_queues_lock, LK_RELEASE);
			return queue;
		}
	}
	lockmgr(&taskqueue_queues_lock, LK_RELEASE);
	return NULL;
}

/*
 * NOTE! If using the per-cpu taskqueues ``taskqueue_thread[mycpuid]'',
 * be sure NOT TO SHARE the ``task'' between CPUs.  TASKS ARE NOT LOCKED.
 * So either use a throwaway task which will only be enqueued once, or
 * use one task per CPU!
 */
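/*
 * Illustrative sketch only (my_task_fn and my_softc are hypothetical
 * names): one private task per cpu, so no task structure is ever shared
 * between the per-cpu queues.
 *
 *	struct task my_tasks[MAXCPU];
 *
 *	TASK_INIT(&my_tasks[mycpuid], 0, my_task_fn, my_softc);
 *	taskqueue_enqueue(taskqueue_thread[mycpuid], &my_tasks[mycpuid]);
 */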
static int
taskqueue_enqueue_locked(struct taskqueue *queue, struct task *task)
{
	struct task *ins;
	struct task *prev;

	/*
	 * Don't allow new tasks on a queue which is being freed.
	 */
	if ((queue->tq_flags & TQ_FLAGS_ACTIVE) == 0)
		return EPIPE;

	/*
	 * Count multiple enqueues.
	 */
	if (task->ta_pending) {
		KKASSERT(queue == task->ta_queue);
		task->ta_pending++;
		return 0;
	}
	task->ta_queue = queue;

	/*
	 * Optimise the case when all tasks have the same priority.
	 */
	prev = STAILQ_LAST(&queue->tq_queue, task, ta_link);
	if (!prev || prev->ta_priority >= task->ta_priority) {
		STAILQ_INSERT_TAIL(&queue->tq_queue, task, ta_link);
	} else {
		prev = NULL;
		for (ins = STAILQ_FIRST(&queue->tq_queue); ins;
		     prev = ins, ins = STAILQ_NEXT(ins, ta_link))
			if (ins->ta_priority < task->ta_priority)
				break;

		if (prev)
			STAILQ_INSERT_AFTER(&queue->tq_queue, prev, task, ta_link);
		else
			STAILQ_INSERT_HEAD(&queue->tq_queue, task, ta_link);
	}

	task->ta_pending = 1;
	if ((queue->tq_flags & TQ_FLAGS_BLOCKED) == 0) {
		if (queue->tq_enqueue)
			queue->tq_enqueue(queue->tq_context);
	} else {
		queue->tq_flags |= TQ_FLAGS_PENDING;
	}

	return 0;
}

/*
 * This version requires that the task not be moved between queues
 * in an uncontrolled fashion.
 */
int
taskqueue_enqueue(struct taskqueue *queue, struct task *task)
{
	int res;

	TQ_LOCK(queue);
	res = taskqueue_enqueue_locked(queue, task);
	TQ_UNLOCK(queue);

	return (res);
}

/*
 * This version allows a task to be moved between queues in an uncontrolled
 * fashion.  (*qpp) is set to the queue the task is (possibly already)
 * enqueued on, or the specified queue if it is possible to move the task.
 */
int
taskqueue_enqueue_optq(struct taskqueue *queue, struct taskqueue **qpp,
    struct task *task)
{
	struct taskqueue *qtmp;
	int res;

	/*
	 * Interlock for task structure check, handle the case where we
	 * are unable to safely shift the task to the specified queue.
	 */
	for (;;) {
		qtmp = task->ta_queue;
		cpu_ccfence();

		if (qtmp == NULL) {
			spin_lock(&taskqueue_queues_spin);
			if (task->ta_queue == NULL)
				task->ta_queue = queue;
			spin_unlock(&taskqueue_queues_spin);
		} else {
			TQ_LOCK(qtmp);
			if (task->ta_queue == qtmp) {
				if (qtmp == queue)
					break;

				/*
				 * If qtmp is pending on a different queue
				 * it must stay on that queue.
				 *
				 * WARNING: Once ta_queue is reassigned
				 *          our qtmp lock is no longer
				 *          sufficient and we lose control
				 *          of the task.
				 */
				if (task->ta_pending) {
					task->ta_pending++;
					*qpp = qtmp;
					TQ_UNLOCK(qtmp);
					return 0;
				}
				cpu_sfence();
				task->ta_queue = queue;
				cpu_ccfence();
			}
			TQ_UNLOCK(qtmp);
		}
		/* retry */
	}

	/*
	 * The task is assigned to (queue), enqueue it there.
	 */
	*qpp = queue;
	res = taskqueue_enqueue_locked(queue, task);
	TQ_UNLOCK(queue);

	return (res);
}
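
/*
 * Illustrative sketch (my_task is hypothetical): enqueue onto the current
 * cpu's queue, letting the task migrate, then tear down with the
 * queue-agnostic helpers since the task may have moved again:
 *
 *	struct taskqueue *used_tq;
 *
 *	taskqueue_enqueue_optq(taskqueue_thread[mycpuid], &used_tq, &my_task);
 *	...
 *	taskqueue_cancel_simple(&my_task);
 *	taskqueue_drain_simple(&my_task);
 */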

static void
taskqueue_timeout_func(void *arg)
{
	struct taskqueue *queue;
	struct timeout_task *timeout_task;

	timeout_task = arg;
	queue = timeout_task->t.ta_queue;

	TQ_LOCK(queue);
	KASSERT((timeout_task->f & DT_CALLOUT_ARMED) != 0, ("Stray timeout"));
	timeout_task->f &= ~DT_CALLOUT_ARMED;
	queue->tq_callouts--;
	taskqueue_enqueue_locked(queue, &timeout_task->t);
	TQ_UNLOCK(queue);
}

int
taskqueue_enqueue_timeout(struct taskqueue *queue,
    struct timeout_task *timeout_task, int ticks)
{
	int res;

	TQ_LOCK(queue);
	KASSERT(timeout_task->t.ta_queue == NULL ||
		timeout_task->t.ta_queue == queue,
		("Migrated queue"));
	timeout_task->t.ta_queue = queue;
	res = timeout_task->t.ta_pending;
	if (ticks == 0) {
		taskqueue_enqueue_locked(queue, &timeout_task->t);
		TQ_UNLOCK(queue);
	} else {
		if ((timeout_task->f & DT_CALLOUT_ARMED) != 0) {
			res++;
		} else {
			queue->tq_callouts++;
			timeout_task->f |= DT_CALLOUT_ARMED;
		}
		TQ_UNLOCK(queue);
		callout_reset(&timeout_task->c, ticks, taskqueue_timeout_func,
		    timeout_task);
	}
	return (res);
}
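
/*
 * Illustrative sketch: run a task roughly one second from now.  This
 * assumes a TIMEOUT_TASK_INIT() wrapper around _timeout_task_init() in
 * <sys/taskqueue.h>; my_timeout_fn and my_softc are hypothetical names.
 *
 *	struct timeout_task my_tt;
 *
 *	TIMEOUT_TASK_INIT(taskqueue_thread[0], &my_tt, 0,
 *	    my_timeout_fn, my_softc);
 *	taskqueue_enqueue_timeout(taskqueue_thread[0], &my_tt, hz);
 */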

void
taskqueue_block(struct taskqueue *queue)
{
	TQ_LOCK(queue);
	queue->tq_flags |= TQ_FLAGS_BLOCKED;
	TQ_UNLOCK(queue);
}

void
taskqueue_unblock(struct taskqueue *queue)
{
	TQ_LOCK(queue);
	queue->tq_flags &= ~TQ_FLAGS_BLOCKED;
	if (queue->tq_flags & TQ_FLAGS_PENDING) {
		queue->tq_flags &= ~TQ_FLAGS_PENDING;
		if (queue->tq_enqueue)
			queue->tq_enqueue(queue->tq_context);
	}
	TQ_UNLOCK(queue);
}

/*
 * Run all pending tasks on the queue.  If lock_held is zero the queue
 * spinlock is acquired and released here, otherwise the caller must
 * already hold it.  The lock is always dropped around each task callback.
 */
static void
taskqueue_run(struct taskqueue *queue, int lock_held)
{
	struct task *task;
	int pending;

	if (lock_held == 0)
		TQ_LOCK(queue);
	while (STAILQ_FIRST(&queue->tq_queue)) {
		/*
		 * Carefully remove the first task from the queue and
		 * zero its pending count.
		 */
		task = STAILQ_FIRST(&queue->tq_queue);
		STAILQ_REMOVE_HEAD(&queue->tq_queue, ta_link);
		pending = task->ta_pending;
		task->ta_pending = 0;
		queue->tq_running = task;

		TQ_UNLOCK(queue);
		task->ta_func(task->ta_context, pending);
		queue->tq_running = NULL;
		wakeup(task);
		TQ_LOCK(queue);
	}
	if (lock_held == 0)
		TQ_UNLOCK(queue);
}

static int
taskqueue_cancel_locked(struct taskqueue *queue, struct task *task,
    u_int *pendp)
{

	if (task->ta_pending > 0)
		STAILQ_REMOVE(&queue->tq_queue, task, task, ta_link);
	if (pendp != NULL)
		*pendp = task->ta_pending;
	task->ta_pending = 0;
	return (task == queue->tq_running ? EBUSY : 0);
}

int
taskqueue_cancel(struct taskqueue *queue, struct task *task, u_int *pendp)
{
	int error;

	TQ_LOCK(queue);
	error = taskqueue_cancel_locked(queue, task, pendp);
	TQ_UNLOCK(queue);

	return (error);
}

/*
 * Cancel a task without knowing which queue, if any, it is currently
 * enqueued on.  Returns EBUSY if the task is running at the time of
 * the call.
 */
int
taskqueue_cancel_simple(struct task *task)
{
	struct taskqueue *queue;
	int error;

	for (;;) {
		queue = task->ta_queue;
		cpu_ccfence();
		if (queue == NULL) {
			error = 0;
			break;
		}
		TQ_LOCK(queue);
		if (queue == task->ta_queue) {
			error = taskqueue_cancel_locked(queue, task, NULL);
			TQ_UNLOCK(queue);
			break;
		}
		TQ_UNLOCK(queue);
	}
	return error;
}

int
taskqueue_cancel_timeout(struct taskqueue *queue,
    struct timeout_task *timeout_task, u_int *pendp)
{
	u_int pending, pending1;
	int error;

	pending = !!callout_stop(&timeout_task->c);
	TQ_LOCK(queue);
	error = taskqueue_cancel_locked(queue, &timeout_task->t, &pending1);
	if ((timeout_task->f & DT_CALLOUT_ARMED) != 0) {
		timeout_task->f &= ~DT_CALLOUT_ARMED;
		queue->tq_callouts--;
	}
	TQ_UNLOCK(queue);

	if (pendp != NULL)
		*pendp = pending + pending1;
	return (error);
}

void
taskqueue_drain(struct taskqueue *queue, struct task *task)
{
	TQ_LOCK(queue);
	while (task->ta_pending != 0 || task == queue->tq_running)
		TQ_SLEEP(queue, task, "-");
	TQ_UNLOCK(queue);
}
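
/*
 * Illustrative sketch (sc->my_task is hypothetical): typical detach-path
 * teardown.  Cancel the task and, if it was already running at the time
 * of the call (EBUSY), wait for it to finish:
 *
 *	if (taskqueue_cancel(tq, &sc->my_task, NULL) != 0)
 *		taskqueue_drain(tq, &sc->my_task);
 */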

/*
 * Wait for the task to drain and return
 */
void
taskqueue_drain_simple(struct task *task)
{
	struct taskqueue *queue;

	for (;;) {
		queue = task->ta_queue;
		cpu_ccfence();
		if (queue == NULL)
			return;
		TQ_LOCK(queue);
		if (task->ta_pending == 0 && task != queue->tq_running) {
			TQ_UNLOCK(queue);
			return;
		}
		TQ_SLEEP(queue, task, "-");
		TQ_UNLOCK(queue);
	}
}

void
taskqueue_drain_timeout(struct taskqueue *queue,
    struct timeout_task *timeout_task)
{
	callout_cancel(&timeout_task->c);
	taskqueue_drain(queue, &timeout_task->t);
}

static void
taskqueue_swi_enqueue(void *context)
{
	setsofttq();
}

static void
taskqueue_swi_run(void *arg, void *frame)
{
	taskqueue_run(taskqueue_swi, 0);
}

static void
taskqueue_swi_mp_run(void *arg, void *frame)
{
	taskqueue_run(taskqueue_swi_mp, 0);
}

int
taskqueue_start_threads(struct taskqueue **tqp, int count, int pri, int ncpu,
    const char *fmt, ...)
{
	__va_list ap;
	struct thread *td;
	struct taskqueue *tq;
	int i, error, cpu;
	char ktname[MAXCOMLEN];

	tq = *tqp;
	cpu = ncpu;

	/* catch call argument mistakes */
	KKASSERT(pri > 0 && pri < TDPRI_MAX);
	KKASSERT(tq->tq_enqueue == taskqueue_thread_enqueue);

	if (count <= 0)
		return EINVAL;

	__va_start(ap, fmt);
	kvsnprintf(ktname, MAXCOMLEN, fmt, ap);
	__va_end(ap);

	tq->tq_threads = kmalloc(sizeof(struct thread *) * count, M_TASKQUEUE,
	    M_WAITOK | M_ZERO);

	for (i = 0; i < count; i++) {
		/*
		 * If no specific cpu was specified and more than one thread
		 * is to be created, we distribute the threads amongst all
		 * cpus.
		 */
		if ((ncpu <= -1) && (count > 1))
			cpu = i % ncpus;

		if (count == 1) {
			error = lwkt_create(taskqueue_thread_loop, tqp,
			    &tq->tq_threads[i], NULL,
			    TDF_NOSTART, cpu,
			    "%s", ktname);
		} else {
			error = lwkt_create(taskqueue_thread_loop, tqp,
			    &tq->tq_threads[i], NULL,
			    TDF_NOSTART, cpu,
			    "%s_%d", ktname, i);
		}
		if (error) {
			kprintf("%s: lwkt_create(%s): error %d\n", __func__,
			    ktname, error);
			tq->tq_threads[i] = NULL;
		} else {
			td = tq->tq_threads[i];
			lwkt_setpri_initial(td, pri);
			lwkt_schedule(td);
			tq->tq_tcount++;
		}
	}

	return 0;
}
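
/*
 * Illustrative sketch (sc->my_tq is hypothetical): a driver creating its
 * own single-threaded queue with no particular cpu affinity (ncpu == -1):
 *
 *	sc->my_tq = taskqueue_create("mydrv_tq", M_WAITOK,
 *	    taskqueue_thread_enqueue, &sc->my_tq);
 *	taskqueue_start_threads(&sc->my_tq, 1, TDPRI_KERN_DAEMON, -1,
 *	    "mydrv taskq");
 */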

void
taskqueue_thread_loop(void *arg)
{
	struct taskqueue **tqp, *tq;

	tqp = arg;
	tq = *tqp;
	TQ_LOCK(tq);
	while ((tq->tq_flags & TQ_FLAGS_ACTIVE) != 0) {
		taskqueue_run(tq, 1);
		TQ_SLEEP(tq, tq, "tqthr");
	}

	/* rendezvous with thread that asked us to terminate */
	tq->tq_tcount--;
	TQ_UNLOCK(tq);
	wakeup_one(tq->tq_threads);
	lwkt_exit();
}

/* NOTE: tq must be locked */
void
taskqueue_thread_enqueue(void *context)
{
	struct taskqueue **tqp, *tq;

	tqp = context;
	tq = *tqp;

	/* Unlock spinlock before wakeup_one() */
	TQ_UNLOCK(tq);
	wakeup_one(tq);
	TQ_LOCK(tq);
}

TASKQUEUE_DEFINE(swi, taskqueue_swi_enqueue, 0,
    register_swi(SWI_TQ, taskqueue_swi_run, NULL, "swi_taskq", NULL, -1));
/*
 * XXX: possibly use a different SWI_TQ_MP or so.
 * related: sys/interrupt.h
 * related: platform/XXX/isa/ipl_funcs.c
 */
TASKQUEUE_DEFINE(swi_mp, taskqueue_swi_enqueue, 0,
    register_swi_mp(SWI_TQ, taskqueue_swi_mp_run, NULL, "swi_mp_taskq", NULL,
    -1));

struct taskqueue *taskqueue_thread[MAXCPU];

static void
taskqueue_init(void)
{
	int cpu;

	lockinit(&taskqueue_queues_lock, "tqqueues", 0, 0);
	spin_init(&taskqueue_queues_spin, "tqspin");
	STAILQ_INIT(&taskqueue_queues);

	for (cpu = 0; cpu < ncpus; cpu++) {
		taskqueue_thread[cpu] = taskqueue_create("thread", M_INTWAIT,
		    taskqueue_thread_enqueue, &taskqueue_thread[cpu]);
		taskqueue_start_threads(&taskqueue_thread[cpu], 1,
		    TDPRI_KERN_DAEMON, cpu, "taskq_cpu %d", cpu);
	}
}

SYSINIT(taskqueueinit, SI_SUB_PRE_DRIVERS, SI_ORDER_FIRST, taskqueue_init, NULL);