/*-
 * Copyright (c) 2000 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/kern/subr_taskqueue.c,v 1.69 2012/08/28 13:35:37 jhb Exp $
 */

#include <sys/param.h>
#include <sys/queue.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/taskqueue.h>
#include <sys/interrupt.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/kthread.h>
#include <sys/thread2.h>
#include <sys/spinlock.h>
#include <sys/spinlock2.h>
#include <sys/serialize.h>
#include <sys/proc.h>
#include <machine/varargs.h>

MALLOC_DEFINE(M_TASKQUEUE, "taskqueue", "Task Queues");

static STAILQ_HEAD(taskqueue_list, taskqueue) taskqueue_queues;
static struct lock	taskqueue_queues_lock;

struct taskqueue {
	STAILQ_ENTRY(taskqueue)	tq_link;
	STAILQ_HEAD(, task)	tq_queue;
	const char		*tq_name;
	taskqueue_enqueue_fn	tq_enqueue;
	void			*tq_context;

	struct task		*tq_running;
	struct spinlock		tq_lock;
	struct thread		**tq_threads;
	int			tq_tcount;
	int			tq_flags;
	int			tq_callouts;
};

#define	TQ_FLAGS_ACTIVE		(1 << 0)
#define	TQ_FLAGS_BLOCKED	(1 << 1)
#define	TQ_FLAGS_PENDING	(1 << 2)

#define	DT_CALLOUT_ARMED	(1 << 0)

void
_timeout_task_init(struct taskqueue *queue, struct timeout_task *timeout_task,
    int priority, task_fn_t func, void *context)
{

	TASK_INIT(&timeout_task->t, priority, func, context);
	callout_init(&timeout_task->c);
	timeout_task->q = queue;
	timeout_task->f = 0;
}

static void taskqueue_run(struct taskqueue *queue, int lock_held);

static __inline void
TQ_LOCK_INIT(struct taskqueue *tq)
{
	spin_init(&tq->tq_lock, "tqlock");
}

static __inline void
TQ_LOCK_UNINIT(struct taskqueue *tq)
{
	spin_uninit(&tq->tq_lock);
}

static __inline void
TQ_LOCK(struct taskqueue *tq)
{
	spin_lock(&tq->tq_lock);
}

static __inline void
TQ_UNLOCK(struct taskqueue *tq)
{
	spin_unlock(&tq->tq_lock);
}

static __inline void
TQ_SLEEP(struct taskqueue *tq, void *ident, const char *wmesg)
{
	ssleep(ident, &tq->tq_lock, 0, wmesg, 0);
}
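/*
 * A minimal usage sketch (disabled, illustrative only): how a consumer
 * might embed and initialize a task and a timeout task.  The names
 * my_softc, my_task_fn and my_attach_tasks are hypothetical, and
 * TIMEOUT_TASK_INIT() is assumed to be the <sys/taskqueue.h> wrapper
 * around _timeout_task_init() above.
 */
#if 0
struct my_softc {
	struct taskqueue	*sc_tq;		/* set up at attach time */
	struct task		sc_task;
	struct timeout_task	sc_timo;
};

static void
my_task_fn(void *context, int pending)
{
	struct my_softc *sc = context;

	/* 'pending' counts the enqueues coalesced into this single run */
}

static void
my_attach_tasks(struct my_softc *sc)
{
	TASK_INIT(&sc->sc_task, 0, my_task_fn, sc);
	TIMEOUT_TASK_INIT(sc->sc_tq, &sc->sc_timo, 0, my_task_fn, sc);
}
#endif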
struct taskqueue *
taskqueue_create(const char *name, int mflags,
    taskqueue_enqueue_fn enqueue, void *context)
{
	struct taskqueue *queue;

	queue = kmalloc(sizeof(*queue), M_TASKQUEUE, mflags | M_ZERO);
	if (!queue)
		return NULL;
	STAILQ_INIT(&queue->tq_queue);
	queue->tq_name = name;
	queue->tq_enqueue = enqueue;
	queue->tq_context = context;
	queue->tq_flags |= TQ_FLAGS_ACTIVE;
	TQ_LOCK_INIT(queue);

	lockmgr(&taskqueue_queues_lock, LK_EXCLUSIVE);
	STAILQ_INSERT_TAIL(&taskqueue_queues, queue, tq_link);
	lockmgr(&taskqueue_queues_lock, LK_RELEASE);

	return queue;
}

static void
taskqueue_terminate(struct thread **pp, struct taskqueue *tq)
{
	while (tq->tq_tcount > 0) {
		wakeup(tq);
		TQ_SLEEP(tq, pp, "taskqueue_terminate");
	}
}

void
taskqueue_free(struct taskqueue *queue)
{
	TQ_LOCK(queue);
	queue->tq_flags &= ~TQ_FLAGS_ACTIVE;
	taskqueue_run(queue, 1);
	taskqueue_terminate(queue->tq_threads, queue);
	TQ_UNLOCK(queue);

	lockmgr(&taskqueue_queues_lock, LK_EXCLUSIVE);
	STAILQ_REMOVE(&taskqueue_queues, queue, taskqueue, tq_link);
	lockmgr(&taskqueue_queues_lock, LK_RELEASE);

	TQ_LOCK_UNINIT(queue);

	kfree(queue, M_TASKQUEUE);
}

struct taskqueue *
taskqueue_find(const char *name)
{
	struct taskqueue *queue;

	lockmgr(&taskqueue_queues_lock, LK_EXCLUSIVE);
	STAILQ_FOREACH(queue, &taskqueue_queues, tq_link) {
		if (!strcmp(queue->tq_name, name)) {
			lockmgr(&taskqueue_queues_lock, LK_RELEASE);
			return queue;
		}
	}
	lockmgr(&taskqueue_queues_lock, LK_RELEASE);
	return NULL;
}
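/*
 * A usage sketch (disabled, illustrative only) for the create/start API
 * above: a private queue serviced by one kernel thread.  Note that
 * taskqueue_thread_enqueue() takes the address of the queue pointer as
 * its context, so &my_tq must remain valid for the queue's lifetime.
 * The names my_tq and my_attach_queue are hypothetical.
 */
#if 0
static struct taskqueue *my_tq;

static void
my_attach_queue(void)
{
	my_tq = taskqueue_create("my_tq", M_WAITOK,
	    taskqueue_thread_enqueue, &my_tq);
	/* one thread, kernel daemon priority, no cpu affinity (-1) */
	taskqueue_start_threads(&my_tq, 1, TDPRI_KERN_DAEMON, -1, "my_tq");
}
#endif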
/*
 * NOTE! If using the per-cpu taskqueues ``taskqueue_thread[mycpuid]'',
 * be sure NOT TO SHARE the ``task'' between CPUs.  TASKS ARE NOT LOCKED.
 * So either use a throwaway task which will only be enqueued once, or
 * use one task per CPU!
 */
static int
taskqueue_enqueue_locked(struct taskqueue *queue, struct task *task)
{
	struct task *ins;
	struct task *prev;

	/*
	 * Don't allow new tasks on a queue which is being freed.
	 */
	if ((queue->tq_flags & TQ_FLAGS_ACTIVE) == 0)
		return EPIPE;

	/*
	 * Count multiple enqueues.
	 */
	if (task->ta_pending) {
		task->ta_pending++;
		return 0;
	}

	/*
	 * Optimise the case when all tasks have the same priority.
	 */
	prev = STAILQ_LAST(&queue->tq_queue, task, ta_link);
	if (!prev || prev->ta_priority >= task->ta_priority) {
		STAILQ_INSERT_TAIL(&queue->tq_queue, task, ta_link);
	} else {
		prev = NULL;
		for (ins = STAILQ_FIRST(&queue->tq_queue); ins;
		     prev = ins, ins = STAILQ_NEXT(ins, ta_link))
			if (ins->ta_priority < task->ta_priority)
				break;

		if (prev)
			STAILQ_INSERT_AFTER(&queue->tq_queue, prev, task,
			    ta_link);
		else
			STAILQ_INSERT_HEAD(&queue->tq_queue, task, ta_link);
	}

	task->ta_pending = 1;
	if ((queue->tq_flags & TQ_FLAGS_BLOCKED) == 0) {
		if (queue->tq_enqueue)
			queue->tq_enqueue(queue->tq_context);
	} else {
		queue->tq_flags |= TQ_FLAGS_PENDING;
	}

	return 0;
}

int
taskqueue_enqueue(struct taskqueue *queue, struct task *task)
{
	int res;

	TQ_LOCK(queue);
	res = taskqueue_enqueue_locked(queue, task);
	TQ_UNLOCK(queue);

	return (res);
}

static void
taskqueue_timeout_func(void *arg)
{
	struct taskqueue *queue;
	struct timeout_task *timeout_task;

	timeout_task = arg;
	queue = timeout_task->q;

	/*
	 * The callout runs without the queue lock held; take it before
	 * touching the flags or calling taskqueue_enqueue_locked().
	 */
	TQ_LOCK(queue);
	KASSERT((timeout_task->f & DT_CALLOUT_ARMED) != 0, ("Stray timeout"));
	timeout_task->f &= ~DT_CALLOUT_ARMED;
	queue->tq_callouts--;
	taskqueue_enqueue_locked(timeout_task->q, &timeout_task->t);
	TQ_UNLOCK(queue);
}

int
taskqueue_enqueue_timeout(struct taskqueue *queue,
    struct timeout_task *timeout_task, int ticks)
{
	int res;

	TQ_LOCK(queue);
	KASSERT(timeout_task->q == NULL || timeout_task->q == queue,
	    ("Migrated queue"));
	timeout_task->q = queue;
	res = timeout_task->t.ta_pending;
	if (ticks == 0) {
		taskqueue_enqueue_locked(queue, &timeout_task->t);
		TQ_UNLOCK(queue);
	} else {
		if ((timeout_task->f & DT_CALLOUT_ARMED) != 0) {
			res++;
		} else {
			queue->tq_callouts++;
			timeout_task->f |= DT_CALLOUT_ARMED;
		}
		TQ_UNLOCK(queue);
		callout_reset(&timeout_task->c, ticks,
		    taskqueue_timeout_func, timeout_task);
	}
	return (res);
}

void
taskqueue_block(struct taskqueue *queue)
{
	TQ_LOCK(queue);
	queue->tq_flags |= TQ_FLAGS_BLOCKED;
	TQ_UNLOCK(queue);
}

void
taskqueue_unblock(struct taskqueue *queue)
{
	TQ_LOCK(queue);
	queue->tq_flags &= ~TQ_FLAGS_BLOCKED;
	if (queue->tq_flags & TQ_FLAGS_PENDING) {
		queue->tq_flags &= ~TQ_FLAGS_PENDING;
		if (queue->tq_enqueue)
			queue->tq_enqueue(queue->tq_context);
	}
	TQ_UNLOCK(queue);
}
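/*
 * A usage sketch (disabled, illustrative only) of the enqueue and
 * block/unblock primitives above.  my_tq, my_task and my_timo are
 * hypothetical objects initialized elsewhere.
 */
#if 0
static void
my_kick(void)
{
	/* dispatch immediately (coalesced if already pending) */
	taskqueue_enqueue(my_tq, &my_task);

	/* run the timeout task roughly one second from now */
	taskqueue_enqueue_timeout(my_tq, &my_timo, hz);

	/* hold back dispatch over a batch of enqueues ... */
	taskqueue_block(my_tq);
	taskqueue_enqueue(my_tq, &my_task);
	/* ... then let the deferred TQ_FLAGS_PENDING dispatch run */
	taskqueue_unblock(my_tq);
}
#endif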
static void
taskqueue_run(struct taskqueue *queue, int lock_held)
{
	struct task *task;
	int pending;

	if (lock_held == 0)
		TQ_LOCK(queue);
	while (STAILQ_FIRST(&queue->tq_queue)) {
		/*
		 * Carefully remove the first task from the queue and
		 * zero its pending count.
		 */
		task = STAILQ_FIRST(&queue->tq_queue);
		STAILQ_REMOVE_HEAD(&queue->tq_queue, ta_link);
		pending = task->ta_pending;
		task->ta_pending = 0;
		queue->tq_running = task;

		TQ_UNLOCK(queue);
		task->ta_func(task->ta_context, pending);
		queue->tq_running = NULL;
		wakeup(task);
		TQ_LOCK(queue);
	}
	if (lock_held == 0)
		TQ_UNLOCK(queue);
}

static int
taskqueue_cancel_locked(struct taskqueue *queue, struct task *task,
    u_int *pendp)
{

	if (task->ta_pending > 0)
		STAILQ_REMOVE(&queue->tq_queue, task, task, ta_link);
	if (pendp != NULL)
		*pendp = task->ta_pending;
	task->ta_pending = 0;
	return (task == queue->tq_running ? EBUSY : 0);
}

int
taskqueue_cancel(struct taskqueue *queue, struct task *task, u_int *pendp)
{
	int error;

	TQ_LOCK(queue);
	error = taskqueue_cancel_locked(queue, task, pendp);
	TQ_UNLOCK(queue);

	return (error);
}

int
taskqueue_cancel_timeout(struct taskqueue *queue,
    struct timeout_task *timeout_task, u_int *pendp)
{
	u_int pending, pending1;
	int error;

	pending = !!callout_stop(&timeout_task->c);
	TQ_LOCK(queue);
	error = taskqueue_cancel_locked(queue, &timeout_task->t, &pending1);
	if ((timeout_task->f & DT_CALLOUT_ARMED) != 0) {
		timeout_task->f &= ~DT_CALLOUT_ARMED;
		queue->tq_callouts--;
	}
	TQ_UNLOCK(queue);

	if (pendp != NULL)
		*pendp = pending + pending1;
	return (error);
}

void
taskqueue_drain(struct taskqueue *queue, struct task *task)
{
	TQ_LOCK(queue);
	while (task->ta_pending != 0 || task == queue->tq_running)
		TQ_SLEEP(queue, task, "-");
	TQ_UNLOCK(queue);
}

void
taskqueue_drain_timeout(struct taskqueue *queue,
    struct timeout_task *timeout_task)
{

	callout_stop_sync(&timeout_task->c);
	taskqueue_drain(queue, &timeout_task->t);
}

static void
taskqueue_swi_enqueue(void *context)
{
	setsofttq();
}

static void
taskqueue_swi_run(void *arg, void *frame)
{
	taskqueue_run(taskqueue_swi, 0);
}

static void
taskqueue_swi_mp_run(void *arg, void *frame)
{
	taskqueue_run(taskqueue_swi_mp, 0);
}
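/*
 * A teardown sketch (disabled, illustrative only) for the cancel/drain
 * API above.  taskqueue_cancel() returns EBUSY when the task is already
 * running on a queue thread, in which case a drain waits it out.
 * my_softc and my_detach are hypothetical.
 */
#if 0
static void
my_detach(struct my_softc *sc)
{
	u_int pend;

	if (taskqueue_cancel(sc->sc_tq, &sc->sc_task, &pend) == EBUSY)
		taskqueue_drain(sc->sc_tq, &sc->sc_task);
	taskqueue_drain_timeout(sc->sc_tq, &sc->sc_timo);
	taskqueue_free(sc->sc_tq);
}
#endif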
int
taskqueue_start_threads(struct taskqueue **tqp, int count, int pri, int ncpu,
    const char *fmt, ...)
{
	__va_list ap;
	struct thread *td;
	struct taskqueue *tq;
	int i, error, cpu;
	char ktname[MAXCOMLEN];

	if (count <= 0)
		return EINVAL;

	tq = *tqp;
	cpu = ncpu;

	__va_start(ap, fmt);
	kvsnprintf(ktname, MAXCOMLEN, fmt, ap);
	__va_end(ap);

	tq->tq_threads = kmalloc(sizeof(struct thread *) * count, M_TASKQUEUE,
	    M_WAITOK | M_ZERO);

	for (i = 0; i < count; i++) {
		/*
		 * If no specific cpu was specified and more than one thread
		 * is to be created, we distribute the threads amongst all
		 * cpus.
		 */
		if ((ncpu <= -1) && (count > 1))
			cpu = i % ncpus;

		if (count == 1) {
			error = lwkt_create(taskqueue_thread_loop, tqp,
			    &tq->tq_threads[i], NULL,
			    TDF_NOSTART, cpu,
			    "%s", ktname);
		} else {
			error = lwkt_create(taskqueue_thread_loop, tqp,
			    &tq->tq_threads[i], NULL,
			    TDF_NOSTART, cpu,
			    "%s_%d", ktname, i);
		}
		if (error) {
			kprintf("%s: lwkt_create(%s): error %d", __func__,
			    ktname, error);
			tq->tq_threads[i] = NULL;
		} else {
			td = tq->tq_threads[i];
			lwkt_setpri_initial(td, pri);
			lwkt_schedule(td);
			tq->tq_tcount++;
		}
	}

	return 0;
}

void
taskqueue_thread_loop(void *arg)
{
	struct taskqueue **tqp, *tq;

	tqp = arg;
	tq = *tqp;
	TQ_LOCK(tq);
	while ((tq->tq_flags & TQ_FLAGS_ACTIVE) != 0) {
		taskqueue_run(tq, 1);
		TQ_SLEEP(tq, tq, "tqthr");
	}

	/* rendezvous with thread that asked us to terminate */
	tq->tq_tcount--;
	TQ_UNLOCK(tq);
	wakeup_one(tq->tq_threads);
	lwkt_exit();
}

void
taskqueue_thread_enqueue(void *context)
{
	struct taskqueue **tqp, *tq;

	tqp = context;
	tq = *tqp;

	wakeup_one(tq);
}

TASKQUEUE_DEFINE(swi, taskqueue_swi_enqueue, 0,
	register_swi(SWI_TQ, taskqueue_swi_run, NULL, "swi_taskq", NULL, -1));
/*
 * XXX: possibly use a different SWI_TQ_MP or so.
 * related: sys/interrupt.h
 * related: platform/XXX/isa/ipl_funcs.c
 */
TASKQUEUE_DEFINE(swi_mp, taskqueue_swi_enqueue, 0,
	register_swi_mp(SWI_TQ, taskqueue_swi_mp_run, NULL, "swi_mp_taskq",
	    NULL, -1));

struct taskqueue *taskqueue_thread[MAXCPU];

static void
taskqueue_init(void)
{
	int cpu;

	lockinit(&taskqueue_queues_lock, "tqqueues", 0, 0);
	STAILQ_INIT(&taskqueue_queues);

	for (cpu = 0; cpu < ncpus; cpu++) {
		taskqueue_thread[cpu] = taskqueue_create("thread", M_INTWAIT,
		    taskqueue_thread_enqueue, &taskqueue_thread[cpu]);
		taskqueue_start_threads(&taskqueue_thread[cpu], 1,
		    TDPRI_KERN_DAEMON, cpu, "taskq_cpu %d", cpu);
	}
}

SYSINIT(taskqueueinit, SI_SUB_PRE_DRIVERS, SI_ORDER_ANY, taskqueue_init, NULL);
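/*
 * A usage sketch (disabled, illustrative only) for the per-cpu
 * taskqueue_thread[] queues created in taskqueue_init().  As the NOTE
 * above taskqueue_enqueue_locked() warns, tasks are not locked, so use
 * one task per cpu rather than sharing a single task between cpus.
 * my_pcpu_task and my_pcpu_kick are hypothetical.
 */
#if 0
static struct task my_pcpu_task[MAXCPU];

static void
my_pcpu_kick(void)
{
	taskqueue_enqueue(taskqueue_thread[mycpuid], &my_pcpu_task[mycpuid]);
}
#endif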