/*-
 * Copyright (c) 2000 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/taskqueue.h>
#include <sys/unistd.h>
#include <machine/stdarg.h>

static MALLOC_DEFINE(M_TASKQUEUE, "taskqueue", "Task Queues");
static void	*taskqueue_giant_ih;
static void	*taskqueue_ih;

struct taskqueue_busy {
	struct task	*tb_running;
	TAILQ_ENTRY(taskqueue_busy) tb_link;
};

struct taskqueue {
	STAILQ_HEAD(, task)	tq_queue;
	const char		*tq_name;
	taskqueue_enqueue_fn	tq_enqueue;
	void			*tq_context;
	TAILQ_HEAD(, taskqueue_busy) tq_active;
	struct mtx		tq_mutex;
	struct thread		**tq_threads;
	int			tq_tcount;
	int			tq_spin;
	int			tq_flags;
};

#define	TQ_FLAGS_ACTIVE		(1 << 0)
#define	TQ_FLAGS_BLOCKED	(1 << 1)
#define	TQ_FLAGS_PENDING	(1 << 2)

static __inline void
TQ_LOCK(struct taskqueue *tq)
{
	if (tq->tq_spin)
		mtx_lock_spin(&tq->tq_mutex);
	else
		mtx_lock(&tq->tq_mutex);
}

static __inline void
TQ_UNLOCK(struct taskqueue *tq)
{
	if (tq->tq_spin)
		mtx_unlock_spin(&tq->tq_mutex);
	else
		mtx_unlock(&tq->tq_mutex);
}

static __inline int
TQ_SLEEP(struct taskqueue *tq, void *p, struct mtx *m, int pri, const char *wm,
    int t)
{
	if (tq->tq_spin)
		return (msleep_spin(p, m, wm, t));
	return (msleep(p, m, pri, wm, t));
}
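/*
 * Illustrative sketch (not part of the original file): how a consumer
 * typically declares and initializes a task before handing it to the
 * enqueue routines below.  TASK_INIT() comes from <sys/taskqueue.h>;
 * the "example" names and the TASKQUEUE_EXAMPLE guard are hypothetical.
 */
#ifdef TASKQUEUE_EXAMPLE
static void
example_task_fn(void *context, int pending)
{
	/*
	 * "pending" is the number of taskqueue_enqueue() calls that were
	 * coalesced into this single invocation (see ta_pending handling
	 * in taskqueue_enqueue() below).
	 */
}

static struct task example_task;

static void
example_task_setup(void)
{
	/* Priority 0; tasks with higher ta_priority run earlier. */
	TASK_INIT(&example_task, 0, example_task_fn, NULL);
}
#endif /* TASKQUEUE_EXAMPLE */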
static struct taskqueue *
_taskqueue_create(const char *name, int mflags,
		 taskqueue_enqueue_fn enqueue, void *context,
		 int mtxflags, const char *mtxname)
{
	struct taskqueue *queue;

	queue = malloc(sizeof(struct taskqueue), M_TASKQUEUE, mflags | M_ZERO);
	if (!queue)
		return NULL;

	STAILQ_INIT(&queue->tq_queue);
	TAILQ_INIT(&queue->tq_active);
	queue->tq_name = name;
	queue->tq_enqueue = enqueue;
	queue->tq_context = context;
	queue->tq_spin = (mtxflags & MTX_SPIN) != 0;
	queue->tq_flags |= TQ_FLAGS_ACTIVE;
	mtx_init(&queue->tq_mutex, mtxname, NULL, mtxflags);

	return queue;
}

struct taskqueue *
taskqueue_create(const char *name, int mflags,
		 taskqueue_enqueue_fn enqueue, void *context)
{
	return _taskqueue_create(name, mflags, enqueue, context,
			MTX_DEF, "taskqueue");
}

/*
 * Signal a taskqueue thread to terminate.
 */
static void
taskqueue_terminate(struct thread **pp, struct taskqueue *tq)
{

	while (tq->tq_tcount > 0) {
		wakeup(tq);
		TQ_SLEEP(tq, pp, &tq->tq_mutex, PWAIT, "taskqueue_destroy", 0);
	}
}

void
taskqueue_free(struct taskqueue *queue)
{

	TQ_LOCK(queue);
	queue->tq_flags &= ~TQ_FLAGS_ACTIVE;
	taskqueue_terminate(queue->tq_threads, queue);
	KASSERT(TAILQ_EMPTY(&queue->tq_active), ("Tasks still running?"));
	mtx_destroy(&queue->tq_mutex);
	free(queue->tq_threads, M_TASKQUEUE);
	free(queue, M_TASKQUEUE);
}

int
taskqueue_enqueue(struct taskqueue *queue, struct task *task)
{
	struct task *ins;
	struct task *prev;

	TQ_LOCK(queue);

	/*
	 * Count multiple enqueues.
	 */
	if (task->ta_pending) {
		task->ta_pending++;
		TQ_UNLOCK(queue);
		return 0;
	}

	/*
	 * Optimise the case when all tasks have the same priority.
	 */
	prev = STAILQ_LAST(&queue->tq_queue, task, ta_link);
	if (!prev || prev->ta_priority >= task->ta_priority) {
		STAILQ_INSERT_TAIL(&queue->tq_queue, task, ta_link);
	} else {
		prev = NULL;
		for (ins = STAILQ_FIRST(&queue->tq_queue); ins;
		     prev = ins, ins = STAILQ_NEXT(ins, ta_link))
			if (ins->ta_priority < task->ta_priority)
				break;

		if (prev)
			STAILQ_INSERT_AFTER(&queue->tq_queue, prev, task, ta_link);
		else
			STAILQ_INSERT_HEAD(&queue->tq_queue, task, ta_link);
	}

	task->ta_pending = 1;
	if ((queue->tq_flags & TQ_FLAGS_BLOCKED) == 0)
		queue->tq_enqueue(queue->tq_context);
	else
		queue->tq_flags |= TQ_FLAGS_PENDING;

	TQ_UNLOCK(queue);

	return 0;
}

void
taskqueue_block(struct taskqueue *queue)
{

	TQ_LOCK(queue);
	queue->tq_flags |= TQ_FLAGS_BLOCKED;
	TQ_UNLOCK(queue);
}

void
taskqueue_unblock(struct taskqueue *queue)
{

	TQ_LOCK(queue);
	queue->tq_flags &= ~TQ_FLAGS_BLOCKED;
	if (queue->tq_flags & TQ_FLAGS_PENDING) {
		queue->tq_flags &= ~TQ_FLAGS_PENDING;
		queue->tq_enqueue(queue->tq_context);
	}
	TQ_UNLOCK(queue);
}
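/*
 * Illustrative sketch (not part of the original file): two back-to-back
 * enqueues of the same task before it has run are coalesced, so ta_func
 * fires once with pending == 2.  The names and the TASKQUEUE_EXAMPLE
 * guard are hypothetical.
 */
#ifdef TASKQUEUE_EXAMPLE
static void
example_coalesce(struct taskqueue *tq, struct task *tk)
{
	taskqueue_enqueue(tq, tk);	/* ta_pending 0 -> 1, task queued */
	taskqueue_enqueue(tq, tk);	/* ta_pending 1 -> 2, not re-queued */
	/* When the queue runs, tk->ta_func(tk->ta_context, 2) is called. */
}
#endif /* TASKQUEUE_EXAMPLE */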
static void
taskqueue_run_locked(struct taskqueue *queue)
{
	struct taskqueue_busy tb;
	struct task *task;
	int pending;

	mtx_assert(&queue->tq_mutex, MA_OWNED);
	tb.tb_running = NULL;
	TAILQ_INSERT_TAIL(&queue->tq_active, &tb, tb_link);

	while (STAILQ_FIRST(&queue->tq_queue)) {
		/*
		 * Carefully remove the first task from the queue and
		 * zero its pending count.
		 */
		task = STAILQ_FIRST(&queue->tq_queue);
		STAILQ_REMOVE_HEAD(&queue->tq_queue, ta_link);
		pending = task->ta_pending;
		task->ta_pending = 0;
		tb.tb_running = task;
		TQ_UNLOCK(queue);

		task->ta_func(task->ta_context, pending);

		TQ_LOCK(queue);
		tb.tb_running = NULL;
		wakeup(task);
	}
	TAILQ_REMOVE(&queue->tq_active, &tb, tb_link);
}

void
taskqueue_run(struct taskqueue *queue)
{

	TQ_LOCK(queue);
	taskqueue_run_locked(queue);
	TQ_UNLOCK(queue);
}

static int
task_is_running(struct taskqueue *queue, struct task *task)
{
	struct taskqueue_busy *tb;

	mtx_assert(&queue->tq_mutex, MA_OWNED);
	TAILQ_FOREACH(tb, &queue->tq_active, tb_link) {
		if (tb->tb_running == task)
			return (1);
	}
	return (0);
}

void
taskqueue_drain(struct taskqueue *queue, struct task *task)
{

	if (!queue->tq_spin)
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, __func__);

	TQ_LOCK(queue);
	while (task->ta_pending != 0 || task_is_running(queue, task))
		TQ_SLEEP(queue, task, &queue->tq_mutex, PWAIT, "-", 0);
	TQ_UNLOCK(queue);
}

static void
taskqueue_swi_enqueue(void *context)
{
	swi_sched(taskqueue_ih, 0);
}

static void
taskqueue_swi_run(void *dummy)
{
	taskqueue_run(taskqueue_swi);
}

static void
taskqueue_swi_giant_enqueue(void *context)
{
	swi_sched(taskqueue_giant_ih, 0);
}

static void
taskqueue_swi_giant_run(void *dummy)
{
	taskqueue_run(taskqueue_swi_giant);
}

int
taskqueue_start_threads(struct taskqueue **tqp, int count, int pri,
			const char *name, ...)
{
	va_list ap;
	struct thread *td;
	struct taskqueue *tq;
	int i, error;
	char ktname[MAXCOMLEN + 1];

	if (count <= 0)
		return (EINVAL);

	tq = *tqp;

	va_start(ap, name);
	vsnprintf(ktname, sizeof(ktname), name, ap);
	va_end(ap);

	tq->tq_threads = malloc(sizeof(struct thread *) * count, M_TASKQUEUE,
	    M_NOWAIT | M_ZERO);
	if (tq->tq_threads == NULL) {
		printf("%s: no memory for %s threads\n", __func__, ktname);
		return (ENOMEM);
	}

	for (i = 0; i < count; i++) {
		if (count == 1)
			error = kthread_add(taskqueue_thread_loop, tqp, NULL,
			    &tq->tq_threads[i], RFSTOPPED, 0, "%s", ktname);
		else
			error = kthread_add(taskqueue_thread_loop, tqp, NULL,
			    &tq->tq_threads[i], RFSTOPPED, 0,
			    "%s_%d", ktname, i);
		if (error) {
			/* should be ok to continue, taskqueue_free will dtrt */
			printf("%s: kthread_add(%s): error %d\n", __func__,
			    ktname, error);
			tq->tq_threads[i] = NULL;	/* paranoid */
		} else
			tq->tq_tcount++;
	}
	for (i = 0; i < count; i++) {
		if (tq->tq_threads[i] == NULL)
			continue;
		td = tq->tq_threads[i];
		thread_lock(td);
		sched_prio(td, pri);
		sched_add(td, SRQ_BORING);
		thread_unlock(td);
	}

	return (0);
}
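/*
 * Illustrative sketch (not part of the original file): the common pattern
 * for a private, thread-backed queue.  taskqueue_thread_enqueue() expects
 * its context to be a pointer to the taskqueue pointer, which is why the
 * queue is created with "&example_tq" rather than the queue itself.  The
 * names and the TASKQUEUE_EXAMPLE guard are hypothetical.
 */
#ifdef TASKQUEUE_EXAMPLE
static struct taskqueue *example_tq;

static void
example_tq_setup(void)
{
	example_tq = taskqueue_create("example_tq", M_WAITOK,
	    taskqueue_thread_enqueue, &example_tq);
	taskqueue_start_threads(&example_tq, 1, PWAIT, "example taskq");
}
#endif /* TASKQUEUE_EXAMPLE */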
void
taskqueue_thread_loop(void *arg)
{
	struct taskqueue **tqp, *tq;

	tqp = arg;
	tq = *tqp;
	TQ_LOCK(tq);
	while ((tq->tq_flags & TQ_FLAGS_ACTIVE) != 0) {
		taskqueue_run_locked(tq);
		/*
		 * Because taskqueue_run_locked() can drop tq_mutex, we need
		 * to check if the TQ_FLAGS_ACTIVE flag wasn't removed in the
		 * meantime, which means we missed a wakeup.
		 */
		if ((tq->tq_flags & TQ_FLAGS_ACTIVE) == 0)
			break;
		TQ_SLEEP(tq, tq, &tq->tq_mutex, 0, "-", 0);
	}
	taskqueue_run_locked(tq);

	/* rendezvous with thread that asked us to terminate */
	tq->tq_tcount--;
	wakeup_one(tq->tq_threads);
	TQ_UNLOCK(tq);
	kthread_exit();
}

void
taskqueue_thread_enqueue(void *context)
{
	struct taskqueue **tqp, *tq;

	tqp = context;
	tq = *tqp;

	mtx_assert(&tq->tq_mutex, MA_OWNED);
	wakeup_one(tq);
}

TASKQUEUE_DEFINE(swi, taskqueue_swi_enqueue, NULL,
		 swi_add(NULL, "task queue", taskqueue_swi_run, NULL, SWI_TQ,
		     INTR_MPSAFE, &taskqueue_ih));

TASKQUEUE_DEFINE(swi_giant, taskqueue_swi_giant_enqueue, NULL,
		 swi_add(NULL, "Giant taskq", taskqueue_swi_giant_run,
		     NULL, SWI_TQ_GIANT, 0, &taskqueue_giant_ih));

TASKQUEUE_DEFINE_THREAD(thread);

struct taskqueue *
taskqueue_create_fast(const char *name, int mflags,
		 taskqueue_enqueue_fn enqueue, void *context)
{
	return _taskqueue_create(name, mflags, enqueue, context,
			MTX_SPIN, "fast_taskqueue");
}

/* NB: for backwards compatibility */
int
taskqueue_enqueue_fast(struct taskqueue *queue, struct task *task)
{
	return taskqueue_enqueue(queue, task);
}

static void	*taskqueue_fast_ih;

static void
taskqueue_fast_enqueue(void *context)
{
	swi_sched(taskqueue_fast_ih, 0);
}

static void
taskqueue_fast_run(void *dummy)
{
	taskqueue_run(taskqueue_fast);
}

TASKQUEUE_FAST_DEFINE(fast, taskqueue_fast_enqueue, NULL,
	swi_add(NULL, "Fast task queue", taskqueue_fast_run, NULL,
	SWI_TQ_FAST, INTR_MPSAFE, &taskqueue_fast_ih));

int
taskqueue_member(struct taskqueue *queue, struct thread *td)
{
	int i, j, ret = 0;

	TQ_LOCK(queue);
	for (i = 0, j = 0; ; i++) {
		if (queue->tq_threads[i] == NULL)
			continue;
		if (queue->tq_threads[i] == td) {
			ret = 1;
			break;
		}
		if (++j >= queue->tq_tcount)
			break;
	}
	TQ_UNLOCK(queue);
	return (ret);
}
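/*
 * Illustrative sketch (not part of the original file): the usual teardown
 * order.  Draining first ensures the task is neither queued nor running
 * before taskqueue_free() stops the queue's threads and releases it.  The
 * names and the TASKQUEUE_EXAMPLE guard are hypothetical.
 */
#ifdef TASKQUEUE_EXAMPLE
static void
example_tq_teardown(struct taskqueue *tq, struct task *tk)
{
	taskqueue_drain(tq, tk);	/* wait until tk is idle */
	taskqueue_free(tq);		/* stop threads, free the queue */
}
#endif /* TASKQUEUE_EXAMPLE */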