/*
 * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
 * Copyright (C) 2007 The Regents of the University of California.
 * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 * Written by Brian Behlendorf <behlendorf1@llnl.gov>.
 * UCRL-CODE-235197
 *
 * This file is part of the SPL, Solaris Porting Layer.
 *
 * The SPL is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * The SPL is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with the SPL. If not, see <http://www.gnu.org/licenses/>.
 *
 * Solaris Porting Layer (SPL) Task Queue Implementation.
 */

#include <sys/timer.h>
#include <sys/taskq.h>
#include <sys/kmem.h>
#include <sys/tsd.h>
#include <sys/trace_spl.h>
#ifdef HAVE_CPU_HOTPLUG
#include <linux/cpuhotplug.h>
#endif

static int spl_taskq_thread_bind = 0;
module_param(spl_taskq_thread_bind, int, 0644);
MODULE_PARM_DESC(spl_taskq_thread_bind, "Bind taskq thread to CPU by default");

static uint_t spl_taskq_thread_timeout_ms = 5000;
/* BEGIN CSTYLED */
module_param(spl_taskq_thread_timeout_ms, uint, 0644);
/* END CSTYLED */
MODULE_PARM_DESC(spl_taskq_thread_timeout_ms,
	"Minimum idle threads exit interval for dynamic taskqs");

static int spl_taskq_thread_dynamic = 1;
module_param(spl_taskq_thread_dynamic, int, 0444);
MODULE_PARM_DESC(spl_taskq_thread_dynamic, "Allow dynamic taskq threads");

static int spl_taskq_thread_priority = 1;
module_param(spl_taskq_thread_priority, int, 0644);
MODULE_PARM_DESC(spl_taskq_thread_priority,
	"Allow non-default priority for taskq threads");

static uint_t spl_taskq_thread_sequential = 4;
/* BEGIN CSTYLED */
module_param(spl_taskq_thread_sequential, uint, 0644);
/* END CSTYLED */
MODULE_PARM_DESC(spl_taskq_thread_sequential,
	"Create new taskq threads after N sequential tasks");

/*
 * Global system-wide dynamic task queue available for all consumers. This
 * taskq is not intended for long-running tasks; instead, a dedicated taskq
 * should be created.
 */
taskq_t *system_taskq;
EXPORT_SYMBOL(system_taskq);
/* Global dynamic task queue for long delay */
taskq_t *system_delay_taskq;
EXPORT_SYMBOL(system_delay_taskq);

/* Private dedicated taskq for creating new taskq threads on demand. */
static taskq_t *dynamic_taskq;
static taskq_thread_t *taskq_thread_create(taskq_t *);

#ifdef HAVE_CPU_HOTPLUG
/* Multi-callback id for cpu hotplugging. */
static int spl_taskq_cpuhp_state;
#endif

/* List of all taskqs */
LIST_HEAD(tq_list);
struct rw_semaphore tq_list_sem;
static uint_t taskq_tsd;

static int
task_km_flags(uint_t flags)
{
	if (flags & TQ_NOSLEEP)
		return (KM_NOSLEEP);

	if (flags & TQ_PUSHPAGE)
		return (KM_PUSHPAGE);

	return (KM_SLEEP);
}

/*
 * taskq_find_by_name - Find the largest instance number of a named taskq.
 */
static int
taskq_find_by_name(const char *name)
{
	struct list_head *tql = NULL;
	taskq_t *tq;

	list_for_each_prev(tql, &tq_list) {
		tq = list_entry(tql, taskq_t, tq_taskqs);
		if (strcmp(name, tq->tq_name) == 0)
			return (tq->tq_instance);
	}
	return (-1);
}

/*
 * NOTE: Must be called with tq->tq_lock held, returns a taskq_ent_t which
 * is not attached to the free, work, or pending taskq lists.
 */
static taskq_ent_t *
task_alloc(taskq_t *tq, uint_t flags, unsigned long *irqflags)
{
	taskq_ent_t *t;
	int count = 0;

	ASSERT(tq);
retry:
	/* Acquire taskq_ent_t's from free list if available */
	if (!list_empty(&tq->tq_free_list) && !(flags & TQ_NEW)) {
		t = list_entry(tq->tq_free_list.next, taskq_ent_t, tqent_list);

		ASSERT(!(t->tqent_flags & TQENT_FLAG_PREALLOC));
		ASSERT(!(t->tqent_flags & TQENT_FLAG_CANCEL));
		ASSERT(!timer_pending(&t->tqent_timer));

		list_del_init(&t->tqent_list);
		return (t);
	}

	/* Free list is empty and memory allocations are prohibited */
	if (flags & TQ_NOALLOC)
		return (NULL);

	/* Hit maximum taskq_ent_t pool size */
	if (tq->tq_nalloc >= tq->tq_maxalloc) {
		if (flags & TQ_NOSLEEP)
			return (NULL);

		/*
		 * Sleep periodically polling the free list for an available
		 * taskq_ent_t. Dispatching with TQ_SLEEP should always succeed
		 * but we cannot block forever waiting for a taskq_ent_t to
		 * show up in the free list, otherwise a deadlock can happen.
		 *
		 * Therefore, we need to allocate a new task even if the number
		 * of allocated tasks is above tq->tq_maxalloc, but we still
		 * end up delaying the task allocation by one second, thereby
		 * throttling the task dispatch rate.
		 */
		spin_unlock_irqrestore(&tq->tq_lock, *irqflags);
		schedule_timeout_interruptible(HZ / 100);
		spin_lock_irqsave_nested(&tq->tq_lock, *irqflags,
		    tq->tq_lock_class);
		if (count < 100) {
			count++;
			goto retry;
		}
	}

	spin_unlock_irqrestore(&tq->tq_lock, *irqflags);
	t = kmem_alloc(sizeof (taskq_ent_t), task_km_flags(flags));
	spin_lock_irqsave_nested(&tq->tq_lock, *irqflags, tq->tq_lock_class);

	if (t) {
		taskq_init_ent(t);
		tq->tq_nalloc++;
	}

	return (t);
}

/*
 * NOTE: Must be called with tq->tq_lock held, expects the taskq_ent_t
 * to already be removed from the free, work, or pending taskq lists.
 */
static void
task_free(taskq_t *tq, taskq_ent_t *t)
{
	ASSERT(tq);
	ASSERT(t);
	ASSERT(list_empty(&t->tqent_list));
	ASSERT(!timer_pending(&t->tqent_timer));

	kmem_free(t, sizeof (taskq_ent_t));
	tq->tq_nalloc--;
}

/*
 * NOTE: Must be called with tq->tq_lock held, either destroys the
 * taskq_ent_t if too many exist or moves it to the free list for later use.
 */
static void
task_done(taskq_t *tq, taskq_ent_t *t)
{
	ASSERT(tq);
	ASSERT(t);

	/* Wake tasks blocked in taskq_wait_id() */
	wake_up_all(&t->tqent_waitq);

	list_del_init(&t->tqent_list);

	if (tq->tq_nalloc <= tq->tq_minalloc) {
		t->tqent_id = TASKQID_INVALID;
		t->tqent_func = NULL;
		t->tqent_arg = NULL;
		t->tqent_flags = 0;

		list_add_tail(&t->tqent_list, &tq->tq_free_list);
	} else {
		task_free(tq, t);
	}
}

/*
 * When a delayed task timer expires, remove it from the delay list and
 * add it to the priority list, in task id order, for immediate processing.
 */
static void
task_expire_impl(taskq_ent_t *t)
{
	taskq_ent_t *w;
	taskq_t *tq = t->tqent_taskq;
	struct list_head *l = NULL;
	unsigned long flags;

	spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);

	if (t->tqent_flags & TQENT_FLAG_CANCEL) {
		ASSERT(list_empty(&t->tqent_list));
		spin_unlock_irqrestore(&tq->tq_lock, flags);
		return;
	}

	t->tqent_birth = jiffies;
	DTRACE_PROBE1(taskq_ent__birth, taskq_ent_t *, t);

	/*
	 * The priority list must be maintained in strict task id order
	 * from lowest to highest for lowest_id to be easily calculable.
	 */
	list_del(&t->tqent_list);
	list_for_each_prev(l, &tq->tq_prio_list) {
		w = list_entry(l, taskq_ent_t, tqent_list);
		if (w->tqent_id < t->tqent_id) {
			list_add(&t->tqent_list, l);
			break;
		}
	}
	if (l == &tq->tq_prio_list)
		list_add(&t->tqent_list, &tq->tq_prio_list);

	spin_unlock_irqrestore(&tq->tq_lock, flags);

	wake_up(&tq->tq_work_waitq);
}

static void
task_expire(spl_timer_list_t tl)
{
	struct timer_list *tmr = (struct timer_list *)tl;
	taskq_ent_t *t = from_timer(t, tmr, tqent_timer);
	task_expire_impl(t);
}

/*
 * Returns the lowest incomplete taskqid_t. The taskqid_t may
 * be queued on the pending list, on the priority list, on the
 * delay list, or on the work list currently being handled, but
 * it is not 100% complete yet.
 */
static taskqid_t
taskq_lowest_id(taskq_t *tq)
{
	taskqid_t lowest_id = tq->tq_next_id;
	taskq_ent_t *t;
	taskq_thread_t *tqt;

	if (!list_empty(&tq->tq_pend_list)) {
		t = list_entry(tq->tq_pend_list.next, taskq_ent_t, tqent_list);
		lowest_id = MIN(lowest_id, t->tqent_id);
	}

	if (!list_empty(&tq->tq_prio_list)) {
		t = list_entry(tq->tq_prio_list.next, taskq_ent_t, tqent_list);
		lowest_id = MIN(lowest_id, t->tqent_id);
	}

	if (!list_empty(&tq->tq_delay_list)) {
		t = list_entry(tq->tq_delay_list.next, taskq_ent_t, tqent_list);
		lowest_id = MIN(lowest_id, t->tqent_id);
	}

	if (!list_empty(&tq->tq_active_list)) {
		tqt = list_entry(tq->tq_active_list.next, taskq_thread_t,
		    tqt_active_list);
		ASSERT(tqt->tqt_id != TASKQID_INVALID);
		lowest_id = MIN(lowest_id, tqt->tqt_id);
	}

	return (lowest_id);
}

/*
 * Insert a taskq thread into the active list, keeping the list sorted by
 * the increasing task id it is handling.
 */
static void
taskq_insert_in_order(taskq_t *tq, taskq_thread_t *tqt)
{
	taskq_thread_t *w;
	struct list_head *l = NULL;

	ASSERT(tq);
	ASSERT(tqt);

	list_for_each_prev(l, &tq->tq_active_list) {
		w = list_entry(l, taskq_thread_t, tqt_active_list);
		if (w->tqt_id < tqt->tqt_id) {
			list_add(&tqt->tqt_active_list, l);
			break;
		}
	}
	if (l == &tq->tq_active_list)
		list_add(&tqt->tqt_active_list, &tq->tq_active_list);
}

/*
 * Find and return a task from the given list if it exists. The list
 * must be in lowest to highest task id order.
 */
static taskq_ent_t *
taskq_find_list(taskq_t *tq, struct list_head *lh, taskqid_t id)
{
	struct list_head *l = NULL;
	taskq_ent_t *t;

	list_for_each(l, lh) {
		t = list_entry(l, taskq_ent_t, tqent_list);

		if (t->tqent_id == id)
			return (t);

		if (t->tqent_id > id)
			break;
	}

	return (NULL);
}

/*
 * Find an already dispatched task given the task id regardless of what
 * state it is in. If a task is still pending it will be returned.
 * If a task is executing, then -EBUSY will be returned instead.
 * If the task has already been run then NULL is returned.
 */
static taskq_ent_t *
taskq_find(taskq_t *tq, taskqid_t id)
{
	taskq_thread_t *tqt;
	struct list_head *l = NULL;
	taskq_ent_t *t;

	t = taskq_find_list(tq, &tq->tq_delay_list, id);
	if (t)
		return (t);

	t = taskq_find_list(tq, &tq->tq_prio_list, id);
	if (t)
		return (t);

	t = taskq_find_list(tq, &tq->tq_pend_list, id);
	if (t)
		return (t);

	list_for_each(l, &tq->tq_active_list) {
		tqt = list_entry(l, taskq_thread_t, tqt_active_list);
		if (tqt->tqt_id == id) {
			/*
			 * Instead of returning tqt_task, we just return a non
			 * NULL value to prevent misuse, since tqt_task only
			 * has two valid fields.
			 */
			return (ERR_PTR(-EBUSY));
		}
	}

	return (NULL);
}

/*
 * Theory for the taskq_wait_id(), taskq_wait_outstanding(), and
 * taskq_wait() functions below.
 *
 * Taskq waiting is accomplished by tracking the lowest outstanding task
 * id and the next available task id. As tasks are dispatched they are
 * added to the tail of the pending, priority, or delay lists. As worker
 * threads become available the tasks are removed from the heads of these
 * lists and linked to the worker threads. This ensures the lists are
 * kept sorted by lowest to highest task id.
 *
 * Therefore the lowest outstanding task id can be quickly determined by
 * checking the head item from all of these lists. This value is stored
 * with the taskq as the lowest id. It only needs to be recalculated when
 * either the task with the current lowest id completes or is canceled.
 *
 * By blocking until the lowest task id exceeds the passed task id the
 * taskq_wait_outstanding() function can be easily implemented. Similarly,
 * by blocking until the lowest task id matches the next task id taskq_wait()
 * can be implemented.
 *
 * Callers should be aware that when there are multiple worker threads it
 * is possible for larger task ids to complete before smaller ones. Also,
 * when the taskq contains delay tasks with small task ids, callers may
 * block for a considerable length of time waiting for them to expire and
 * execute.
 */
static int
taskq_wait_id_check(taskq_t *tq, taskqid_t id)
{
	int rc;
	unsigned long flags;

	spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
	rc = (taskq_find(tq, id) == NULL);
	spin_unlock_irqrestore(&tq->tq_lock, flags);

	return (rc);
}

/*
 * The taskq_wait_id() function blocks until the passed task id completes.
 * This does not guarantee that all lower task ids have completed.
 */
void
taskq_wait_id(taskq_t *tq, taskqid_t id)
{
	wait_event(tq->tq_wait_waitq, taskq_wait_id_check(tq, id));
}
EXPORT_SYMBOL(taskq_wait_id);

static int
taskq_wait_outstanding_check(taskq_t *tq, taskqid_t id)
{
	int rc;
	unsigned long flags;

	spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
	rc = (id < tq->tq_lowest_id);
	spin_unlock_irqrestore(&tq->tq_lock, flags);

	return (rc);
}

/*
 * The taskq_wait_outstanding() function will block until all tasks with a
 * lower taskqid than the passed 'id' have been completed. Note that all
 * task ids are assigned monotonically at dispatch time. Zero may be
 * passed for the id to indicate that all tasks dispatched up to this point,
 * but not after, should be waited for.
 */
void
taskq_wait_outstanding(taskq_t *tq, taskqid_t id)
{
	id = id ? id : tq->tq_next_id - 1;
	wait_event(tq->tq_wait_waitq, taskq_wait_outstanding_check(tq, id));
}
EXPORT_SYMBOL(taskq_wait_outstanding);

static int
taskq_wait_check(taskq_t *tq)
{
	int rc;
	unsigned long flags;

	spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
	rc = (tq->tq_lowest_id == tq->tq_next_id);
	spin_unlock_irqrestore(&tq->tq_lock, flags);

	return (rc);
}

/*
 * The taskq_wait() function will block until the taskq is empty.
 * This means that if a taskq re-dispatches work to itself taskq_wait()
 * callers will block indefinitely.
 */
void
taskq_wait(taskq_t *tq)
{
	wait_event(tq->tq_wait_waitq, taskq_wait_check(tq));
}
EXPORT_SYMBOL(taskq_wait);
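
/*
 * Illustrative usage sketch for the wait functions above (hypothetical
 * consumer code, not compiled as part of this file; tq, func, and arg are
 * assumed to exist in the caller):
 *
 *	taskqid_t id = taskq_dispatch(tq, func, arg, TQ_SLEEP);
 *
 *	if (id != TASKQID_INVALID)
 *		taskq_wait_id(tq, id);
 *
 *	taskq_wait_outstanding(tq, 0);
 *	taskq_wait(tq);
 *
 * taskq_wait_id() blocks only for the given id, taskq_wait_outstanding(tq, 0)
 * blocks for every task dispatched before the call, and taskq_wait() blocks
 * until the taskq is completely empty.
 */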

int
taskq_member(taskq_t *tq, kthread_t *t)
{
	return (tq == (taskq_t *)tsd_get_by_thread(taskq_tsd, t));
}
EXPORT_SYMBOL(taskq_member);

taskq_t *
taskq_of_curthread(void)
{
	return (tsd_get(taskq_tsd));
}
EXPORT_SYMBOL(taskq_of_curthread);

/*
 * Cancel an already dispatched task given the task id. Still pending tasks
 * will be immediately canceled, and if the task is active the function will
 * block until it completes. Preallocated tasks which are canceled must be
 * freed by the caller.
 */
int
taskq_cancel_id(taskq_t *tq, taskqid_t id)
{
	taskq_ent_t *t;
	int rc = ENOENT;
	unsigned long flags;

	ASSERT(tq);

	spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
	t = taskq_find(tq, id);
	if (t && t != ERR_PTR(-EBUSY)) {
		list_del_init(&t->tqent_list);
		t->tqent_flags |= TQENT_FLAG_CANCEL;

		/*
		 * When canceling the lowest outstanding task id we
		 * must recalculate the new lowest outstanding id.
		 */
		if (tq->tq_lowest_id == t->tqent_id) {
			tq->tq_lowest_id = taskq_lowest_id(tq);
			ASSERT3S(tq->tq_lowest_id, >, t->tqent_id);
		}

		/*
		 * The task_expire() function takes the tq->tq_lock so drop
		 * the lock before synchronously cancelling the timer.
		 */
		if (timer_pending(&t->tqent_timer)) {
			spin_unlock_irqrestore(&tq->tq_lock, flags);
			del_timer_sync(&t->tqent_timer);
			spin_lock_irqsave_nested(&tq->tq_lock, flags,
			    tq->tq_lock_class);
		}

		if (!(t->tqent_flags & TQENT_FLAG_PREALLOC))
			task_done(tq, t);

		rc = 0;
	}
	spin_unlock_irqrestore(&tq->tq_lock, flags);

	if (t == ERR_PTR(-EBUSY)) {
		taskq_wait_id(tq, id);
		rc = EBUSY;
	}

	return (rc);
}
EXPORT_SYMBOL(taskq_cancel_id);
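
/*
 * Illustrative sketch of cancelling a dispatched task (hypothetical caller
 * code, not compiled as part of this file; tq and id are assumed to come
 * from an earlier taskq_dispatch() call):
 *
 *	int error = taskq_cancel_id(tq, id);
 *
 * A return of 0 means the pending task was removed before it ran, EBUSY
 * means the task was already executing and taskq_cancel_id() waited for it
 * to finish, and ENOENT means no task with that id was found.
 */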

static int taskq_thread_spawn(taskq_t *tq);

taskqid_t
taskq_dispatch(taskq_t *tq, task_func_t func, void *arg, uint_t flags)
{
	taskq_ent_t *t;
	taskqid_t rc = TASKQID_INVALID;
	unsigned long irqflags;

	ASSERT(tq);
	ASSERT(func);

	spin_lock_irqsave_nested(&tq->tq_lock, irqflags, tq->tq_lock_class);

	/* Taskq being destroyed and all tasks drained */
	if (!(tq->tq_flags & TASKQ_ACTIVE))
		goto out;

	/* Do not queue the task unless there is an idle thread for it */
	ASSERT(tq->tq_nactive <= tq->tq_nthreads);
	if ((flags & TQ_NOQUEUE) && (tq->tq_nactive == tq->tq_nthreads)) {
		/* Dynamic taskq may be able to spawn another thread */
		if (taskq_thread_spawn(tq) == 0)
			goto out;
	}

	if ((t = task_alloc(tq, flags, &irqflags)) == NULL)
		goto out;

	spin_lock(&t->tqent_lock);

	/* Queue to the front of the list to enforce TQ_NOQUEUE semantics */
	if (flags & TQ_NOQUEUE)
		list_add(&t->tqent_list, &tq->tq_prio_list);
	/* Queue to the priority list instead of the pending list */
	else if (flags & TQ_FRONT)
		list_add_tail(&t->tqent_list, &tq->tq_prio_list);
	else
		list_add_tail(&t->tqent_list, &tq->tq_pend_list);

	t->tqent_id = rc = tq->tq_next_id;
	tq->tq_next_id++;
	t->tqent_func = func;
	t->tqent_arg = arg;
	t->tqent_taskq = tq;
	t->tqent_timer.function = NULL;
	t->tqent_timer.expires = 0;

	t->tqent_birth = jiffies;
	DTRACE_PROBE1(taskq_ent__birth, taskq_ent_t *, t);

	ASSERT(!(t->tqent_flags & TQENT_FLAG_PREALLOC));

	spin_unlock(&t->tqent_lock);

	wake_up(&tq->tq_work_waitq);

	/* Spawn additional taskq threads if required. */
	if (!(flags & TQ_NOQUEUE) && tq->tq_nactive == tq->tq_nthreads)
		(void) taskq_thread_spawn(tq);
out:
	spin_unlock_irqrestore(&tq->tq_lock, irqflags);
	return (rc);
}
EXPORT_SYMBOL(taskq_dispatch);
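
/*
 * Illustrative dispatch sketch (hypothetical consumer code, not compiled as
 * part of this file; my_func and my_arg are made-up names):
 *
 *	static void my_func(void *arg)
 *	{
 *		my_arg_t *ma = arg;
 *		do_the_work(ma);
 *	}
 *
 *	taskqid_t id = taskq_dispatch(tq, my_func, my_arg, TQ_SLEEP);
 *	if (id == TASKQID_INVALID)
 *		handle_dispatch_failure(my_arg);
 *
 * TQ_NOSLEEP may be used instead of TQ_SLEEP when the caller cannot block,
 * at the cost of the dispatch possibly failing under memory pressure.
 */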

taskqid_t
taskq_dispatch_delay(taskq_t *tq, task_func_t func, void *arg,
    uint_t flags, clock_t expire_time)
{
	taskqid_t rc = TASKQID_INVALID;
	taskq_ent_t *t;
	unsigned long irqflags;

	ASSERT(tq);
	ASSERT(func);

	spin_lock_irqsave_nested(&tq->tq_lock, irqflags, tq->tq_lock_class);

	/* Taskq being destroyed and all tasks drained */
	if (!(tq->tq_flags & TASKQ_ACTIVE))
		goto out;

	if ((t = task_alloc(tq, flags, &irqflags)) == NULL)
		goto out;

	spin_lock(&t->tqent_lock);

	/* Queue to the delay list for subsequent execution */
	list_add_tail(&t->tqent_list, &tq->tq_delay_list);

	t->tqent_id = rc = tq->tq_next_id;
	tq->tq_next_id++;
	t->tqent_func = func;
	t->tqent_arg = arg;
	t->tqent_taskq = tq;
	t->tqent_timer.function = task_expire;
	t->tqent_timer.expires = (unsigned long)expire_time;
	add_timer(&t->tqent_timer);

	ASSERT(!(t->tqent_flags & TQENT_FLAG_PREALLOC));

	spin_unlock(&t->tqent_lock);

	/* Spawn additional taskq threads if required. */
	if (tq->tq_nactive == tq->tq_nthreads)
		(void) taskq_thread_spawn(tq);
out:
	spin_unlock_irqrestore(&tq->tq_lock, irqflags);
	return (rc);
}
EXPORT_SYMBOL(taskq_dispatch_delay);
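
/*
 * Illustrative delayed dispatch sketch (hypothetical caller code, not
 * compiled as part of this file; my_func and my_arg are made-up names).
 * The expire_time argument is used directly as the absolute timer expiry,
 * so a relative delay is expressed by adding to the current jiffies value:
 *
 *	clock_t expire = jiffies + msecs_to_jiffies(500);
 *	taskqid_t id = taskq_dispatch_delay(tq, my_func, my_arg,
 *	    TQ_SLEEP, expire);
 *	if (id == TASKQID_INVALID)
 *		handle_dispatch_failure(my_arg);
 *
 * The task sits on the delay list until the timer fires, after which
 * task_expire() moves it to the priority list for execution.
 */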

void
taskq_dispatch_ent(taskq_t *tq, task_func_t func, void *arg, uint_t flags,
    taskq_ent_t *t)
{
	unsigned long irqflags;
	ASSERT(tq);
	ASSERT(func);

	spin_lock_irqsave_nested(&tq->tq_lock, irqflags,
	    tq->tq_lock_class);

	/* Taskq being destroyed and all tasks drained */
	if (!(tq->tq_flags & TASKQ_ACTIVE)) {
		t->tqent_id = TASKQID_INVALID;
		goto out;
	}

	if ((flags & TQ_NOQUEUE) && (tq->tq_nactive == tq->tq_nthreads)) {
		/* Dynamic taskq may be able to spawn another thread */
		if (taskq_thread_spawn(tq) == 0)
			goto out;
		flags |= TQ_FRONT;
	}

	spin_lock(&t->tqent_lock);

	/*
	 * Make sure the entry is not on some other taskq; it is important
	 * to ASSERT() this while holding the lock.
	 */
	ASSERT(taskq_empty_ent(t));

	/*
	 * Mark it as a prealloc'd task. This is important
	 * to ensure that we don't free it later.
	 */
	t->tqent_flags |= TQENT_FLAG_PREALLOC;

	/* Queue to the priority list instead of the pending list */
	if (flags & TQ_FRONT)
		list_add_tail(&t->tqent_list, &tq->tq_prio_list);
	else
		list_add_tail(&t->tqent_list, &tq->tq_pend_list);

	t->tqent_id = tq->tq_next_id;
	tq->tq_next_id++;
	t->tqent_func = func;
	t->tqent_arg = arg;
	t->tqent_taskq = tq;

	t->tqent_birth = jiffies;
	DTRACE_PROBE1(taskq_ent__birth, taskq_ent_t *, t);

	spin_unlock(&t->tqent_lock);

	wake_up(&tq->tq_work_waitq);

	/* Spawn additional taskq threads if required. */
	if (tq->tq_nactive == tq->tq_nthreads)
		(void) taskq_thread_spawn(tq);
out:
	spin_unlock_irqrestore(&tq->tq_lock, irqflags);
}
EXPORT_SYMBOL(taskq_dispatch_ent);

int
taskq_empty_ent(taskq_ent_t *t)
{
	return (list_empty(&t->tqent_list));
}
EXPORT_SYMBOL(taskq_empty_ent);

void
taskq_init_ent(taskq_ent_t *t)
{
	spin_lock_init(&t->tqent_lock);
	init_waitqueue_head(&t->tqent_waitq);
	timer_setup(&t->tqent_timer, NULL, 0);
	INIT_LIST_HEAD(&t->tqent_list);
	t->tqent_id = 0;
	t->tqent_func = NULL;
	t->tqent_arg = NULL;
	t->tqent_flags = 0;
	t->tqent_taskq = NULL;
}
EXPORT_SYMBOL(taskq_init_ent);
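
/*
 * Illustrative sketch of preallocated entry dispatch (hypothetical caller
 * code, not compiled as part of this file). A taskq_ent_t embedded in a
 * caller-owned structure avoids the allocation in task_alloc(); my_obj_t
 * and my_func are made-up names:
 *
 *	typedef struct my_obj {
 *		taskq_ent_t	mo_tqent;
 *		int		mo_data;
 *	} my_obj_t;
 *
 *	taskq_init_ent(&obj->mo_tqent);
 *	taskq_dispatch_ent(tq, my_func, obj, 0, &obj->mo_tqent);
 *
 * The entry is flagged TQENT_FLAG_PREALLOC so the taskq never frees it;
 * the caller must keep obj alive until the dispatched function has
 * completed.
 */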

/*
 * Return the next pending task, preference is given to tasks on the
 * priority list which were dispatched with TQ_FRONT.
 */
static taskq_ent_t *
taskq_next_ent(taskq_t *tq)
{
	struct list_head *list;

	if (!list_empty(&tq->tq_prio_list))
		list = &tq->tq_prio_list;
	else if (!list_empty(&tq->tq_pend_list))
		list = &tq->tq_pend_list;
	else
		return (NULL);

	return (list_entry(list->next, taskq_ent_t, tqent_list));
}

/*
 * Spawns a new thread for the specified taskq.
 */
static void
taskq_thread_spawn_task(void *arg)
{
	taskq_t *tq = (taskq_t *)arg;
	unsigned long flags;

	if (taskq_thread_create(tq) == NULL) {
		/* restore spawning count if failed */
		spin_lock_irqsave_nested(&tq->tq_lock, flags,
		    tq->tq_lock_class);
		tq->tq_nspawn--;
		spin_unlock_irqrestore(&tq->tq_lock, flags);
	}
}

/*
 * Spawn additional threads for dynamic taskqs (TASKQ_DYNAMIC) when the
 * current number of threads is insufficient to handle the pending tasks.
 * These new threads must be created by the dedicated dynamic_taskq to avoid
 * deadlocks between thread creation and memory reclaim. The system_taskq,
 * which is also a dynamic taskq, cannot be safely used for this.
 */
static int
taskq_thread_spawn(taskq_t *tq)
{
	int spawning = 0;

	if (!(tq->tq_flags & TASKQ_DYNAMIC))
		return (0);

	tq->lastspawnstop = jiffies;
	if ((tq->tq_nthreads + tq->tq_nspawn < tq->tq_maxthreads) &&
	    (tq->tq_flags & TASKQ_ACTIVE)) {
		spawning = (++tq->tq_nspawn);
		taskq_dispatch(dynamic_taskq, taskq_thread_spawn_task,
		    tq, TQ_NOSLEEP);
	}

	return (spawning);
}

/*
 * Threads in a dynamic taskq may exit once there is no more work to do.
 * To prevent threads from being created and destroyed too often, limit
 * the exit rate to one per spl_taskq_thread_timeout_ms.
 *
 * The first thread in the thread list is treated as the primary thread.
 * There is nothing special about the primary thread, but in order to avoid
 * all of the taskq pids changing we opt to make it long running.
 */
static int
taskq_thread_should_stop(taskq_t *tq, taskq_thread_t *tqt)
{
	ASSERT(!taskq_next_ent(tq));
	if (!(tq->tq_flags & TASKQ_DYNAMIC) || !spl_taskq_thread_dynamic)
		return (0);
	if (!(tq->tq_flags & TASKQ_ACTIVE))
		return (1);
	if (list_first_entry(&(tq->tq_thread_list), taskq_thread_t,
	    tqt_thread_list) == tqt)
		return (0);
	ASSERT3U(tq->tq_nthreads, >, 1);
	if (tq->tq_nspawn != 0)
		return (0);
	if (time_before(jiffies, tq->lastspawnstop +
	    msecs_to_jiffies(spl_taskq_thread_timeout_ms)))
		return (0);
	tq->lastspawnstop = jiffies;
	return (1);
}

static int
taskq_thread(void *args)
{
	DECLARE_WAITQUEUE(wait, current);
	sigset_t blocked;
	taskq_thread_t *tqt = args;
	taskq_t *tq;
	taskq_ent_t *t;
	int seq_tasks = 0;
	unsigned long flags;
	taskq_ent_t dup_task = {};

	ASSERT(tqt);
	ASSERT(tqt->tqt_tq);
	tq = tqt->tqt_tq;
	current->flags |= PF_NOFREEZE;

	(void) spl_fstrans_mark();

	sigfillset(&blocked);
	sigprocmask(SIG_BLOCK, &blocked, NULL);
	flush_signals(current);

	tsd_set(taskq_tsd, tq);
	spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
	/*
	 * If we are dynamically spawned, decrease spawning count. Note that
	 * we could be created during taskq_create, in which case we shouldn't
	 * do the decrement. But it's fine because taskq_create will reset
	 * tq_nspawn later.
	 */
	if (tq->tq_flags & TASKQ_DYNAMIC)
		tq->tq_nspawn--;

	/* Immediately exit if more threads than allowed were created. */
	if (tq->tq_nthreads >= tq->tq_maxthreads)
		goto error;

	tq->tq_nthreads++;
	list_add_tail(&tqt->tqt_thread_list, &tq->tq_thread_list);
	wake_up(&tq->tq_wait_waitq);
	set_current_state(TASK_INTERRUPTIBLE);

	while (!kthread_should_stop()) {

		if (list_empty(&tq->tq_pend_list) &&
		    list_empty(&tq->tq_prio_list)) {

			if (taskq_thread_should_stop(tq, tqt))
				break;

			add_wait_queue_exclusive(&tq->tq_work_waitq, &wait);
			spin_unlock_irqrestore(&tq->tq_lock, flags);

			schedule();
			seq_tasks = 0;

			spin_lock_irqsave_nested(&tq->tq_lock, flags,
			    tq->tq_lock_class);
			remove_wait_queue(&tq->tq_work_waitq, &wait);
		} else {
			__set_current_state(TASK_RUNNING);
		}

		if ((t = taskq_next_ent(tq)) != NULL) {
			list_del_init(&t->tqent_list);

			/*
			 * A TQENT_FLAG_PREALLOC task may be reused or freed
			 * during the task function call. Store tqent_id and
			 * tqent_flags here.
			 *
			 * Also use an on stack taskq_ent_t for tqt_task
			 * assignment in this case; we want to make sure
			 * to duplicate all fields, so the values are
			 * correct when it's accessed via DTRACE_PROBE*.
			 */
			tqt->tqt_id = t->tqent_id;
			tqt->tqt_flags = t->tqent_flags;

			if (t->tqent_flags & TQENT_FLAG_PREALLOC) {
				dup_task = *t;
				t = &dup_task;
			}
			tqt->tqt_task = t;

			taskq_insert_in_order(tq, tqt);
			tq->tq_nactive++;
			spin_unlock_irqrestore(&tq->tq_lock, flags);

			DTRACE_PROBE1(taskq_ent__start, taskq_ent_t *, t);

			/* Perform the requested task */
			t->tqent_func(t->tqent_arg);

			DTRACE_PROBE1(taskq_ent__finish, taskq_ent_t *, t);

			spin_lock_irqsave_nested(&tq->tq_lock, flags,
			    tq->tq_lock_class);
			tq->tq_nactive--;
			list_del_init(&tqt->tqt_active_list);
			tqt->tqt_task = NULL;

			/* For prealloc'd tasks, we don't free anything. */
			if (!(tqt->tqt_flags & TQENT_FLAG_PREALLOC))
				task_done(tq, t);

			/*
			 * When the current lowest outstanding taskqid is
			 * done calculate the new lowest outstanding id
			 */
			if (tq->tq_lowest_id == tqt->tqt_id) {
				tq->tq_lowest_id = taskq_lowest_id(tq);
				ASSERT3S(tq->tq_lowest_id, >, tqt->tqt_id);
			}

			/* Spawn additional taskq threads if required. */
			if ((++seq_tasks) > spl_taskq_thread_sequential &&
			    taskq_thread_spawn(tq))
				seq_tasks = 0;

			tqt->tqt_id = TASKQID_INVALID;
			tqt->tqt_flags = 0;
			wake_up_all(&tq->tq_wait_waitq);
		}

		set_current_state(TASK_INTERRUPTIBLE);

	}

	__set_current_state(TASK_RUNNING);
	tq->tq_nthreads--;
	list_del_init(&tqt->tqt_thread_list);
error:
	kmem_free(tqt, sizeof (taskq_thread_t));
	spin_unlock_irqrestore(&tq->tq_lock, flags);

	tsd_set(taskq_tsd, NULL);
	thread_exit();

	return (0);
}

static taskq_thread_t *
taskq_thread_create(taskq_t *tq)
{
	static int last_used_cpu = 0;
	taskq_thread_t *tqt;

	tqt = kmem_alloc(sizeof (*tqt), KM_PUSHPAGE);
	INIT_LIST_HEAD(&tqt->tqt_thread_list);
	INIT_LIST_HEAD(&tqt->tqt_active_list);
	tqt->tqt_tq = tq;
	tqt->tqt_id = TASKQID_INVALID;

	tqt->tqt_thread = spl_kthread_create(taskq_thread, tqt,
	    "%s", tq->tq_name);
	if (tqt->tqt_thread == NULL) {
		kmem_free(tqt, sizeof (taskq_thread_t));
		return (NULL);
	}

	if (spl_taskq_thread_bind) {
		last_used_cpu = (last_used_cpu + 1) % num_online_cpus();
		kthread_bind(tqt->tqt_thread, last_used_cpu);
	}

	if (spl_taskq_thread_priority)
		set_user_nice(tqt->tqt_thread, PRIO_TO_NICE(tq->tq_pri));

	wake_up_process(tqt->tqt_thread);

	return (tqt);
}

taskq_t *
taskq_create(const char *name, int threads_arg, pri_t pri,
    int minalloc, int maxalloc, uint_t flags)
{
	taskq_t *tq;
	taskq_thread_t *tqt;
	int count = 0, rc = 0, i;
	unsigned long irqflags;
	int nthreads = threads_arg;

	ASSERT(name != NULL);
	ASSERT(minalloc >= 0);
	ASSERT(!(flags & (TASKQ_CPR_SAFE))); /* Unsupported */

	/* Scale the number of threads using nthreads as a percentage */
	if (flags & TASKQ_THREADS_CPU_PCT) {
		ASSERT(nthreads <= 100);
		ASSERT(nthreads >= 0);
		nthreads = MIN(threads_arg, 100);
		nthreads = MAX(nthreads, 0);
		nthreads = MAX((num_online_cpus() * nthreads) / 100, 1);
	}

	tq = kmem_alloc(sizeof (*tq), KM_PUSHPAGE);
	if (tq == NULL)
		return (NULL);

	tq->tq_hp_support = B_FALSE;
#ifdef HAVE_CPU_HOTPLUG
	if (flags & TASKQ_THREADS_CPU_PCT) {
		tq->tq_hp_support = B_TRUE;
		if (cpuhp_state_add_instance_nocalls(spl_taskq_cpuhp_state,
		    &tq->tq_hp_cb_node) != 0) {
			kmem_free(tq, sizeof (*tq));
			return (NULL);
		}
	}
#endif

	spin_lock_init(&tq->tq_lock);
	INIT_LIST_HEAD(&tq->tq_thread_list);
	INIT_LIST_HEAD(&tq->tq_active_list);
	tq->tq_name = kmem_strdup(name);
	tq->tq_nactive = 0;
	tq->tq_nthreads = 0;
	tq->tq_nspawn = 0;
	tq->tq_maxthreads = nthreads;
	tq->tq_cpu_pct = threads_arg;
	tq->tq_pri = pri;
	tq->tq_minalloc = minalloc;
	tq->tq_maxalloc = maxalloc;
	tq->tq_nalloc = 0;
	tq->tq_flags = (flags | TASKQ_ACTIVE);
	tq->tq_next_id = TASKQID_INITIAL;
	tq->tq_lowest_id = TASKQID_INITIAL;
	tq->lastspawnstop = jiffies;
	INIT_LIST_HEAD(&tq->tq_free_list);
	INIT_LIST_HEAD(&tq->tq_pend_list);
	INIT_LIST_HEAD(&tq->tq_prio_list);
	INIT_LIST_HEAD(&tq->tq_delay_list);
	init_waitqueue_head(&tq->tq_work_waitq);
	init_waitqueue_head(&tq->tq_wait_waitq);
	tq->tq_lock_class = TQ_LOCK_GENERAL;
	INIT_LIST_HEAD(&tq->tq_taskqs);

	if (flags & TASKQ_PREPOPULATE) {
		spin_lock_irqsave_nested(&tq->tq_lock, irqflags,
		    tq->tq_lock_class);

		for (i = 0; i < minalloc; i++)
			task_done(tq, task_alloc(tq, TQ_PUSHPAGE | TQ_NEW,
			    &irqflags));

		spin_unlock_irqrestore(&tq->tq_lock, irqflags);
	}

	if ((flags & TASKQ_DYNAMIC) && spl_taskq_thread_dynamic)
		nthreads = 1;

	for (i = 0; i < nthreads; i++) {
		tqt = taskq_thread_create(tq);
		if (tqt == NULL)
			rc = 1;
		else
			count++;
	}

	/* Wait for all threads to be started before potential destroy */
	wait_event(tq->tq_wait_waitq, tq->tq_nthreads == count);
	/*
	 * The threads started above may have decremented tq_nspawn, but they
	 * are not dynamically spawned threads, so reset it to 0 here.
	 */
	tq->tq_nspawn = 0;

	if (rc) {
		taskq_destroy(tq);
		tq = NULL;
	} else {
		down_write(&tq_list_sem);
		tq->tq_instance = taskq_find_by_name(name) + 1;
		list_add_tail(&tq->tq_taskqs, &tq_list);
		up_write(&tq_list_sem);
	}

	return (tq);
}
EXPORT_SYMBOL(taskq_create);
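
/*
 * Illustrative taskq lifecycle sketch (hypothetical consumer code, not
 * compiled as part of this file; "my_taskq", my_func, and my_arg are
 * made-up names):
 *
 *	taskq_t *tq = taskq_create("my_taskq", 4, maxclsyspri,
 *	    4, INT_MAX, TASKQ_PREPOPULATE);
 *	if (tq == NULL)
 *		return (ENOMEM);
 *
 *	(void) taskq_dispatch(tq, my_func, my_arg, TQ_SLEEP);
 *
 *	taskq_wait(tq);
 *	taskq_destroy(tq);
 *
 * With TASKQ_THREADS_CPU_PCT the second argument is interpreted as a
 * percentage of the online CPUs rather than an absolute thread count.
 */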

void
taskq_destroy(taskq_t *tq)
{
	struct task_struct *thread;
	taskq_thread_t *tqt;
	taskq_ent_t *t;
	unsigned long flags;

	ASSERT(tq);
	spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
	tq->tq_flags &= ~TASKQ_ACTIVE;
	spin_unlock_irqrestore(&tq->tq_lock, flags);

#ifdef HAVE_CPU_HOTPLUG
	if (tq->tq_hp_support) {
		VERIFY0(cpuhp_state_remove_instance_nocalls(
		    spl_taskq_cpuhp_state, &tq->tq_hp_cb_node));
	}
#endif
	/*
	 * When TASKQ_ACTIVE is clear new tasks may not be added nor may
	 * new worker threads be spawned for dynamic taskq.
	 */
	if (dynamic_taskq != NULL)
		taskq_wait_outstanding(dynamic_taskq, 0);

	taskq_wait(tq);

	/* remove taskq from global list used by the kstats */
	down_write(&tq_list_sem);
	list_del(&tq->tq_taskqs);
	up_write(&tq_list_sem);

	spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
	/* wait for spawning threads to insert themselves to the list */
	while (tq->tq_nspawn) {
		spin_unlock_irqrestore(&tq->tq_lock, flags);
		schedule_timeout_interruptible(1);
		spin_lock_irqsave_nested(&tq->tq_lock, flags,
		    tq->tq_lock_class);
	}

	/*
	 * Signal each thread to exit and block until it does. Each thread
	 * is responsible for removing itself from the list and freeing its
	 * taskq_thread_t. This allows for idle threads to opt to remove
	 * themselves from the taskq. They can be recreated as needed.
	 */
	while (!list_empty(&tq->tq_thread_list)) {
		tqt = list_entry(tq->tq_thread_list.next,
		    taskq_thread_t, tqt_thread_list);
		thread = tqt->tqt_thread;
		spin_unlock_irqrestore(&tq->tq_lock, flags);

		kthread_stop(thread);

		spin_lock_irqsave_nested(&tq->tq_lock, flags,
		    tq->tq_lock_class);
	}

	while (!list_empty(&tq->tq_free_list)) {
		t = list_entry(tq->tq_free_list.next, taskq_ent_t, tqent_list);

		ASSERT(!(t->tqent_flags & TQENT_FLAG_PREALLOC));

		list_del_init(&t->tqent_list);
		task_free(tq, t);
	}

	ASSERT0(tq->tq_nthreads);
	ASSERT0(tq->tq_nalloc);
	ASSERT0(tq->tq_nspawn);
	ASSERT(list_empty(&tq->tq_thread_list));
	ASSERT(list_empty(&tq->tq_active_list));
	ASSERT(list_empty(&tq->tq_free_list));
	ASSERT(list_empty(&tq->tq_pend_list));
	ASSERT(list_empty(&tq->tq_prio_list));
	ASSERT(list_empty(&tq->tq_delay_list));

	spin_unlock_irqrestore(&tq->tq_lock, flags);

	kmem_strfree(tq->tq_name);
	kmem_free(tq, sizeof (taskq_t));
}
EXPORT_SYMBOL(taskq_destroy);

/*
 * Create a taskq with a specified number of pool threads. Allocate
 * and return an array of nthreads kthread_t pointers, one for each
 * thread in the pool. The array is not ordered and must be freed
 * by the caller.
 */
taskq_t *
taskq_create_synced(const char *name, int nthreads, pri_t pri,
    int minalloc, int maxalloc, uint_t flags, kthread_t ***ktpp)
{
	taskq_t *tq;
	taskq_thread_t *tqt;
	int i = 0;
	kthread_t **kthreads = kmem_zalloc(sizeof (*kthreads) * nthreads,
	    KM_SLEEP);

	flags &= ~(TASKQ_DYNAMIC | TASKQ_THREADS_CPU_PCT | TASKQ_DC_BATCH);

	/* taskq_create spawns all the threads before returning */
	tq = taskq_create(name, nthreads, minclsyspri, nthreads, INT_MAX,
	    flags | TASKQ_PREPOPULATE);
	VERIFY(tq != NULL);
	VERIFY(tq->tq_nthreads == nthreads);

	list_for_each_entry(tqt, &tq->tq_thread_list, tqt_thread_list) {
		kthreads[i] = tqt->tqt_thread;
		i++;
	}

	ASSERT3S(i, ==, nthreads);
	*ktpp = kthreads;

	return (tq);
}
EXPORT_SYMBOL(taskq_create_synced);

static unsigned int spl_taskq_kick = 0;

/*
 * 2.6.36 API Change
 * module_param_cb is introduced to take kernel_param_ops and
 * module_param_call is marked as obsolete. Also set and get operations
 * were changed to take a 'const struct kernel_param *'.
 */
static int
#ifdef module_param_cb
param_set_taskq_kick(const char *val, const struct kernel_param *kp)
#else
param_set_taskq_kick(const char *val, struct kernel_param *kp)
#endif
{
	int ret;
	taskq_t *tq = NULL;
	taskq_ent_t *t;
	unsigned long flags;

	ret = param_set_uint(val, kp);
	if (ret < 0 || !spl_taskq_kick)
		return (ret);
	/* reset value */
	spl_taskq_kick = 0;

	down_read(&tq_list_sem);
	list_for_each_entry(tq, &tq_list, tq_taskqs) {
		spin_lock_irqsave_nested(&tq->tq_lock, flags,
		    tq->tq_lock_class);
		/* Check if the first pending is older than 5 seconds */
		t = taskq_next_ent(tq);
		if (t && time_after(jiffies, t->tqent_birth + 5*HZ)) {
			(void) taskq_thread_spawn(tq);
			printk(KERN_INFO "spl: Kicked taskq %s/%d\n",
			    tq->tq_name, tq->tq_instance);
		}
		spin_unlock_irqrestore(&tq->tq_lock, flags);
	}
	up_read(&tq_list_sem);
	return (ret);
}

#ifdef module_param_cb
static const struct kernel_param_ops param_ops_taskq_kick = {
	.set = param_set_taskq_kick,
	.get = param_get_uint,
};
module_param_cb(spl_taskq_kick, &param_ops_taskq_kick, &spl_taskq_kick, 0644);
#else
module_param_call(spl_taskq_kick, param_set_taskq_kick, param_get_uint,
    &spl_taskq_kick, 0644);
#endif
MODULE_PARM_DESC(spl_taskq_kick,
	"Write nonzero to kick stuck taskqs to spawn more threads");

#ifdef HAVE_CPU_HOTPLUG
/*
 * This callback will be called exactly once for each core that comes online,
 * for each dynamic taskq. We attempt to expand taskqs that have
 * TASKQ_THREADS_CPU_PCT set. We need to redo the percentage calculation every
 * time, to correctly determine whether or not to add a thread.
 */
static int
spl_taskq_expand(unsigned int cpu, struct hlist_node *node)
{
	taskq_t *tq = list_entry(node, taskq_t, tq_hp_cb_node);
	unsigned long flags;
	int err = 0;

	ASSERT(tq);
	spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);

	if (!(tq->tq_flags & TASKQ_ACTIVE)) {
		spin_unlock_irqrestore(&tq->tq_lock, flags);
		return (err);
	}

	ASSERT(tq->tq_flags & TASKQ_THREADS_CPU_PCT);
	int nthreads = MIN(tq->tq_cpu_pct, 100);
	nthreads = MAX(((num_online_cpus() + 1) * nthreads) / 100, 1);
	tq->tq_maxthreads = nthreads;

	if (!((tq->tq_flags & TASKQ_DYNAMIC) && spl_taskq_thread_dynamic) &&
	    tq->tq_maxthreads > tq->tq_nthreads) {
		spin_unlock_irqrestore(&tq->tq_lock, flags);
		taskq_thread_t *tqt = taskq_thread_create(tq);
		if (tqt == NULL)
			err = -1;
		return (err);
	}
	spin_unlock_irqrestore(&tq->tq_lock, flags);
	return (err);
}

/*
 * While we don't support offlining CPUs, it is possible that CPUs will fail
 * to online successfully. We do need to be able to handle this case
 * gracefully.
 */
static int
spl_taskq_prepare_down(unsigned int cpu, struct hlist_node *node)
{
	taskq_t *tq = list_entry(node, taskq_t, tq_hp_cb_node);
	unsigned long flags;

	ASSERT(tq);
	spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);

	if (!(tq->tq_flags & TASKQ_ACTIVE))
		goto out;

	ASSERT(tq->tq_flags & TASKQ_THREADS_CPU_PCT);
	int nthreads = MIN(tq->tq_cpu_pct, 100);
	nthreads = MAX(((num_online_cpus()) * nthreads) / 100, 1);
	tq->tq_maxthreads = nthreads;

	if (!((tq->tq_flags & TASKQ_DYNAMIC) && spl_taskq_thread_dynamic) &&
	    tq->tq_maxthreads < tq->tq_nthreads) {
		ASSERT3U(tq->tq_maxthreads, ==, tq->tq_nthreads - 1);
		taskq_thread_t *tqt = list_entry(tq->tq_thread_list.next,
		    taskq_thread_t, tqt_thread_list);
		struct task_struct *thread = tqt->tqt_thread;
		spin_unlock_irqrestore(&tq->tq_lock, flags);

		kthread_stop(thread);

		return (0);
	}

out:
	spin_unlock_irqrestore(&tq->tq_lock, flags);
	return (0);
}
#endif

int
spl_taskq_init(void)
{
	init_rwsem(&tq_list_sem);
	tsd_create(&taskq_tsd, NULL);

#ifdef HAVE_CPU_HOTPLUG
	spl_taskq_cpuhp_state = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
	    "fs/spl_taskq:online", spl_taskq_expand, spl_taskq_prepare_down);
#endif

	system_taskq = taskq_create("spl_system_taskq", MAX(boot_ncpus, 64),
	    maxclsyspri, boot_ncpus, INT_MAX, TASKQ_PREPOPULATE|TASKQ_DYNAMIC);
	if (system_taskq == NULL)
		return (-ENOMEM);

	system_delay_taskq = taskq_create("spl_delay_taskq", MAX(boot_ncpus, 4),
	    maxclsyspri, boot_ncpus, INT_MAX, TASKQ_PREPOPULATE|TASKQ_DYNAMIC);
	if (system_delay_taskq == NULL) {
#ifdef HAVE_CPU_HOTPLUG
		cpuhp_remove_multi_state(spl_taskq_cpuhp_state);
#endif
		taskq_destroy(system_taskq);
		return (-ENOMEM);
	}

	dynamic_taskq = taskq_create("spl_dynamic_taskq", 1,
	    maxclsyspri, boot_ncpus, INT_MAX, TASKQ_PREPOPULATE);
	if (dynamic_taskq == NULL) {
#ifdef HAVE_CPU_HOTPLUG
		cpuhp_remove_multi_state(spl_taskq_cpuhp_state);
#endif
		taskq_destroy(system_taskq);
		taskq_destroy(system_delay_taskq);
		return (-ENOMEM);
	}

	/*
	 * This is used to annotate tq_lock, so
	 *   taskq_dispatch -> taskq_thread_spawn -> taskq_dispatch
	 * does not trigger a lockdep warning re: possible recursive locking
	 */
	dynamic_taskq->tq_lock_class = TQ_LOCK_DYNAMIC;

	return (0);
}

void
spl_taskq_fini(void)
{
	taskq_destroy(dynamic_taskq);
	dynamic_taskq = NULL;

	taskq_destroy(system_delay_taskq);
	system_delay_taskq = NULL;

	taskq_destroy(system_taskq);
	system_taskq = NULL;

	tsd_destroy(&taskq_tsd);

#ifdef HAVE_CPU_HOTPLUG
	cpuhp_remove_multi_state(spl_taskq_cpuhp_state);
	spl_taskq_cpuhp_state = 0;
#endif
}