/*
 * Task management functions.
 *
 * Copyright 2000-2009 Willy Tarreau <w@1wt.eu>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <string.h>

#include <import/eb32sctree.h>
#include <import/eb32tree.h>

#include <haproxy/api.h>
#include <haproxy/cfgparse.h>
#include <haproxy/fd.h>
#include <haproxy/freq_ctr.h>
#include <haproxy/list.h>
#include <haproxy/pool.h>
#include <haproxy/stream.h>
#include <haproxy/task.h>
#include <haproxy/time.h>
#include <haproxy/tools.h>


DECLARE_POOL(pool_head_task,    "task",    sizeof(struct task));
DECLARE_POOL(pool_head_tasklet, "tasklet", sizeof(struct tasklet));

/* This is the memory pool containing all the signal structs. These
 * structs are used to store each required signal between two tasks.
 */
DECLARE_POOL(pool_head_notification, "notification", sizeof(struct notification));

unsigned int nb_tasks = 0;
volatile unsigned long global_tasks_mask = 0; /* Mask of threads with tasks in the global runqueue */
unsigned int tasks_run_queue = 0;
unsigned int tasks_run_queue_cur = 0;    /* copy of the run queue size */
unsigned int nb_tasks_cur = 0;           /* copy of the tasks count */
unsigned int niced_tasks = 0;            /* number of niced tasks in the run queue */

THREAD_LOCAL struct task_per_thread *sched = &task_per_thread[0]; /* scheduler context for the current thread */

__decl_aligned_spinlock(rq_lock); /* spin lock related to run queue */
__decl_aligned_rwlock(wq_lock);   /* RW lock related to the wait queue */

#ifdef USE_THREAD
struct eb_root timers;      /* sorted timers tree, global */
struct eb_root rqueue;      /* tree constituting the run queue */
int global_rqueue_size;     /* number of elements in the global runqueue */
#endif

static unsigned int rqueue_ticks;  /* insertion count */

struct task_per_thread task_per_thread[MAX_THREADS];


/* Flags the task <t> for immediate destruction and puts it into its first
 * thread's shared tasklet list if not yet queued/running. This will bypass
 * the priority scheduling and make the task show up as fast as possible in
 * the other thread's queue. Note that this operation isn't idempotent and is
 * not supposed to be run on the same task from multiple threads at once. It's
 * the caller's responsibility to make sure it is the only one able to kill the
 * task.
 */
void task_kill(struct task *t)
{
	unsigned short state = t->state;
	unsigned int thr;

	BUG_ON(state & TASK_KILLED);

	while (1) {
		while (state & (TASK_RUNNING | TASK_QUEUED)) {
			/* task already in the queue and about to be executed,
			 * or even currently running. Just add the flag and be
			 * done with it, the process loop will detect it and kill
			 * it. The CAS will fail if we arrive too late.
			 */
			if (_HA_ATOMIC_CAS(&t->state, &state, state | TASK_KILLED))
				return;
		}

		/* We'll have to wake it up, but we must also secure it so that
		 * it doesn't vanish under us. TASK_QUEUED guarantees nobody will
		 * add past us.
		 */
		if (_HA_ATOMIC_CAS(&t->state, &state, state | TASK_QUEUED | TASK_KILLED)) {
			/* Bypass the tree and go directly into the shared tasklet list.
			 * Note: that's a task so it must be accounted for as such. Pick
			 * the task's first thread for the job.
			 */
			thr = my_ffsl(t->thread_mask) - 1;

			/* Beware: tasks that have never run don't have their ->list empty yet! */
			LIST_INIT(&((struct tasklet *)t)->list);
			MT_LIST_ADDQ(&task_per_thread[thr].shared_tasklet_list,
			             (struct mt_list *)&((struct tasklet *)t)->list);
			_HA_ATOMIC_ADD(&tasks_run_queue, 1);
			_HA_ATOMIC_ADD(&task_per_thread[thr].task_list_size, 1);
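			/* if the target thread is currently sleeping in the poller,
			 * wake it up so the killed task is handled quickly.
			 */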
			if (sleeping_thread_mask & (1UL << thr)) {
				_HA_ATOMIC_AND(&sleeping_thread_mask, ~(1UL << thr));
				wake_thread(thr);
			}
			return;
		}
	}
}

/* Puts the task <t> in run queue at a position depending on t->nice. <t> is
 * returned. The nice value assigns boosts in 32ths of the run queue size. A
 * nice value of -1024 sets the task to -tasks_run_queue*32, while a nice value
 * of 1024 sets the task to tasks_run_queue*32. The state flags are cleared, so
 * the caller will have to set its flags after this call.
 * The task must not already be in the run queue. If unsure, use the safer
 * task_wakeup() function.
 */
void __task_wakeup(struct task *t, struct eb_root *root)
{
#ifdef USE_THREAD
	if (root == &rqueue) {
		HA_SPIN_LOCK(TASK_RQ_LOCK, &rq_lock);
	}
#endif
	/* Make sure if the task isn't in the runqueue, nobody inserts it
	 * in the meanwhile.
	 */
	_HA_ATOMIC_ADD(&tasks_run_queue, 1);
#ifdef USE_THREAD
	if (root == &rqueue) {
		global_tasks_mask |= t->thread_mask;
		__ha_barrier_store();
	}
#endif
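	/* the wrapping insertion counter gives the task its chronological
	 * position in the run queue tree.
	 */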
	t->rq.key = _HA_ATOMIC_ADD(&rqueue_ticks, 1);

	if (likely(t->nice)) {
		int offset;

		_HA_ATOMIC_ADD(&niced_tasks, 1);
		offset = t->nice * (int)global.tune.runqueue_depth;
		t->rq.key += offset;
	}

	if (task_profiling_mask & tid_bit)
		t->call_date = now_mono_time();

	eb32sc_insert(root, &t->rq, t->thread_mask);
#ifdef USE_THREAD
	if (root == &rqueue) {
		global_rqueue_size++;
		_HA_ATOMIC_OR(&t->state, TASK_GLOBAL);
		HA_SPIN_UNLOCK(TASK_RQ_LOCK, &rq_lock);
	} else
#endif
	{
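		/* root points to one of the per-thread run queues; recover the
		 * owning thread's index from its offset within task_per_thread[].
		 */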
		int nb = ((void *)root - (void *)&task_per_thread[0].rqueue) / sizeof(task_per_thread[0]);
		task_per_thread[nb].rqueue_size++;
	}
#ifdef USE_THREAD
	/* If all threads that are supposed to handle this task are sleeping,
	 * wake one.
	 */
	if ((((t->thread_mask & all_threads_mask) & sleeping_thread_mask) ==
	     (t->thread_mask & all_threads_mask))) {
		unsigned long m = (t->thread_mask & all_threads_mask) &~ tid_bit;

		m = (m & (m - 1)) ^ m; // keep lowest bit set
		_HA_ATOMIC_AND(&sleeping_thread_mask, ~m);
		wake_thread(my_ffsl(m) - 1);
	}
#endif
	return;
}

/*
 * __task_queue()
 *
 * Inserts a task into wait queue <wq> at the position given by its expiration
 * date. It does not matter if the task was already in the wait queue or not,
 * as it will be unlinked. The task must not have an infinite expiration timer.
 * Last, tasks must not be queued further than the end of the tree, which is
 * between <now_ms> and <now_ms> + 2^31 ms (now+24days in 32bit).
 *
 * This function should not be used directly, it is meant to be called by the
 * inline version of task_queue() which performs a few cheap preliminary tests
 * before deciding to call __task_queue(). Moreover this function doesn't care
 * at all about locking so the caller must be careful when deciding whether to
 * lock or not around this call.
 */
void __task_queue(struct task *task, struct eb_root *wq)
{
	if (likely(task_in_wq(task)))
		__task_unlink_wq(task);

	/* the task is not in the queue now */
	task->wq.key = task->expire;
#ifdef DEBUG_CHECK_INVALID_EXPIRATION_DATES
	if (tick_is_lt(task->wq.key, now_ms))
		/* we're queuing too far away or in the past (most likely) */
		return;
#endif

	eb32_insert(wq, &task->wq);
}

/*
 * Extracts all expired timers from the timer queue, and wakes up all
 * associated tasks.
 */
void wake_expired_tasks()
{
	struct task_per_thread * const tt = sched; // thread's tasks
	int max_processed = global.tune.runqueue_depth;
	struct task *task;
	struct eb32_node *eb;
	__decl_thread(int key);

	while (max_processed-- > 0) {
 lookup_next_local:
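		/* look up the first timer at or after (now - look-back window);
		 * expiration keys wrap, so slightly-past timers are still found.
		 */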
		eb = eb32_lookup_ge(&tt->timers, now_ms - TIMER_LOOK_BACK);
		if (!eb) {
			/* we might have reached the end of the tree, typically because
			 * <now_ms> is in the first half and we're first scanning the last
			 * half. Let's loop back to the beginning of the tree now.
			 */
			eb = eb32_first(&tt->timers);
			if (likely(!eb))
				break;
		}

		/* It is possible that this task was left at an earlier place in the
		 * tree because a recent call to task_queue() has not moved it. This
		 * happens when the new expiration date is later than the old one.
		 * Since it is very unlikely that we reach a timeout anyway, it's a
		 * lot cheaper to proceed like this because we almost never update
		 * the tree. We may also find disabled expiration dates there. Since
		 * we have detached the task from the tree, we simply call task_queue
		 * to take care of this. Note that we might occasionally requeue it at
		 * the same place, before <eb>, so we have to check if this happens,
		 * and adjust <eb>, otherwise we may skip it which is not what we want.
		 * We may also not requeue the task (and not point eb at it) if its
		 * expiration time is not set. We also make sure we leave the real
		 * expiration date for the next task in the queue so that when calling
		 * next_timer_expiry() we're guaranteed to see the next real date and
		 * not the next apparent date. This is in order to avoid useless
		 * wakeups.
		 */

		task = eb32_entry(eb, struct task, wq);
		if (tick_is_expired(task->expire, now_ms)) {
			/* expired task, wake it up */
			__task_unlink_wq(task);
			task_wakeup(task, TASK_WOKEN_TIMER);
		}
		else if (task->expire != eb->key) {
			/* task is not expired but its key doesn't match so let's
			 * update it and skip to next apparently expired task.
			 */
			__task_unlink_wq(task);
			if (tick_isset(task->expire))
				__task_queue(task, &tt->timers);
		}
		else {
			/* task not expired and correctly placed */
			break;
		}
	}

#ifdef USE_THREAD
	if (eb_is_empty(&timers))
		goto leave;

	HA_RWLOCK_RDLOCK(TASK_WQ_LOCK, &wq_lock);
	eb = eb32_lookup_ge(&timers, now_ms - TIMER_LOOK_BACK);
	if (!eb) {
		eb = eb32_first(&timers);
		if (likely(!eb)) {
			HA_RWLOCK_RDUNLOCK(TASK_WQ_LOCK, &wq_lock);
			goto leave;
		}
	}
	key = eb->key;
	HA_RWLOCK_RDUNLOCK(TASK_WQ_LOCK, &wq_lock);

	if (tick_is_lt(now_ms, key))
		goto leave;

	/* There's really something of interest here, let's visit the queue */

	while (1) {
		HA_RWLOCK_WRLOCK(TASK_WQ_LOCK, &wq_lock);
 lookup_next:
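		/* stop once the budget is exhausted; the write lock is still held
		 * here and is released right after the loop.
		 */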
		if (max_processed-- <= 0)
			break;
		eb = eb32_lookup_ge(&timers, now_ms - TIMER_LOOK_BACK);
		if (!eb) {
			/* we might have reached the end of the tree, typically because
			 * <now_ms> is in the first half and we're first scanning the last
			 * half. Let's loop back to the beginning of the tree now.
			 */
			eb = eb32_first(&timers);
			if (likely(!eb))
				break;
		}

		task = eb32_entry(eb, struct task, wq);
		if (tick_is_expired(task->expire, now_ms)) {
			/* expired task, wake it up */
			__task_unlink_wq(task);
			task_wakeup(task, TASK_WOKEN_TIMER);
		}
		else if (task->expire != eb->key) {
			/* task is not expired but its key doesn't match so let's
			 * update it and skip to next apparently expired task.
			 */
			__task_unlink_wq(task);
			if (tick_isset(task->expire))
				__task_queue(task, &timers);
			goto lookup_next;
		}
		else {
			/* task not expired and correctly placed */
			break;
		}
		HA_RWLOCK_WRUNLOCK(TASK_WQ_LOCK, &wq_lock);
	}

	HA_RWLOCK_WRUNLOCK(TASK_WQ_LOCK, &wq_lock);
#endif
 leave:
	return;
}

/* Checks the next timer for the current thread by looking into its own timer
 * list and the global one. It may return TICK_ETERNITY if no timer is present.
 * Note that the next timer might very well be slightly in the past.
 */
int next_timer_expiry()
{
	struct task_per_thread * const tt = sched; // thread's tasks
	struct eb32_node *eb;
	int ret = TICK_ETERNITY;
	__decl_thread(int key = TICK_ETERNITY);

	/* first check in the thread-local timers */
	eb = eb32_lookup_ge(&tt->timers, now_ms - TIMER_LOOK_BACK);
	if (!eb) {
		/* we might have reached the end of the tree, typically because
		 * <now_ms> is in the first half and we're first scanning the last
		 * half. Let's loop back to the beginning of the tree now.
		 */
		eb = eb32_first(&tt->timers);
	}

	if (eb)
		ret = eb->key;

#ifdef USE_THREAD
	if (!eb_is_empty(&timers)) {
		HA_RWLOCK_RDLOCK(TASK_WQ_LOCK, &wq_lock);
		eb = eb32_lookup_ge(&timers, now_ms - TIMER_LOOK_BACK);
		if (!eb)
			eb = eb32_first(&timers);
		if (eb)
			key = eb->key;
		HA_RWLOCK_RDUNLOCK(TASK_WQ_LOCK, &wq_lock);
		if (eb)
			ret = tick_first(ret, key);
	}
#endif
	return ret;
}

/* Walks over tasklet lists sched->tasklets[0..TL_CLASSES-1] and runs at most
 * budget[TL_*] of them. Returns the number of entries effectively processed
 * (tasks and tasklets merged). The count of tasks in the list for the current
 * thread is adjusted.
 */
unsigned int run_tasks_from_lists(unsigned int budgets[])
{
	struct task *(*process)(struct task *t, void *ctx, unsigned short state);
	struct list *tl_queues = sched->tasklets;
	struct task *t;
	uint8_t budget_mask = (1 << TL_CLASSES) - 1;
	unsigned int done = 0;
	unsigned int queue;
	unsigned short state;
	void *ctx;

	for (queue = 0; queue < TL_CLASSES;) {
		sched->current_queue = queue;

		/* global.tune.sched.low-latency is set */
		if (global.tune.options & GTUNE_SCHED_LOW_LATENCY) {
			if (unlikely(sched->tl_class_mask & budget_mask & ((1 << queue) - 1))) {
				/* a lower queue index has tasks again and still has a
				 * budget to run them. Let's switch to it now.
				 */
				queue = (sched->tl_class_mask & 1) ? 0 :
					(sched->tl_class_mask & 2) ? 1 : 2;
				continue;
			}

			if (unlikely(queue > TL_URGENT &&
				     budget_mask & (1 << TL_URGENT) &&
				     !MT_LIST_ISEMPTY(&sched->shared_tasklet_list))) {
				/* an urgent tasklet arrived from another thread */
				break;
			}

			if (unlikely(queue > TL_NORMAL &&
				     budget_mask & (1 << TL_NORMAL) &&
				     ((sched->rqueue_size > 0) ||
				      (global_tasks_mask & tid_bit)))) {
				/* a task was woken up by a bulk tasklet or another thread */
				break;
			}
		}

		if (LIST_ISEMPTY(&tl_queues[queue])) {
			sched->tl_class_mask &= ~(1 << queue);
			queue++;
			continue;
		}

		if (!budgets[queue]) {
			budget_mask &= ~(1 << queue);
			queue++;
			continue;
		}

		budgets[queue]--;
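		/* pick the first entry of this class; both tasks and tasklets are
		 * linked here through the tasklet's list member.
		 */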
		t = (struct task *)LIST_ELEM(tl_queues[queue].n, struct tasklet *, list);
		state = t->state & (TASK_SHARED_WQ|TASK_SELF_WAKING|TASK_KILLED);

		ti->flags &= ~TI_FL_STUCK; // this thread is still running
		activity[tid].ctxsw++;
		ctx = t->context;
		process = t->process;
		t->calls++;
		sched->current = t;

		_HA_ATOMIC_SUB(&tasks_run_queue, 1);

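		/* tasklet fast path: run it in place; tasklets carry no wait queue
		 * nor latency accounting.
		 */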
		if (TASK_IS_TASKLET(t)) {
			LIST_DEL_INIT(&((struct tasklet *)t)->list);
			__ha_barrier_store();
			state = _HA_ATOMIC_XCHG(&t->state, state);
			__ha_barrier_atomic_store();
			process(t, ctx, state);
			done++;
			sched->current = NULL;
			__ha_barrier_store();
			continue;
		}

		LIST_DEL_INIT(&((struct tasklet *)t)->list);
		__ha_barrier_store();
		state = _HA_ATOMIC_XCHG(&t->state, state | TASK_RUNNING);
		__ha_barrier_atomic_store();

		/* OK then this is a regular task */

		_HA_ATOMIC_SUB(&task_per_thread[tid].task_list_size, 1);
		if (unlikely(t->call_date)) {
			uint64_t now_ns = now_mono_time();

			t->lat_time += now_ns - t->call_date;
			t->call_date = now_ns;
		}

		__ha_barrier_store();

		/* Note for below: if TASK_KILLED arrived before we've read the state, we
		 * directly free the task. Otherwise it will be seen after processing and
		 * it's freed on the exit path.
		 */
		if (likely(!(state & TASK_KILLED) && process == process_stream))
			t = process_stream(t, ctx, state);
		else if (!(state & TASK_KILLED) && process != NULL)
			t = process(t, ctx, state);
		else {
			task_unlink_wq(t);
			__task_free(t);
			sched->current = NULL;
			__ha_barrier_store();
			/* We don't want max_processed to be decremented if
			 * we're just freeing a destroyed task, we should only
			 * do so if we really ran a task.
			 */
			continue;
		}
		sched->current = NULL;
		__ha_barrier_store();
		/* If there is a pending state we have to wake up the task
		 * immediately, else we defer it into wait queue
		 */
		if (t != NULL) {
			if (unlikely(t->call_date)) {
				t->cpu_time += now_mono_time() - t->call_date;
				t->call_date = 0;
			}

			state = _HA_ATOMIC_AND(&t->state, ~TASK_RUNNING);
			if (unlikely(state & TASK_KILLED)) {
				task_unlink_wq(t);
				__task_free(t);
			}
			else if (state & TASK_WOKEN_ANY)
				task_wakeup(t, 0);
			else
				task_queue(t);
		}
		done++;
	}
	sched->current_queue = -1;

	return done;
}

/* The run queue is chronologically sorted in a tree. An insertion counter is
 * used to assign a position to each task. This counter may be combined with
 * other variables (eg: nice value) to set the final position in the tree. The
 * counter may wrap without a problem, of course. We then limit the number of
 * tasks processed to 200 in any case, so that general latency remains low and
 * so that task positions have a chance to be considered. The function scans
 * both the global and local run queues and picks the most urgent task between
 * the two. We need to grab the global runqueue lock to touch it so it's taken
 * on the very first access to the global run queue and is released as soon as
 * it reaches the end.
 *
 * The function adjusts <next> if a new event is closer.
 */
void process_runnable_tasks()
{
	struct task_per_thread * const tt = sched;
	struct eb32sc_node *lrq;   // next local run queue entry
	struct eb32sc_node *grq;   // next global run queue entry
	struct task *t;
	const unsigned int default_weights[TL_CLASSES] = {
		[TL_URGENT] = 64, // ~50% of CPU bandwidth for I/O
		[TL_NORMAL] = 48, // ~37% of CPU bandwidth for tasks
		[TL_BULK]   = 16, // ~13% of CPU bandwidth for self-wakers
	};
	unsigned int max[TL_CLASSES]; // max to be run per class
	unsigned int max_total;       // sum of max above
	struct mt_list *tmp_list;
	unsigned int queue;
	int max_processed;

	ti->flags &= ~TI_FL_STUCK; // this thread is still running

	if (!thread_has_tasks()) {
		activity[tid].empty_rq++;
		return;
	}

	tasks_run_queue_cur = tasks_run_queue; /* keep a copy for reporting */
	nb_tasks_cur = nb_tasks;
	max_processed = global.tune.runqueue_depth;

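	/* when niced tasks are queued, only a quarter of the budget is used per
	 * round, presumably so that priorities get re-evaluated more often.
	 */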
	if (likely(niced_tasks))
		max_processed = (max_processed + 3) / 4;

 not_done_yet:
	max[TL_URGENT] = max[TL_NORMAL] = max[TL_BULK] = 0;

	/* urgent tasklets list gets a default weight of ~50% */
	if ((tt->tl_class_mask & (1 << TL_URGENT)) ||
	    !MT_LIST_ISEMPTY(&tt->shared_tasklet_list))
		max[TL_URGENT] = default_weights[TL_URGENT];

	/* normal tasklets list gets a default weight of ~37% */
	if ((tt->tl_class_mask & (1 << TL_NORMAL)) ||
	    (sched->rqueue_size > 0) || (global_tasks_mask & tid_bit))
		max[TL_NORMAL] = default_weights[TL_NORMAL];

	/* bulk tasklets list gets a default weight of ~13% */
	if ((tt->tl_class_mask & (1 << TL_BULK)))
		max[TL_BULK] = default_weights[TL_BULK];

	/* Now compute a fair share of the weights. Total may slightly exceed
	 * 100% due to rounding, this is not a problem. Note that while in
	 * theory the sum cannot be null as we cannot get there without tasklets
	 * to process, in practice it may occasionally happen when multiple writers
	 * conflict and roll back on MT_LIST_ADDQ(shared_tasklet_list), causing
	 * a first MT_LIST_ISEMPTY() to succeed for thread_has_task() and the
	 * one above to finally fail. This is extremely rare and not a problem.
	 */
	max_total = max[TL_URGENT] + max[TL_NORMAL] + max[TL_BULK];
	if (!max_total)
		return;

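	/* scale each class's weight to the processing budget, rounding up so
	 * that no selected class ends up with a zero share.
	 */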
	for (queue = 0; queue < TL_CLASSES; queue++)
		max[queue] = ((unsigned)max_processed * max[queue] + max_total - 1) / max_total;

	lrq = grq = NULL;

	/* pick up to max[TL_NORMAL] regular tasks from prio-ordered run queues */
	/* Note: the grq lock is always held when grq is not null */
	while (tt->task_list_size < max[TL_NORMAL]) {
		if ((global_tasks_mask & tid_bit) && !grq) {
#ifdef USE_THREAD
			HA_SPIN_LOCK(TASK_RQ_LOCK, &rq_lock);
			grq = eb32sc_lookup_ge(&rqueue, rqueue_ticks - TIMER_LOOK_BACK, tid_bit);
			if (unlikely(!grq)) {
				grq = eb32sc_first(&rqueue, tid_bit);
				if (!grq) {
					global_tasks_mask &= ~tid_bit;
					HA_SPIN_UNLOCK(TASK_RQ_LOCK, &rq_lock);
				}
			}
#endif
		}

		/* If a global task is available for this thread, it's in grq
		 * now and the global RQ is locked.
		 */

		if (!lrq) {
			lrq = eb32sc_lookup_ge(&tt->rqueue, rqueue_ticks - TIMER_LOOK_BACK, tid_bit);
			if (unlikely(!lrq))
				lrq = eb32sc_first(&tt->rqueue, tid_bit);
		}

		if (!lrq && !grq)
			break;

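		/* both trees are keyed by the wrapping insertion counter, so the
		 * signed difference of the keys tells which entry is older.
		 */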
		if (likely(!grq || (lrq && (int)(lrq->key - grq->key) <= 0))) {
			t = eb32sc_entry(lrq, struct task, rq);
			lrq = eb32sc_next(lrq, tid_bit);
			__task_unlink_rq(t);
		}
#ifdef USE_THREAD
		else {
			t = eb32sc_entry(grq, struct task, rq);
			grq = eb32sc_next(grq, tid_bit);
			__task_unlink_rq(t);
			if (unlikely(!grq)) {
				grq = eb32sc_first(&rqueue, tid_bit);
				if (!grq) {
					global_tasks_mask &= ~tid_bit;
					HA_SPIN_UNLOCK(TASK_RQ_LOCK, &rq_lock);
				}
			}
		}
#endif

		/* Make sure the entry doesn't appear to be in a list */
		LIST_INIT(&((struct tasklet *)t)->list);
		/* And add it to the local task list */
		tasklet_insert_into_tasklet_list(&tt->tasklets[TL_NORMAL], (struct tasklet *)t);
		tt->tl_class_mask |= 1 << TL_NORMAL;
		_HA_ATOMIC_ADD(&tt->task_list_size, 1);
		activity[tid].tasksw++;
	}

	/* release the rqueue lock */
	if (grq) {
		HA_SPIN_UNLOCK(TASK_RQ_LOCK, &rq_lock);
		grq = NULL;
	}

	/* Merge the list of tasklets woken up by other threads into the
	 * main list.
	 */
	tmp_list = MT_LIST_BEHEAD(&tt->shared_tasklet_list);
	if (tmp_list) {
		LIST_SPLICE_END_DETACHED(&tt->tasklets[TL_URGENT], (struct list *)tmp_list);
		if (!LIST_ISEMPTY(&tt->tasklets[TL_URGENT]))
			tt->tl_class_mask |= 1 << TL_URGENT;
	}

	/* execute tasklets in each queue */
	max_processed -= run_tasks_from_lists(max);

	/* some tasks may have woken other ones up */
	if (max_processed > 0 && thread_has_tasks())
		goto not_done_yet;

	if (tt->tl_class_mask)
		activity[tid].long_rq++;
}

/* create a work list array for <nbthread> threads, using tasks made of
 * function <fct>. The context passed to the function will be the pointer to
 * the thread's work list, which will contain a copy of argument <arg>. The
 * wake up reason will be TASK_WOKEN_OTHER. The pointer to the work_list array
 * is returned on success, or NULL on failure.
 */
struct work_list *work_list_create(int nbthread,
                                   struct task *(*fct)(struct task *, void *, unsigned short),
                                   void *arg)
{
	struct work_list *wl;
	int i;

	wl = calloc(nbthread, sizeof(*wl));
	if (!wl)
		goto fail;

	for (i = 0; i < nbthread; i++) {
		MT_LIST_INIT(&wl[i].head);
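		/* one task per thread, bound to that single thread by its mask */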
		wl[i].task = task_new(1UL << i);
		if (!wl[i].task)
			goto fail;
		wl[i].task->process = fct;
		wl[i].task->context = &wl[i];
		wl[i].arg = arg;
	}
	return wl;

 fail:
	work_list_destroy(wl, nbthread);
	return NULL;
}

/* destroy work list <work> */
void work_list_destroy(struct work_list *work, int nbthread)
{
	int t;

	if (!work)
		return;
	for (t = 0; t < nbthread; t++)
		task_destroy(work[t].task);
	free(work);
}

/*
 * Deletes every task before running the master polling loop.
 */
void mworker_cleantasks()
{
	struct task *t;
	int i;
	struct eb32_node *tmp_wq = NULL;
	struct eb32sc_node *tmp_rq = NULL;

#ifdef USE_THREAD
	/* cleanup the global run queue */
	tmp_rq = eb32sc_first(&rqueue, MAX_THREADS_MASK);
	while (tmp_rq) {
		t = eb32sc_entry(tmp_rq, struct task, rq);
		tmp_rq = eb32sc_next(tmp_rq, MAX_THREADS_MASK);
		task_destroy(t);
	}
	/* cleanup the timers queue */
	tmp_wq = eb32_first(&timers);
	while (tmp_wq) {
		t = eb32_entry(tmp_wq, struct task, wq);
		tmp_wq = eb32_next(tmp_wq);
		task_destroy(t);
	}
#endif
	/* clean the per thread run queue */
	for (i = 0; i < global.nbthread; i++) {
		tmp_rq = eb32sc_first(&task_per_thread[i].rqueue, MAX_THREADS_MASK);
		while (tmp_rq) {
			t = eb32sc_entry(tmp_rq, struct task, rq);
			tmp_rq = eb32sc_next(tmp_rq, MAX_THREADS_MASK);
			task_destroy(t);
		}
		/* cleanup the per thread timers queue */
		tmp_wq = eb32_first(&task_per_thread[i].timers);
		while (tmp_wq) {
			t = eb32_entry(tmp_wq, struct task, wq);
			tmp_wq = eb32_next(tmp_wq);
			task_destroy(t);
		}
	}
}

/* perform minimal initializations */
static void init_task()
{
	int i;

#ifdef USE_THREAD
	memset(&timers, 0, sizeof(timers));
	memset(&rqueue, 0, sizeof(rqueue));
#endif
	memset(&task_per_thread, 0, sizeof(task_per_thread));
	for (i = 0; i < MAX_THREADS; i++) {
		LIST_INIT(&task_per_thread[i].tasklets[TL_URGENT]);
		LIST_INIT(&task_per_thread[i].tasklets[TL_NORMAL]);
		LIST_INIT(&task_per_thread[i].tasklets[TL_BULK]);
		MT_LIST_INIT(&task_per_thread[i].shared_tasklet_list);
	}
}


/* config parser for global "tune.sched.low-latency", accepts "on" or "off" */
static int cfg_parse_tune_sched_low_latency(char **args, int section_type, struct proxy *curpx,
                                            struct proxy *defpx, const char *file, int line,
                                            char **err)
{
	if (too_many_args(1, args, err, NULL))
		return -1;

	if (strcmp(args[1], "on") == 0)
		global.tune.options |= GTUNE_SCHED_LOW_LATENCY;
	else if (strcmp(args[1], "off") == 0)
		global.tune.options &= ~GTUNE_SCHED_LOW_LATENCY;
	else {
		memprintf(err, "'%s' expects either 'on' or 'off' but got '%s'.", args[0], args[1]);
		return -1;
	}
	return 0;
}

/* config keyword parsers */
static struct cfg_kw_list cfg_kws = {ILH, {
	{ CFG_GLOBAL, "tune.sched.low-latency", cfg_parse_tune_sched_low_latency },
	{ 0, NULL, NULL }
}};

INITCALL1(STG_REGISTER, cfg_register_keywords, &cfg_kws);
INITCALL0(STG_PREPARE, init_task);

/*
 * Local variables:
 *  c-indent-level: 8
 *  c-basic-offset: 8
 * End:
 */