/*
 * Task management functions.
 *
 * Copyright 2000-2009 Willy Tarreau <w@1wt.eu>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <string.h>

#include <common/config.h>
#include <common/memory.h>
#include <common/mini-clist.h>
#include <common/standard.h>
#include <common/time.h>
#include <eb32sctree.h>
#include <eb32tree.h>

#include <proto/proxy.h>
#include <proto/stream.h>
#include <proto/task.h>
#include <proto/fd.h>

DECLARE_POOL(pool_head_task, "task", sizeof(struct task));
DECLARE_POOL(pool_head_tasklet, "tasklet", sizeof(struct tasklet));

/* This is the memory pool containing all the signal structs. These
 * structs are used to store each required signal between two tasks.
 */
DECLARE_POOL(pool_head_notification, "notification", sizeof(struct notification));

unsigned int nb_tasks = 0;
volatile unsigned long active_tasks_mask = 0; /* Mask of threads with active tasks */
volatile unsigned long global_tasks_mask = 0; /* Mask of threads with tasks in the global runqueue */
unsigned int tasks_run_queue = 0;
unsigned int tasks_run_queue_cur = 0;    /* copy of the run queue size */
unsigned int nb_tasks_cur = 0;           /* copy of the tasks count */
unsigned int niced_tasks = 0;            /* number of niced tasks in the run queue */

THREAD_LOCAL struct task *curr_task = NULL; /* task currently running or NULL */
THREAD_LOCAL struct eb32sc_node *rq_next = NULL; /* Next task to be potentially run */

__decl_aligned_spinlock(rq_lock); /* spin lock related to run queue */
__decl_aligned_spinlock(wq_lock); /* spin lock related to wait queue */

#ifdef USE_THREAD
struct eb_root timers;  /* sorted timers tree, global */
struct eb_root rqueue;  /* tree constituting the run queue */
int global_rqueue_size; /* Number of elements in the global runqueue */
#endif

static unsigned int rqueue_ticks;  /* insertion count */

struct task_per_thread task_per_thread[MAX_THREADS];

/* Puts the task <t> in run queue at a position depending on t->nice. <t> is
 * returned. The nice value assigns boosts in 32th of the run queue size. A
 * nice value of -1024 sets the task to -tasks_run_queue*32, while a nice value
 * of 1024 sets the task to tasks_run_queue*32. The state flags are cleared, so
 * the caller will have to set its flags after this call.
 * The task must not already be in the run queue. If unsure, use the safer
 * task_wakeup() function.
 */
void __task_wakeup(struct task *t, struct eb_root *root)
{
#ifdef USE_THREAD
        if (root == &rqueue) {
                HA_SPIN_LOCK(TASK_RQ_LOCK, &rq_lock);
        }
#endif
        /* Make sure that if the task isn't in the run queue, nobody inserts
         * it in the meantime.
         */
        HA_ATOMIC_ADD(&tasks_run_queue, 1);
#ifdef USE_THREAD
        if (root == &rqueue) {
                global_tasks_mask |= t->thread_mask;
                __ha_barrier_store();
        }
#endif
        HA_ATOMIC_OR(&active_tasks_mask, t->thread_mask);
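        /* the position in the run queue is derived from a global insertion
         * counter, so tasks are dequeued roughly in wakeup order
         */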
        t->rq.key = HA_ATOMIC_ADD(&rqueue_ticks, 1);

        if (likely(t->nice)) {
                int offset;

                HA_ATOMIC_ADD(&niced_tasks, 1);
                offset = t->nice * (int)global.tune.runqueue_depth;
                t->rq.key += offset;
        }

        if (profiling & HA_PROF_TASKS)
                t->call_date = now_mono_time();

        eb32sc_insert(root, &t->rq, t->thread_mask);
#ifdef USE_THREAD
        if (root == &rqueue) {
                global_rqueue_size++;
                HA_ATOMIC_OR(&t->state, TASK_GLOBAL);
                HA_SPIN_UNLOCK(TASK_RQ_LOCK, &rq_lock);
        } else
#endif
        {
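                /* the task is bound to a local run queue: deduce the owner
                 * thread's index from the offset of <root> within the
                 * task_per_thread[] array
                 */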
                int nb = ((void *)root - (void *)&task_per_thread[0].rqueue) / sizeof(task_per_thread[0]);
                task_per_thread[nb].rqueue_size++;
        }
#ifdef USE_THREAD
        /* If all threads that are supposed to handle this task are sleeping,
         * wake one.
         */
        if ((((t->thread_mask & all_threads_mask) & sleeping_thread_mask) ==
             (t->thread_mask & all_threads_mask))) {
                unsigned long m = (t->thread_mask & all_threads_mask) &~ tid_bit;

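                /* isolate the lowest set bit of <m>: clearing it with
                 * m & (m - 1) and XORing the result back into m leaves only
                 * that bit, which designates the thread to wake up
                 */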
                m = (m & (m - 1)) ^ m; // keep lowest bit set
                HA_ATOMIC_AND(&sleeping_thread_mask, ~m);
                wake_thread(my_ffsl(m) - 1);
        }
#endif
        return;
}

/*
 * __task_queue()
 *
 * Inserts a task into wait queue <wq> at the position given by its expiration
 * date. It does not matter if the task was already in the wait queue or not,
 * as it will be unlinked. The task must not have an infinite expiration timer.
 * Last, tasks must not be queued further than the end of the tree, which is
 * between <now_ms> and <now_ms> + 2^31 ms (now+24days in 32bit).
 *
 * This function should not be used directly, it is meant to be called by the
 * inline version of task_queue() which performs a few cheap preliminary tests
 * before deciding to call __task_queue(). Moreover this function doesn't care
 * at all about locking so the caller must be careful when deciding whether to
 * lock or not around this call.
 */
void __task_queue(struct task *task, struct eb_root *wq)
{
        if (likely(task_in_wq(task)))
                __task_unlink_wq(task);

        /* the task is not in the queue now */
        task->wq.key = task->expire;
#ifdef DEBUG_CHECK_INVALID_EXPIRATION_DATES
        if (tick_is_lt(task->wq.key, now_ms))
                /* we're queuing too far away or in the past (most likely) */
                return;
#endif

        eb32_insert(wq, &task->wq);
}

/*
 * Extracts all expired timers from the timer queue and wakes up all
 * associated tasks. Returns the date of the next event (or eternity).
 */
int wake_expired_tasks()
{
        struct task *task;
        struct eb32_node *eb;
        int ret = TICK_ETERNITY;

        while (1) {
  lookup_next_local:
                eb = eb32_lookup_ge(&task_per_thread[tid].timers, now_ms - TIMER_LOOK_BACK);
                if (!eb) {
                        /* we might have reached the end of the tree, typically because
                         * <now_ms> is in the first half and we're first scanning the last
                         * half. Let's loop back to the beginning of the tree now.
                         */
                        eb = eb32_first(&task_per_thread[tid].timers);
                        if (likely(!eb))
                                break;
                }

                if (tick_is_lt(now_ms, eb->key)) {
                        /* timer not expired yet, revisit it later */
                        ret = eb->key;
                        break;
                }

                /* timer looks expired, detach it from the queue */
                task = eb32_entry(eb, struct task, wq);
                __task_unlink_wq(task);

                /* It is possible that this task was left at an earlier place in the
                 * tree because a recent call to task_queue() has not moved it. This
                 * happens when the new expiration date is later than the old one.
                 * Since it is very unlikely that we reach a timeout anyway, it's a
                 * lot cheaper to proceed like this because we almost never update
                 * the tree. We may also find disabled expiration dates there. Since
                 * we have detached the task from the tree, we simply call task_queue
                 * to take care of this. Note that we might occasionally requeue it at
                 * the same place, before <eb>, so we have to check if this happens,
                 * and adjust <eb>, otherwise we may skip it which is not what we want.
                 * We may also not requeue the task (and not point eb at it) if its
                 * expiration time is not set.
                 */
                if (!tick_is_expired(task->expire, now_ms)) {
                        if (tick_isset(task->expire))
                                __task_queue(task, &task_per_thread[tid].timers);
                        goto lookup_next_local;
                }
                task_wakeup(task, TASK_WOKEN_TIMER);
        }

#ifdef USE_THREAD
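        /* Then process the global timers queue, which is shared between all
         * threads and protected by the wait queue lock.
         */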
        while (1) {
                HA_SPIN_LOCK(TASK_WQ_LOCK, &wq_lock);
  lookup_next:
                eb = eb32_lookup_ge(&timers, now_ms - TIMER_LOOK_BACK);
                if (!eb) {
                        /* we might have reached the end of the tree, typically because
                         * <now_ms> is in the first half and we're first scanning the last
                         * half. Let's loop back to the beginning of the tree now.
                         */
                        eb = eb32_first(&timers);
                        if (likely(!eb))
                                break;
                }

                if (tick_is_lt(now_ms, eb->key)) {
                        /* timer not expired yet, revisit it later */
                        ret = tick_first(ret, eb->key);
                        break;
                }

                /* timer looks expired, detach it from the queue */
                task = eb32_entry(eb, struct task, wq);
                __task_unlink_wq(task);

                /* It is possible that this task was left at an earlier place in the
                 * tree because a recent call to task_queue() has not moved it. This
                 * happens when the new expiration date is later than the old one.
                 * Since it is very unlikely that we reach a timeout anyway, it's a
                 * lot cheaper to proceed like this because we almost never update
                 * the tree. We may also find disabled expiration dates there. Since
                 * we have detached the task from the tree, we simply call task_queue
                 * to take care of this. Note that we might occasionally requeue it at
                 * the same place, before <eb>, so we have to check if this happens,
                 * and adjust <eb>, otherwise we may skip it which is not what we want.
                 * We may also not requeue the task (and not point eb at it) if its
                 * expiration time is not set.
                 */
                if (!tick_is_expired(task->expire, now_ms)) {
                        if (tick_isset(task->expire))
                                __task_queue(task, &timers);
                        goto lookup_next;
                }
                task_wakeup(task, TASK_WOKEN_TIMER);
                HA_SPIN_UNLOCK(TASK_WQ_LOCK, &wq_lock);
        }

        HA_SPIN_UNLOCK(TASK_WQ_LOCK, &wq_lock);
#endif
        return ret;
}

/* The run queue is chronologically sorted in a tree. An insertion counter is
 * used to assign a position to each task. This counter may be combined with
 * other variables (eg: nice value) to set the final position in the tree. The
 * counter may wrap without a problem, of course. We then limit the number of
 * tasks processed to 200 in any case, so that general latency remains low and
 * so that task positions have a chance to be considered. The function scans
 * both the global and local run queues and picks the most urgent task between
 * the two. We need to grab the global runqueue lock to touch it so it's taken
 * on the very first access to the global run queue and is released as soon as
 * it reaches the end.
 *
 * The function adjusts <next> if a new event is closer.
 */
void process_runnable_tasks()
{
        struct eb32sc_node *lrq = NULL; // next local run queue entry
        struct eb32sc_node *grq = NULL; // next global run queue entry
        struct task *t;
        int max_processed;

        if (!(active_tasks_mask & tid_bit)) {
                activity[tid].empty_rq++;
                return;
        }

        tasks_run_queue_cur = tasks_run_queue; /* keep a copy for reporting */
        nb_tasks_cur = nb_tasks;
        max_processed = global.tune.runqueue_depth;

        if (likely(niced_tasks))
                max_processed = (max_processed + 3) / 4;
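        /* when niced tasks are queued, the per-call budget is reduced to a
         * quarter so that task positions are reconsidered more often
         */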

        /* Note: the grq lock is always held when grq is not null */

        while (task_per_thread[tid].task_list_size < max_processed) {
                if ((global_tasks_mask & tid_bit) && !grq) {
#ifdef USE_THREAD
                        HA_SPIN_LOCK(TASK_RQ_LOCK, &rq_lock);
                        grq = eb32sc_lookup_ge(&rqueue, rqueue_ticks - TIMER_LOOK_BACK, tid_bit);
                        if (unlikely(!grq)) {
                                grq = eb32sc_first(&rqueue, tid_bit);
                                if (!grq) {
                                        global_tasks_mask &= ~tid_bit;
                                        HA_SPIN_UNLOCK(TASK_RQ_LOCK, &rq_lock);
                                }
                        }
#endif
                }

                /* If a global task is available for this thread, it's in grq
                 * now and the global RQ is locked.
                 */

                if (!lrq) {
                        lrq = eb32sc_lookup_ge(&task_per_thread[tid].rqueue, rqueue_ticks - TIMER_LOOK_BACK, tid_bit);
                        if (unlikely(!lrq))
                                lrq = eb32sc_first(&task_per_thread[tid].rqueue, tid_bit);
                }

                if (!lrq && !grq)
                        break;

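                /* pick the most urgent task of the two queues; the signed
                 * difference of the keys copes with wrapping of the
                 * insertion counter
                 */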
                if (likely(!grq || (lrq && (int)(lrq->key - grq->key) <= 0))) {
                        t = eb32sc_entry(lrq, struct task, rq);
                        lrq = eb32sc_next(lrq, tid_bit);
                        __task_unlink_rq(t);
                }
#ifdef USE_THREAD
                else {
                        t = eb32sc_entry(grq, struct task, rq);
                        grq = eb32sc_next(grq, tid_bit);
                        __task_unlink_rq(t);
                        if (unlikely(!grq)) {
                                grq = eb32sc_first(&rqueue, tid_bit);
                                if (!grq) {
                                        global_tasks_mask &= ~tid_bit;
                                        HA_SPIN_UNLOCK(TASK_RQ_LOCK, &rq_lock);
                                }
                        }
                }
#endif

                /* And add it to the local task list */
                task_insert_into_tasklet_list(t);
        }

        /* release the rqueue lock */
        if (grq) {
                HA_SPIN_UNLOCK(TASK_RQ_LOCK, &rq_lock);
                grq = NULL;
        }

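        /* no task seems to be queued for this thread anymore: clear our bit
         * from active_tasks_mask, then re-check behind a load barrier in case
         * a global task was queued concurrently, so as not to lose a wakeup
         */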
        if (!(global_tasks_mask & tid_bit) && task_per_thread[tid].rqueue_size == 0) {
                HA_ATOMIC_AND(&active_tasks_mask, ~tid_bit);
                __ha_barrier_load();
                if (global_tasks_mask & tid_bit)
                        HA_ATOMIC_OR(&active_tasks_mask, tid_bit);
        }

        while (max_processed > 0 && !LIST_ISEMPTY(&task_per_thread[tid].task_list)) {
                struct task *t;
                unsigned short state;
                void *ctx;
                struct task *(*process)(struct task *t, void *ctx, unsigned short state);

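                /* the task list chains both tasks and tasklets through their
                 * common initial layout, so the list element can safely be
                 * cast back to a task here
                 */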
                t = (struct task *)LIST_ELEM(task_per_thread[tid].task_list.n, struct tasklet *, list);
                state = (t->state & TASK_SHARED_WQ) | TASK_RUNNING;
                state = HA_ATOMIC_XCHG(&t->state, state);
                __ha_barrier_store();
                __task_remove_from_tasklet_list(t);

                ctx = t->context;
                process = t->process;
                t->calls++;

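                /* with task profiling enabled, account the time spent between
                 * the wakeup and this point as run queue latency
                 */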
                if (unlikely(!TASK_IS_TASKLET(t) && t->call_date)) {
                        uint64_t now_ns = now_mono_time();

                        t->lat_time += now_ns - t->call_date;
                        t->call_date = now_ns;
                }

                curr_task = (struct task *)t;
                if (likely(process == process_stream))
                        t = process_stream(t, ctx, state);
                else {
                        if (t->process != NULL)
                                t = process(TASK_IS_TASKLET(t) ? NULL : t, ctx, state);
                        else {
                                __task_free(t);
                                t = NULL;
                        }
                }
                curr_task = NULL;
                /* If there is a pending state we have to wake up the task
                 * immediately, otherwise we defer it into the wait queue.
                 */
                if (t != NULL) {
                        if (unlikely(!TASK_IS_TASKLET(t) && t->call_date)) {
                                t->cpu_time += now_mono_time() - t->call_date;
                                t->call_date = 0;
                        }

                        state = HA_ATOMIC_AND(&t->state, ~TASK_RUNNING);
                        if (state & TASK_WOKEN_ANY)
                                task_wakeup(t, 0);
                        else
                                task_queue(t);
                }

                max_processed--;
        }

        if (!LIST_ISEMPTY(&task_per_thread[tid].task_list)) {
                HA_ATOMIC_OR(&active_tasks_mask, tid_bit);
                activity[tid].long_rq++;
        }
}

/* create a work list array for <nbthread> threads, using tasks made of
 * function <fct>. The context passed to the function will be the pointer to
 * the thread's work list, which will contain a copy of argument <arg>. The
 * wake up reason will be TASK_WOKEN_OTHER. The pointer to the work_list array
 * is returned on success, otherwise NULL on failure.
 */
struct work_list *work_list_create(int nbthread,
                                   struct task *(*fct)(struct task *, void *, unsigned short),
                                   void *arg)
{
        struct work_list *wl;
        int i;

        wl = calloc(nbthread, sizeof(*wl));
        if (!wl)
                goto fail;

        for (i = 0; i < nbthread; i++) {
                LIST_INIT(&wl[i].head);
                wl[i].task = task_new(1UL << i);
                if (!wl[i].task)
                        goto fail;
                wl[i].task->process = fct;
                wl[i].task->context = &wl[i];
                wl[i].arg = arg;
        }
        return wl;

 fail:
        work_list_destroy(wl, nbthread);
        return NULL;
}
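
/* Example (hypothetical caller, <my_handler> and <my_ctx> are placeholders
 * and not defined here): create one work list per configured thread and
 * release everything on shutdown:
 *
 *     struct work_list *wl = work_list_create(global.nbthread, my_handler, my_ctx);
 *     if (!wl)
 *             return -1;
 *     ...
 *     work_list_destroy(wl, global.nbthread);
 */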

/* destroy work list <work> */
void work_list_destroy(struct work_list *work, int nbthread)
{
        int t;

        if (!work)
                return;
        for (t = 0; t < nbthread; t++) {
                task_delete(work[t].task);
                task_free(work[t].task);
        }
        free(work);
}

/*
 * Delete every task before running the master polling loop
 */
void mworker_cleantasks()
{
        struct task *t;
        int i;
        struct eb32_node *tmp_wq = NULL;
        struct eb32sc_node *tmp_rq = NULL;

#ifdef USE_THREAD
        /* cleanup the global run queue */
        tmp_rq = eb32sc_first(&rqueue, MAX_THREADS_MASK);
        while (tmp_rq) {
                t = eb32sc_entry(tmp_rq, struct task, rq);
                tmp_rq = eb32sc_next(tmp_rq, MAX_THREADS_MASK);
                task_delete(t);
                task_free(t);
        }
        /* cleanup the timers queue */
        tmp_wq = eb32_first(&timers);
        while (tmp_wq) {
                t = eb32_entry(tmp_wq, struct task, wq);
                tmp_wq = eb32_next(tmp_wq);
                task_delete(t);
                task_free(t);
        }
#endif
        /* clean the per thread run queue */
        for (i = 0; i < global.nbthread; i++) {
                tmp_rq = eb32sc_first(&task_per_thread[i].rqueue, MAX_THREADS_MASK);
                while (tmp_rq) {
                        t = eb32sc_entry(tmp_rq, struct task, rq);
                        tmp_rq = eb32sc_next(tmp_rq, MAX_THREADS_MASK);
                        task_delete(t);
                        task_free(t);
                }
                /* cleanup the per thread timers queue */
                tmp_wq = eb32_first(&task_per_thread[i].timers);
                while (tmp_wq) {
                        t = eb32_entry(tmp_wq, struct task, wq);
                        tmp_wq = eb32_next(tmp_wq);
                        task_delete(t);
                        task_free(t);
                }
        }
}

/* perform minimal initializations */
static void init_task()
{
        int i;

#ifdef USE_THREAD
        memset(&timers, 0, sizeof(timers));
        memset(&rqueue, 0, sizeof(rqueue));
#endif
        memset(&task_per_thread, 0, sizeof(task_per_thread));
        for (i = 0; i < MAX_THREADS; i++) {
                LIST_INIT(&task_per_thread[i].task_list);
        }
}

INITCALL0(STG_PREPARE, init_task);

/*
 * Local variables:
 *  c-indent-level: 8
 *  c-basic-offset: 8
 * End:
 */
