/*
 * Task management functions.
 *
 * Copyright 2000-2009 Willy Tarreau <w@1wt.eu>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <string.h>

#include <common/config.h>
#include <common/memory.h>
#include <common/mini-clist.h>
#include <common/standard.h>
#include <common/time.h>
#include <eb32sctree.h>
#include <eb32tree.h>

#include <proto/fd.h>
#include <proto/freq_ctr.h>
#include <proto/proxy.h>
#include <proto/stream.h>
#include <proto/task.h>

DECLARE_POOL(pool_head_task,    "task",    sizeof(struct task));
DECLARE_POOL(pool_head_tasklet, "tasklet", sizeof(struct tasklet));

/* This is the memory pool containing all the signal structs. These
 * structs are used to store each required signal between two tasks.
 */
DECLARE_POOL(pool_head_notification, "notification", sizeof(struct notification));

unsigned int nb_tasks = 0;
volatile unsigned long global_tasks_mask = 0; /* Mask of threads with tasks in the global runqueue */
unsigned int tasks_run_queue = 0;
unsigned int tasks_run_queue_cur = 0;    /* copy of the run queue size */
unsigned int nb_tasks_cur = 0;     /* copy of the tasks count */
unsigned int niced_tasks = 0;      /* number of niced tasks in the run queue */

THREAD_LOCAL struct task_per_thread *sched = &task_per_thread[0]; /* scheduler context for the current thread */

__decl_aligned_spinlock(rq_lock); /* spin lock related to run queue */
__decl_aligned_rwlock(wq_lock);   /* RW lock related to the wait queue */

#ifdef USE_THREAD
struct eb_root timers;      /* sorted timers tree, global */
struct eb_root rqueue;      /* tree constituting the run queue */
int global_rqueue_size; /* Number of elements in the global runqueue */
#endif

static unsigned int rqueue_ticks;  /* insertion count */

struct task_per_thread task_per_thread[MAX_THREADS];

/* Puts the task <t> in run queue at a position depending on t->nice. <t> is
 * returned. The nice value assigns boosts in 32ths of the run queue size. A
 * nice value of -1024 sets the task to -tasks_run_queue*32, while a nice value
 * of 1024 sets the task to tasks_run_queue*32. The state flags are cleared, so
 * the caller will have to set its flags after this call.
 * The task must not already be in the run queue. If unsure, use the safer
 * task_wakeup() function.
 */
void __task_wakeup(struct task *t, struct eb_root *root)
{
#ifdef USE_THREAD
	if (root == &rqueue) {
		HA_SPIN_LOCK(TASK_RQ_LOCK, &rq_lock);
	}
#endif
	/* Make sure that if the task isn't in the runqueue, nobody inserts it
	 * in the meantime.
	 */
	_HA_ATOMIC_ADD(&tasks_run_queue, 1);
#ifdef USE_THREAD
	if (root == &rqueue) {
		global_tasks_mask |= t->thread_mask;
		__ha_barrier_store();
	}
#endif
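	/* the run queue is ordered by insertion ticks: each wakeup takes the
	 * next counter value so the tree stays chronologically sorted
	 */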
	t->rq.key = _HA_ATOMIC_ADD(&rqueue_ticks, 1);

	if (likely(t->nice)) {
		int offset;

		_HA_ATOMIC_ADD(&niced_tasks, 1);
		offset = t->nice * (int)global.tune.runqueue_depth;
		t->rq.key += offset;
	}

	if (task_profiling_mask & tid_bit)
		t->call_date = now_mono_time();

	eb32sc_insert(root, &t->rq, t->thread_mask);
#ifdef USE_THREAD
	if (root == &rqueue) {
		global_rqueue_size++;
		_HA_ATOMIC_OR(&t->state, TASK_GLOBAL);
		HA_SPIN_UNLOCK(TASK_RQ_LOCK, &rq_lock);
	} else
#endif
	{
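		/* <root> is one of the per-thread run queues: derive the owning
		 * thread's index from its offset inside task_per_thread[]
		 */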
		int nb = ((void *)root - (void *)&task_per_thread[0].rqueue) / sizeof(task_per_thread[0]);
		task_per_thread[nb].rqueue_size++;
	}
#ifdef USE_THREAD
	/* If all threads that are supposed to handle this task are sleeping,
	 * wake one.
	 */
	if ((((t->thread_mask & all_threads_mask) & sleeping_thread_mask) ==
	     (t->thread_mask & all_threads_mask))) {
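		/* never pick the current thread, it's the one doing the wakeup */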
		unsigned long m = (t->thread_mask & all_threads_mask) &~ tid_bit;

		m = (m & (m - 1)) ^ m; // keep lowest bit set
		_HA_ATOMIC_AND(&sleeping_thread_mask, ~m);
		wake_thread(my_ffsl(m) - 1);
	}
#endif
	return;
}

/*
 * __task_queue()
 *
 * Inserts a task into wait queue <wq> at the position given by its expiration
 * date. It does not matter if the task was already in the wait queue or not,
 * as it will be unlinked. The task must not have an infinite expiration timer.
 * Last, tasks must not be queued further than the end of the tree, which is
 * between <now_ms> and <now_ms> + 2^31 ms (now+24days in 32bit).
 *
 * This function should not be used directly, it is meant to be called by the
 * inline version of task_queue() which performs a few cheap preliminary tests
 * before deciding to call __task_queue(). Moreover this function doesn't care
 * at all about locking so the caller must be careful when deciding whether to
 * lock or not around this call.
 */
void __task_queue(struct task *task, struct eb_root *wq)
{
	if (likely(task_in_wq(task)))
		__task_unlink_wq(task);

	/* the task is not in the queue now */
	task->wq.key = task->expire;
#ifdef DEBUG_CHECK_INVALID_EXPIRATION_DATES
	if (tick_is_lt(task->wq.key, now_ms))
		/* we're queuing too far away or in the past (most likely) */
		return;
#endif

	eb32_insert(wq, &task->wq);
}
155 
156 /*
157  * Extract all expired timers from the timer queue, and wakes up all
158  * associated tasks. Returns the date of next event (or eternity).
159  */
wake_expired_tasks()160 int wake_expired_tasks()
161 {
162 	struct task_per_thread * const tt = sched; // thread's tasks
163 	int max_processed = global.tune.runqueue_depth;
164 	struct task *task;
165 	struct eb32_node *eb;
166 	int ret = TICK_ETERNITY;
167 	__decl_hathreads(int key);
168 
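	/* first pass: the local per-thread timer tree, which needs no locking */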
	while (max_processed-- > 0) {
  lookup_next_local:
		eb = eb32_lookup_ge(&tt->timers, now_ms - TIMER_LOOK_BACK);
		if (!eb) {
			/* we might have reached the end of the tree, typically because
			 * <now_ms> is in the first half and we're first scanning the last
			 * half. Let's loop back to the beginning of the tree now.
			 */
			eb = eb32_first(&tt->timers);
			if (likely(!eb))
				break;
		}

		if (tick_is_lt(now_ms, eb->key)) {
			/* timer not expired yet, revisit it later */
			ret = eb->key;
			break;
		}

		/* timer looks expired, detach it from the queue */
		task = eb32_entry(eb, struct task, wq);
		__task_unlink_wq(task);

		/* It is possible that this task was left at an earlier place in the
		 * tree because a recent call to task_queue() has not moved it. This
		 * happens when the new expiration date is later than the old one.
		 * Since it is very unlikely that we reach a timeout anyway, it's a
		 * lot cheaper to proceed like this because we almost never update
		 * the tree. We may also find disabled expiration dates there. Since
		 * we have detached the task from the tree, we simply call task_queue
		 * to take care of this. Note that we might occasionally requeue it at
		 * the same place, before <eb>, so we have to check if this happens,
		 * and adjust <eb>, otherwise we may skip it which is not what we want.
		 * We may also not requeue the task (and not point eb at it) if its
		 * expiration time is not set.
		 */
		if (!tick_is_expired(task->expire, now_ms)) {
			if (tick_isset(task->expire))
				__task_queue(task, &tt->timers);
			goto lookup_next_local;
		}
		task_wakeup(task, TASK_WOKEN_TIMER);
	}

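	/* second pass: the global timer tree, shared by all threads and
	 * protected by the wait queue lock
	 */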
#ifdef USE_THREAD
	if (eb_is_empty(&timers))
		goto leave;

	HA_RWLOCK_RDLOCK(TASK_WQ_LOCK, &wq_lock);
	eb = eb32_lookup_ge(&timers, now_ms - TIMER_LOOK_BACK);
	if (!eb) {
		eb = eb32_first(&timers);
		if (likely(!eb)) {
			HA_RWLOCK_RDUNLOCK(TASK_WQ_LOCK, &wq_lock);
			goto leave;
		}
	}
	key = eb->key;
	HA_RWLOCK_RDUNLOCK(TASK_WQ_LOCK, &wq_lock);

	if (tick_is_lt(now_ms, key)) {
		/* timer not expired yet, revisit it later */
		ret = tick_first(ret, key);
		goto leave;
	}

	/* There's really something of interest here, let's visit the queue */

	while (1) {
		HA_RWLOCK_WRLOCK(TASK_WQ_LOCK, &wq_lock);
  lookup_next:
		if (max_processed-- <= 0)
			break;
		eb = eb32_lookup_ge(&timers, now_ms - TIMER_LOOK_BACK);
		if (!eb) {
			/* we might have reached the end of the tree, typically because
			 * <now_ms> is in the first half and we're first scanning the last
			 * half. Let's loop back to the beginning of the tree now.
			 */
			eb = eb32_first(&timers);
			if (likely(!eb))
				break;
		}

		if (tick_is_lt(now_ms, eb->key)) {
			/* timer not expired yet, revisit it later */
			ret = tick_first(ret, eb->key);
			break;
		}

		/* timer looks expired, detach it from the queue */
		task = eb32_entry(eb, struct task, wq);
		__task_unlink_wq(task);

		/* It is possible that this task was left at an earlier place in the
		 * tree because a recent call to task_queue() has not moved it. This
		 * happens when the new expiration date is later than the old one.
		 * Since it is very unlikely that we reach a timeout anyway, it's a
		 * lot cheaper to proceed like this because we almost never update
		 * the tree. We may also find disabled expiration dates there. Since
		 * we have detached the task from the tree, we simply call task_queue
		 * to take care of this. Note that we might occasionally requeue it at
		 * the same place, before <eb>, so we have to check if this happens,
		 * and adjust <eb>, otherwise we may skip it which is not what we want.
		 * We may also not requeue the task (and not point eb at it) if its
		 * expiration time is not set.
		 */
		if (!tick_is_expired(task->expire, now_ms)) {
			if (tick_isset(task->expire))
				__task_queue(task, &timers);
			goto lookup_next;
		}
		task_wakeup(task, TASK_WOKEN_TIMER);
		HA_RWLOCK_WRUNLOCK(TASK_WQ_LOCK, &wq_lock);
	}

	HA_RWLOCK_WRUNLOCK(TASK_WQ_LOCK, &wq_lock);
#endif
leave:
	return ret;
}

/* The run queue is chronologically sorted in a tree. An insertion counter is
 * used to assign a position to each task. This counter may be combined with
 * other variables (eg: nice value) to set the final position in the tree. The
 * counter may wrap without a problem, of course. We then limit the number of
 * tasks processed to tune.runqueue-depth (200 by default) in any case, so that
 * general latency remains low and so that task positions have a chance to be
 * considered. The function scans both the global and local run queues and
 * picks the most urgent task between the two. We need to grab the global
 * runqueue lock to touch it so it's taken on the very first access to the
 * global run queue and is released as soon as it reaches the end.
 *
 * The function adjusts <next> if a new event is closer.
 */
void process_runnable_tasks()
{
	struct task_per_thread * const tt = sched;
	struct eb32sc_node *lrq = NULL; // next local run queue entry
	struct eb32sc_node *grq = NULL; // next global run queue entry
	struct task *t;
	int max_processed;
	struct mt_list *tmp_list;

	ti->flags &= ~TI_FL_STUCK; // this thread is still running

	if (!thread_has_tasks()) {
		activity[tid].empty_rq++;
		return;
	}
	/* Merge the list of tasklets woken up by other threads into the
	 * main list.
	 */
	tmp_list = MT_LIST_BEHEAD(&sched->shared_tasklet_list);
	if (tmp_list)
		LIST_SPLICE_END_DETACHED(&sched->task_list, (struct list *)tmp_list);

	tasks_run_queue_cur = tasks_run_queue; /* keep a copy for reporting */
	nb_tasks_cur = nb_tasks;
	max_processed = global.tune.runqueue_depth;

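	/* if niced tasks are present, only consume a quarter of the budget per
	 * round so that relative priorities are re-evaluated more often
	 */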
	if (likely(niced_tasks))
		max_processed = (max_processed + 3) / 4;

	/* Note: the grq lock is always held when grq is not null */

	while (tt->task_list_size < max_processed) {
		if ((global_tasks_mask & tid_bit) && !grq) {
#ifdef USE_THREAD
			HA_SPIN_LOCK(TASK_RQ_LOCK, &rq_lock);
			grq = eb32sc_lookup_ge(&rqueue, rqueue_ticks - TIMER_LOOK_BACK, tid_bit);
			if (unlikely(!grq)) {
				grq = eb32sc_first(&rqueue, tid_bit);
				if (!grq) {
					global_tasks_mask &= ~tid_bit;
					HA_SPIN_UNLOCK(TASK_RQ_LOCK, &rq_lock);
				}
			}
#endif
		}

		/* If a global task is available for this thread, it's in grq
		 * now and the global RQ is locked.
		 */

		if (!lrq) {
			lrq = eb32sc_lookup_ge(&tt->rqueue, rqueue_ticks - TIMER_LOOK_BACK, tid_bit);
			if (unlikely(!lrq))
				lrq = eb32sc_first(&tt->rqueue, tid_bit);
		}

		if (!lrq && !grq)
			break;

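		/* pick the older of the local and global candidates; the signed key
		 * difference remains correct even when the tick counter wraps
		 */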
		if (likely(!grq || (lrq && (int)(lrq->key - grq->key) <= 0))) {
			t = eb32sc_entry(lrq, struct task, rq);
			lrq = eb32sc_next(lrq, tid_bit);
			__task_unlink_rq(t);
		}
#ifdef USE_THREAD
		else {
			t = eb32sc_entry(grq, struct task, rq);
			grq = eb32sc_next(grq, tid_bit);
			__task_unlink_rq(t);
			if (unlikely(!grq)) {
				grq = eb32sc_first(&rqueue, tid_bit);
				if (!grq) {
					global_tasks_mask &= ~tid_bit;
					HA_SPIN_UNLOCK(TASK_RQ_LOCK, &rq_lock);
				}
			}
		}
#endif

		/* Make sure the entry doesn't appear to be in a list */
		LIST_INIT(&((struct tasklet *)t)->list);
		/* And add it to the local task list */
		tasklet_insert_into_tasklet_list((struct tasklet *)t);
		tt->task_list_size++;
		activity[tid].tasksw++;
	}

	/* release the rqueue lock */
	if (grq) {
		HA_SPIN_UNLOCK(TASK_RQ_LOCK, &rq_lock);
		grq = NULL;
	}

	while (max_processed > 0 && !LIST_ISEMPTY(&tt->task_list)) {
		struct task *t;
		unsigned short state;
		void *ctx;
		struct task *(*process)(struct task *t, void *ctx, unsigned short state);

		t = (struct task *)LIST_ELEM(task_per_thread[tid].task_list.n, struct tasklet *, list);
		state = (t->state & TASK_SHARED_WQ) | TASK_RUNNING;
		state = _HA_ATOMIC_XCHG(&t->state, state);
		__ha_barrier_atomic_store();
		__tasklet_remove_from_tasklet_list((struct tasklet *)t);

		ti->flags &= ~TI_FL_STUCK; // this thread is still running
		activity[tid].ctxsw++;
		ctx = t->context;
		process = t->process;
		t->calls++;

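		/* tasklets are lightweight: they carry no timer nor wait queue, so
		 * just run them and account for the budget
		 */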
		if (TASK_IS_TASKLET(t)) {
			process(NULL, ctx, state);
			max_processed--;
			continue;
		}

		/* OK then this is a regular task */

		tt->task_list_size--;
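		/* with profiling enabled, account for the time spent waiting in the
		 * run queue before this call
		 */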
		if (unlikely(t->call_date)) {
			uint64_t now_ns = now_mono_time();

			t->lat_time += now_ns - t->call_date;
			t->call_date = now_ns;
		}

		sched->current = t;
		__ha_barrier_store();
		if (likely(process == process_stream))
			t = process_stream(t, ctx, state);
		else if (process != NULL)
			t = process(t, ctx, state);
		else {
			__task_free(t);
			sched->current = NULL;
			__ha_barrier_store();
			/* We don't want max_processed to be decremented if
			 * we're just freeing a destroyed task, we should only
			 * do so if we really ran a task.
			 */
			continue;
		}
		sched->current = NULL;
		__ha_barrier_store();
		/* If there is a pending state we have to wake up the task
		 * immediately, else we defer it into the wait queue.
		 */
		if (t != NULL) {
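			/* add the CPU time consumed by this call to the task's counter */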
			if (unlikely(t->call_date)) {
				t->cpu_time += now_mono_time() - t->call_date;
				t->call_date = 0;
			}

			state = _HA_ATOMIC_AND(&t->state, ~TASK_RUNNING);
			if (state & TASK_WOKEN_ANY)
				task_wakeup(t, 0);
			else
				task_queue(t);
		}

		max_processed--;
	}

	if (!LIST_ISEMPTY(&tt->task_list))
		activity[tid].long_rq++;
}

/* create a work list array for <nbthread> threads, using tasks made of
 * function <fct>. The context passed to the function will be the pointer to
 * the thread's work list, which will contain a copy of argument <arg>. The
 * wake up reason will be TASK_WOKEN_OTHER. The pointer to the work_list array
 * is returned on success, otherwise NULL on failure.
 */
struct work_list *work_list_create(int nbthread,
                                   struct task *(*fct)(struct task *, void *, unsigned short),
                                   void *arg)
{
	struct work_list *wl;
	int i;

	wl = calloc(nbthread, sizeof(*wl));
	if (!wl)
		goto fail;

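	/* each entry gets its own task, bound to exactly one thread via the
	 * thread mask passed to task_new()
	 */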
	for (i = 0; i < nbthread; i++) {
		MT_LIST_INIT(&wl[i].head);
		wl[i].task = task_new(1UL << i);
		if (!wl[i].task)
			goto fail;
		wl[i].task->process = fct;
		wl[i].task->context = &wl[i];
		wl[i].arg = arg;
	}
	return wl;

 fail:
	work_list_destroy(wl, nbthread);
	return NULL;
}

/* destroy work list <work> */
void work_list_destroy(struct work_list *work, int nbthread)
{
	int t;

	if (!work)
		return;
	for (t = 0; t < nbthread; t++)
		task_destroy(work[t].task);
	free(work);
}

/*
 * Delete every task before running the master polling loop
 */
void mworker_cleantasks()
{
	struct task *t;
	int i;
	struct eb32_node *tmp_wq = NULL;
	struct eb32sc_node *tmp_rq = NULL;

#ifdef USE_THREAD
	/* cleanup the global run queue */
	tmp_rq = eb32sc_first(&rqueue, MAX_THREADS_MASK);
	while (tmp_rq) {
		t = eb32sc_entry(tmp_rq, struct task, rq);
		tmp_rq = eb32sc_next(tmp_rq, MAX_THREADS_MASK);
		task_destroy(t);
	}
	/* cleanup the timers queue */
	tmp_wq = eb32_first(&timers);
	while (tmp_wq) {
		t = eb32_entry(tmp_wq, struct task, wq);
		tmp_wq = eb32_next(tmp_wq);
		task_destroy(t);
	}
#endif
	/* clean the per thread run queue */
	for (i = 0; i < global.nbthread; i++) {
		tmp_rq = eb32sc_first(&task_per_thread[i].rqueue, MAX_THREADS_MASK);
		while (tmp_rq) {
			t = eb32sc_entry(tmp_rq, struct task, rq);
			tmp_rq = eb32sc_next(tmp_rq, MAX_THREADS_MASK);
			task_destroy(t);
		}
		/* cleanup the per thread timers queue */
		tmp_wq = eb32_first(&task_per_thread[i].timers);
		while (tmp_wq) {
			t = eb32_entry(tmp_wq, struct task, wq);
			tmp_wq = eb32_next(tmp_wq);
			task_destroy(t);
		}
	}
}

/* perform minimal initializations */
static void init_task()
{
	int i;

#ifdef USE_THREAD
	memset(&timers, 0, sizeof(timers));
	memset(&rqueue, 0, sizeof(rqueue));
#endif
	memset(&task_per_thread, 0, sizeof(task_per_thread));
	for (i = 0; i < MAX_THREADS; i++) {
		LIST_INIT(&task_per_thread[i].task_list);
		MT_LIST_INIT(&task_per_thread[i].shared_tasklet_list);
	}
}

INITCALL0(STG_PREPARE, init_task);

/*
 * Local variables:
 *  c-indent-level: 8
 *  c-basic-offset: 8
 * End:
 */