/*
 * include/proto/task.h
 * Functions for task management.
 *
 * Copyright (C) 2000-2010 Willy Tarreau - w@1wt.eu
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation, version 2.1
 * exclusively.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#ifndef _PROTO_TASK_H
#define _PROTO_TASK_H


#include <sys/time.h>

#include <common/config.h>
#include <common/memory.h>
#include <common/mini-clist.h>
#include <common/standard.h>
#include <common/ticks.h>
#include <eb32tree.h>

#include <types/global.h>
#include <types/task.h>

/* Principle of the wait queue.
 *
 * We want to be able to tell whether an expiration date is before or after the
 * current time <now>. We KNOW that expiration dates are never too far apart,
 * because they are measured in ticks (milliseconds). We also know that almost
 * all dates will be in the future, and that a very small part of them will be
 * in the past; they are the ones which have expired since the last time we
 * checked them. Using ticks, we know if a date is in the future or in the
 * past, but we cannot use that to store sorted information because that
 * reference changes all the time.
 *
 * We'll use the fact that the time wraps to sort timers. Timers above <now>
 * are in the future, timers below <now> are in the past. Here, "above" and
 * "below" are to be considered modulo 2^31.
 *
 * Timers are stored sorted in an ebtree. We use the new ability for ebtrees to
 * lookup values starting from X to only expire tasks between <now> - 2^31 and
 * <now>. If the end of the tree is reached while walking over it, we simply
 * loop back to the beginning. That way, we have no problem keeping sorted
 * wrapping timers in a tree, between (now - 24 days) and (now + 24 days). The
 * keys in the tree always reflect their real position, none can be infinite.
 * This reduces the number of checks to be performed.
 *
 * Another nice optimisation is to allow a timer to stay at an old place in the
 * queue as long as it's not further than the real expiration date. That way,
 * we use the tree as a place holder for a lower bound of the real expiration
 * date. Since we have a very low chance of hitting a timeout anyway, we can
 * bounce the nodes to their right place when we scan the tree if we encounter
 * a misplaced node once in a while. This even allows us not to remove the
 * infinite timers from the wait queue.
 *
 * So, to summarize, we have:
 * - node->key always defines the current position in the wait queue
 * - timer is the real expiration date (possibly infinite)
 * - node->key is always before or equal to timer
 *
 * The run queue works similarly to the wait queue except that the current date
 * is replaced by an insertion counter which can also wrap without any problem.
 */

/* The farthest we can look back in a timer tree */
#define TIMER_LOOK_BACK		(1U << 31)
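
/* Illustrative sketch (not part of the original API): how "before" is decided
 * for wrapping ticks. Ticks live on a 2^32 circle, so a signed test on the
 * difference classifies <a> as being up to TIMER_LOOK_BACK (2^31) ticks behind
 * <b>. This is the same principle as tick_is_lt() from <common/ticks.h>,
 * which the queueing code below relies on.
 */
static inline int example_tick_before(unsigned int a, unsigned int b)
{
	/* (a - b) wraps modulo 2^32; casting to signed makes any distance
	 * shorter than 2^31 behind <b> read as negative.
	 */
	return (int)(a - b) < 0;
}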

/* a few exported variables */
extern unsigned int nb_tasks;            /* total number of tasks */
extern unsigned int tasks_run_queue;     /* run queue size */
extern unsigned int tasks_run_queue_cur; /* copy of the run queue size */
extern unsigned int nb_tasks_cur;        /* copy of the tasks count */
extern unsigned int niced_tasks;         /* number of niced tasks in the run queue */
extern struct pool_head *pool2_task;
extern struct eb32_node *last_timer;     /* optimization: last queued timer */
extern struct eb32_node *rq_next;        /* optimization: next task except if delete/insert */

/* returns non-zero if the task <t> is in the run queue, otherwise zero */
static inline int task_in_rq(struct task *t)
{
	return t->rq.node.leaf_p != NULL;
}

/* returns non-zero if the task <t> is in the wait queue, otherwise zero */
static inline int task_in_wq(struct task *t)
{
	return t->wq.node.leaf_p != NULL;
}

/* puts the task <t> in the run queue with reason flags <f>, and returns <t> */
struct task *__task_wakeup(struct task *t);
static inline struct task *task_wakeup(struct task *t, unsigned int f)
{
	if (likely(!task_in_rq(t)))
		__task_wakeup(t);
	t->state |= f;
	return t;
}
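
/* Usage sketch, assuming the TASK_WOKEN_MSG reason flag from <types/task.h>:
 * this enqueues <t> if it is not already queued, and ORs the reason into its
 * state so the task's handler can tell why it was woken up.
 *
 *	task_wakeup(t, TASK_WOKEN_MSG);
 */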

/*
 * Unlink the task from the wait queue, and possibly update the last_timer
 * pointer. A pointer to the task itself is returned. The task *must* already
 * be in the wait queue before calling this function. If unsure, use the safer
 * task_unlink_wq() function.
 */
static inline struct task *__task_unlink_wq(struct task *t)
{
	eb32_delete(&t->wq);
	if (last_timer == &t->wq)
		last_timer = NULL;
	return t;
}

/* removes <t> from the wait queue if it is there; always returns <t> */
static inline struct task *task_unlink_wq(struct task *t)
{
	if (likely(task_in_wq(t)))
		__task_unlink_wq(t);
	return t;
}

/*
 * Unlink the task from the run queue. The tasks_run_queue size and number of
 * niced tasks are updated too. A pointer to the task itself is returned. The
 * task *must* already be in the run queue before calling this function. If
 * unsure, use the safer task_unlink_rq() function. Note that the pointer to
 * the next run queue entry is neither checked nor updated.
 */
static inline struct task *__task_unlink_rq(struct task *t)
{
	eb32_delete(&t->rq);
	tasks_run_queue--;
	if (likely(t->nice))
		niced_tasks--;
	return t;
}

/* This function unlinks task <t> from the run queue if it is in it. It also
 * takes care of updating the next run queue task if it was this task.
 */
static inline struct task *task_unlink_rq(struct task *t)
{
	if (likely(task_in_rq(t))) {
		if (&t->rq == rq_next)
			rq_next = eb32_next(rq_next);
		__task_unlink_rq(t);
	}
	return t;
}

/*
 * Unlinks the task from both the wait queue and the run queue, and adjusts
 * the run queue stats. A pointer to the task itself is returned.
 */
static inline struct task *task_delete(struct task *t)
{
	task_unlink_wq(t);
	task_unlink_rq(t);
	return t;
}
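
/* Typical teardown sketch: detach <t> from both queues, then release it. The
 * task's context, if any, must be freed by the caller beforehand (see
 * task_free() below).
 *
 *	task_delete(t);
 *	task_free(t);
 */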

/*
 * Initialize a new task. The bare minimum is performed (queue pointers and
 * state). The task is returned. This function should not be used outside of
 * task_new().
 */
static inline struct task *task_init(struct task *t)
{
	t->wq.node.leaf_p = NULL;
	t->rq.node.leaf_p = NULL;
	t->state = TASK_SLEEPING;
	t->nice = 0;
	t->calls = 0;
	return t;
}

/*
 * Allocate and initialise a new task. The new task is returned, or NULL in
 * case of lack of memory. The task count is incremented. Tasks should only
 * be allocated this way, and must be freed using task_free().
 */
static inline struct task *task_new(void)
{
	struct task *t = pool_alloc2(pool2_task);
	if (t) {
		nb_tasks++;
		task_init(t);
	}
	return t;
}
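
/* Setup sketch, assuming a hypothetical handler my_handler() and context
 * pointer my_ctx; <process>, <context> and <expire> are fields of struct
 * task (see <types/task.h>), tick_add() and MS_TO_TICKS() come from
 * <common/ticks.h>, and now_ms is the current tick (<common/time.h>):
 *
 *	struct task *t = task_new();
 *	if (t) {
 *		t->process = my_handler;
 *		t->context = my_ctx;
 *		t->expire = tick_add(now_ms, MS_TO_TICKS(1000));
 *		task_queue(t);
 *	}
 */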

/*
 * Free a task. Its context must have been freed since it will be lost. The
 * task count is decremented.
 */
static inline void task_free(struct task *t)
{
	pool_free2(pool2_task, t);
	if (unlikely(stopping))
		pool_flush2(pool2_task);
	nb_tasks--;
}

/* Place <task> into the wait queue, where it may already be. If the expiration
 * timer is infinite, do nothing and rely on wake_expired_tasks() to clean up.
 */
void __task_queue(struct task *task);
static inline void task_queue(struct task *task)
{
	/* If we already have a place in the wait queue no later than the
	 * timeout we're trying to set, we'll stay there, because it is very
	 * unlikely that we will reach the timeout anyway. If the timeout
	 * has been disabled, it's useless to leave the queue as well. We'll
	 * rely on wake_expired_tasks() to catch the node and move it to the
	 * proper place should it ever happen. Finally we only add the task
	 * to the queue if it was not there or if it was further than what
	 * we want.
	 */
	if (!tick_isset(task->expire))
		return;

	if (!task_in_wq(task) || tick_is_lt(task->expire, task->wq.key))
		__task_queue(task);
}
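
/* Re-arming sketch: push back <t>'s timeout by 5 seconds. If the new date is
 * later than the task's current position in the tree, the node stays where it
 * is as a lower bound and wake_expired_tasks() will bounce it to its right
 * place if it is reached first.
 *
 *	t->expire = tick_add(now_ms, MS_TO_TICKS(5000));
 *	task_queue(t);
 */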

/* Ensure <task> will be woken up at most at <when>. If the task is already in
 * the run queue (but not running), nothing is done. It is typically used with
 * a delay: task_schedule(task, tick_add(now_ms, delay));
 */
static inline void task_schedule(struct task *task, int when)
{
	if (task_in_rq(task))
		return;

	if (task_in_wq(task))
		when = tick_first(when, task->expire);

	task->expire = when;
	if (!task_in_wq(task) || tick_is_lt(task->expire, task->wq.key))
		__task_queue(task);
}

/* This function returns non-zero if some notifications are pending, otherwise
 * zero.
 */
static inline int notification_registered(struct list *wake)
{
	return !LIST_ISEMPTY(wake);
}

/*
 * This does two things:
 *  - wake up all expired tasks
 *  - call all runnable tasks
 * The date of the next event is reported by wake_expired_tasks() below.
 */
void process_runnable_tasks(void);

/*
 * Extracts all expired timers from the timer queue, and wakes up all
 * associated tasks. Returns the date of the next event (or eternity).
 */
int wake_expired_tasks(void);

/* Performs minimal initializations; returns 0 in case of error, 1 if OK. */
int init_task(void);

#endif /* _PROTO_TASK_H */

/*
 * Local variables:
 *  c-indent-level: 8
 *  c-basic-offset: 8
 * End:
 */