/* LGPL (v2.1 or any later version) - see LICENSE file for details */
#include <ccan/timer/timer.h>
#include <ccan/array_size/array_size.h>
#include <ccan/ilog/ilog.h>
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
7
8 #define PER_LEVEL (1ULL << TIMER_LEVEL_BITS)
9
/* One level of the timer wheel: PER_LEVEL buckets of pending timers. */
struct timer_level {
	struct list_head list[PER_LEVEL];
};
13
/* Default allocator hook: plain malloc; the timers context is unused. */
static void *timer_default_alloc(struct timers *timers, size_t len)
{
	void *p = malloc(len);

	return p;
}
18
/* Default deallocator hook: plain free; the timers context is unused. */
static void timer_default_free(struct timers *timers, void *p)
{
	free(p);
}
23
/* Allocation hooks used for level structures; overridable via
 * timers_set_allocator(). */
static void *(*timer_alloc)(struct timers *, size_t) = timer_default_alloc;
static void (*timer_free)(struct timers *, void *) = timer_default_free;
26
/* Install custom allocation hooks; passing NULL for either restores the
 * corresponding default. */
void timers_set_allocator(void *(*alloc)(struct timers *, size_t len),
			  void (*free)(struct timers *, void *p))
{
	timer_alloc = alloc ? alloc : timer_default_alloc;
	timer_free = free ? free : timer_default_free;
}
37
time_to_grains(struct timemono t)38 static uint64_t time_to_grains(struct timemono t)
39 {
40 return t.ts.tv_sec * ((uint64_t)1000000000 / TIMER_GRANULARITY)
41 + (t.ts.tv_nsec / TIMER_GRANULARITY);
42 }
43
grains_to_time(uint64_t grains)44 static struct timemono grains_to_time(uint64_t grains)
45 {
46 struct timemono t;
47
48 t.ts.tv_sec = grains / (1000000000 / TIMER_GRANULARITY);
49 t.ts.tv_nsec = (grains % (1000000000 / TIMER_GRANULARITY))
50 * TIMER_GRANULARITY;
51 return t;
52 }
53
timers_init(struct timers * timers,struct timemono start)54 void timers_init(struct timers *timers, struct timemono start)
55 {
56 unsigned int i;
57
58 list_head_init(&timers->far);
59 timers->base = time_to_grains(start);
60 timers->first = -1ULL;
61 memset(timers->firsts, 0xFF, sizeof(timers->firsts));
62 for (i = 0; i < ARRAY_SIZE(timers->level); i++)
63 timers->level[i] = NULL;
64 }
65
level_of(const struct timers * timers,uint64_t time)66 static unsigned int level_of(const struct timers *timers, uint64_t time)
67 {
68 uint64_t diff;
69
70 /* Level depends how far away it is. */
71 diff = time - timers->base;
72 return ilog64(diff / 2) / TIMER_LEVEL_BITS;
73 }
74
/* Place t into the bucket matching its expiry time, and update the cached
 * minimum for the list it lands in.  Does not touch timers->first. */
static void timer_add_raw(struct timers *timers, struct timer *t)
{
	struct list_head *l;
	unsigned int level = level_of(timers, t->time);
	uint64_t *first;

	if (!timers->level[level]) {
		/* Level not allocated: park on the catch-all far list,
		 * whose watermark is the extra firsts[] slot past the
		 * last level. */
		l = &timers->far;
		first = &timers->firsts[ARRAY_SIZE(timers->level)];
	} else {
		/* Bucket index is this level's TIMER_LEVEL_BITS-wide slice
		 * of the expiry time. */
		int off = (t->time >> (level*TIMER_LEVEL_BITS)) & (PER_LEVEL-1);
		l = &timers->level[level]->list[off];
		first = &timers->firsts[level];
	}

	list_add_tail(l, &t->list);
	if (t->time < *first)
		*first = t->time;
}
94
/* Prepare t for use: self-link its node so list_node_initted() is true
 * until the timer is added to a list. */
void timer_init(struct timer *t)
{
	list_node_init(&t->list);
}
99
/* True if n is self-linked, i.e. initialized by list_node_init() and not
 * currently inside any list. */
static inline bool list_node_initted(const struct list_node *n)
{
	return n->prev == n;
}
104
/* Add an initialized timer to expire 'rel' from now.
 *
 * Equivalent to timer_addmono() at now + rel; delegating removes the
 * previously duplicated clamp/first-update/add sequence (times in the
 * past are treated as imminent, and timers->first is maintained there). */
void timer_addrel(struct timers *timers, struct timer *t, struct timerel rel)
{
	timer_addmono(timers, t, timemono_add(time_mono(), rel));
}
120
timer_addmono(struct timers * timers,struct timer * t,struct timemono when)121 void timer_addmono(struct timers *timers, struct timer *t, struct timemono when)
122 {
123 assert(list_node_initted(&t->list));
124
125 t->time = time_to_grains(when);
126
127 /* Added in the past? Treat it as imminent. */
128 if (t->time < timers->base)
129 t->time = timers->base;
130 if (t->time < timers->first)
131 timers->first = t->time;
132
133 timer_add_raw(timers, t);
134 }
135
136 /* FIXME: inline */
timer_del(struct timers * timers UNNEEDED,struct timer * t)137 void timer_del(struct timers *timers UNNEEDED, struct timer *t)
138 {
139 list_del_init(&t->list);
140 }
141
timers_far_get(struct timers * timers,struct list_head * list,uint64_t when)142 static void timers_far_get(struct timers *timers,
143 struct list_head *list,
144 uint64_t when)
145 {
146 struct timer *i, *next;
147
148 list_for_each_safe(&timers->far, i, next, list) {
149 if (i->time <= when) {
150 list_del_from(&timers->far, &i->list);
151 list_add_tail(list, &i->list);
152 }
153 }
154 }
155
add_level(struct timers * timers,unsigned int level)156 static void add_level(struct timers *timers, unsigned int level)
157 {
158 struct timer_level *l;
159 struct timer *t;
160 unsigned int i;
161 struct list_head from_far;
162
163 l = timer_alloc(timers, sizeof(*l));
164 if (!l)
165 return;
166
167 for (i = 0; i < ARRAY_SIZE(l->list); i++)
168 list_head_init(&l->list[i]);
169 timers->level[level] = l;
170
171 list_head_init(&from_far);
172 timers_far_get(timers, &from_far,
173 timers->base + (1ULL << ((level+1)*TIMER_LEVEL_BITS)) - 1);
174
175 while ((t = list_pop(&from_far, struct timer, list)) != NULL)
176 timer_add_raw(timers, t);
177 }
178
179 /* We don't need to search past the first at level 0, since the
180 * bucket range is 1; they're all the same. */
find_first(const struct list_head * list,unsigned int level,const struct timer * prev)181 static const struct timer *find_first(const struct list_head *list,
182 unsigned int level,
183 const struct timer *prev)
184 {
185 struct timer *t;
186
187 list_for_each(list, t, list) {
188 if (!prev || t->time < prev->time)
189 prev = t;
190 if (level == 0)
191 break;
192 }
193 return prev;
194 }
195
196 /* Update level's first watermark, and return overall first. */
first_for_level(struct timers * timers,size_t level,const struct timer * level_first,const struct timer * first)197 static const struct timer *first_for_level(struct timers *timers,
198 size_t level,
199 const struct timer *level_first,
200 const struct timer *first)
201 {
202 if (level_first) {
203 timers->firsts[level] = level_first->time;
204 if (!first || level_first->time < first->time)
205 first = level_first;
206 } else {
207 timers->firsts[level] = -1ULL;
208 }
209 return first;
210 }
211
level_may_beat(const struct timers * timers,size_t level,const struct timer * first)212 static bool level_may_beat(const struct timers *timers, size_t level,
213 const struct timer *first)
214 {
215 return !first || timers->firsts[level] < first->time;
216 }
217
218 /* FIXME: Suboptimal */
/* FIXME: Suboptimal */
/* Search every allocated level (and the far list) for the earliest timer,
 * refreshing each searched level's cached minimum along the way.
 * Returns NULL if no timers exist at all. */
static const struct timer *brute_force_first(struct timers *timers)
{
	unsigned int l, i;
	const struct timer *found = NULL;

	for (l = 0; l < ARRAY_SIZE(timers->level) && timers->level[l]; l++) {
		const struct timer *t = NULL;

		/* Do we know they don't have a better one? */
		if (!level_may_beat(timers, l, found))
			continue;

		/* Find first timer on this level. */
		for (i = 0; i < PER_LEVEL; i++)
			t = find_first(&timers->level[l]->list[i], l, t);

		found = first_for_level(timers, l, t, found);
	}

	/* Check (and update) far list if there's a chance.  Its watermark
	 * lives in the extra firsts[] slot past the last level. */
	l = ARRAY_SIZE(timers->level);
	if (level_may_beat(timers, l, found)) {
		const struct timer *t = find_first(&timers->far, l, NULL);
		found = first_for_level(timers, l, t, found);
	}

	return found;
}
247
get_first(struct timers * timers)248 static const struct timer *get_first(struct timers *timers)
249 {
250 /* We can have just far timers, for example. */
251 if (timers->level[0]) {
252 /* First search rest of lower buckets; we've already spilled
253 * so if we find one there we don't need to search further. */
254 unsigned int i, off = timers->base % PER_LEVEL;
255
256 for (i = off; i < PER_LEVEL; i++) {
257 struct list_head *h = &timers->level[0]->list[i];
258 if (!list_empty(h))
259 return find_first(h, 0, NULL);
260 }
261 }
262
263 /* From here on, we're searching non-normalized parts of the
264 * data structure, which is much subtler.
265 *
266 * So we brute force. */
267 return brute_force_first(timers);
268 }
269
update_first(struct timers * timers)270 static bool update_first(struct timers *timers)
271 {
272 const struct timer *found = get_first(timers);
273
274 if (!found) {
275 timers->first = -1ULL;
276 return false;
277 }
278
279 timers->first = found->time;
280 return true;
281 }
282
timer_earliest(struct timers * timers,struct timemono * first)283 bool timer_earliest(struct timers *timers, struct timemono *first)
284 {
285 if (!update_first(timers))
286 return false;
287
288 *first = grains_to_time(timers->first);
289 return true;
290 }
291
292 /* Assume no timers before 'time', cascade down and update base time. */
/* Assume no timers before 'time', cascade down and update base time. */
static void timer_fast_forward(struct timers *timers, uint64_t time)
{
	unsigned int level, changed;
	int need_level = -1;
	struct list_head list;
	struct timer *i;

	/* How many bits changed between base and time?
	 * Each time we wrap, we need to empty buckets from above. */
	if (time == timers->base)
		return;

	changed = ilog64_nz(time ^ timers->base);
	level = (changed - 1) / TIMER_LEVEL_BITS;

	/* Buckets always empty downwards, so we could cascade manually,
	 * but it's rarely very many so we just remove and re-add */
	list_head_init(&list);

	/* Walk from the highest affected level down to 0, collecting
	 * everything that must be re-bucketed. */
	do {
		if (!timers->level[level]) {
			/* We need any which belong on this level.  Since we
			 * iterate downwards, need_level ends up as the
			 * lowest unallocated level we touched. */
			timers_far_get(timers, &list,
				       timers->base
				       + (1ULL << ((level+1)*TIMER_LEVEL_BITS))-1);
			need_level = level;
		} else {
			unsigned src;

			/* Get all timers from this bucket. */
			src = (time >> (level * TIMER_LEVEL_BITS)) % PER_LEVEL;
			list_append_list(&list,
					 &timers->level[level]->list[src]);
		}
	} while (level--);

	/* Did we hit the last level? If so, add. */
	if (need_level != -1)
		add_level(timers, need_level);

	/* Fast-forward the time, and re-add everyone. */
	timers->base = time;
	while ((i = list_pop(&list, struct timer, list)) != NULL)
		timer_add_raw(timers, i);
}
338
339 /* Returns an expired timer. */
/* Returns an expired timer. */
/* Pop one timer whose time is <= 'expire' (advancing base as a side
 * effect), or NULL when nothing is due.  Call repeatedly to drain. */
struct timer *timers_expire(struct timers *timers, struct timemono expire)
{
	uint64_t now = time_to_grains(expire);
	unsigned int off;
	struct timer *t;

	/* This can happen without TIME_HAVE_MONOTONIC, but I also have
	 * a report of OpenBSD 6.8 under virtualbox doing this. */
	if (now < timers->base) {
		return NULL;
	}

	if (!timers->level[0]) {
		/* No levels and no far timers: nothing pending at all. */
		if (list_empty(&timers->far))
			return NULL;
		/* Spill far timers into a fresh level 0 so we can pop. */
		add_level(timers, 0);
	}

	do {
		if (timers->first > now) {
			/* Nothing due yet; just advance base. */
			timer_fast_forward(timers, now);
			return NULL;
		}

		/* Jump to the earliest pending time: afterwards it sits in
		 * level 0's current bucket. */
		timer_fast_forward(timers, timers->first);
		off = timers->base % PER_LEVEL;

		/* This *may* be NULL, if we deleted the first timer */
		t = list_pop(&timers->level[0]->list[off], struct timer, list);
		if (t)
			list_node_init(&t->list);
	} while (!t && update_first(timers));

	return t;
}
375
timer_list_check(const struct list_head * l,uint64_t min,uint64_t max,uint64_t first,const char * abortstr)376 static bool timer_list_check(const struct list_head *l,
377 uint64_t min, uint64_t max, uint64_t first,
378 const char *abortstr)
379 {
380 const struct timer *t;
381
382 if (!list_check(l, abortstr))
383 return false;
384
385 list_for_each(l, t, list) {
386 if (t->time < min || t->time > max) {
387 if (abortstr) {
388 fprintf(stderr,
389 "%s: timer %p %llu not %llu-%llu\n",
390 abortstr, t, (long long)t->time,
391 (long long)min, (long long)max);
392 abort();
393 }
394 return false;
395 }
396 if (t->time < first) {
397 if (abortstr) {
398 fprintf(stderr,
399 "%s: timer %p %llu < minimum %llu\n",
400 abortstr, t, (long long)t->time,
401 (long long)first);
402 abort();
403 }
404 return false;
405 }
406 }
407 return true;
408 }
409
/* Consistency-check the whole wheel.  Returns timers on success; on
 * failure aborts with 'abortstr' prefix if non-NULL, else returns NULL. */
struct timers *timers_check(const struct timers *timers, const char *abortstr)
{
	unsigned int l, i, off;
	uint64_t base;

	l = 0;
	if (!timers->level[0])
		goto past_levels;

	/* First level is simple: bucket (base+i) holds exactly time base+i. */
	off = timers->base % PER_LEVEL;
	for (i = 0; i < PER_LEVEL; i++) {
		struct list_head *h;

		h = &timers->level[l]->list[(i+off) % PER_LEVEL];
		if (!timer_list_check(h, timers->base + i, timers->base + i,
				      timers->firsts[l], abortstr))
			return NULL;
	}

	/* For other levels, "current" bucket has been emptied, and may contain
	 * entries for the current + level_size bucket. */
	for (l = 1; l < ARRAY_SIZE(timers->level) && timers->level[l]; l++) {
		uint64_t per_bucket = 1ULL << (TIMER_LEVEL_BITS * l);

		off = ((timers->base >> (l*TIMER_LEVEL_BITS)) % PER_LEVEL);
		/* We start at *next* bucket. */
		base = (timers->base & ~(per_bucket - 1)) + per_bucket;

		for (i = 1; i <= PER_LEVEL; i++) {
			struct list_head *h;

			h = &timers->level[l]->list[(i+off) % PER_LEVEL];
			if (!timer_list_check(h, base, base + per_bucket - 1,
					      timers->firsts[l], abortstr))
				return NULL;
			base += per_bucket;
		}
	}

past_levels:
	/* Far timers must all lie beyond the range of every allocated
	 * level (l is one past the last allocated level here). */
	base = (timers->base & ~((1ULL << (TIMER_LEVEL_BITS * l)) - 1))
		+ (1ULL << (TIMER_LEVEL_BITS * l)) - 1;
	if (!timer_list_check(&timers->far, base, -1ULL,
			      timers->firsts[ARRAY_SIZE(timers->level)],
			      abortstr))
		return NULL;

	return (struct timers *)timers;
}
460
461 #ifdef CCAN_TIMER_DEBUG
dump_bucket_stats(FILE * fp,const struct list_head * h)462 static void dump_bucket_stats(FILE *fp, const struct list_head *h)
463 {
464 unsigned long long min, max, num;
465 struct timer *t;
466
467 if (list_empty(h)) {
468 printf("\n");
469 return;
470 }
471
472 min = -1ULL;
473 max = 0;
474 num = 0;
475 list_for_each(h, t, list) {
476 if (t->time < min)
477 min = t->time;
478 if (t->time > max)
479 max = t->time;
480 num++;
481 }
482 fprintf(fp, " %llu (%llu-%llu)\n",
483 num, min, max);
484 }
485
timers_dump(const struct timers * timers,FILE * fp)486 void timers_dump(const struct timers *timers, FILE *fp)
487 {
488 unsigned int l, i, off;
489 unsigned long long base;
490
491 if (!fp)
492 fp = stderr;
493
494 fprintf(fp, "Base: %llu\n", (unsigned long long)timers->base);
495
496 if (!timers->level[0])
497 goto past_levels;
498
499 fprintf(fp, "Level 0:\n");
500
501 /* First level is simple. */
502 off = timers->base % PER_LEVEL;
503 for (i = 0; i < PER_LEVEL; i++) {
504 const struct list_head *h;
505
506 fprintf(fp, " Bucket %llu (%lu):",
507 (i+off) % PER_LEVEL, timers->base + i);
508 h = &timers->level[0]->list[(i+off) % PER_LEVEL];
509 dump_bucket_stats(fp, h);
510 }
511
512 /* For other levels, "current" bucket has been emptied, and may contain
513 * entries for the current + level_size bucket. */
514 for (l = 1; l < ARRAY_SIZE(timers->level) && timers->level[l]; l++) {
515 uint64_t per_bucket = 1ULL << (TIMER_LEVEL_BITS * l);
516
517 off = ((timers->base >> (l*TIMER_LEVEL_BITS)) % PER_LEVEL);
518 /* We start at *next* bucket. */
519 base = (timers->base & ~(per_bucket - 1)) + per_bucket;
520
521 fprintf(fp, "Level %u:\n", l);
522 for (i = 1; i <= PER_LEVEL; i++) {
523 const struct list_head *h;
524
525 fprintf(fp, " Bucket %llu (%llu - %llu):",
526 (i+off) % PER_LEVEL,
527 base, base + per_bucket - 1);
528
529 h = &timers->level[l]->list[(i+off) % PER_LEVEL];
530 dump_bucket_stats(fp, h);
531 base += per_bucket;
532 }
533 }
534
535 past_levels:
536 if (!list_empty(&timers->far)) {
537 fprintf(fp, "Far timers:");
538 dump_bucket_stats(fp, &timers->far);
539 }
540 }
541 #endif
542
timers_cleanup(struct timers * timers)543 void timers_cleanup(struct timers *timers)
544 {
545 unsigned int l;
546
547 for (l = 0; l < ARRAY_SIZE(timers->level); l++)
548 timer_free(timers, timers->level[l]);
549 }
550