1 /*
2 * %CopyrightBegin%
3 *
4 * Copyright Ericsson AB 2015-2018. All Rights Reserved.
5 *
6 * Licensed under the Apache License, Version 2.0 (the "License");
7 * you may not use this file except in compliance with the License.
8 * You may obtain a copy of the License at
9 *
10 * http://www.apache.org/licenses/LICENSE-2.0
11 *
12 * Unless required by applicable law or agreed to in writing, software
13 * distributed under the License is distributed on an "AS IS" BASIS,
14 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 * See the License for the specific language governing permissions and
16 * limitations under the License.
17 *
18 * %CopyrightEnd%
19 */
20
21 /*
22 * Description: High level timers implementing BIF timers
23 * as well as process and port timers.
24 *
25 * Author: Rickard Green
26 */
27
28 #ifdef HAVE_CONFIG_H
29 # include "config.h"
30 #endif
31
32 #include "sys.h"
33 #include "global.h"
34 #include "bif.h"
35 #include "erl_bif_unique.h"
36 #define ERTS_WANT_TIMER_WHEEL_API
37 #include "erl_time.h"
38 #include "erl_hl_timer.h"
39 #include "erl_proc_sig_queue.h"
40
41 #define ERTS_TMR_CHECK_CANCEL_ON_CREATE 0
42
43 #if 0
44 # define ERTS_HLT_HARD_DEBUG
45 #endif
46 #if 0
47 # define ERTS_HLT_DEBUG
48 #endif
49
50 #if defined(ERTS_HLT_HARD_DEBUG) || defined(DEBUG)
51 # if defined(ERTS_HLT_HARD_DEBUG)
52 # undef ERTS_RBT_HARD_DEBUG
53 # define ERTS_RBT_HARD_DEBUG 1
54 # endif
55 # ifndef ERTS_HLT_DEBUG
56 # define ERTS_HLT_DEBUG 1
57 # endif
58 #endif
59
60 #undef ERTS_HLT_ASSERT
61 #if defined(ERTS_HLT_DEBUG)
62 # define ERTS_HLT_ASSERT(E) ERTS_ASSERT(E)
63 # undef ERTS_RBT_DEBUG
64 # define ERTS_RBT_DEBUG
65 #else
66 # define ERTS_HLT_ASSERT(E) ((void) 1)
67 #endif
68
69 #if defined(ERTS_HLT_HARD_DEBUG) && defined(__GNUC__)
70 #warning "* * * * * * * * * * * * * * * * * *"
71 #warning "* ERTS_HLT_HARD_DEBUG IS ENABLED! *"
72 #warning "* * * * * * * * * * * * * * * * * *"
73 #endif
74
75 #ifdef ERTS_HLT_HARD_DEBUG
76 # define ERTS_HLT_HDBG_CHK_SRV(SRV) hdbg_chk_srv((SRV))
77 static void hdbg_chk_srv(ErtsHLTimerService *srv);
78 #else
79 # define ERTS_HLT_HDBG_CHK_SRV(SRV) ((void) 1)
80 #endif
81
82 #if ERTS_REF_NUMBERS != 3
83 #error "ERTS_REF_NUMBERS changed. Update me..."
84 #endif
85
86 typedef enum {
87 ERTS_TMR_BIF,
88 ERTS_TMR_PROC,
89 ERTS_TMR_PORT,
90 ERTS_TMR_CALLBACK
91 } ErtsTmrType;
92
93 #define ERTS_BIF_TIMER_SHORT_TIME 5000
94
95 /* Bit 0 to 10 contains scheduler id (see mask below) */
96 #define ERTS_TMR_ROFLG_HLT (((Uint32) 1) << 11)
97 #define ERTS_TMR_ROFLG_BIF_TMR (((Uint32) 1) << 12)
98 #define ERTS_TMR_ROFLG_PRE_ALC (((Uint32) 1) << 13)
99 #define ERTS_TMR_ROFLG_REG_NAME (((Uint32) 1) << 14)
100 #define ERTS_TMR_ROFLG_PROC (((Uint32) 1) << 15)
101 #define ERTS_TMR_ROFLG_PORT (((Uint32) 1) << 16)
102 #define ERTS_TMR_ROFLG_CALLBACK (((Uint32) 1) << 17)
103
104 #define ERTS_TMR_ROFLG_SID_MASK \
105 (ERTS_TMR_ROFLG_HLT - (Uint32) 1)
106
107 #define ERTS_TMR_STATE_ACTIVE ((erts_aint32_t) 0)
108 #define ERTS_TMR_STATE_CANCELED ((erts_aint32_t) 1)
109 #define ERTS_TMR_STATE_TIMED_OUT ((erts_aint32_t) 2)
110
111 typedef struct ErtsHLTimer_ ErtsHLTimer;
112
113 #define ERTS_HLT_PFLG_RED (((UWord) 1) << 0)
114 #define ERTS_HLT_PFLG_SAME_TIME (((UWord) 1) << 1)
115
116 #define ERTS_HLT_PFLGS_MASK \
117 (ERTS_HLT_PFLG_RED|ERTS_HLT_PFLG_SAME_TIME)
118
119 #define ERTS_HLT_PFIELD_NOT_IN_TABLE (~((UWord) 0))
120
121 typedef struct ErtsBifTimer_ ErtsBifTimer;
122
123 typedef struct {
124 ErtsBifTimer *next;
125 ErtsBifTimer *prev;
126 } ErtsBifTimerList;
127
128 typedef struct {
129 UWord parent; /* parent pointer and flags... */
130 union {
131 struct {
132 ErtsHLTimer *right;
133 ErtsHLTimer *left;
134 } t;
135 struct {
136 ErtsHLTimer *prev;
137 ErtsHLTimer *next;
138 } l;
139 } u;
140 ErtsHLTimer *same_time;
141 } ErtsHLTimerTimeTree;
142
143 typedef struct {
144 UWord parent; /* parent pointer and flags... */
145 ErtsBifTimer *right;
146 ErtsBifTimer *left;
147 } ErtsBifTimerTree;
148
149 typedef struct {
150 Uint32 roflgs;
151 erts_atomic32_t refc;
152 union {
153 void *arg;
154 erts_atomic_t next;
155 } u;
156 union {
157 Process *proc;
158 Port *port;
159 Eterm name;
160 void (*callback)(void *);
161 } receiver;
162 } ErtsTmrHead;
163
164 struct ErtsHLTimer_ {
165 ErtsTmrHead head; /* NEED to be first! */
166 ErtsMonotonicTime timeout;
167 union {
168 ErtsThrPrgrLaterOp cleanup;
169 ErtsHLTimerTimeTree tree;
170 } time;
171
172 #ifdef ERTS_HLT_HARD_DEBUG
173 int pending_timeout;
174 #endif
175 };
176
177 typedef struct {
178 ErtsTmrHead head; /* NEED to be first! */
179 union {
180 ErtsTWheelTimer tw_tmr;
181 ErtsThrPrgrLaterOp cleanup;
182 } u;
183 } ErtsTWTimer;
184
185 struct ErtsBifTimer_ {
186 union {
187 ErtsTmrHead head;
188 ErtsHLTimer hlt;
189 ErtsTWTimer twt;
190 } type;
191 struct {
192 erts_atomic32_t state;
193 Uint32 refn[ERTS_REF_NUMBERS];
194 ErtsBifTimerTree proc_tree;
195 ErtsBifTimerTree tree;
196 Eterm message;
197 ErlHeapFragment *bp;
198 } btm;
199 };
200
201 typedef union {
202 ErtsTmrHead head;
203 ErtsHLTimer hlt;
204 ErtsTWTimer twt;
205 ErtsBifTimer btm;
206 } ErtsTimer;
207
208 typedef ErtsTimer *(*ErtsCreateTimerFunc)(ErtsSchedulerData *esdp,
209 ErtsMonotonicTime timeout_pos,
210 int short_time, ErtsTmrType type,
211 void *rcvrp, Eterm rcvr,
212 Eterm msg,
213 Uint32 *refn,
214 void (*callback)(void *), void *arg);
215
216 #ifdef SMALL_MEMORY
217 #define BIF_TIMER_PREALC_SZ 10
218 #define PTIMER_PREALC_SZ 10
219 #else
220 #define BIF_TIMER_PREALC_SZ 100
221 #define PTIMER_PREALC_SZ 100
222 #endif
223
224 ERTS_SCHED_PREF_PALLOC_IMPL(bif_timer_pre,
225 ErtsBifTimer,
226 BIF_TIMER_PREALC_SZ)
227
228 ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(tw_timer,
229 ErtsTWTimer,
230 PTIMER_PREALC_SZ,
231 ERTS_ALC_T_LL_PTIMER)
232
233 #ifdef ERTS_HLT_DEBUG
234 #define ERTS_TMR_TIMEOUT_YIELD_LIMIT 5
235 #else
236 #define ERTS_TMR_TIMEOUT_YIELD_LIMIT 100
237 #endif
238 #define ERTS_TMR_CANCELED_TIMER_LIMIT 100
239 #define ERTS_TMR_CANCELED_TIMER_SMALL_LIMIT 5
240
241 #define ERTS_TMR_TIMEOUT_YIELD_STATE_T same_time_list_yield_state_t
242 #define ERTS_TMR_YIELDING_TIMEOUT_STATE_INITER {NULL, {0}}
243 typedef struct {
244 int dummy;
245 } ERTS_TMR_TIMEOUT_YIELD_STATE_T;
246
247 typedef struct {
248 ErtsTmrHead marker;
249 erts_atomic_t last;
250 } ErtsHLTCncldTmrQTail;
251
252
253 typedef struct {
254 /*
255 * This structure needs to be cache line aligned for best
256 * performance.
257 */
258 union {
259 /*
260 * Modified by threads returning canceled
261 * timers to this timer service.
262 */
263 ErtsHLTCncldTmrQTail data;
264 char align__[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(
265 sizeof(ErtsHLTCncldTmrQTail))];
266 } tail;
267 /*
268 * Everything below this point is *only* accessed by the
269 * thread managing this timer service.
270 */
271 struct {
272 ErtsTimer *first;
273 ErtsTimer *unref_end;
274 struct {
275 ErtsThrPrgrVal thr_progress;
276 int thr_progress_reached;
277 ErtsTimer *unref_end;
278 } next;
279 int used_marker;
280 } head;
281 } ErtsHLTCncldTmrQ;
282
283
284 typedef struct {
285 ErtsHLTimer *root;
286 ERTS_TMR_TIMEOUT_YIELD_STATE_T state;
287 } ErtsYieldingTimeoutState;
288
289 struct ErtsHLTimerService_ {
290 ErtsHLTCncldTmrQ canceled_queue;
291 ErtsHLTimer *time_tree;
292 ErtsBifTimer *btm_tree;
293 ErtsHLTimer *next_timeout;
294 ErtsYieldingTimeoutState yield;
295 ErtsTWheelTimer service_timer;
296 };
297
298 static ERTS_INLINE int
refn_is_lt(Uint32 * x,Uint32 * y)299 refn_is_lt(Uint32 *x, Uint32 *y)
300 {
301 /* !0 if x < y */
302 if (x[2] < y[2])
303 return 1;
304 if (x[2] != y[2])
305 return 0;
306 if (x[1] < y[1])
307 return 1;
308 if (x[1] != y[1])
309 return 0;
310 return x[0] < y[0];
311 }
312
313 static ERTS_INLINE int
refn_is_eq(Uint32 * x,Uint32 * y)314 refn_is_eq(Uint32 *x, Uint32 *y)
315 {
316 return (x[0] == y[0]) & (x[1] == y[1]) & (x[2] == y[2]);
317 }
318
319 #define ERTS_RBT_PREFIX time
320 #define ERTS_RBT_T ErtsHLTimer
321 #define ERTS_RBT_KEY_T ErtsMonotonicTime
322 #define ERTS_RBT_FLAGS_T UWord
323 #define ERTS_RBT_INIT_EMPTY_TNODE(T) \
324 do { \
325 (T)->time.tree.parent = (UWord) NULL; \
326 (T)->time.tree.u.t.right = NULL; \
327 (T)->time.tree.u.t.left = NULL; \
328 } while (0)
329 #define ERTS_RBT_IS_RED(T) \
330 ((int) ((T)->time.tree.parent & ERTS_HLT_PFLG_RED))
331 #define ERTS_RBT_SET_RED(T) \
332 ((T)->time.tree.parent |= ERTS_HLT_PFLG_RED)
333 #define ERTS_RBT_IS_BLACK(T) \
334 (!ERTS_RBT_IS_RED((T)))
335 #define ERTS_RBT_SET_BLACK(T) \
336 ((T)->time.tree.parent &= ~ERTS_HLT_PFLG_RED)
337 #define ERTS_RBT_GET_FLAGS(T) \
338 ((T)->time.tree.parent & ERTS_HLT_PFLGS_MASK)
339 #define ERTS_RBT_SET_FLAGS(T, F) \
340 do { \
341 ERTS_HLT_ASSERT((((UWord) (F)) & ~ERTS_HLT_PFLGS_MASK) == 0); \
342 (T)->time.tree.parent &= ~ERTS_HLT_PFLGS_MASK; \
343 (T)->time.tree.parent |= (F); \
344 } while (0)
345 #define ERTS_RBT_GET_PARENT(T) \
346 ((ErtsHLTimer *) ((T)->time.tree.parent & ~ERTS_HLT_PFLGS_MASK))
347 #define ERTS_RBT_SET_PARENT(T, P) \
348 do { \
349 ERTS_HLT_ASSERT((((UWord) (P)) & ERTS_HLT_PFLGS_MASK) == 0); \
350 (T)->time.tree.parent &= ERTS_HLT_PFLGS_MASK; \
351 (T)->time.tree.parent |= (UWord) (P); \
352 } while (0)
353 #define ERTS_RBT_GET_RIGHT(T) ((T)->time.tree.u.t.right)
354 #define ERTS_RBT_SET_RIGHT(T, R) ((T)->time.tree.u.t.right = (R))
355 #define ERTS_RBT_GET_LEFT(T) ((T)->time.tree.u.t.left)
356 #define ERTS_RBT_SET_LEFT(T, L) ((T)->time.tree.u.t.left = (L))
357 #define ERTS_RBT_GET_KEY(T) ((T)->timeout)
358 #define ERTS_RBT_IS_LT(KX, KY) ((KX) < (KY))
359 #define ERTS_RBT_IS_EQ(KX, KY) ((KX) == (KY))
360 #define ERTS_RBT_WANT_DELETE
361 #define ERTS_RBT_WANT_SMALLEST
362 #define ERTS_RBT_WANT_LOOKUP_INSERT
363 #define ERTS_RBT_WANT_REPLACE
364 #define ERTS_RBT_WANT_FOREACH
365 #ifdef ERTS_HLT_HARD_DEBUG
366 # define ERTS_RBT_WANT_LOOKUP
367 #endif
368 #define ERTS_RBT_UNDEF
369
370 #include "erl_rbtree.h"
371
372 /* Use circular list for timers at same time */
373
static ERTS_INLINE void
same_time_list_insert(ErtsHLTimer **root, ErtsHLTimer *tmr)
{
    /*
     * Insert 'tmr' into the circular, doubly linked list of timers
     * expiring at the same time. Only the first element's parent
     * field carries a back pointer to 'root' (tagged with
     * ERTS_HLT_PFLG_SAME_TIME); all other elements carry just the
     * SAME_TIME tag.
     */
    ErtsHLTimer *first = *root;
    if (!first) {
        /* Empty list: 'tmr' becomes the only element, linked to itself. */
        ERTS_HLT_ASSERT((((UWord) root) & ERTS_HLT_PFLG_SAME_TIME) == 0);
        tmr->time.tree.parent = ((UWord) root) | ERTS_HLT_PFLG_SAME_TIME;
        tmr->time.tree.u.l.next = tmr;
        tmr->time.tree.u.l.prev = tmr;
        *root = tmr;
    }
    else {
        /* Link 'tmr' in just before 'first', i.e. last in the circle.
         * Note: 'first->prev' must be read into tmr->prev *before* it
         * is overwritten below. */
        tmr->time.tree.parent = ERTS_HLT_PFLG_SAME_TIME;
        tmr->time.tree.u.l.next = first;
        tmr->time.tree.u.l.prev = first->time.tree.u.l.prev;
        first->time.tree.u.l.prev = tmr;
        tmr->time.tree.u.l.prev->time.tree.u.l.next = tmr;
    }
}
393
static ERTS_INLINE void
same_time_list_delete(ErtsHLTimer *tmr)
{
    /*
     * Unlink 'tmr' from its circular same-time list. 'root' below is
     * non-NULL only when 'tmr' is the first element of the list (the
     * only element whose parent field stores the root pointer).
     */
    ErtsHLTimer **root, *next;

    root = (ErtsHLTimer **) (tmr->time.tree.parent & ~ERTS_HLT_PFLG_SAME_TIME);
    next = tmr->time.tree.u.l.next;

    ERTS_HLT_ASSERT((tmr->time.tree.parent
                     == (((UWord) root) | ERTS_HLT_PFLG_SAME_TIME))
                    || (tmr->time.tree.parent
                        == ERTS_HLT_PFLG_SAME_TIME));

    if (next == tmr) {
        /* 'tmr' is the only element; the list becomes empty. */
        ERTS_HLT_ASSERT(root && *root == tmr);
        ERTS_HLT_ASSERT(tmr->time.tree.u.l.prev == tmr);
        *root = NULL;
    }
    else {
        if (root) {
            /* 'tmr' was the first element; promote 'next' to list head
             * and move the root back pointer onto it. */
            ERTS_HLT_ASSERT(*root == tmr);
            *root = next;
            next->time.tree.parent = ((UWord) root) | ERTS_HLT_PFLG_SAME_TIME;
        }
        /* Splice 'tmr' out of the circle. */
        tmr->time.tree.u.l.next->time.tree.u.l.prev = tmr->time.tree.u.l.prev;
        tmr->time.tree.u.l.prev->time.tree.u.l.next = next;
    }
}
422
423 static ERTS_INLINE void
same_time_list_new_root(ErtsHLTimer ** root)424 same_time_list_new_root(ErtsHLTimer **root)
425 {
426 ErtsHLTimer *tmr = *root;
427 if (tmr) {
428 ERTS_HLT_ASSERT(root);
429 tmr->time.tree.parent = ((UWord) root) | ERTS_HLT_PFLG_SAME_TIME;
430 }
431 }
432
static ERTS_INLINE int
same_time_list_foreach_destroy_yielding(ErtsHLTimer **root,
                                        void (*op)(ErtsHLTimer *, void *),
                                        void *arg,
                                        ERTS_TMR_TIMEOUT_YIELD_STATE_T *ys,
                                        Sint ylimit)
{
    /*
     * Apply 'op' to (and thereby consume) the timers on the circular
     * same-time list, at most 'ylimit' at a time. Returns 0 when the
     * whole list has been processed and !0 when yielding; in the
     * latter case the unprocessed tail has been re-linked into a new
     * circular list at '*root', so a subsequent call continues where
     * this one left off.
     *
     * NOTE(review): 'ys' is not referenced in this body; the resume
     * state appears to live entirely in '*root'.
     */
    Sint ycnt = ylimit;
    ErtsHLTimer *end, *tmr = *root;
    if (!tmr)
        return 0;

    ERTS_HLT_ASSERT(tmr->time.tree.parent
                    == (((UWord) root) | ERTS_HLT_PFLG_SAME_TIME));

    /* Break the circle: the last element's next becomes NULL so the
     * walk below has a termination marker. */
    end = tmr->time.tree.u.l.prev;
    end->time.tree.u.l.next = NULL;

    while (1) {
        ErtsHLTimer *op_tmr = tmr;

        ERTS_HLT_ASSERT((tmr->time.tree.parent
                         == (((UWord) root) | ERTS_HLT_PFLG_SAME_TIME))
                        || (tmr->time.tree.parent
                            == ERTS_HLT_PFLG_SAME_TIME));

        /* Read 'next' before calling 'op', which may free the timer. */
        tmr = tmr->time.tree.u.l.next;
        (*op)(op_tmr, arg);
        if (!tmr) {
            /* Reached the broken end; everything processed. */
            *root = NULL;
            return 0;
        }
        if (--ycnt <= 0) {
            /* Make new circle of timers left to process... */
            *root = tmr;
            end->time.tree.u.l.next = tmr;
            tmr->time.tree.u.l.prev = end;
            tmr->time.tree.parent = ((UWord) root) | ERTS_HLT_PFLG_SAME_TIME;
            return 1;
        }
    }
}
475
476 static ERTS_INLINE void
same_time_list_foreach(ErtsHLTimer * root,void (* op)(ErtsHLTimer *,void *),void * arg)477 same_time_list_foreach(ErtsHLTimer *root,
478 void (*op)(ErtsHLTimer *, void *),
479 void *arg)
480 {
481 if (root) {
482 ErtsHLTimer *tmr = root;
483 do {
484 (*op)(tmr, arg);
485 tmr = tmr->time.tree.u.l.next;
486 } while (root != tmr);
487 }
488 }
489
490 #ifdef ERTS_HLT_HARD_DEBUG
491
492 static ERTS_INLINE ErtsHLTimer *
same_time_list_lookup(ErtsHLTimer * root,ErtsHLTimer * x)493 same_time_list_lookup(ErtsHLTimer *root, ErtsHLTimer *x)
494 {
495 if (root) {
496 ErtsHLTimer *tmr = root;
497 do {
498 if (tmr == x)
499 return tmr;
500 tmr = tmr->time.tree.u.l.next;
501 } while (root != tmr);
502 }
503 return NULL;
504 }
505
506 #endif /* ERTS_HLT_HARD_DEBUG */
507
508 #define ERTS_BTM_HLT2REFN(T) ((T)->btm.refn)
509
510 #define ERTS_RBT_PREFIX btm
511 #define ERTS_RBT_T ErtsBifTimer
512 #define ERTS_RBT_KEY_T Uint32 *
513 #define ERTS_RBT_FLAGS_T UWord
514 #define ERTS_RBT_INIT_EMPTY_TNODE(T) \
515 do { \
516 (T)->btm.tree.parent = (UWord) NULL; \
517 (T)->btm.tree.right = NULL; \
518 (T)->btm.tree.left = NULL; \
519 } while (0)
520 #define ERTS_RBT_IS_RED(T) \
521 ((int) ((T)->btm.tree.parent & ERTS_HLT_PFLG_RED))
522 #define ERTS_RBT_SET_RED(T) \
523 ((T)->btm.tree.parent |= ERTS_HLT_PFLG_RED)
524 #define ERTS_RBT_IS_BLACK(T) \
525 (!ERTS_RBT_IS_RED((T)))
526 #define ERTS_RBT_SET_BLACK(T) \
527 ((T)->btm.tree.parent &= ~ERTS_HLT_PFLG_RED)
528 #define ERTS_RBT_GET_FLAGS(T) \
529 ((T)->btm.tree.parent & ERTS_HLT_PFLGS_MASK)
530 #define ERTS_RBT_SET_FLAGS(T, F) \
531 do { \
532 ERTS_HLT_ASSERT((((UWord) (F)) & ~ERTS_HLT_PFLGS_MASK) == 0); \
533 (T)->btm.tree.parent &= ~ERTS_HLT_PFLGS_MASK; \
534 (T)->btm.tree.parent |= (F); \
535 } while (0)
536 #define ERTS_RBT_GET_PARENT(T) \
537 ((ErtsBifTimer *) ((T)->btm.tree.parent & ~ERTS_HLT_PFLGS_MASK))
538 #define ERTS_RBT_SET_PARENT(T, P) \
539 do { \
540 ERTS_HLT_ASSERT((((UWord) (P)) & ERTS_HLT_PFLGS_MASK) == 0); \
541 (T)->btm.tree.parent &= ERTS_HLT_PFLGS_MASK; \
542 (T)->btm.tree.parent |= (UWord) (P); \
543 } while (0)
544 #define ERTS_RBT_GET_RIGHT(T) ((T)->btm.tree.right)
545 #define ERTS_RBT_SET_RIGHT(T, R) ((T)->btm.tree.right = (R))
546 #define ERTS_RBT_GET_LEFT(T) ((T)->btm.tree.left)
547 #define ERTS_RBT_SET_LEFT(T, L) ((T)->btm.tree.left = (L))
548 #define ERTS_RBT_GET_KEY(T) ERTS_BTM_HLT2REFN((T))
549 #define ERTS_RBT_IS_LT(KX, KY) refn_is_lt((KX), (KY))
550 #define ERTS_RBT_IS_EQ(KX, KY) refn_is_eq((KX), (KY))
551 #define ERTS_RBT_WANT_DELETE
552 #define ERTS_RBT_WANT_INSERT
553 #define ERTS_RBT_WANT_LOOKUP
554 #define ERTS_RBT_WANT_FOREACH
555 #define ERTS_RBT_UNDEF
556
557 #include "erl_rbtree.h"
558
559 #define ERTS_RBT_PREFIX proc_btm
560 #define ERTS_RBT_T ErtsBifTimer
561 #define ERTS_RBT_KEY_T Uint32 *
562 #define ERTS_RBT_FLAGS_T UWord
563 #define ERTS_RBT_INIT_EMPTY_TNODE(T) \
564 do { \
565 (T)->btm.proc_tree.parent = (UWord) NULL; \
566 (T)->btm.proc_tree.right = NULL; \
567 (T)->btm.proc_tree.left = NULL; \
568 } while (0)
569 #define ERTS_RBT_IS_RED(T) \
570 ((int) ((T)->btm.proc_tree.parent & ERTS_HLT_PFLG_RED))
571 #define ERTS_RBT_SET_RED(T) \
572 ((T)->btm.proc_tree.parent |= ERTS_HLT_PFLG_RED)
573 #define ERTS_RBT_IS_BLACK(T) \
574 (!ERTS_RBT_IS_RED((T)))
575 #define ERTS_RBT_SET_BLACK(T) \
576 ((T)->btm.proc_tree.parent &= ~ERTS_HLT_PFLG_RED)
577 #define ERTS_RBT_GET_FLAGS(T) \
578 ((T)->btm.proc_tree.parent & ERTS_HLT_PFLGS_MASK)
579 #define ERTS_RBT_SET_FLAGS(T, F) \
580 do { \
581 ERTS_HLT_ASSERT((((UWord) (F)) & ~ERTS_HLT_PFLGS_MASK) == 0); \
582 (T)->btm.proc_tree.parent &= ~ERTS_HLT_PFLGS_MASK; \
583 (T)->btm.proc_tree.parent |= (F); \
584 } while (0)
585 #define ERTS_RBT_GET_PARENT(T) \
586 ((ErtsBifTimer *) ((T)->btm.proc_tree.parent & ~ERTS_HLT_PFLGS_MASK))
587 #define ERTS_RBT_SET_PARENT(T, P) \
588 do { \
589 ERTS_HLT_ASSERT((((UWord) (P)) & ERTS_HLT_PFLGS_MASK) == 0); \
590 (T)->btm.proc_tree.parent &= ERTS_HLT_PFLGS_MASK; \
591 (T)->btm.proc_tree.parent |= (UWord) (P); \
592 } while (0)
593 #define ERTS_RBT_GET_RIGHT(T) ((T)->btm.proc_tree.right)
594 #define ERTS_RBT_SET_RIGHT(T, R) ((T)->btm.proc_tree.right = (R))
595 #define ERTS_RBT_GET_LEFT(T) ((T)->btm.proc_tree.left)
596 #define ERTS_RBT_SET_LEFT(T, L) ((T)->btm.proc_tree.left = (L))
597 #define ERTS_RBT_GET_KEY(T) ERTS_BTM_HLT2REFN((T))
598 #define ERTS_RBT_IS_LT(KX, KY) refn_is_lt((KX), (KY))
599 #define ERTS_RBT_IS_EQ(KX, KY) refn_is_eq((KX), (KY))
600 #define ERTS_RBT_WANT_DELETE
601 #define ERTS_RBT_WANT_INSERT
602 #define ERTS_RBT_WANT_LOOKUP
603 #define ERTS_RBT_WANT_FOREACH_DESTROY_YIELDING
604 #define ERTS_RBT_UNDEF
605
606 #include "erl_rbtree.h"
607
608 static void init_canceled_queue(ErtsHLTCncldTmrQ *cq);
609
void
erts_hl_timer_init(void)
{
    /* One-time global init of the scheduler-preferred timer allocators
     * (timer wheel timers and pre-allocated BIF timers). */
    init_tw_timer_alloc();
    init_bif_timer_pre_alloc();
}
616
617 ErtsHLTimerService *
erts_create_timer_service(void)618 erts_create_timer_service(void)
619 {
620 ErtsYieldingTimeoutState init_yield = ERTS_TMR_YIELDING_TIMEOUT_STATE_INITER;
621 ErtsHLTimerService *srv;
622
623 srv = erts_alloc_permanent_cache_aligned(ERTS_ALC_T_TIMER_SERVICE,
624 sizeof(ErtsHLTimerService));
625 srv->time_tree = NULL;
626 srv->btm_tree = NULL;
627 srv->next_timeout = NULL;
628 srv->yield = init_yield;
629 erts_twheel_init_timer(&srv->service_timer);
630
631 init_canceled_queue(&srv->canceled_queue);
632
633 return srv;
634 }
635
636 size_t
erts_timer_type_size(ErtsAlcType_t type)637 erts_timer_type_size(ErtsAlcType_t type)
638 {
639 switch (type) {
640 case ERTS_ALC_T_LL_PTIMER: return sizeof(ErtsTWTimer);
641 case ERTS_ALC_T_HL_PTIMER: return sizeof(ErtsHLTimer);
642 case ERTS_ALC_T_BIF_TIMER: return sizeof(ErtsBifTimer);
643 default: ERTS_INTERNAL_ERROR("Unknown type");
644 }
645 return 0;
646 }
647
static ERTS_INLINE ErtsMonotonicTime
get_timeout_pos(ErtsMonotonicTime now, ErtsMonotonicTime msec)
{
    /*
     * Convert a relative timeout of 'msec' milliseconds from 'now'
     * (monotonic time) into an absolute timeout position in clock
     * ticks. Non-positive timeouts map to the current tick.
     */
    ErtsMonotonicTime timeout_pos;
    if (msec <= 0)
        return ERTS_MONOTONIC_TO_CLKTCKS(now);
    /* The '(now-1) ... + 1' dance rounds the position up to the next
     * tick boundary so that at least 'msec' ms elapse before timeout. */
    timeout_pos = ERTS_MONOTONIC_TO_CLKTCKS(now-1);
    timeout_pos += ERTS_MSEC_TO_CLKTCKS(msec) + 1;
    return timeout_pos;
}
658
static ERTS_INLINE Sint64
get_time_left(ErtsSchedulerData *esdp, ErtsMonotonicTime timeout_pos)
{
    /*
     * Milliseconds remaining until the absolute clock tick position
     * 'timeout_pos'; 0 if it has already been reached or passed.
     */
    ErtsMonotonicTime now = erts_get_monotonic_time(esdp);

    /* Round the current time up to the next tick boundary (mirrors
     * the rounding in get_timeout_pos()). */
    now = ERTS_MONOTONIC_TO_CLKTCKS(now-1)+1;
    if (timeout_pos <= now)
        return (Sint64) 0;
    return (Sint64) ERTS_CLKTCKS_TO_MSEC(timeout_pos - now);
}
669
static ERTS_INLINE int
proc_timeout_common(Process *proc, void *tmr)
{
    /*
     * Deliver a timer timeout to 'proc': atomically mark the process
     * timer field as TIMEDOUT, but only if 'tmr' is still the
     * registered timer. Returns !0 if this call won the race and
     * performed the timeout; 0 if the timer was concurrently
     * canceled/replaced.
     */
    if (tmr == (void *) erts_atomic_cmpxchg_mb(&proc->common.timer,
                                               ERTS_PTMR_TIMEDOUT,
                                               (erts_aint_t) tmr)) {
        erts_aint32_t state;
        /* NOTE(review): the lock/unlock of MSG_RECEIVE presumably
         * serializes with a concurrent receive-with-timeout before the
         * state is read — confirm against erl_process.c. */
        erts_proc_lock(proc, ERTS_PROC_LOCKS_MSG_RECEIVE);
        state = erts_atomic32_read_acqb(&proc->state);
        erts_proc_unlock(proc, ERTS_PROC_LOCKS_MSG_RECEIVE);
        /* Wake the process unless it is already running or exiting. */
        if (!(state & (ERTS_PSFLG_ACTIVE|ERTS_PSFLG_EXITING)))
            erts_schedule_process(proc, state, 0);
        return 1;
    }
    return 0;
}
686
static ERTS_INLINE int
port_timeout_common(Port *port, void *tmr)
{
    /*
     * Deliver a timer timeout to 'port': atomically mark the port
     * timer field as TIMEDOUT (only if 'tmr' is still the registered
     * timer) and schedule a timeout task on the port. Returns !0 if
     * this call performed the timeout; 0 if the timer was concurrently
     * canceled/replaced.
     */
    if (tmr == (void *) erts_atomic_cmpxchg_mb(&port->common.timer,
                                               ERTS_PTMR_TIMEDOUT,
                                               (erts_aint_t) tmr)) {
        erts_port_task_schedule(port->common.id,
                                &port->timeout_task,
                                ERTS_PORT_TASK_TIMEOUT);
        return 1;
    }
    return 0;
}
700
static ERTS_INLINE erts_aint_t
init_btm_specifics(ErtsSchedulerData *esdp,
                   ErtsBifTimer *tmr, Eterm msg,
                   Uint32 *refn
    )
{
    /*
     * Initialize the BIF timer specific parts of 'tmr': the timeout
     * message (copied into its own heap fragment unless immediate),
     * the timer reference words, and membership in this scheduler's
     * btm tree. Returns the number of extra references taken
     * (currently always 0).
     */
    Uint hsz = is_immed(msg) ? ((Uint) 0) : size_object(msg);
    int refc;
    if (!hsz) {
        /* Immediate term; no heap fragment needed. */
        tmr->btm.message = msg;
        tmr->btm.bp = NULL;
    }
    else {
        /* Copy the message into a heap fragment owned by the timer. */
        ErlHeapFragment *bp = new_message_buffer(hsz);
        Eterm *hp = bp->mem;
        tmr->btm.message = copy_struct(msg, hsz, &hp, &bp->off_heap);
        tmr->btm.bp = bp;
    }
    refc = 0;
    tmr->btm.refn[0] = refn[0];
    tmr->btm.refn[1] = refn[1];
    tmr->btm.refn[2] = refn[2];

    /* Not yet a member of any process' bif_timers tree... */
    tmr->btm.proc_tree.parent = ERTS_HLT_PFIELD_NOT_IN_TABLE;

    btm_rbt_insert(&esdp->timer_service->btm_tree, tmr);

    erts_atomic32_init_nob(&tmr->btm.state, ERTS_TMR_STATE_ACTIVE);
    return refc; /* refc from magic binary... */
}
731
732 static void tw_bif_timer_timeout(void *vbtmp);
733
734 static ERTS_INLINE void
timer_destroy(ErtsTimer * tmr,int twt,int btm)735 timer_destroy(ErtsTimer *tmr, int twt, int btm)
736 {
737 if (!btm) {
738 if (twt)
739 tw_timer_free(&tmr->twt);
740 else
741 erts_free(ERTS_ALC_T_HL_PTIMER, tmr);
742 }
743 else {
744 if (tmr->head.roflgs & ERTS_TMR_ROFLG_PRE_ALC)
745 bif_timer_pre_free(&tmr->btm);
746 else
747 erts_free(ERTS_ALC_T_BIF_TIMER, &tmr->btm);
748 }
749 }
750
static ERTS_INLINE void
timer_pre_dec_refc(ErtsTimer *tmr)
{
    /*
     * Decrement the reference count without handling a drop to zero;
     * the caller must hold at least one additional reference (asserted
     * in debug builds).
     */
#ifdef ERTS_HLT_DEBUG
    erts_aint_t refc;
    refc = erts_atomic32_dec_read_nob(&tmr->head.refc);
    ERTS_HLT_ASSERT(refc > 0);
#else
    erts_atomic32_dec_nob(&tmr->head.refc);
#endif
}
762
763 /*
764 * Basic timer wheel timer stuff
765 */
766
767 static void
scheduled_tw_timer_destroy(void * vtmr)768 scheduled_tw_timer_destroy(void *vtmr)
769 {
770 ErtsTimer * tmr = (ErtsTimer *) vtmr;
771 int btm = !!(tmr->head.roflgs & ERTS_TMR_ROFLG_BIF_TMR);
772 timer_destroy((ErtsTimer *) vtmr, 1, btm);
773 }
774
775 static void
schedule_tw_timer_destroy(ErtsTWTimer * tmr)776 schedule_tw_timer_destroy(ErtsTWTimer *tmr)
777 {
778 Uint size;
779 /*
780 * Reference to process/port can be
781 * dropped at once...
782 */
783 if (tmr->head.roflgs & ERTS_TMR_ROFLG_PROC)
784 erts_proc_dec_refc(tmr->head.receiver.proc);
785 else if (tmr->head.roflgs & ERTS_TMR_ROFLG_PORT)
786 erts_port_dec_refc(tmr->head.receiver.port);
787
788 if (!(tmr->head.roflgs & ERTS_TMR_ROFLG_BIF_TMR))
789 size = sizeof(ErtsHLTimer);
790 else {
791 /* Message buffer already dropped... */
792 size = sizeof(ErtsBifTimer);
793 }
794
795 erts_schedule_thr_prgr_later_cleanup_op(
796 scheduled_tw_timer_destroy,
797 (void *) tmr,
798 &tmr->u.cleanup,
799 size);
800 }
801
static ERTS_INLINE void
tw_timer_dec_refc(ErtsTWTimer *tmr)
{
    /*
     * Drop one reference to a timer wheel timer; the thread dropping
     * the last reference schedules its destruction. The explicit
     * barrier pairs with the release semantics of the decrement so all
     * prior accesses to the timer complete before it is torn down.
     */
    if (erts_atomic32_dec_read_relb(&tmr->head.refc) == 0) {
        ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore);
        schedule_tw_timer_destroy(tmr);
    }
}
810
static void
tw_proc_timeout(void *vtwtp)
{
    /*
     * Timer wheel timeout callback for a process timer. The timer
     * wheel's reference is always dropped; if the timeout was actually
     * delivered (i.e. not raced by a cancel), the receiver's reference
     * is dropped as well.
     */
    ErtsTWTimer *twtp = (ErtsTWTimer *) vtwtp;
    Process *proc = twtp->head.receiver.proc;
    if (proc_timeout_common(proc, vtwtp))
        tw_timer_dec_refc(twtp);
    tw_timer_dec_refc(twtp);
}
820
static void
tw_port_timeout(void *vtwtp)
{
    /*
     * Timer wheel timeout callback for a port timer. The timer
     * wheel's reference is always dropped; if the timeout was actually
     * delivered (i.e. not raced by a cancel), the receiver's reference
     * is dropped as well.
     */
    ErtsTWTimer *twtp = (ErtsTWTimer *) vtwtp;
    Port *port = twtp->head.receiver.port;
    if (port_timeout_common(port, vtwtp))
        tw_timer_dec_refc(twtp);
    tw_timer_dec_refc(twtp);
}
830
static void
cancel_tw_timer(ErtsSchedulerData *esdp, ErtsTWTimer *tmr)
{
    /*
     * Cancel a timer wheel timer and drop the timer wheel's reference.
     * Must run on the scheduler owning the timer (asserted via the
     * scheduler id stored in the low bits of roflgs).
     */
    ERTS_HLT_ASSERT((tmr->head.roflgs & ERTS_TMR_ROFLG_SID_MASK)
                    == (Uint32) esdp->no);
    erts_twheel_cancel_timer(esdp->timer_wheel, &tmr->u.tw_tmr);
    tw_timer_dec_refc(tmr);
}
839
static void
tw_callback_timeout(void *vtwtp)
{
    /*
     * Timer wheel timeout callback for a callback timer. The callback
     * and its argument are read out and the timer's reference dropped
     * *before* the callback runs, since the timer may be freed at that
     * point; the callback must not touch the timer.
     */
    ErtsTWTimer *twtp = (ErtsTWTimer *) vtwtp;
    void (*callback)(void *) = twtp->head.receiver.callback;
    void *arg = twtp->head.u.arg;
    tw_timer_dec_refc(twtp);
    (*callback)(arg);
}
849
static ErtsTimer *
create_tw_timer(ErtsSchedulerData *esdp,
                ErtsMonotonicTime timeout_pos,
                int short_time, ErtsTmrType type,
                void *rcvrp, Eterm rcvr,
                Eterm msg,
                Uint32 *refn,
                void (*callback)(void *), void *arg)
{
    /*
     * Create and arm a timer wheel based timer of the given type
     * (process, port, callback, or BIF timer). Matches the
     * ErtsCreateTimerFunc signature. Returns the armed timer.
     */
    ErtsTWTimer *tmr;
    void (*timeout_func)(void *);
    erts_aint32_t refc;

    if (type != ERTS_TMR_BIF) {
        tmr = tw_timer_alloc();
        tmr->head.roflgs = 0;
    }
    else {
        if (short_time) {
            /* Short BIF timers try the pre-allocated pool first... */
            tmr = (ErtsTWTimer *) bif_timer_pre_alloc();
            if (!tmr)
                goto alloc_bif_timer;
            tmr->head.roflgs = (ERTS_TMR_ROFLG_BIF_TMR
                                | ERTS_TMR_ROFLG_PRE_ALC);
        }
        else {
        alloc_bif_timer:
            /* ...and fall back to a regular allocation. Note that BIF
             * timers are allocated as full ErtsBifTimer. */
            tmr = (ErtsTWTimer *) erts_alloc(ERTS_ALC_T_BIF_TIMER,
                                             sizeof(ErtsBifTimer));
            tmr->head.roflgs = ERTS_TMR_ROFLG_BIF_TMR;
        }
    }

    erts_twheel_init_timer(&tmr->u.tw_tmr);
    /* Scheduler id is kept in the low bits of roflgs. */
    tmr->head.roflgs |= (Uint32) esdp->no;
    ERTS_HLT_ASSERT((((Uint32) esdp->no)
                     & ~ERTS_TMR_ROFLG_SID_MASK) == 0);

    switch (type) {

    case ERTS_TMR_PROC:
        tmr->head.receiver.proc = (Process *) rcvrp;
        tmr->head.roflgs |= ERTS_TMR_ROFLG_PROC;
        timeout_func = tw_proc_timeout;
        erts_proc_inc_refc((Process *) rcvrp);
        refc = 2; /* one for the timer wheel, one for the receiver */
        break;

    case ERTS_TMR_PORT:
        tmr->head.receiver.port = (Port *) rcvrp;
        tmr->head.roflgs |= ERTS_TMR_ROFLG_PORT;
        timeout_func = tw_port_timeout;
        erts_port_inc_refc((Port *) rcvrp);
        refc = 2; /* one for the timer wheel, one for the receiver */
        break;

    case ERTS_TMR_CALLBACK:
        tmr->head.u.arg = arg;
        tmr->head.receiver.callback = callback;

        tmr->head.roflgs |= ERTS_TMR_ROFLG_CALLBACK;
        timeout_func = tw_callback_timeout;
        refc = 1; /* timer wheel reference only */
        break;

    case ERTS_TMR_BIF:

        timeout_func = tw_bif_timer_timeout;
        if (is_internal_pid(rcvr)) {
            tmr->head.roflgs |= ERTS_TMR_ROFLG_PROC;
            tmr->head.receiver.proc = (Process *) rcvrp;
            refc = 2;
        }
        else {
            /* Receiver is a registered name; resolved at timeout. */
            ERTS_HLT_ASSERT(is_atom(rcvr));
            tmr->head.roflgs |= ERTS_TMR_ROFLG_REG_NAME;
            tmr->head.receiver.name = (Eterm) rcvr;
            refc = 1;
        }

        refc += init_btm_specifics(esdp,
                                   (ErtsBifTimer *) tmr,
                                   msg,
                                   refn
            );
        break;

    default:
        ERTS_INTERNAL_ERROR("Unsupported timer type");
        return NULL;
    }

    erts_atomic32_init_nob(&tmr->head.refc, refc);

    erts_twheel_set_timer(esdp->timer_wheel,
                          &tmr->u.tw_tmr,
                          timeout_func,
                          tmr,
                          timeout_pos);

    return (ErtsTimer *) tmr;
}
952
953 /*
954 * Basic high level timer stuff
955 */
956
957 static void
scheduled_hl_timer_destroy(void * vtmr)958 scheduled_hl_timer_destroy(void *vtmr)
959 {
960 ErtsTimer * tmr = (ErtsTimer *) vtmr;
961 int btm = !!(tmr->head.roflgs & ERTS_TMR_ROFLG_BIF_TMR);
962 timer_destroy((ErtsTimer *) vtmr, 0, btm);
963 }
964
static void
schedule_hl_timer_destroy(ErtsHLTimer *tmr, Uint32 roflgs)
{
    /*
     * Schedule deallocation of a high level timer to after a full
     * round of thread progress, so no other thread can still be
     * referencing it when it is freed. Must only be called once the
     * reference count has reached zero (asserted below).
     */
    UWord size;

    /*
     * Reference to process/port can be dropped
     * at once...
     */

    ERTS_HLT_ASSERT(erts_atomic32_read_nob(&tmr->head.refc) == 0);

    if (roflgs & ERTS_TMR_ROFLG_REG_NAME) {
        /* Registered name receiver holds no reference; nothing to drop. */
        ERTS_HLT_ASSERT(is_atom(tmr->head.receiver.name));
    }
    else if (roflgs & ERTS_TMR_ROFLG_PROC) {
        ERTS_HLT_ASSERT(tmr->head.receiver.proc);
        erts_proc_dec_refc(tmr->head.receiver.proc);
    }
    else if (roflgs & ERTS_TMR_ROFLG_PORT) {
        ERTS_HLT_ASSERT(tmr->head.receiver.port);
        erts_port_dec_refc(tmr->head.receiver.port);
    }

    /* 'size' is only an accounting hint for the cleanup scheduler. */
    if (!(roflgs & ERTS_TMR_ROFLG_BIF_TMR))
        size = sizeof(ErtsHLTimer);
    else {
        /* Message buffer already dropped... */
        size = sizeof(ErtsBifTimer);
    }

    erts_schedule_thr_prgr_later_cleanup_op(
        scheduled_hl_timer_destroy, tmr,
        &tmr->time.cleanup, size);
}
1000
static ERTS_INLINE void
hl_timer_dec_refc(ErtsHLTimer *tmr, Uint32 roflgs)
{
    /*
     * Drop one reference to a high level timer; the thread dropping
     * the last reference schedules its destruction. The explicit
     * barrier pairs with the release semantics of the decrement so all
     * prior accesses to the timer complete before it is torn down.
     */
    if (erts_atomic32_dec_read_relb(&tmr->head.refc) == 0) {
        ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore);
        schedule_hl_timer_destroy(tmr, roflgs);
    }
}
1009
1010 static void hlt_service_timeout(void *vesdp);
1011 static void handle_canceled_queue(ErtsSchedulerData *esdp,
1012 ErtsHLTCncldTmrQ *cq,
1013 int use_limit,
1014 int ops_limit,
1015 int *need_thr_progress,
1016 ErtsThrPrgrVal *thr_prgr_p,
1017 int *need_more_work);
1018
static ERTS_INLINE void
check_canceled_queue(ErtsSchedulerData *esdp, ErtsHLTimerService *srv)
{
    /*
     * Opportunistically reclaim a small batch of canceled timers when
     * a new timer is created. Compiled to a no-op unless
     * ERTS_TMR_CHECK_CANCEL_ON_CREATE is enabled (it is 0 by default;
     * see the top of this file).
     */
#if ERTS_TMR_CHECK_CANCEL_ON_CREATE
    ErtsHLTCncldTmrQ *cq = &srv->canceled_queue;
    if (cq->head.first != cq->head.unref_end)
        handle_canceled_queue(esdp, cq, 1,
                              ERTS_TMR_CANCELED_TIMER_SMALL_LIMIT,
                              NULL, NULL, NULL);
#endif
}
1030
static ERTS_INLINE void
bif_timer_timeout(ErtsHLTimerService *srv,
                  ErtsBifTimer *tmr,
                  Uint32 roflgs)
{
    /*
     * Deliver the timeout of a BIF timer: atomically move it from
     * ACTIVE to TIMED_OUT, send the timeout message to the receiver
     * (unless the timer lost a race against cancellation), and remove
     * it from the tables it is still a member of.
     */
    erts_aint32_t state;

    ERTS_HLT_ASSERT(tmr->type.head.roflgs == roflgs);
    ERTS_HLT_ASSERT(roflgs & ERTS_TMR_ROFLG_BIF_TMR);

    state = erts_atomic32_cmpxchg_acqb(&tmr->btm.state,
                                       ERTS_TMR_STATE_TIMED_OUT,
                                       ERTS_TMR_STATE_ACTIVE);

    ERTS_HLT_ASSERT(state == ERTS_TMR_STATE_CANCELED
                    || state == ERTS_TMR_STATE_ACTIVE);

    if (state == ERTS_TMR_STATE_ACTIVE) {
        Process *proc;

        if (roflgs & ERTS_TMR_ROFLG_REG_NAME) {
            /* Receiver is a registered name; resolve it now. A missing
             * registration yields proc == NULL and the message is
             * silently dropped below. */
            Eterm term;
            term = tmr->type.head.receiver.name;
            ERTS_HLT_ASSERT(is_atom(term));
            term = erts_whereis_name_to_id(NULL, term);
            proc = erts_proc_lookup(term);
        }
        else {
            ERTS_HLT_ASSERT(roflgs & ERTS_TMR_ROFLG_PROC);
            proc = tmr->type.head.receiver.proc;
            ERTS_HLT_ASSERT(proc);
        }
        if (proc) {
            int dec_refc = 0;
            ErtsMessage *mp = erts_alloc_message(0, NULL);
            /* Hand the timer's heap fragment over to the message. */
            mp->data.heap_frag = tmr->btm.bp;
            tmr->btm.bp = NULL;
            erts_queue_message(proc, 0, mp, tmr->btm.message,
                               am_clock_service);
            erts_proc_lock(proc, ERTS_PROC_LOCK_BTM);
            /* If the process is exiting do not disturb the cleanup... */
            if (!ERTS_PROC_IS_EXITING(proc)) {
                if (tmr->btm.proc_tree.parent != ERTS_HLT_PFIELD_NOT_IN_TABLE) {
                    proc_btm_rbt_delete(&proc->bif_timers, tmr);
                    tmr->btm.proc_tree.parent = ERTS_HLT_PFIELD_NOT_IN_TABLE;
                    dec_refc = 1;
                }
            }
            erts_proc_unlock(proc, ERTS_PROC_LOCK_BTM);
            if (dec_refc)
                timer_pre_dec_refc((ErtsTimer *) tmr);
        }
        /* Message not handed over (no receiver); drop the fragment. */
        if (tmr->btm.bp)
            free_message_buffer(tmr->btm.bp);
    }

    /* Remove from the service's btm tree if still a member. */
    if (tmr->btm.tree.parent != ERTS_HLT_PFIELD_NOT_IN_TABLE) {
        btm_rbt_delete(&srv->btm_tree, tmr);
        tmr->btm.tree.parent = ERTS_HLT_PFIELD_NOT_IN_TABLE;
    }

}
1093
/*
 * Timer-wheel callback for a BIF timer: deliver the timeout via
 * bif_timer_timeout() and drop the reference held for the callback.
 */
static void
tw_bif_timer_timeout(void *vbtmp)
{
    ErtsSchedulerData *esdp = erts_get_scheduler_data();
    ErtsHLTimerService *srv = esdp->timer_service;
    ErtsBifTimer *btmp = (ErtsBifTimer *) vbtmp;
    bif_timer_timeout(srv, btmp, btmp->type.head.roflgs);
    tw_timer_dec_refc(&btmp->type.twt);
}
1103
/*
 * Create a high level timer of type 'type' and link it into the
 * scheduler-local timer service (time tree plus, when it becomes the
 * earliest timeout, the service timer in the timer wheel).
 *
 * Receiver setup per type:
 *  - ERTS_TMR_PROC/PORT: receiver refc is bumped; timer keeps a
 *    direct pointer to the receiver ('rcvrp').
 *  - ERTS_TMR_CALLBACK: 'callback'/'arg' are invoked on timeout.
 *  - ERTS_TMR_BIF: receiver is a pid or a registered name ('rcvr');
 *    'short_time' enables use of the pre-allocated timer pool;
 *    'msg'/'refn' are stored via init_btm_specifics().
 *
 * Returns the new timer cast to ErtsTimer * (NULL only after an
 * internal error on an unsupported type).
 */
static ErtsTimer *
create_hl_timer(ErtsSchedulerData *esdp,
                ErtsMonotonicTime timeout_pos,
                int short_time, ErtsTmrType type,
                void *rcvrp, Eterm rcvr,
                Eterm msg,
                Uint32 *refn,
                void (*callback)(void *), void *arg)
{
    ErtsHLTimerService *srv = esdp->timer_service;
    ErtsHLTimer *tmr, *st_tmr;
    erts_aint32_t refc;
    Uint32 roflgs;

    ERTS_HLT_HDBG_CHK_SRV(srv);

    /* Opportunistically clean up timers canceled on other schedulers
     * that have been passed back to this one. */
    check_canceled_queue(esdp, srv);

    ERTS_HLT_ASSERT((esdp->no & ~ERTS_TMR_ROFLG_SID_MASK) == 0);

    /* Scheduler id is embedded in the read-only flag word. */
    roflgs = ((Uint32) esdp->no) | ERTS_TMR_ROFLG_HLT;

    if (type != ERTS_TMR_BIF) {

        tmr = erts_alloc(ERTS_ALC_T_HL_PTIMER,
                         sizeof(ErtsHLTimer));
        tmr->timeout = timeout_pos;

        switch (type) {

        case ERTS_TMR_PROC:
            ERTS_HLT_ASSERT(is_internal_pid(rcvr));

            erts_proc_inc_refc((Process *) rcvrp);
            tmr->head.receiver.proc = (Process *) rcvrp;
            roflgs |= ERTS_TMR_ROFLG_PROC;
            refc = 2;
            break;

        case ERTS_TMR_PORT:
            ERTS_HLT_ASSERT(is_internal_port(rcvr));
            erts_port_inc_refc((Port *) rcvrp);
            tmr->head.receiver.port = (Port *) rcvrp;
            roflgs |= ERTS_TMR_ROFLG_PORT;
            refc = 2;
            break;

        case ERTS_TMR_CALLBACK:
            roflgs |= ERTS_TMR_ROFLG_CALLBACK;
            tmr->head.receiver.callback = callback;
            tmr->head.u.arg = arg;
            refc = 1;
            break;

        default:
            ERTS_INTERNAL_ERROR("Unsupported timer type");
            return NULL;
        }

    }
    else { /* ERTS_TMR_BIF */

        /* Short-lived BIF timers try the pre-allocated pool first;
         * fall back to the ordinary allocator when exhausted. */
        if (short_time) {
            tmr = (ErtsHLTimer *) bif_timer_pre_alloc();
            if (!tmr)
                goto alloc_bif_timer;
            roflgs |= ERTS_TMR_ROFLG_PRE_ALC;
        }
        else {
        alloc_bif_timer:
            tmr = (ErtsHLTimer *) erts_alloc(ERTS_ALC_T_BIF_TIMER,
                                             sizeof(ErtsBifTimer));
        }

        tmr->timeout = timeout_pos;

        roflgs |= ERTS_TMR_ROFLG_BIF_TMR;
        if (is_internal_pid(rcvr)) {
            roflgs |= ERTS_TMR_ROFLG_PROC;
            tmr->head.receiver.proc = (Process *) rcvrp;
            refc = 2;
        }
        else {
            /* Registered name; resolved when the timer fires. */
            ERTS_HLT_ASSERT(is_atom(rcvr));
            roflgs |= ERTS_TMR_ROFLG_REG_NAME;
            tmr->head.receiver.name = rcvr;
            refc = 1;
        }

        refc += init_btm_specifics(esdp,
                                   (ErtsBifTimer *) tmr,
                                   msg,
                                   refn
                                   );
    }

    tmr->head.roflgs = roflgs;
    erts_atomic32_init_nob(&tmr->head.refc, refc);

    /* New earliest timeout?  Re-arm the service timer in the wheel. */
    if (!srv->next_timeout
        || tmr->timeout < srv->next_timeout->timeout) {
        if (srv->next_timeout)
            erts_twheel_cancel_timer(esdp->timer_wheel,
                                     &srv->service_timer);
        erts_twheel_set_timer(esdp->timer_wheel,
                              &srv->service_timer,
                              hlt_service_timeout,
                              (void *) esdp,
                              tmr->timeout);
        srv->next_timeout = tmr;
    }

    /* Insert into the time tree; timers with an identical timeout are
     * kept in a list hanging off the tree node. */
    st_tmr = time_rbt_lookup_insert(&srv->time_tree, tmr);
    tmr->time.tree.same_time = st_tmr;
    if (st_tmr)
        same_time_list_insert(&st_tmr->time.tree.same_time, tmr);

#ifdef ERTS_HLT_HARD_DEBUG
    tmr->pending_timeout = 0;
#endif

    ERTS_HLT_HDBG_CHK_SRV(srv);

    return (ErtsTimer *) tmr;
}
1229
/*
 * Timeout of a process timer; drop the timer reference when
 * proc_timeout_common() reports that we should.
 */
static ERTS_INLINE void
hlt_proc_timeout(ErtsHLTimer *tmr)
{
    if (proc_timeout_common(tmr->head.receiver.proc, (void *) tmr))
        hl_timer_dec_refc(tmr, tmr->head.roflgs);
}
1236
/*
 * Timeout of a port timer; drop the timer reference when
 * port_timeout_common() reports that we should.
 */
static ERTS_INLINE void
hlt_port_timeout(ErtsHLTimer *tmr)
{
    if (port_timeout_common(tmr->head.receiver.port, (void *) tmr))
        hl_timer_dec_refc(tmr, tmr->head.roflgs);
}
1243
/*
 * Fire one high level timer: dispatch on the timer's read-only flags
 * (BIF timer / process / port / callback), mark it as no longer in
 * the time tree, and drop the tree's reference.
 */
static void hlt_timeout(ErtsHLTimer *tmr, void *vsrv)
{
    ErtsHLTimerService *srv = (ErtsHLTimerService *) vsrv;
    Uint32 roflgs;

    ERTS_HLT_HDBG_CHK_SRV(srv);

    roflgs = tmr->head.roflgs;
    ERTS_HLT_ASSERT(roflgs & ERTS_TMR_ROFLG_HLT);

    if (roflgs & ERTS_TMR_ROFLG_BIF_TMR)
        bif_timer_timeout(srv, (ErtsBifTimer *) tmr, roflgs);
    else if (roflgs & ERTS_TMR_ROFLG_PROC)
        hlt_proc_timeout(tmr);
    else if (roflgs & ERTS_TMR_ROFLG_PORT)
        hlt_port_timeout(tmr);
    else {
        ERTS_HLT_ASSERT(roflgs & ERTS_TMR_ROFLG_CALLBACK);
        (*tmr->head.receiver.callback)(tmr->head.u.arg);
    }

    /* The caller has already unlinked us from the time tree. */
    tmr->time.tree.parent = ERTS_HLT_PFIELD_NOT_IN_TABLE;

    ERTS_HLT_HDBG_CHK_SRV(srv);

    hl_timer_dec_refc(tmr, roflgs);
}
1271
#ifdef ERTS_HLT_HARD_DEBUG
/* Hard-debug helper: flag a timer on a same-time list as having its
 * timeout pending (used by hlt_service_timeout()). */
static void
set_pending_timeout(ErtsHLTimer *tmr, void *unused)
{
    tmr->pending_timeout = -1;
}
#endif
1279
/*
 * Timer-wheel callback for the high level timer service.  Fires the
 * timer with the earliest timeout (srv->next_timeout) together with
 * all timers on its same-time list.  Long same-time lists are
 * processed in chunks (ERTS_TMR_TIMEOUT_YIELD_LIMIT); when yielding,
 * the root timer is remembered in srv->yield and the service timer is
 * re-armed so we continue from where we left off.
 */
static void
hlt_service_timeout(void *vesdp)
{
    ErtsSchedulerData *esdp = (ErtsSchedulerData *) vesdp;
    ErtsHLTimerService *srv = esdp->timer_service;
    ErtsHLTimer *tmr = srv->next_timeout;
    int yield;

    ERTS_HLT_HDBG_CHK_SRV(srv);

    ERTS_HLT_ASSERT(esdp == erts_get_scheduler_data());

    ERTS_HLT_ASSERT(!srv->yield.root || srv->yield.root == tmr);
    ERTS_HLT_ASSERT(tmr);
    ERTS_HLT_ASSERT(tmr->timeout <= erts_get_monotonic_time(esdp));

    if (!srv->yield.root) {
        /* First pass (not resuming a yield): unlink the root timer
         * from the time tree before firing anything. */
        ERTS_HLT_ASSERT(tmr->time.tree.parent
                        != ERTS_HLT_PFIELD_NOT_IN_TABLE);
        time_rbt_delete(&srv->time_tree, tmr);
        tmr->time.tree.parent = ERTS_HLT_PFIELD_NOT_IN_TABLE;
#ifdef ERTS_HLT_HARD_DEBUG
        tmr->pending_timeout = 1;
        if (tmr->time.tree.same_time)
            same_time_list_foreach(tmr->time.tree.same_time, set_pending_timeout, NULL);
#endif
    }

    if (!tmr->time.tree.same_time && !srv->yield.root)
        yield = 0;
    else {
        /* Fire the same-time list, possibly yielding mid-list. */
        yield = same_time_list_foreach_destroy_yielding(
            &tmr->time.tree.same_time, hlt_timeout, (void *) srv,
            &srv->yield.state, ERTS_TMR_TIMEOUT_YIELD_LIMIT);
    }

    if (yield)
        srv->yield.root = tmr;
    else {
        /* List done; fire the root timer itself and pick the next
         * earliest timer from the tree. */
        srv->yield.root = NULL;
        hlt_timeout(tmr, (void *) srv);

        tmr = time_rbt_smallest(srv->time_tree);
        srv->next_timeout = tmr;
    }

    ERTS_HLT_HDBG_CHK_SRV(srv);

    /* Re-arm for the next timeout (or to resume after a yield). */
    if (tmr)
        erts_twheel_set_timer(esdp->timer_wheel,
                              &srv->service_timer,
                              hlt_service_timeout,
                              vesdp,
                              tmr->timeout);
}
1335
/*
 * Remove a high level timer from the scheduler-local service.
 *
 * Three cases, depending on how the timer is linked:
 *  1. It hangs on another timer's same-time list: just unlink it.
 *  2. It is a tree node with a non-empty same-time list: promote a
 *     list member to tree node (time_rbt_replace) before removal.
 *  3. It is a plain tree node: delete it, and if it was the next
 *     timeout, re-arm the service timer for the new smallest timeout.
 *
 * Finally marks the timer as not-in-table and drops its reference.
 */
static void
hlt_delete_timer(ErtsSchedulerData *esdp, ErtsHLTimer *tmr)
{
    ErtsHLTimerService *srv = esdp->timer_service;

    ERTS_HLT_HDBG_CHK_SRV(srv);

    if (tmr->time.tree.parent == ERTS_HLT_PFIELD_NOT_IN_TABLE) {
        /* Already removed... */
        ERTS_HLT_HDBG_CHK_SRV(srv);
        return;
    }

    if (tmr->time.tree.parent & ERTS_HLT_PFLG_SAME_TIME) {
        /* Case 1: a same-time list member, not a tree node. */
        same_time_list_delete(tmr);
    }
    else if (tmr->time.tree.same_time) {
        /* Case 2: tree node with same-time siblings; one of them
         * must take over the tree slot. */
        ErtsHLTimer *st_container;

        ERTS_HLT_ASSERT((tmr->time.tree.parent & ERTS_HLT_PFLG_SAME_TIME) == 0);
        st_container = tmr->time.tree.same_time->time.tree.u.l.prev;

        ERTS_HLT_ASSERT(st_container);
        ERTS_HLT_ASSERT(st_container->time.tree.parent
                        & ERTS_HLT_PFLG_SAME_TIME);
        ERTS_HLT_ASSERT(tmr->timeout == st_container->timeout);

        same_time_list_delete(st_container);
        st_container->time.tree.same_time = tmr->time.tree.same_time;
        same_time_list_new_root(&st_container->time.tree.same_time);

        time_rbt_replace(&srv->time_tree, tmr, st_container);
        ERTS_HLT_ASSERT((st_container->time.tree.parent
                         & ERTS_HLT_PFLG_SAME_TIME) == 0);

        if (srv->next_timeout == tmr)
            srv->next_timeout = st_container;
    }
    else {
        /* Case 3: plain tree node. */
        ERTS_HLT_ASSERT((tmr->time.tree.parent & ERTS_HLT_PFLG_SAME_TIME) == 0);
        time_rbt_delete(&srv->time_tree, tmr);
        if (tmr == srv->next_timeout) {
            ErtsHLTimer *smlst;
            erts_twheel_cancel_timer(esdp->timer_wheel,
                                     &srv->service_timer);
            smlst = time_rbt_smallest(srv->time_tree);
            srv->next_timeout = smlst;
            if (smlst) {
                ERTS_HLT_ASSERT(smlst->timeout > tmr->timeout);
                erts_twheel_set_timer(esdp->timer_wheel,
                                      &srv->service_timer,
                                      hlt_service_timeout,
                                      (void *) esdp,
                                      smlst->timeout);
            }
        }
    }
    tmr->time.tree.parent = ERTS_HLT_PFIELD_NOT_IN_TABLE;

    hl_timer_dec_refc(tmr, tmr->head.roflgs);

    ERTS_HLT_HDBG_CHK_SRV(srv);
}
1399
1400 /*
1401 * Pass canceled timers back to originating scheduler
1402 */
1403
/*
 * Final cleanup of a canceled timer, executed on the scheduler that
 * owns the timer (asserted via the scheduler id in roflgs): remove a
 * BIF timer from the btm_tree, then delete the timer from either the
 * high level service or the timer wheel and drop the reference.
 */
static ERTS_INLINE void
cleanup_sched_local_canceled_timer(ErtsSchedulerData *esdp,
                                   ErtsTimer *tmr)
{
    Uint32 roflgs = tmr->head.roflgs;
    ERTS_HLT_ASSERT(esdp == erts_get_scheduler_data());
    ERTS_HLT_ASSERT((tmr->head.roflgs & ERTS_TMR_ROFLG_SID_MASK)
                    == (Uint32) esdp->no);

    if (roflgs & ERTS_TMR_ROFLG_BIF_TMR) {
        ErtsBifTimer *btm = (ErtsBifTimer *) tmr;
        if (btm->btm.tree.parent != ERTS_HLT_PFIELD_NOT_IN_TABLE) {
            btm_rbt_delete(&esdp->timer_service->btm_tree, btm);
            btm->btm.tree.parent = ERTS_HLT_PFIELD_NOT_IN_TABLE;
        }
    }

    if (roflgs & ERTS_TMR_ROFLG_HLT) {
        hlt_delete_timer(esdp, &tmr->hlt);
        hl_timer_dec_refc(&tmr->hlt, roflgs);
    }
    else {
        cancel_tw_timer(esdp, &tmr->twt);
        tw_timer_dec_refc(&tmr->twt);
    }
}
1430
1431
/*
 * Initialize a canceled-timer queue to its empty state: head, tail
 * and unreferenced-end all point at the embedded marker element, and
 * the thread-progress tracking starts out as already reached.
 */
static void
init_canceled_queue(ErtsHLTCncldTmrQ *cq)
{
    erts_atomic_init_nob(&cq->tail.data.marker.u.next, ERTS_AINT_NULL);
    erts_atomic_init_nob(&cq->tail.data.last,
                         (erts_aint_t) &cq->tail.data.marker);
    cq->head.first = (ErtsTimer *) &cq->tail.data.marker;
    cq->head.unref_end = (ErtsTimer *) &cq->tail.data.marker;
    cq->head.next.thr_progress = erts_thr_progress_current();
    cq->head.next.thr_progress_reached = 1;
    cq->head.next.unref_end = (ErtsTimer *) &cq->tail.data.marker;
    cq->head.used_marker = 1;
}
1445
/*
 * Lock-free enqueue of a canceled timer onto another scheduler's
 * queue.  Returns non-zero when we successfully appended at the tail
 * and moved the 'last' pointer (the caller then notifies the owning
 * scheduler); zero when we instead raced and inserted the element
 * somewhere between the observed tail and the true tail.
 *
 * 'cinit' seeds the retry strategy: on each failed CAS we alternate
 * (based on the low bit of the iteration counter) between retrying at
 * the same node with the newly observed next pointer and advancing to
 * the node that beat us.
 */
static ERTS_INLINE int
cq_enqueue(ErtsHLTCncldTmrQ *cq, ErtsTimer *tmr, int cinit)
{
    erts_aint_t itmp;
    ErtsTimer *enq, *this = tmr;

    erts_atomic_init_nob(&this->head.u.next, ERTS_AINT_NULL);
    /* Enqueue at end of list... */

    enq = (ErtsTimer *) erts_atomic_read_nob(&cq->tail.data.last);
    itmp = erts_atomic_cmpxchg_relb(&enq->head.u.next,
                                    (erts_aint_t) this,
                                    ERTS_AINT_NULL);
    if (itmp == ERTS_AINT_NULL) {
        /* We are required to move last pointer */
#ifdef DEBUG
        ASSERT(ERTS_AINT_NULL == erts_atomic_read_nob(&this->head.u.next));
        ASSERT(((erts_aint_t) enq)
               == erts_atomic_xchg_relb(&cq->tail.data.last,
                                        (erts_aint_t) this));
#else
        erts_atomic_set_relb(&cq->tail.data.last, (erts_aint_t) this);
#endif
        return 1;
    }
    else {
        /*
         * We *need* to insert element somewhere in between the
         * last element we read earlier and the actual last element.
         */
        int i = cinit;

        while (1) {
            erts_aint_t itmp2;
            erts_atomic_set_nob(&this->head.u.next, itmp);
            itmp2 = erts_atomic_cmpxchg_relb(&enq->head.u.next,
                                             (erts_aint_t) this,
                                             itmp);
            if (itmp == itmp2)
                return 0; /* inserted this */
            if ((i & 1) == 0)
                itmp = itmp2;
            else {
                enq = (ErtsTimer *) itmp2;
                itmp = erts_atomic_read_acqb(&enq->head.u.next);
                ASSERT(itmp != ERTS_AINT_NULL);
            }
            i++;
        }
    }
}
1497
/*
 * If the marker element is currently out of the queue and the whole
 * queue up to 'ilast' is unreferenced, try to re-append the marker at
 * the tail.  Returns the (possibly updated) tail pointer value.
 */
static ERTS_INLINE erts_aint_t
check_insert_marker(ErtsHLTCncldTmrQ *cq, erts_aint_t ilast)
{
    if (!cq->head.used_marker
        && cq->head.unref_end == (ErtsTimer *) ilast) {
        erts_aint_t itmp;
        ErtsTimer *last = (ErtsTimer *) ilast;

        erts_atomic_init_nob(&cq->tail.data.marker.u.next, ERTS_AINT_NULL);
        itmp = erts_atomic_cmpxchg_relb(&last->head.u.next,
                                        (erts_aint_t) &cq->tail.data.marker,
                                        ERTS_AINT_NULL);
        if (itmp == ERTS_AINT_NULL) {
            /* Marker appended; it is now the queue tail. */
            ilast = (erts_aint_t) &cq->tail.data.marker;
            cq->head.used_marker = !0;
            erts_atomic_set_relb(&cq->tail.data.last, ilast);
        }
    }
    return ilast;
}
1518
/*
 * Dequeue one canceled timer from the head of the queue (consumer
 * side; only elements up to unref_end are safe to touch).  Skips over
 * the marker element if it sits at the head.  Returns NULL when no
 * dequeueable element exists.
 */
static ERTS_INLINE ErtsTimer *
cq_dequeue(ErtsHLTCncldTmrQ *cq)
{
    ErtsTimer *tmr;

    if (cq->head.first == cq->head.unref_end)
        return NULL;

    tmr = cq->head.first;
    if (tmr == (ErtsTimer *) &cq->tail.data.marker) {
        /* Step past the marker; it may be re-inserted later by
         * check_insert_marker(). */
        ASSERT(cq->head.used_marker);
        cq->head.used_marker = 0;
        tmr = (ErtsTimer *) erts_atomic_read_nob(&tmr->head.u.next);
        if (tmr == cq->head.unref_end) {
            cq->head.first = tmr;
            return NULL;
        }
    }

    cq->head.first = (ErtsTimer *) erts_atomic_read_nob(&tmr->head.u.next);

    ASSERT(cq->head.first);

    return tmr;
}
1544
/*
 * Consumer-side scan for newly enqueued canceled timers.  Elements
 * become dequeueable only after all threads have made progress past
 * the point where they were enqueued; this advances unref_end when
 * the previously requested thread-progress point has been reached and
 * then requests a new one if more elements remain.
 *
 * Returns 0 when the queue is empty (only the marker present) and
 * nothing more needs to be done, otherwise 1 (caller should come back
 * after further thread progress).
 */
static int
cq_check_incoming(ErtsSchedulerData *esdp, ErtsHLTCncldTmrQ *cq)
{
    erts_aint_t ilast = erts_atomic_read_nob(&cq->tail.data.last);
    if (((ErtsTimer *) ilast) == (ErtsTimer *) &cq->tail.data.marker
        && cq->head.first == (ErtsTimer *) &cq->tail.data.marker) {
        /* Nothing more to do... */
        return 0;
    }

    if (cq->head.next.thr_progress_reached
        || erts_thr_progress_has_reached(cq->head.next.thr_progress)) {
        cq->head.next.thr_progress_reached = 1;
        /* Move unreferenced end pointer forward... */

        ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore);

        cq->head.unref_end = cq->head.next.unref_end;

        ilast = check_insert_marker(cq, ilast);

        if (cq->head.unref_end != (ErtsTimer *) ilast) {
            /* More elements behind unref_end; wait for another round
             * of thread progress before touching them. */
            cq->head.next.unref_end = (ErtsTimer *) ilast;
            cq->head.next.thr_progress = erts_thr_progress_later(esdp);
            cq->head.next.thr_progress_reached = 0;
        }
    }
    return 1;
}
1574
/*
 * Record in *prev_val the earliest thread-progress value this queue
 * is waiting for, if it is earlier than what is already stored.
 */
static ERTS_INLINE void
store_earliest_thr_prgr(ErtsThrPrgrVal *prev_val, ErtsHLTCncldTmrQ *cq)
{
    if (!cq->head.next.thr_progress_reached
        && (*prev_val == ERTS_THR_PRGR_INVALID
            || erts_thr_progress_cmp(cq->head.next.thr_progress,
                                     *prev_val) < 0)) {
        *prev_val = cq->head.next.thr_progress;
    }
}
1585
/*
 * Drain this scheduler's canceled-timer queue, cleaning up each
 * dequeued timer locally.  When 'use_limit' is set, at most
 * 'ops_limit' timers are processed per call; leftover work is
 * reported through *need_more_work.  Outstanding thread-progress
 * dependencies are reported through *need_thr_progress and the
 * earliest awaited value through *thr_prgr_p (all three out-params
 * may be NULL when the caller is not interested).
 */
static void
handle_canceled_queue(ErtsSchedulerData *esdp,
                      ErtsHLTCncldTmrQ *cq,
                      int use_limit,
                      int ops_limit,
                      int *need_thr_progress,
                      ErtsThrPrgrVal *thr_prgr_p,
                      int *need_more_work)
{
    int need_thr_prgr = 0;
    int need_mr_wrk = 0;
    int have_checked_incoming = 0;
    int ops = 0;

    ERTS_HLT_ASSERT(cq == &esdp->timer_service->canceled_queue);

    while (1) {
        ErtsTimer *tmr = cq_dequeue(cq);

        if (tmr)
            cleanup_sched_local_canceled_timer(esdp, tmr);
        else {
            /* Queue looks empty; check for incoming elements once,
             * then give up. */
            if (have_checked_incoming)
                break;
            need_thr_prgr = cq_check_incoming(esdp, cq);
            if (need_thr_progress) {
                *need_thr_progress |= need_thr_prgr;
                if (need_thr_prgr)
                    store_earliest_thr_prgr(thr_prgr_p, cq);
            }
            have_checked_incoming = 1;
            continue;
        }

        if (use_limit && ++ops >= ops_limit) {
            if (cq->head.first != cq->head.unref_end) {
                need_mr_wrk = 1;
                if (need_more_work)
                    *need_more_work |= 1;
            }
            break;
        }
    }

    /* If nothing is pending so far, do a final incoming check so the
     * caller gets an up-to-date thread-progress requirement. */
    if (need_thr_progress && !(need_thr_prgr | need_mr_wrk)) {
        need_thr_prgr = cq_check_incoming(esdp, cq);
        *need_thr_progress |= need_thr_prgr;
        if (need_thr_prgr)
            store_earliest_thr_prgr(thr_prgr_p, cq);
    }
}
1637
/*
 * Aux-work entry point: drain the calling scheduler's canceled-timer
 * queue with the standard per-call operations limit.
 */
void
erts_handle_canceled_timers(void *vesdp,
                            int *need_thr_progress,
                            ErtsThrPrgrVal *thr_prgr_p,
                            int *need_more_work)
{
    ErtsSchedulerData *esdp = (ErtsSchedulerData *) vesdp;
    ERTS_HLT_ASSERT(esdp == erts_get_scheduler_data());

    handle_canceled_queue(esdp, &esdp->timer_service->canceled_queue,
                          1, ERTS_TMR_CANCELED_TIMER_LIMIT,
                          need_thr_progress, thr_prgr_p,
                          need_more_work);
}
1652
1653
/*
 * Pass a canceled timer to the scheduler that owns it (rsched_id,
 * 1-based) and notify that scheduler if the enqueue landed at the
 * queue tail.
 */
static void
queue_canceled_timer(ErtsSchedulerData *esdp, int rsched_id, ErtsTimer *tmr)
{
    ErtsHLTCncldTmrQ *cq;
    cq = &ERTS_SCHEDULER_IX(rsched_id-1)->timer_service->canceled_queue;
    if (cq_enqueue(cq, tmr, rsched_id - (int) esdp->no))
        erts_notify_canceled_timer(esdp, rsched_id);
}
1662
/*
 * Continue cancellation of a proc/port timer: clean up locally when
 * the current scheduler owns the timer, otherwise ship it to the
 * owning scheduler's canceled-timer queue.
 */
static void
continue_cancel_ptimer(ErtsSchedulerData *esdp, ErtsTimer *tmr)
{
    Uint32 sid = (tmr->head.roflgs & ERTS_TMR_ROFLG_SID_MASK);

    if (esdp->no != sid)
        queue_canceled_timer(esdp, sid, tmr);
    else
        cleanup_sched_local_canceled_timer(esdp, tmr);
}
1673
1674 /*
1675 * BIF timer specific
1676 */
1677
1678
erts_bif_timer_memory_size(void)1679 Uint erts_bif_timer_memory_size(void)
1680 {
1681 return (Uint) 0;
1682 }
1683
/*
 * Common implementation of the timer-creating BIFs.  Creates a timer
 * (timer wheel when 'twheel', high level service otherwise) firing at
 * 'timeout_pos', delivering 'msg' -- wrapped as {timeout, Ref, Msg}
 * when 'wrap' -- to 'rcvr' (internal pid or registered name).
 * Returns the timer reference, or badarg for an invalid receiver.
 */
static BIF_RETTYPE
setup_bif_timer(Process *c_p, int twheel, ErtsMonotonicTime timeout_pos,
                int short_time, Eterm rcvr, Eterm msg, int wrap)
{
    BIF_RETTYPE ret;
    Eterm ref, tmo_msg, *hp;
    ErtsBifTimer *tmr;
    ErtsSchedulerData *esdp;
    Eterm tmp_hp[4];
    ErtsCreateTimerFunc create_timer;

    if (is_not_internal_pid(rcvr) && is_not_atom(rcvr))
        goto badarg;

    esdp = erts_proc_sched_data(c_p);

    hp = HAlloc(c_p, ERTS_REF_THING_SIZE);
    ref = erts_sched_make_ref_in_buffer(esdp, hp);
    ASSERT(erts_get_ref_numbers_thr_id(internal_ordinary_ref_numbers(ref))
           == (Uint32) esdp->no);

    /* erlang:start_timer() wraps the message; erlang:send_after()
     * sends it as is. */
    tmo_msg = wrap ? TUPLE3(tmp_hp, am_timeout, ref, msg) : msg;

    create_timer = twheel ? create_tw_timer : create_hl_timer;
    tmr = (ErtsBifTimer *) create_timer(esdp, timeout_pos,
                                        short_time, ERTS_TMR_BIF,
                                        NULL, rcvr, tmo_msg,
                                        internal_ordinary_ref_numbers(ref),
                                        NULL, NULL);

    if (is_internal_pid(rcvr)) {
        Process *proc = erts_pid2proc_opt(c_p, ERTS_PROC_LOCK_MAIN,
                                          rcvr, ERTS_PROC_LOCK_BTM,
                                          ERTS_P2P_FLG_INC_REFC);
        if (!proc) {
            /* Receiver already gone; tear the freshly created timer
             * down again. */
            if (tmr->btm.tree.parent != ERTS_HLT_PFIELD_NOT_IN_TABLE) {
                btm_rbt_delete(&esdp->timer_service->btm_tree, tmr);
                tmr->btm.tree.parent = ERTS_HLT_PFIELD_NOT_IN_TABLE;
            }
            if (tmr->btm.bp)
                free_message_buffer(tmr->btm.bp);
            if (twheel)
                cancel_tw_timer(esdp, &tmr->type.twt);
            else
                hlt_delete_timer(esdp, &tmr->type.hlt);
            timer_destroy((ErtsTimer *) tmr, twheel, 1);
        }
        else {
            /* Link the timer into the receiver's bif_timers tree so
             * it can be found when the receiver exits. */
            proc_btm_rbt_insert(&proc->bif_timers, tmr);
            erts_proc_unlock(proc, ERTS_PROC_LOCK_BTM);
            tmr->type.head.receiver.proc = proc;
        }
    }

    ERTS_BIF_PREP_RET(ret, ref);
    return ret;

badarg:

    ERTS_BIF_PREP_ERROR(ret, c_p, BADARG);
    return ret;
}
1746
/*
 * Try to cancel a BIF timer by moving it ACTIVE -> CANCELED.
 *
 * Returns:
 *   0  -- timer was not active (already canceled or timed out);
 *  -1  -- canceled, but not removed from the receiver's tree here;
 *   1  -- canceled and removed from the receiver's bif_timers tree
 *         (caller must drop the corresponding reference).
 */
static int
cancel_bif_timer(ErtsBifTimer *tmr)
{
    erts_aint_t state;
    Uint32 roflgs;
    int res;

    state = erts_atomic32_cmpxchg_acqb(&tmr->btm.state,
                                       ERTS_TMR_STATE_CANCELED,
                                       ERTS_TMR_STATE_ACTIVE);
    if (state != ERTS_TMR_STATE_ACTIVE)
        return 0;

    /* We won the race; the timeout message will never be sent. */
    if (tmr->btm.bp)
        free_message_buffer(tmr->btm.bp);

    res = -1;

    roflgs = tmr->type.head.roflgs;
    if (roflgs & ERTS_TMR_ROFLG_PROC) {
        Process *proc;

        proc = tmr->type.head.receiver.proc;
        ERTS_HLT_ASSERT(!(tmr->type.head.roflgs & ERTS_TMR_ROFLG_REG_NAME));

        erts_proc_lock(proc, ERTS_PROC_LOCK_BTM);
        /*
         * If process is exiting, let it clean up
         * the btm tree by itself (it may be in
         * the middle of tree destruction).
         */
        if (!ERTS_PROC_IS_EXITING(proc)
            && tmr->btm.proc_tree.parent != ERTS_HLT_PFIELD_NOT_IN_TABLE) {
            proc_btm_rbt_delete(&proc->bif_timers, tmr);
            tmr->btm.proc_tree.parent = ERTS_HLT_PFIELD_NOT_IN_TABLE;
            res = 1;
        }
        erts_proc_unlock(proc, ERTS_PROC_LOCK_BTM);
    }

    return res;
}
1789
/*
 * Read or cancel a BIF timer.  Returns the remaining time in
 * milliseconds, or -1 when the timer is gone / not active.
 *
 * On cancel, local timers (owned by this scheduler, 'sid' matching
 * esdp->no) are torn down immediately; timers owned by another
 * scheduler are shipped to it via the canceled-timer queue.
 */
static ERTS_INLINE Sint64
access_btm(ErtsBifTimer *tmr, Uint32 sid, ErtsSchedulerData *esdp, int cancel)
{
    int cncl_res;
    Sint64 time_left;
    ErtsMonotonicTime timeout;
    int is_hlt;

    if (!tmr)
        return -1;

    is_hlt = !!(tmr->type.head.roflgs & ERTS_TMR_ROFLG_HLT);
    /* NOTE: erts_tweel_read_timeout is spelled this way in the timer
     * wheel API -- do not "fix" it here. */
    timeout = (is_hlt
               ? tmr->type.hlt.timeout
               : erts_tweel_read_timeout(&tmr->type.twt.u.tw_tmr));

    if (!cancel) {
        /* Pure read: report time left only while still active. */
        erts_aint32_t state = erts_atomic32_read_acqb(&tmr->btm.state);
        if (state == ERTS_TMR_STATE_ACTIVE)
            return get_time_left(esdp, timeout);
        return -1;
    }

    cncl_res = cancel_bif_timer(tmr);
    if (!cncl_res)
        return -1;

    time_left = get_time_left(esdp, timeout);

    if (sid != (Uint32) esdp->no) {
        /* Not our timer; let the owning scheduler finish cleanup. */
        if (cncl_res > 0)
            queue_canceled_timer(esdp, sid, (ErtsTimer *) tmr);
    }
    else {
        if (tmr->btm.tree.parent != ERTS_HLT_PFIELD_NOT_IN_TABLE) {
            btm_rbt_delete(&esdp->timer_service->btm_tree, tmr);
            tmr->btm.tree.parent = ERTS_HLT_PFIELD_NOT_IN_TABLE;
        }
        if (is_hlt) {
            if (cncl_res > 0)
                hl_timer_dec_refc(&tmr->type.hlt, tmr->type.hlt.head.roflgs);
            hlt_delete_timer(esdp, &tmr->type.hlt);
        }
        else {
            if (cncl_res > 0)
                tw_timer_dec_refc(&tmr->type.twt);
            cancel_tw_timer(esdp, &tmr->type.twt);
        }
    }

    return time_left;
}
1842
/*
 * Turn a time-left value into the BIF result term: 'false' for
 * negative values, otherwise the integer as a small or bignum.
 */
static ERTS_INLINE Eterm
return_info(Process *c_p, Sint64 time_left)
{
    Uint hsz;
    Eterm *hp;

    if (time_left < 0)
        return am_false;

    if (time_left <= (Sint64) MAX_SMALL)
        return make_small((Sint) time_left);

    /* Does not fit in a small; build a bignum on the heap. */
    hsz = ERTS_SINT64_HEAP_SIZE(time_left);
    hp = HAlloc(c_p, hsz);
    return erts_sint64_to_big(time_left, &hp);
}
1859
/*
 * Send the asynchronous reply {cancel_timer | read_timer, Ref,
 * Result} to 'proc', where Result is 'false' or the time left.
 * Returns am_ok.  'initial_locks' are the locks already held by the
 * caller; any extra locks taken while allocating are released.
 */
static ERTS_INLINE Eterm
send_async_info(Process *proc, ErtsProcLocks initial_locks,
                Eterm tref, int cancel, Sint64 time_left)
{
    ErtsProcLocks locks = initial_locks;
    ErtsMessage *mp;
    Eterm tag, res, msg, ref;
    Uint hsz;
    Eterm *hp;
    ErlOffHeap *ohp;

    /* 3-tuple + the (possibly off-heap) timer reference... */
    hsz = 4;
    hsz += NC_HEAP_SIZE(tref);

    if (time_left > (Sint64) MAX_SMALL)
        hsz += ERTS_SINT64_HEAP_SIZE(time_left);

    mp = erts_alloc_message_heap(proc, &locks, hsz, &hp, &ohp);

    if (cancel)
        tag = am_cancel_timer;
    else
        tag = am_read_timer;

    ref = STORE_NC(&hp, ohp, tref);

    if (time_left < 0)
        res = am_false;
    else if (time_left <= (Sint64) MAX_SMALL)
        res = make_small((Sint) time_left);
    else
        res = erts_sint64_to_big(time_left, &hp);

    msg = TUPLE3(hp, tag, ref, res);

    erts_queue_message(proc, locks, mp, msg, am_clock_service);

    /* Release only the locks we acquired here. */
    locks &= ~initial_locks;
    if (locks)
        erts_proc_unlock(proc, locks);

    return am_ok;
}
1903
/*
 * Send the synchronous reply {Ref, Result} -- where Ref is rebuilt
 * from 'refn' -- to 'proc'.  Result is am_ok when 'info' is false,
 * otherwise 'false' or the time left.  Returns am_ok.
 */
static ERTS_INLINE Eterm
send_sync_info(Process *proc, ErtsProcLocks initial_locks,
               Uint32 *refn, int info, int cancel, Sint64 time_left)
{
    ErtsProcLocks locks = initial_locks;
    ErtsMessage *mp;
    Eterm res, msg, ref;
    Uint hsz;
    Eterm *hp;
    ErlOffHeap *ohp;

    /* 2-tuple + the reply reference... */
    hsz = 3 + ERTS_REF_THING_SIZE;

    if (time_left > (Sint64) MAX_SMALL)
        hsz += ERTS_SINT64_HEAP_SIZE(time_left);

    mp = erts_alloc_message_heap(proc, &locks, hsz, &hp, &ohp);

    write_ref_thing(hp, refn[0], refn[1], refn[2]);
    ref = make_internal_ref(hp);
    hp += ERTS_REF_THING_SIZE;

    if (!info)
        res = am_ok;
    else if (time_left < 0)
        res = am_false;
    else if (time_left <= (Sint64) MAX_SMALL)
        res = make_small((Sint) time_left);
    else
        res = erts_sint64_to_big(time_left, &hp);

    msg = TUPLE2(hp, ref, res);

    erts_queue_message(proc, locks, mp, msg, am_clock_service);

    /* Release only the locks we acquired here. */
    locks &= ~initial_locks;
    if (locks)
        erts_proc_unlock(proc, locks);

    return am_ok;
}
1945
/*
 * Read/cancel a BIF timer owned by the *current* scheduler.
 *
 * Called either directly from the BIF ('c_p' set, 'rrefn' NULL) or
 * from a scheduled request on behalf of process 'pid' ('c_p' NULL,
 * 'rrefn' carries the reply reference).  The result is returned
 * directly, sent as an async {cancel_timer|read_timer, ...} message,
 * or sent as a sync {Ref, Result} message, depending on 'async' and
 * how we were called.
 */
static ERTS_INLINE Eterm
access_sched_local_btm(Process *c_p, Eterm pid,
                       Eterm tref, Uint32 *trefn,
                       Uint32 *rrefn,
                       int async, int cancel,
                       int return_res,
                       int info)
{
    ErtsSchedulerData *esdp;
    ErtsHLTimerService *srv;
    ErtsBifTimer *tmr;
    Sint64 time_left;
    Process *proc;
    ErtsProcLocks proc_locks;

    time_left = -1;

    if (!c_p)
        esdp = erts_get_scheduler_data();
    else {
        esdp = erts_proc_sched_data(c_p);
        ERTS_HLT_ASSERT(esdp == erts_get_scheduler_data());
    }

    /* The timer reference must name this scheduler. */
    ERTS_HLT_ASSERT(erts_get_ref_numbers_thr_id(trefn)
                    == (Uint32) esdp->no);

    srv = esdp->timer_service;

    tmr = btm_rbt_lookup(srv->btm_tree, trefn);

    time_left = access_btm(tmr, (Uint32) esdp->no, esdp, cancel);

    if (async && !info)
        return am_ok;

    if (c_p) {
        proc = c_p;
        proc_locks = ERTS_PROC_LOCK_MAIN;
    }
    else {
        proc = erts_proc_lookup(pid);
        proc_locks = 0;
    }

    if (!async) {
        if (c_p) {
            if (!info)
                return am_ok;
            return return_info(c_p, time_left);
        }

        if (proc)
            return send_sync_info(proc, proc_locks,
                                  rrefn, info, cancel, time_left);
    }
    else if (proc) {
        Eterm ref;
        Eterm heap[ERTS_REF_THING_SIZE];
        /* Rebuild the timer reference on the stack unless the caller
         * already passed it as a term. */
        if (is_value(tref))
            ref = tref;
        else {
            write_ref_thing(&heap[0], trefn[0], trefn[1], trefn[2]);
            ref = make_internal_ref(&heap[0]);
        }
        return send_async_info(proc, proc_locks,
                               ref, cancel, time_left);
    }

    return am_ok;
}
2017
/* Flags describing a cross-scheduler BIF timer access request
 * (see bif_timer_access_request()). */
#define ERTS_BTM_REQ_FLG_ASYNC (((Uint32) 1) << 0)
#define ERTS_BTM_REQ_FLG_CANCEL (((Uint32) 1) << 1)
#define ERTS_BTM_REQ_FLG_INFO (((Uint32) 1) << 2)

/* Request shipped to the scheduler owning a BIF timer when it is
 * read/canceled from another scheduler. */
typedef struct {
    Eterm pid;                        /* requesting process */
    Uint32 trefn[ERTS_REF_NUMBERS];   /* timer reference */
    Uint32 rrefn[ERTS_REF_NUMBERS];   /* reply reference */
    Uint32 flags;                     /* ERTS_BTM_REQ_FLG_* */
} ErtsBifTimerRequest;
2028
/*
 * Aux-work callback executed on the scheduler owning the timer:
 * unpack the request, perform the local access, and free the request.
 */
static void
bif_timer_access_request(void *vreq)
{
    ErtsBifTimerRequest *req = (ErtsBifTimerRequest *) vreq;
    int async = (int) (req->flags & ERTS_BTM_REQ_FLG_ASYNC);
    int cancel = (int) (req->flags & ERTS_BTM_REQ_FLG_CANCEL);
    int info = (int) (req->flags & ERTS_BTM_REQ_FLG_INFO);
    (void) access_sched_local_btm(NULL, req->pid, THE_NON_VALUE,
                                  req->trefn, req->rrefn, async,
                                  cancel, 0, info);
    erts_free(ERTS_ALC_T_TIMER_REQUEST, vreq);
}
2041
/*
 * Fast path for a timer owned by another scheduler: if the timer is
 * aimed at the calling process itself we can find it via the
 * process' own bif_timers tree and access it directly, without
 * scheduling a request.  Returns non-zero on success with the result
 * term stored in *resp; zero when the timer was not found locally.
 */
static int
try_access_sched_remote_btm(ErtsSchedulerData *esdp,
                            Process *c_p, Uint32 sid,
                            Eterm tref, Uint32 *trefn,
                            int async, int cancel,
                            int info, Eterm *resp)
{
    ErtsBifTimer *tmr;
    Sint64 time_left;

    ERTS_HLT_ASSERT(c_p);

    /*
     * Check if the timer is aimed at current
     * process...
     */
    erts_proc_lock(c_p, ERTS_PROC_LOCK_BTM);
    tmr = proc_btm_rbt_lookup(c_p->bif_timers, trefn);
    erts_proc_unlock(c_p, ERTS_PROC_LOCK_BTM);
    if (!tmr)
        return 0;

    time_left = access_btm(tmr, sid, esdp, cancel);

    if (!info)
        *resp = am_ok;
    else if (!async)
        *resp = return_info(c_p, time_left);
    else
        *resp = send_async_info(c_p, ERTS_PROC_LOCK_MAIN,
                                tref, cancel, time_left);

    return 1;
}
2076
/*
 * Result for an access of a non-existing timer: 'false' for a sync
 * call, am_ok for async (sending the {Tag, Ref, false} reply message
 * first when 'info' is requested).
 */
static Eterm
no_timer_result(Process *c_p, Eterm tref, int cancel, int async, int info)
{
    ErtsMessage *mp;
    Uint hsz;
    Eterm *hp, msg, ref, tag;
    ErlOffHeap *ohp;
    ErtsProcLocks locks;

    if (!async)
        return am_false;
    if (!info)
        return am_ok;

    /* 3-tuple + the timer reference... */
    hsz = 4;
    hsz += NC_HEAP_SIZE(tref);
    locks = ERTS_PROC_LOCK_MAIN;
    mp = erts_alloc_message_heap(c_p, &locks, hsz, &hp, &ohp);
    ref = STORE_NC(&hp, ohp, tref);
    tag = cancel ? am_cancel_timer : am_read_timer;
    msg = TUPLE3(hp, tag, ref, am_false);
    erts_queue_message(c_p, locks, mp, msg, am_clock_service);
    locks &= ~ERTS_PROC_LOCK_MAIN;
    if (locks)
        erts_proc_unlock(c_p, locks);
    return am_ok;
}
2104
/*
 * Common implementation of erlang:cancel_timer() and
 * erlang:read_timer().  Dispatch:
 *  - timer owned by this scheduler: access it directly;
 *  - timer aimed at the calling process: fast remote access via the
 *    process' own bif_timers tree;
 *  - otherwise: ship an ErtsBifTimerRequest to the owning scheduler;
 *    a synchronous caller then traps into erts_await_result waiting
 *    for the reply message.
 */
static BIF_RETTYPE
access_bif_timer(Process *c_p, Eterm tref, int cancel, int async, int info)
{
    BIF_RETTYPE ret;
    ErtsSchedulerData *esdp;
    Uint32 sid;
    Uint32 *trefn;
    Eterm res;

    if (is_not_internal_ref(tref)) {
        /* External refs cannot name a local timer; non-refs are
         * plain errors. */
        if (is_not_ref(tref))
            goto badarg;
        else
            goto no_timer;
    }

    esdp = erts_proc_sched_data(c_p);

    trefn = internal_ref_numbers(tref);
    sid = erts_get_ref_numbers_thr_id(trefn);
    if (sid < 1 || erts_no_schedulers < sid)
        goto no_timer;

    if (sid == (Uint32) esdp->no) {
        res = access_sched_local_btm(c_p, c_p->common.id,
                                     tref, trefn, NULL,
                                     async, cancel, !async,
                                     info);
        ERTS_BIF_PREP_RET(ret, res);
    }
    else if (try_access_sched_remote_btm(esdp, c_p,
                                         sid, tref, trefn,
                                         async, cancel,
                                         info, &res)) {
        ERTS_BIF_PREP_RET(ret, res);
    }
    else {
        /*
         * Schedule access for execution on
         * remote scheduler...
         */
        ErtsBifTimerRequest *req = erts_alloc(ERTS_ALC_T_TIMER_REQUEST,
                                              sizeof(ErtsBifTimerRequest));

        req->flags = 0;
        if (cancel)
            req->flags |= ERTS_BTM_REQ_FLG_CANCEL;
        if (async)
            req->flags |= ERTS_BTM_REQ_FLG_ASYNC;
        if (info)
            req->flags |= ERTS_BTM_REQ_FLG_INFO;

        req->pid = c_p->common.id;

        req->trefn[0] = trefn[0];
        req->trefn[1] = trefn[1];
        req->trefn[2] = trefn[2];

        if (async)
            ERTS_BIF_PREP_RET(ret, am_ok);
        else {
            Eterm *hp, rref;
            Uint32 *rrefn;

            /* Create the reply reference the remote scheduler will
             * answer with. */
            hp = HAlloc(c_p, ERTS_REF_THING_SIZE);
            rref = erts_sched_make_ref_in_buffer(esdp, hp);
            rrefn = internal_ref_numbers(rref);

            req->rrefn[0] = rrefn[0];
            req->rrefn[1] = rrefn[1];
            req->rrefn[2] = rrefn[2];

            /*
             * Caller needs to wait for a message containing
             * the ref that we just created. No such message
             * can exist in callers message queue at this time.
             * We therefore move the save pointer of the
             * callers message queue to the end of the queue.
             *
             * NOTE: It is of vital importance that the caller
             *       immediately do a receive unconditionaly
             *       waiting for the message with the reference;
             *       otherwise, next receive will *not* work
             *       as expected!
             */
            ERTS_RECV_MARK_SAVE(c_p);
            ERTS_RECV_MARK_SET(c_p);

            ERTS_BIF_PREP_TRAP1(ret, erts_await_result, c_p, rref);
        }

        erts_schedule_misc_aux_work(sid,
                                    bif_timer_access_request,
                                    (void *) req);
    }

    return ret;

badarg:
    ERTS_BIF_PREP_ERROR(ret, c_p, BADARG);
    return ret;

no_timer:
    return no_timer_result(c_p, tref, cancel, async, info);
}
2210
2211 static ERTS_INLINE int
bool_arg(Eterm val,int * argp)2212 bool_arg(Eterm val, int *argp)
2213 {
2214 switch (val) {
2215 case am_true: *argp = 1; return 1;
2216 case am_false: *argp = 0; return 1;
2217 default: return 0;
2218 }
2219 }
2220
2221 static ERTS_INLINE int
parse_bif_timer_options(Eterm option_list,int * async,int * info,int * abs)2222 parse_bif_timer_options(Eterm option_list, int *async,
2223 int *info, int *abs)
2224 {
2225 Eterm list = option_list;
2226
2227 if (async)
2228 *async = 0;
2229 if (info)
2230 *info = 1;
2231 if (abs)
2232 *abs = 0;
2233
2234 while (is_list(list)) {
2235 Eterm *consp, *tp, opt;
2236
2237 consp = list_val(list);
2238 opt = CAR(consp);
2239 if (is_not_tuple(opt))
2240 return 0;
2241
2242 tp = tuple_val(opt);
2243 if (arityval(tp[0]) != 2)
2244 return 0;
2245
2246 switch (tp[1]) {
2247 case am_async:
2248 if (!async || !bool_arg(tp[2], async))
2249 return 0;
2250 break;
2251 case am_info:
2252 if (!info || !bool_arg(tp[2], info))
2253 return 0;
2254 break;
2255 case am_abs:
2256 if (!abs || !bool_arg(tp[2], abs))
2257 return 0;
2258 break;
2259 default:
2260 return 0;
2261 }
2262
2263 list = CDR(consp);
2264 }
2265
2266 if (is_not_nil(list))
2267 return 0;
2268 return 1;
2269 }
2270
/*
 * Cancel one BIF timer owned by an exiting process. Used as the
 * foreach-destroy callback when tearing down a process' BIF timer
 * tree (see erts_cancel_bif_timers()). Returns reductions consumed.
 */
static int
exit_cancel_bif_timer(ErtsBifTimer *tmr, void *vesdp, Sint reds)
{
#define ERTS_BTM_CANCEL_REDS 80
    ErtsSchedulerData *esdp = (ErtsSchedulerData *) vesdp;
    Uint32 sid, roflgs;
    erts_aint_t state;
    int is_hlt;

    /* Race with the timeout: whoever moves the state away from
     * ACTIVE owns cleanup of the timeout message buffer. */
    state = erts_atomic32_cmpxchg_acqb(&tmr->btm.state,
                                       ERTS_TMR_STATE_CANCELED,
                                       ERTS_TMR_STATE_ACTIVE);

    roflgs = tmr->type.head.roflgs;
    sid = roflgs & ERTS_TMR_ROFLG_SID_MASK; /* scheduler owning the timer */
    is_hlt = !!(roflgs & ERTS_TMR_ROFLG_HLT);

    ERTS_HLT_ASSERT(sid == erts_get_ref_numbers_thr_id(ERTS_BTM_HLT2REFN(tmr)));
    ERTS_HLT_ASSERT(tmr->btm.proc_tree.parent
                    != ERTS_HLT_PFIELD_NOT_IN_TABLE);
    /* Unlink from the dying process' timer tree. */
    tmr->btm.proc_tree.parent = ERTS_HLT_PFIELD_NOT_IN_TABLE;

    if (state == ERTS_TMR_STATE_ACTIVE) {
        if (tmr->btm.bp)
            free_message_buffer(tmr->btm.bp);

        if (sid != (Uint32) esdp->no) {
            /* The timer lives on another scheduler; hand it over to
             * that scheduler's canceled-timer queue instead of
             * touching its data structures from here. */
            queue_canceled_timer(esdp, sid, (ErtsTimer *) tmr);
            return ERTS_BTM_CANCEL_REDS;
        }

        /* Local timer: remove it from the btm tree and from the
         * timer wheel or high-level timer structure. */
        if (tmr->btm.tree.parent != ERTS_HLT_PFIELD_NOT_IN_TABLE) {
            btm_rbt_delete(&esdp->timer_service->btm_tree, tmr);
            tmr->btm.tree.parent = ERTS_HLT_PFIELD_NOT_IN_TABLE;
        }
        if (is_hlt)
            hlt_delete_timer(esdp, &tmr->type.hlt);
        else
            cancel_tw_timer(esdp, &tmr->type.twt);
    }
    /* Drop the reference held via the process' timer tree. */
    if (is_hlt)
        hl_timer_dec_refc(&tmr->type.hlt, roflgs);
    else
        tw_timer_dec_refc(&tmr->type.twt);
    return ERTS_BTM_CANCEL_REDS;
}
2317
/*
 * Yield state for erts_cancel_bif_timers(). Allocated on the heap
 * only when the destroy operation has to yield; otherwise it lives
 * on the stack for the duration of one call.
 */
typedef struct {
    ErtsBifTimers *bif_timers;
    union {
        proc_btm_rbt_yield_state_t proc_btm_yield_state;
    } u;
} ErtsBifTimerYieldState;

/*
 * Cancel all BIF timers of an exiting process, yielding after 'reds'
 * reductions. '*vyspp' carries saved yield state between calls (NULL
 * on the first call; reset to NULL when done). Returns > 0 when all
 * timers have been canceled, otherwise the caller must call again.
 */
int erts_cancel_bif_timers(Process *p, ErtsBifTimers **btm, void **vyspp, int reds)
{
    ErtsSchedulerData *esdp = erts_proc_sched_data(p);
    ErtsBifTimerYieldState ys = {*btm, {ERTS_RBT_YIELD_STAT_INITER}};
    ErtsBifTimerYieldState *ysp;
    int res;

    /* Resume from saved state if we yielded last time. */
    ysp = (ErtsBifTimerYieldState *) *vyspp;
    if (!ysp)
        ysp = &ys;

    res = proc_btm_rbt_foreach_destroy_yielding(&ysp->bif_timers,
                                                exit_cancel_bif_timer,
                                                (void *) esdp,
                                                &ysp->u.proc_btm_yield_state,
                                                reds);

    if (res > 0) {
        /* Done: free any heap-allocated yield state. */
        if (ysp != &ys)
            erts_free(ERTS_ALC_T_BTM_YIELD_STATE, ysp);
        *vyspp = NULL;
    }
    else {

        /* Yielding: move stack state to the heap so it survives
         * until the next call. */
        if (ysp == &ys) {
            ysp = erts_alloc(ERTS_ALC_T_BTM_YIELD_STATE,
                             sizeof(ErtsBifTimerYieldState));
            sys_memcpy((void *) ysp, (void *) &ys,
                       sizeof(ErtsBifTimerYieldState));
        }

        *vyspp = (void *) ysp;
    }

    return res;

}
2362
/*
 * Parse a timeout argument (milliseconds as an Erlang integer) into
 * an internal timeout position in clock ticks.
 *
 * Outputs: '*conv_arg' raw millisecond value (optional), '*tposp'
 * timeout position in clock ticks, '*stimep' set when the timeout is
 * "short" (see ERTS_BIF_TIMER_SHORT_TIME), '*msp' milliseconds left
 * (optional). Returns 0 on success, -1 on bad argument, and 1 when
 * the value is valid but out of the representable timer range (the
 * caller treats that as "no timer"/badarg as appropriate).
 */
static ERTS_INLINE int
parse_timeout_pos(ErtsSchedulerData *esdp, Eterm arg,
                  ErtsMonotonicTime *conv_arg, int abs,
                  ErtsMonotonicTime *tposp, int *stimep,
                  ErtsMonotonicTime *msp)
{
    ErtsMonotonicTime t, now;

    if (!term_to_Sint64(arg, &t)) {
        /* Not representable as a 64-bit signed integer... */
        ERTS_HLT_ASSERT(!is_small(arg));
        if (!is_big(arg))
            return -1;

        /* Huge positive value (or any huge absolute value): out of
         * range but syntactically a valid timeout. */
        if (abs || !big_sign(arg))
            return 1;

        /* Huge negative relative timeout: bad argument. */
        return -1;
    }

    if (conv_arg)
        *conv_arg = t;

    now = erts_get_monotonic_time(esdp);

    if (abs) {
        t += -1*ERTS_MONOTONIC_OFFSET_MSEC; /* external to internal */
        if (t < ERTS_MONOTONIC_TO_MSEC(ERTS_MONOTONIC_BEGIN))
            return 1;
        if (t > ERTS_MONOTONIC_TO_MSEC(ERTS_MONOTONIC_END))
            return 1;
        if (msp)
            *msp = t - ERTS_MONOTONIC_TO_MSEC(now);

        *stimep = (t - ERTS_MONOTONIC_TO_MSEC(esdp->last_monotonic_time)
                   < ERTS_BIF_TIMER_SHORT_TIME);
        *tposp = ERTS_MSEC_TO_CLKTCKS(t);
    }
    else {
        ErtsMonotonicTime ticks;

        if (t < 0)
            return -1;

        if (msp)
            *msp = t;

        ticks = ERTS_MSEC_TO_CLKTCKS(t);

        /* With a resolution above 1000 Hz the conversion may have
         * overflowed into a negative value. */
        if (ERTS_CLKTCK_RESOLUTION > 1000 && ticks < 0)
            return 1;

        ERTS_HLT_ASSERT(ticks >= 0);

        /* Round up to the next tick boundary relative to 'now'. */
        ticks += ERTS_MONOTONIC_TO_CLKTCKS(now-1);
        ticks += 1;

        if (ticks < ERTS_MONOTONIC_TO_CLKTCKS(ERTS_MONOTONIC_BEGIN))
            return 1;
        if (ticks > ERTS_MONOTONIC_TO_CLKTCKS(ERTS_MONOTONIC_END))
            return 1;

        *stimep = (t < ERTS_BIF_TIMER_SHORT_TIME);
        *tposp = ticks;
    }

    return 0;
}
2430
2431 /*
2432 *
2433 * The BIF timer BIFs...
2434 */
2435
send_after_3(BIF_ALIST_3)2436 BIF_RETTYPE send_after_3(BIF_ALIST_3)
2437 {
2438 ErtsMonotonicTime timeout_pos, tmo;
2439 int short_time, tres;
2440
2441 tres = parse_timeout_pos(erts_proc_sched_data(BIF_P), BIF_ARG_1,
2442 NULL, 0, &timeout_pos, &short_time, &tmo);
2443 if (tres != 0)
2444 BIF_ERROR(BIF_P, BADARG);
2445
2446 return setup_bif_timer(BIF_P, tmo < ERTS_TIMER_WHEEL_MSEC,
2447 timeout_pos, short_time, BIF_ARG_2,
2448 BIF_ARG_3, 0);
2449 }
2450
send_after_4(BIF_ALIST_4)2451 BIF_RETTYPE send_after_4(BIF_ALIST_4)
2452 {
2453 ErtsMonotonicTime timeout_pos, tmo;
2454 int short_time, abs, tres;
2455
2456 if (!parse_bif_timer_options(BIF_ARG_4, NULL, NULL, &abs))
2457 BIF_ERROR(BIF_P, BADARG);
2458
2459 tres = parse_timeout_pos(erts_proc_sched_data(BIF_P), BIF_ARG_1, NULL,
2460 abs, &timeout_pos, &short_time, &tmo);
2461 if (tres != 0)
2462 BIF_ERROR(BIF_P, BADARG);
2463
2464 return setup_bif_timer(BIF_P, tmo < ERTS_TIMER_WHEEL_MSEC,
2465 timeout_pos, short_time, BIF_ARG_2,
2466 BIF_ARG_3, 0);
2467 }
2468
start_timer_3(BIF_ALIST_3)2469 BIF_RETTYPE start_timer_3(BIF_ALIST_3)
2470 {
2471 ErtsMonotonicTime timeout_pos, tmo;
2472 int short_time, tres;
2473
2474 tres = parse_timeout_pos(erts_proc_sched_data(BIF_P), BIF_ARG_1, NULL,
2475 0, &timeout_pos, &short_time, &tmo);
2476 if (tres != 0)
2477 BIF_ERROR(BIF_P, BADARG);
2478
2479 return setup_bif_timer(BIF_P, tmo < ERTS_TIMER_WHEEL_MSEC,
2480 timeout_pos, short_time, BIF_ARG_2,
2481 BIF_ARG_3, !0);
2482 }
2483
start_timer_4(BIF_ALIST_4)2484 BIF_RETTYPE start_timer_4(BIF_ALIST_4)
2485 {
2486 ErtsMonotonicTime timeout_pos, tmo;
2487 int short_time, abs, tres;
2488
2489 if (!parse_bif_timer_options(BIF_ARG_4, NULL, NULL, &abs))
2490 BIF_ERROR(BIF_P, BADARG);
2491
2492 tres = parse_timeout_pos(erts_proc_sched_data(BIF_P), BIF_ARG_1, NULL,
2493 abs, &timeout_pos, &short_time, &tmo);
2494 if (tres != 0)
2495 BIF_ERROR(BIF_P, BADARG);
2496
2497 return setup_bif_timer(BIF_P, tmo < ERTS_TIMER_WHEEL_MSEC,
2498 timeout_pos, short_time, BIF_ARG_2,
2499 BIF_ARG_3, !0);
2500 }
2501
cancel_timer_1(BIF_ALIST_1)2502 BIF_RETTYPE cancel_timer_1(BIF_ALIST_1)
2503 {
2504 return access_bif_timer(BIF_P, BIF_ARG_1, 1, 0, 1);
2505 }
2506
cancel_timer_2(BIF_ALIST_2)2507 BIF_RETTYPE cancel_timer_2(BIF_ALIST_2)
2508 {
2509 BIF_RETTYPE ret;
2510 int async, info;
2511
2512 if (parse_bif_timer_options(BIF_ARG_2, &async, &info, NULL))
2513 return access_bif_timer(BIF_P, BIF_ARG_1, 1, async, info);
2514
2515 ERTS_BIF_PREP_ERROR(ret, BIF_P, BADARG);
2516 return ret;
2517 }
2518
read_timer_1(BIF_ALIST_1)2519 BIF_RETTYPE read_timer_1(BIF_ALIST_1)
2520 {
2521 return access_bif_timer(BIF_P, BIF_ARG_1, 0, 0, 1);
2522 }
2523
read_timer_2(BIF_ALIST_2)2524 BIF_RETTYPE read_timer_2(BIF_ALIST_2)
2525 {
2526 BIF_RETTYPE ret;
2527 int async;
2528
2529 if (parse_bif_timer_options(BIF_ARG_2, &async, NULL, NULL))
2530 return access_bif_timer(BIF_P, BIF_ARG_1, 0, async, 1);
2531
2532 ERTS_BIF_PREP_ERROR(ret, BIF_P, BADARG);
2533 return ret;
2534 }
2535
2536 static void
start_callback_timer(ErtsSchedulerData * esdp,int twt,ErtsMonotonicTime timeout_pos,void (* callback)(void *),void * arg)2537 start_callback_timer(ErtsSchedulerData *esdp,
2538 int twt,
2539 ErtsMonotonicTime timeout_pos,
2540 void (*callback)(void *),
2541 void *arg)
2542
2543 {
2544 ErtsCreateTimerFunc create_timer = (twt
2545 ? create_tw_timer
2546 : create_hl_timer);
2547 (void) create_timer(esdp, timeout_pos, 0,
2548 ERTS_TMR_CALLBACK, NULL,
2549 NIL, THE_NON_VALUE, NULL,
2550 callback, arg);
2551 }
2552
2553 typedef struct {
2554 int twt;
2555 ErtsMonotonicTime timeout_pos;
2556 void (*callback)(void *);
2557 void *arg;
2558 } ErtsStartCallbackTimerRequest;
2559
2560 static void
scheduled_start_callback_timer(void * vsctr)2561 scheduled_start_callback_timer(void *vsctr)
2562 {
2563 ErtsStartCallbackTimerRequest *sctr
2564 = (ErtsStartCallbackTimerRequest *) vsctr;
2565
2566 start_callback_timer(erts_get_scheduler_data(),
2567 sctr->twt,
2568 sctr->timeout_pos,
2569 sctr->callback,
2570 sctr->arg);
2571
2572 erts_free(ERTS_ALC_T_TIMER_REQUEST, vsctr);
2573 }
2574
2575 void
erts_start_timer_callback(ErtsMonotonicTime tmo,void (* callback)(void *),void * arg)2576 erts_start_timer_callback(ErtsMonotonicTime tmo,
2577 void (*callback)(void *),
2578 void *arg)
2579 {
2580 ErtsSchedulerData *esdp;
2581 ErtsMonotonicTime timeout_pos;
2582 int twt;
2583
2584 esdp = erts_get_scheduler_data();
2585 timeout_pos = get_timeout_pos(erts_get_monotonic_time(esdp),
2586 tmo);
2587 twt = tmo < ERTS_TIMER_WHEEL_MSEC;
2588
2589 if (esdp && !ERTS_SCHEDULER_IS_DIRTY(esdp))
2590 start_callback_timer(esdp,
2591 twt,
2592 timeout_pos,
2593 callback,
2594 arg);
2595 else {
2596 ErtsStartCallbackTimerRequest *sctr;
2597 sctr = erts_alloc(ERTS_ALC_T_TIMER_REQUEST,
2598 sizeof(ErtsStartCallbackTimerRequest));
2599 sctr->twt = twt;
2600 sctr->timeout_pos = timeout_pos;
2601 sctr->callback = callback;
2602 sctr->arg = arg;
2603 erts_schedule_misc_aux_work(1,
2604 scheduled_start_callback_timer,
2605 (void *) sctr);
2606 }
2607 }
2608
2609 /*
2610 * Process and Port timer functionality.
2611 *
2612 * NOTE! These are only allowed to be called by a
2613 * scheduler thread that currently is
2614 * executing the process or port.
2615 */
2616
2617 static ERTS_INLINE void
set_proc_timer_common(Process * c_p,ErtsSchedulerData * esdp,Sint64 tmo,ErtsMonotonicTime timeout_pos,int short_time)2618 set_proc_timer_common(Process *c_p, ErtsSchedulerData *esdp, Sint64 tmo,
2619 ErtsMonotonicTime timeout_pos, int short_time)
2620 {
2621 void *tmr;
2622 check_canceled_queue(esdp, esdp->timer_service);
2623
2624 if (tmo == 0)
2625 c_p->flags |= F_TIMO;
2626 else {
2627 ErtsCreateTimerFunc create_timer;
2628
2629 c_p->flags |= F_INSLPQUEUE;
2630 c_p->flags &= ~F_TIMO;
2631
2632 create_timer = (tmo < ERTS_TIMER_WHEEL_MSEC
2633 ? create_tw_timer
2634 : create_hl_timer);
2635 tmr = (void *) create_timer(esdp, timeout_pos, short_time,
2636 ERTS_TMR_PROC, (void *) c_p,
2637 c_p->common.id, THE_NON_VALUE,
2638 NULL, NULL, NULL);
2639 erts_atomic_set_relb(&c_p->common.timer, (erts_aint_t) tmr);
2640 }
2641 }
2642
2643 int
erts_set_proc_timer_term(Process * c_p,Eterm etmo)2644 erts_set_proc_timer_term(Process *c_p, Eterm etmo)
2645 {
2646 ErtsSchedulerData *esdp = erts_proc_sched_data(c_p);
2647 ErtsMonotonicTime tmo, timeout_pos;
2648 int short_time, tres;
2649
2650 ERTS_HLT_ASSERT(erts_atomic_read_nob(&c_p->common.timer)
2651 == ERTS_PTMR_NONE);
2652
2653 tres = parse_timeout_pos(esdp, etmo, &tmo, 0,
2654 &timeout_pos, &short_time, NULL);
2655 if (tres != 0)
2656 return tres;
2657
2658 if ((tmo >> 32) != 0)
2659 return 1;
2660
2661 set_proc_timer_common(c_p, esdp, tmo, timeout_pos, short_time);
2662 return 0;
2663 }
2664
2665 void
erts_set_proc_timer_uword(Process * c_p,UWord tmo)2666 erts_set_proc_timer_uword(Process *c_p, UWord tmo)
2667 {
2668 ErtsSchedulerData *esdp = erts_proc_sched_data(c_p);
2669
2670 ERTS_HLT_ASSERT(erts_atomic_read_nob(&c_p->common.timer)
2671 == ERTS_PTMR_NONE);
2672
2673 #ifndef ARCH_32
2674 ERTS_HLT_ASSERT((tmo >> 32) == (UWord) 0);
2675 #endif
2676
2677 if (tmo == 0)
2678 c_p->flags |= F_TIMO;
2679 else {
2680 ErtsMonotonicTime timeout_pos;
2681 timeout_pos = get_timeout_pos(erts_get_monotonic_time(esdp),
2682 (ErtsMonotonicTime) tmo);
2683 set_proc_timer_common(c_p, esdp, (ErtsMonotonicTime) tmo,
2684 timeout_pos,
2685 tmo < ERTS_BIF_TIMER_SHORT_TIME);
2686 }
2687 }
2688
/*
 * Cancel the process (receive) timer of 'c_p'. Must be called by the
 * scheduler currently executing the process.
 */
void
erts_cancel_proc_timer(Process *c_p)
{
    erts_aint_t tval;
    /* Atomically steal the timer pointer so a concurrent timeout
     * cannot use it after this point. */
    tval = erts_atomic_xchg_acqb(&c_p->common.timer,
                                 ERTS_PTMR_NONE);
    c_p->flags &= ~(F_INSLPQUEUE|F_TIMO);
    if (tval == ERTS_PTMR_NONE)
        return;
    if (tval == ERTS_PTMR_TIMEDOUT) {
        /* Timer already fired; only the marker needs clearing. */
        erts_atomic_set_nob(&c_p->common.timer, ERTS_PTMR_NONE);
        return;
    }
    /* A live timer structure remains; finish cancellation (possibly
     * on the scheduler owning the timer). */
    continue_cancel_ptimer(erts_proc_sched_data(c_p),
                           (ErtsTimer *) tval);
}
2705
2706 void
erts_set_port_timer(Port * c_prt,Sint64 tmo)2707 erts_set_port_timer(Port *c_prt, Sint64 tmo)
2708 {
2709 void *tmr;
2710 ErtsSchedulerData *esdp = erts_get_scheduler_data();
2711 ErtsMonotonicTime timeout_pos;
2712 ErtsCreateTimerFunc create_timer;
2713
2714 if (erts_atomic_read_nob(&c_prt->common.timer) != ERTS_PTMR_NONE)
2715 erts_cancel_port_timer(c_prt);
2716
2717 check_canceled_queue(esdp, esdp->timer_service);
2718
2719 if (tmo == 0) {
2720 erts_atomic_set_relb(&c_prt->common.timer, ERTS_PTMR_TIMEDOUT);
2721 erts_port_task_schedule(c_prt->common.id,
2722 &c_prt->timeout_task,
2723 ERTS_PORT_TASK_TIMEOUT);
2724 } else {
2725
2726 timeout_pos = get_timeout_pos(erts_get_monotonic_time(esdp), tmo);
2727
2728 create_timer = (tmo < ERTS_TIMER_WHEEL_MSEC
2729 ? create_tw_timer
2730 : create_hl_timer);
2731 tmr = (void *) create_timer(esdp, timeout_pos, 0, ERTS_TMR_PORT,
2732 (void *) c_prt, c_prt->common.id,
2733 THE_NON_VALUE, NULL, NULL, NULL);
2734 erts_atomic_set_relb(&c_prt->common.timer, (erts_aint_t) tmr);
2735 }
2736 }
2737
/*
 * Cancel the driver timer of port 'c_prt'. Must be called by the
 * scheduler currently executing the port.
 */
void
erts_cancel_port_timer(Port *c_prt)
{
    erts_aint_t tval;
    /* Atomically steal the timer pointer from the port. */
    tval = erts_atomic_xchg_acqb(&c_prt->common.timer,
                                 ERTS_PTMR_NONE);
    if (tval == ERTS_PTMR_NONE)
        return;
    if (tval == ERTS_PTMR_TIMEDOUT) {
        /* The timeout fired and scheduled (or is about to schedule)
         * the timeout port task. Wait until the scheduling is
         * visible, then abort the task. */
        while (!erts_port_task_is_scheduled(&c_prt->timeout_task))
            erts_thr_yield();
        erts_port_task_abort(&c_prt->timeout_task);
        erts_atomic_set_nob(&c_prt->common.timer, ERTS_PTMR_NONE);
        return;
    }
    /* A live timer structure remains; finish cancellation. */
    continue_cancel_ptimer(erts_get_scheduler_data(),
                           (ErtsTimer *) tval);
}
2756
2757 Sint64
erts_read_port_timer(Port * c_prt)2758 erts_read_port_timer(Port *c_prt)
2759 {
2760 ErtsTimer *tmr;
2761 erts_aint_t itmr;
2762 ErtsMonotonicTime timeout_pos;
2763
2764 itmr = erts_atomic_read_acqb(&c_prt->common.timer);
2765 if (itmr == ERTS_PTMR_NONE)
2766 return (Sint64) -1;
2767 if (itmr == ERTS_PTMR_TIMEDOUT)
2768 return (Sint64) 0;
2769 tmr = (ErtsTimer *) itmr;
2770 if (tmr->head.roflgs & ERTS_TMR_ROFLG_HLT)
2771 timeout_pos = tmr->hlt.timeout;
2772 else
2773 timeout_pos = erts_tweel_read_timeout(&tmr->twt.u.tw_tmr);
2774 return get_time_left(NULL, timeout_pos);
2775 }
2776
2777 /*
2778 * Debug stuff...
2779 */
2780
2781 typedef struct {
2782 fmtfn_t to;
2783 void *to_arg;
2784 ErtsMonotonicTime now;
2785 } ErtsBTMPrint;
2786
2787 static void
btm_print(ErtsBifTimer * tmr,void * vbtmp,ErtsMonotonicTime tpos,int is_hlt)2788 btm_print(ErtsBifTimer *tmr, void *vbtmp, ErtsMonotonicTime tpos, int is_hlt)
2789 {
2790 ErtsBTMPrint *btmp = (ErtsBTMPrint *) vbtmp;
2791 ErtsMonotonicTime left;
2792 Eterm receiver;
2793
2794 if (is_hlt) {
2795 ERTS_HLT_ASSERT(tmr->type.head.roflgs & ERTS_TMR_ROFLG_HLT);
2796 if (tmr->type.hlt.timeout <= btmp->now)
2797 left = 0;
2798 else
2799 left = ERTS_CLKTCKS_TO_MSEC(tmr->type.hlt.timeout - btmp->now);
2800 }
2801 else {
2802 ERTS_HLT_ASSERT(!(tmr->type.head.roflgs & ERTS_TMR_ROFLG_HLT));
2803 if (tpos <= btmp->now)
2804 left = 0;
2805 else
2806 left = ERTS_CLKTCKS_TO_MSEC(tpos - btmp->now);
2807 }
2808
2809 receiver = ((tmr->type.head.roflgs & ERTS_TMR_ROFLG_REG_NAME)
2810 ? tmr->type.head.receiver.name
2811 : tmr->type.head.receiver.proc->common.id);
2812
2813 erts_print(btmp->to, btmp->to_arg,
2814 "=timer:%T\n"
2815 "Message: %T\n"
2816 "Time left: %b64d\n",
2817 receiver,
2818 tmr->btm.message,
2819 (Sint64) left);
2820 }
2821
2822 static int
btm_tree_print(ErtsBifTimer * tmr,void * vbtmp,Sint reds)2823 btm_tree_print(ErtsBifTimer *tmr, void *vbtmp, Sint reds)
2824 {
2825 int is_hlt = !!(tmr->type.head.roflgs & ERTS_TMR_ROFLG_HLT);
2826 ErtsMonotonicTime tpos;
2827
2828 if (erts_atomic32_read_nob(&tmr->btm.state) != ERTS_TMR_STATE_ACTIVE)
2829 return 1;
2830
2831 if (is_hlt)
2832 tpos = 0;
2833 else
2834 tpos = erts_tweel_read_timeout(&tmr->type.twt.u.tw_tmr);
2835 btm_print(tmr, vbtmp, tpos, is_hlt);
2836 return 1;
2837 }
2838
2839 void
erts_print_bif_timer_info(fmtfn_t to,void * to_arg)2840 erts_print_bif_timer_info(fmtfn_t to, void *to_arg)
2841 {
2842 ErtsBTMPrint btmp;
2843 int six;
2844
2845 if (!ERTS_IS_CRASH_DUMPING)
2846 ERTS_INTERNAL_ERROR("Not crash dumping");
2847
2848 btmp.to = to;
2849 btmp.to_arg = to_arg;
2850 btmp.now = erts_get_monotonic_time(NULL);
2851 btmp.now = ERTS_MONOTONIC_TO_CLKTCKS(btmp.now);
2852
2853 for (six = 0; six < erts_no_schedulers; six++) {
2854 ErtsHLTimerService *srv =
2855 erts_aligned_scheduler_data[six].esd.timer_service;
2856 btm_rbt_foreach(srv->btm_tree, btm_tree_print, (void *) &btmp);
2857 }
2858 }
2859
2860 typedef struct {
2861 void (*func)(Eterm,
2862 Eterm,
2863 ErlHeapFragment *,
2864 void *);
2865 void *arg;
2866 } ErtsBTMForeachDebug;
2867
2868 static int
debug_btm_foreach(ErtsBifTimer * tmr,void * vbtmfd,Sint reds)2869 debug_btm_foreach(ErtsBifTimer *tmr, void *vbtmfd, Sint reds)
2870 {
2871 if (erts_atomic32_read_nob(&tmr->btm.state) == ERTS_TMR_STATE_ACTIVE) {
2872 ErtsBTMForeachDebug *btmfd = (ErtsBTMForeachDebug *) vbtmfd;
2873 Eterm id = ((tmr->type.head.roflgs & ERTS_TMR_ROFLG_REG_NAME)
2874 ? tmr->type.head.receiver.name
2875 : tmr->type.head.receiver.proc->common.id);
2876 (*btmfd->func)(id, tmr->btm.message, tmr->btm.bp, btmfd->arg);
2877 }
2878 return 1;
2879 }
2880
2881 void
erts_debug_bif_timer_foreach(void (* func)(Eterm,Eterm,ErlHeapFragment *,void *),void * arg)2882 erts_debug_bif_timer_foreach(void (*func)(Eterm,
2883 Eterm,
2884 ErlHeapFragment *,
2885 void *),
2886 void *arg)
2887 {
2888 ErtsBTMForeachDebug btmfd;
2889 int six;
2890
2891 btmfd.func = func;
2892 btmfd.arg = arg;
2893
2894 if (!erts_thr_progress_is_blocking())
2895 ERTS_INTERNAL_ERROR("Not blocking thread progress");
2896
2897 for (six = 0; six < erts_no_schedulers; six++) {
2898 ErtsHLTimerService *srv =
2899 erts_aligned_scheduler_data[six].esd.timer_service;
2900 btm_rbt_foreach(srv->btm_tree,
2901 debug_btm_foreach,
2902 (void *) &btmfd);
2903 }
2904 }
2905
2906 typedef struct {
2907 void (*tclbk)(void *);
2908 void (*func)(void *,
2909 ErtsMonotonicTime,
2910 void *);
2911 void *arg;
2912 } ErtsDebugForeachCallbackTimer;
2913
2914 static void
debug_callback_timer_foreach_list(ErtsHLTimer * tmr,void * vdfct)2915 debug_callback_timer_foreach_list(ErtsHLTimer *tmr, void *vdfct)
2916 {
2917 ErtsDebugForeachCallbackTimer *dfct
2918 = (ErtsDebugForeachCallbackTimer *) vdfct;
2919
2920 if ((tmr->head.roflgs & ERTS_TMR_ROFLG_CALLBACK)
2921 && (tmr->head.receiver.callback == dfct->tclbk))
2922 (*dfct->func)(dfct->arg,
2923 tmr->timeout,
2924 tmr->head.u.arg);
2925 }
2926
2927 static int
debug_callback_timer_foreach(ErtsHLTimer * tmr,void * vdfct,Sint reds)2928 debug_callback_timer_foreach(ErtsHLTimer *tmr, void *vdfct, Sint reds)
2929 {
2930 ErtsDebugForeachCallbackTimer *dfct
2931 = (ErtsDebugForeachCallbackTimer *) vdfct;
2932
2933 if (tmr->time.tree.same_time)
2934 same_time_list_foreach(tmr->time.tree.same_time,
2935 debug_callback_timer_foreach_list,
2936 vdfct);
2937
2938 if ((tmr->head.roflgs & ERTS_TMR_ROFLG_CALLBACK)
2939 && (tmr->head.receiver.callback == dfct->tclbk))
2940 (*dfct->func)(dfct->arg,
2941 tmr->timeout,
2942 tmr->head.u.arg);
2943 return 1;
2944 }
2945
2946 static void
debug_tw_callback_timer(void * vdfct,ErtsMonotonicTime timeout_pos,void * vtwtp)2947 debug_tw_callback_timer(void *vdfct,
2948 ErtsMonotonicTime timeout_pos,
2949 void *vtwtp)
2950 {
2951 ErtsTWTimer *twtp = (ErtsTWTimer *) vtwtp;
2952 ErtsDebugForeachCallbackTimer *dfct
2953 = (ErtsDebugForeachCallbackTimer *) vdfct;
2954
2955 if (twtp->head.receiver.callback == dfct->tclbk)
2956 (*dfct->func)(dfct->arg,
2957 timeout_pos,
2958 twtp->head.u.arg);
2959 }
2960
2961 void
erts_debug_callback_timer_foreach(void (* tclbk)(void *),void (* func)(void *,ErtsMonotonicTime,void *),void * arg)2962 erts_debug_callback_timer_foreach(void (*tclbk)(void *),
2963 void (*func)(void *,
2964 ErtsMonotonicTime,
2965 void *),
2966 void *arg)
2967 {
2968 int six;
2969 ErtsDebugForeachCallbackTimer dfct;
2970
2971 dfct.tclbk = tclbk;
2972 dfct.func = func;
2973 dfct.arg = arg;
2974
2975 if (!erts_thr_progress_is_blocking())
2976 ERTS_INTERNAL_ERROR("Not blocking thread progress");
2977
2978 for (six = 0; six < erts_no_schedulers; six++) {
2979 ErtsHLTimerService *srv =
2980 erts_aligned_scheduler_data[six].esd.timer_service;
2981 ErtsTimerWheel *twheel =
2982 erts_aligned_scheduler_data[six].esd.timer_wheel;
2983
2984 erts_twheel_debug_foreach(twheel,
2985 tw_callback_timeout,
2986 debug_tw_callback_timer,
2987 (void *) &dfct);
2988
2989 if (srv->yield.root)
2990 debug_callback_timer_foreach(srv->yield.root,
2991 (void *) &dfct,
2992 -1);
2993
2994 time_rbt_foreach(srv->time_tree,
2995 debug_callback_timer_foreach,
2996 (void *) &dfct);
2997 }
2998 }
2999
3000 #ifdef ERTS_HLT_HARD_DEBUG
3001
/* Shared state for the hard-debug tree consistency checks below. */
typedef struct {
    ErtsHLTimerService *srv;
    int found_root;          /* exactly one root must be seen */
    ErtsHLTimer **rootpp;    /* expected root slot */
} ErtsHdbgHLT;

/*
 * Check one timer on a same-time (circular, doubly linked) list:
 * parent field tagging, list linkage, and presence in the btm tree.
 */
static void
st_hdbg_func(ErtsHLTimer *tmr, void *vhdbg)
{
    ErtsHdbgHLT *hdbg = (ErtsHdbgHLT *) vhdbg;
    ErtsHLTimer **rootpp;
    ERTS_HLT_ASSERT(tmr->time.tree.parent & ERTS_HLT_PFLG_SAME_TIME);
    if (tmr->time.tree.parent == ERTS_HLT_PFLG_SAME_TIME) {
        /* Plain list member: must not be the list root. */
        ERTS_HLT_ASSERT(tmr != *hdbg->rootpp);
    }
    else {
        /* List root: parent encodes the root-pointer address. */
        rootpp = (ErtsHLTimer **) (tmr->time.tree.parent
                                   & ~ERTS_HLT_PFLG_SAME_TIME);
        ERTS_HLT_ASSERT(rootpp == hdbg->rootpp);
        ERTS_HLT_ASSERT(tmr == *rootpp);
        ERTS_HLT_ASSERT(!hdbg->found_root);
        hdbg->found_root = 1;
    }
    ERTS_HLT_ASSERT(tmr->time.tree.u.l.next->time.tree.u.l.prev == tmr);
    ERTS_HLT_ASSERT(tmr->time.tree.u.l.prev->time.tree.u.l.next == tmr);
    ERTS_HLT_ASSERT(btm_rbt_lookup(hdbg->srv->btm_tree, ERTS_BTM_HLT2REFN(tmr)) == tmr);
}

/*
 * Check one node of the time tree: parent/child linkage, single
 * root, btm-tree membership for BIF timers, and (recursively) any
 * same-time list hanging off the node.
 */
static void
tt_hdbg_func(ErtsHLTimer *tmr, void *vhdbg)
{
    ErtsHdbgHLT *hdbg = (ErtsHdbgHLT *) vhdbg;
    ErtsHLTimer *prnt;
    ERTS_HLT_ASSERT((tmr->time.tree.parent & ERTS_HLT_PFLG_SAME_TIME) == 0);
    prnt = (ErtsHLTimer *) (tmr->time.tree.parent & ~ERTS_HLT_PFLGS_MASK);
    if (prnt) {
        ERTS_HLT_ASSERT(prnt->time.tree.u.t.left == tmr
                        || prnt->time.tree.u.t.right == tmr);
    }
    else {
        ERTS_HLT_ASSERT(!hdbg->found_root);
        hdbg->found_root = 1;
        ERTS_HLT_ASSERT(tmr == *hdbg->rootpp);
    }
    if (tmr->time.tree.u.t.left) {
        prnt = (ErtsHLTimer *) (tmr->time.tree.u.t.left->time.tree.parent
                                & ~ERTS_HLT_PFLGS_MASK);
        ERTS_HLT_ASSERT(tmr == prnt);
    }
    if (tmr->time.tree.u.t.right) {
        prnt = (ErtsHLTimer *) (tmr->time.tree.u.t.right->time.tree.parent
                                & ~ERTS_HLT_PFLGS_MASK);
        ERTS_HLT_ASSERT(tmr == prnt);
    }
    if (tmr->head.roflgs & ERTS_TMR_ROFLG_BIF_TMR)
        ERTS_HLT_ASSERT(btm_rbt_lookup(hdbg->srv->btm_tree, ERTS_BTM_HLT2REFN(tmr)) == tmr);
    if (tmr->time.tree.same_time) {
        ErtsHdbgHLT st_hdbg;
        st_hdbg.srv = hdbg->srv;
        st_hdbg.found_root = 0;
        st_hdbg.rootpp = &tmr->time.tree.same_time;
        same_time_list_foreach(tmr->time.tree.same_time, st_hdbg_func, (void *) &st_hdbg);
        ERTS_HLT_ASSERT(st_hdbg.found_root);
    }
}

/*
 * Check one node of the btm tree: parent/child linkage, single root,
 * and that each timer is reachable from the time tree (directly or
 * via a same-time list) unless its timeout is pending.
 */
static void
bt_hdbg_func(ErtsHLTimer *tmr, void *vhdbg)
{
    ErtsHdbgHLT *hdbg = (ErtsHdbgHLT *) vhdbg;
    ErtsHLTimer *prnt;
    ERTS_HLT_ASSERT((tmr->btm.tree.parent & ERTS_HLT_PFLG_SAME_TIME) == 0);
    prnt = (ErtsHLTimer *) (tmr->btm.tree.parent & ~ERTS_HLT_PFLGS_MASK);
    if (prnt) {
        ERTS_HLT_ASSERT(prnt->btm.tree.left == tmr
                        || prnt->btm.tree.right == tmr);
    }
    else {
        ERTS_HLT_ASSERT(!hdbg->found_root);
        hdbg->found_root = 1;
        ERTS_HLT_ASSERT(tmr == *hdbg->rootpp);
    }
    if (tmr->btm.tree.left) {
        prnt = (ErtsHLTimer *) (tmr->btm.tree.left->btm.tree.parent
                                & ~ERTS_HLT_PFLGS_MASK);
        ERTS_HLT_ASSERT(tmr == prnt);
    }
    if (tmr->btm.tree.right) {
        prnt = (ErtsHLTimer *) (tmr->btm.tree.right->btm.tree.parent
                                & ~ERTS_HLT_PFLGS_MASK);
        ERTS_HLT_ASSERT(tmr == prnt);
    }
    if (tmr->pending_timeout) {
        if (tmr->pending_timeout > 0) /* container > 0 */
            ERTS_HLT_ASSERT(tmr->time.tree.parent == ERTS_HLT_PFIELD_NOT_IN_TABLE);
        else {
            ERTS_HLT_ASSERT(tmr->time.tree.parent != ERTS_HLT_PFIELD_NOT_IN_TABLE);
            ERTS_HLT_ASSERT(tmr->time.tree.parent & ERTS_HLT_PFLG_SAME_TIME);
        }
    }
    else {
        ErtsHLTimer *ttmr = time_rbt_lookup(hdbg->srv->time_tree, tmr->timeout);
        ERTS_HLT_ASSERT(ttmr);
        if (ttmr != tmr) {
            ERTS_HLT_ASSERT(ttmr->time.tree.same_time);
            ERTS_HLT_ASSERT(tmr == same_time_list_lookup(ttmr->time.tree.same_time, tmr));
        }
    }
}

/*
 * Verify the internal consistency of a timer service: both the time
 * tree and the btm tree must be well formed, each with exactly one
 * root. Compiled only under ERTS_HLT_HARD_DEBUG.
 */
static void
hdbg_chk_srv(ErtsHLTimerService *srv)
{
    if (srv->time_tree) {
        ErtsHdbgHLT hdbg;
        hdbg.srv = srv;
        hdbg.found_root = 0;
        hdbg.rootpp = &srv->time_tree;
        time_rbt_foreach(srv->time_tree, tt_hdbg_func, (void *) &hdbg);
        ERTS_HLT_ASSERT(hdbg.found_root);
    }
    if (srv->btm_tree) {
        ErtsHdbgHLT hdbg;
        hdbg.srv = srv;
        hdbg.found_root = 0;
        hdbg.rootpp = &srv->btm_tree;
        btm_rbt_foreach(srv->btm_tree, bt_hdbg_func, (void *) &hdbg);
        ERTS_HLT_ASSERT(hdbg.found_root);
    }
}
3132
3133 #endif /* ERTS_HLT_HARD_DEBUG */
3134