//
//  Copyright (C) 2011-2018  Nick Gasson
//
//  This program is free software: you can redistribute it and/or modify
//  it under the terms of the GNU General Public License as published by
//  the Free Software Foundation, either version 3 of the License, or
//  (at your option) any later version.
//
//  This program is distributed in the hope that it will be useful,
//  but WITHOUT ANY WARRANTY; without even the implied warranty of
//  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
//  GNU General Public License for more details.
//
//  You should have received a copy of the GNU General Public License
//  along with this program.  If not, see <http://www.gnu.org/licenses/>.
//

#include "rt.h"
#include "tree.h"
#include "lib.h"
#include "util.h"
#include "alloc.h"
#include "heap.h"
#include "common.h"
#include "netdb.h"
#include "cover.h"
#include "hash.h"

#include <assert.h>
#include <stdint.h>
#include <stdarg.h>
#include <inttypes.h>
#include <stdlib.h>
#include <string.h>
#include <setjmp.h>
#include <math.h>
#include <errno.h>
#include <signal.h>
#include <sys/time.h>
#include <float.h>
#include <ctype.h>
#include <time.h>

#ifdef HAVE_ALLOCA_H
#include <alloca.h>
#endif

#ifdef __MINGW32__
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#undef SEVERITY_ERROR
#endif

#define TRACE_DELTAQ  1
#define TRACE_PENDING 0
#define RT_DEBUG      0

struct uarray;

typedef void (*proc_fn_t)(int32_t reset);
typedef uint64_t (*resolution_fn_t)(const struct uarray *u);

typedef struct netgroup   netgroup_t;
typedef struct driver     driver_t;
typedef struct rt_proc    rt_proc_t;
typedef struct event      event_t;
typedef struct waveform   waveform_t;
typedef struct sens_list  sens_list_t;
typedef struct value      value_t;
typedef struct watch_list watch_list_t;
typedef struct res_memo   res_memo_t;
typedef struct callback   callback_t;
typedef struct image_map  image_map_t;
typedef struct rt_loc     rt_loc_t;
typedef struct size_list  size_list_t;

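// A process in the elaborated design: source is the tree for the process
// statement, proc_fn the compiled code for its body, and wakeup_gen a
// generation counter used to detect stale entries on sensitivity lists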
struct rt_proc {
   tree_t    source;
   proc_fn_t proc_fn;
   uint32_t  wakeup_gen;
   void     *tmp_stack;
   uint32_t  tmp_alloc;
   bool      postponed;
   bool      pending;
   uint64_t  usage;
};

typedef enum {
   E_TIMEOUT,
   E_DRIVER,
   E_PROCESS
} event_kind_t;

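// An entry in the event queue: a timeout callback, a driver update for a
// net group, or a process wakeup, discriminated by kind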
struct event {
   uint64_t      when;
   event_kind_t  kind;
   uint32_t      wakeup_gen;
   event_t      *delta_chain;
   rt_proc_t    *proc;
   netgroup_t   *group;
   timeout_fn_t  timeout_fn;
   void         *timeout_user;
};

struct waveform {
   uint64_t    when;
   waveform_t *next;
   value_t    *values;
};

struct sens_list {
   rt_proc_t    *proc;
   sens_list_t  *next;
   sens_list_t **reenq;
   uint32_t      wakeup_gen;
   netid_t       first;
   netid_t       last;
};

struct driver {
   rt_proc_t  *proc;
   waveform_t *waveforms;
};

struct value {
   value_t *next;
   union {
      char     data[0];
      uint64_t qwords[0];
   };
} __attribute__((aligned(8)));

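// A contiguous range of nets [first, first + length) that are always
// driven and observed together and so share storage, drivers, and
// resolution state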
struct netgroup {
   netid_t       first;
   uint32_t      length;
   net_flags_t   flags;
   void         *resolved;
   void         *last_value;
   value_t      *forcing;
   uint16_t      size;
   uint16_t      n_drivers;
   driver_t     *drivers;
   res_memo_t   *resolution;
   uint64_t      last_event;
   tree_t        sig_decl;
   value_t      *free_values;
   sens_list_t  *pending;
   watch_list_t *watching;
};

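// Mirrors the layout generated code uses for unconstrained array values:
// a data pointer plus left/right bounds and a direction per dimension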
struct uarray {
   void    *ptr;
   struct {
      int32_t left;
      int32_t right;
      int8_t  dir;
   } dims[1];
};

struct run_queue {
   event_t **queue;
   size_t    wr, rd;
   size_t    alloc;
};

struct watch {
   tree_t         signal;
   sig_event_fn_t fn;
   bool           pending;
   watch_t       *chain_all;
   watch_t       *chain_pending;
   netgroup_t   **groups;
   int            n_groups;
   void          *user_data;
   range_kind_t   dir;
   size_t         length;
   bool           postponed;
};

struct watch_list {
   watch_t      *watch;
   watch_list_t *next;
};

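// Memoised resolution function: tab1 caches the result for a single
// driver and tab2 for two drivers, for enumeration types with at most 16
// literals (see rt_memo_resolution_fn below)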
struct res_memo {
   resolution_fn_t fn;
   res_flags_t     flags;
   int32_t         ileft;
   int8_t          tab2[16][16];
   int8_t          tab1[16];
};

typedef enum {
   SIDE_EFFECT_ALLOW,
   SIDE_EFFECT_DISALLOW,
   SIDE_EFFECT_OCCURRED
} side_effect_t;

struct callback {
   rt_event_fn_t  fn;
   void          *user;
   callback_t    *next;
};

struct image_map {
   int32_t        kind;
   int32_t        stride;
   const char    *elems;
   const int64_t *values;
   int32_t        count;
};

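// Compact source location passed by generated code; converted on demand
// to a loc_t by from_rt_loc as the conversion is relatively expensive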
struct rt_loc {
   int32_t     first_line;
   int32_t     last_line;
   int16_t     first_column;
   int16_t     last_column;
   const char *file;
};

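// Describes one part of a signal's initial value as passed to _set_initial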
struct size_list {
   uint32_t size;
   uint32_t count;
   void    *resolution;
   uint32_t flags;
   int32_t  ileft;
};

static struct rt_proc   *procs = NULL;
static struct rt_proc   *active_proc = NULL;
static struct run_queue  run_queue;

static heap_t        eventq_heap = NULL;
static size_t        n_procs = 0;
static uint64_t      now = 0;
static int           iteration = -1;
static bool          trace_on = false;
static nvc_rusage_t  ready_rusage;
static jmp_buf       fatal_jmp;
static bool          aborted = false;
static netdb_t      *netdb = NULL;
static netgroup_t   *groups = NULL;
static sens_list_t  *pending = NULL;
static sens_list_t  *resume = NULL;
static sens_list_t  *postponed = NULL;
static watch_t      *watches = NULL;
static watch_t      *callbacks = NULL;
static event_t      *delta_proc = NULL;
static event_t      *delta_driver = NULL;
static void         *global_tmp_stack = NULL;
static void         *proc_tmp_stack = NULL;
static uint32_t      global_tmp_alloc;
static hash_t       *res_memo_hash = NULL;
static side_effect_t init_side_effect = SIDE_EFFECT_ALLOW;
static bool          force_stop;
static bool          can_create_delta;
static callback_t   *global_cbs[RT_LAST_EVENT];
static rt_severity_t exit_severity = SEVERITY_ERROR;
static hash_t       *decl_hash = NULL;
static bool          profiling = false;

static rt_alloc_stack_t event_stack = NULL;
static rt_alloc_stack_t waveform_stack = NULL;
static rt_alloc_stack_t sens_list_stack = NULL;
static rt_alloc_stack_t watch_stack = NULL;
static rt_alloc_stack_t callback_stack = NULL;

static netgroup_t **active_groups;
static unsigned     n_active_groups = 0;
static unsigned     n_active_alloc = 0;

static void deltaq_insert_proc(uint64_t delta, rt_proc_t *wake);
static void deltaq_insert_driver(uint64_t delta, netgroup_t *group,
                                 rt_proc_t *driver);
static bool rt_sched_driver(netgroup_t *group, uint64_t after,
                            uint64_t reject, value_t *values);
static void rt_sched_event(sens_list_t **list, netid_t first, netid_t last,
                           rt_proc_t *proc, bool is_static);
static void *rt_tmp_alloc(size_t sz);
static value_t *rt_alloc_value(netgroup_t *g);
static tree_t rt_recall_decl(const char *name);
static res_memo_t *rt_memo_resolution_fn(type_t type, resolution_fn_t fn);
static void _tracef(const char *fmt, ...);

#define GLOBAL_TMP_STACK_SZ (1024 * 1024)
#define PROC_TMP_STACK_SZ   (64 * 1024)

#if RT_DEBUG
#define RT_ASSERT(x) assert((x))
#else
#define RT_ASSERT(x)
#endif

#define TRACE(...) do {                                 \
      if (unlikely(trace_on)) _tracef(__VA_ARGS__);     \
   } while (0)

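// Expand macro(T) with T chosen to match a scalar size in bytes, so for
// example FOR_ALL_SIZES(4, M) invokes M(uint32_t)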
#define FOR_ALL_SIZES(size, macro) do {                 \
      switch (size) {                                   \
      case 1:                                           \
         macro(uint8_t); break;                         \
      case 2:                                           \
         macro(uint16_t); break;                        \
      case 4:                                           \
         macro(uint32_t); break;                        \
      case 8:                                           \
         macro(uint64_t); break;                        \
      }                                                 \
   } while (0)

////////////////////////////////////////////////////////////////////////////////
// Utilities

static const char *fmt_group(const netgroup_t *g)
{
   static const size_t BUF_LEN = 512;
   char *buf = get_fmt_buf(BUF_LEN);

   const char *eptr = buf + BUF_LEN;
   char *p = buf;

   p += checked_sprintf(p, eptr - p, "%s", istr(tree_ident(g->sig_decl)));

   groupid_t sig_group0 = netdb_lookup(netdb, tree_net(g->sig_decl, 0));
   netid_t sig_net0 = groups[sig_group0].first;
   int offset = g->first - sig_net0;

   const int length = g->length;
   type_t type = tree_type(g->sig_decl);
   while (type_is_array(type)) {
      const int stride = type_width(type_elem(type));
      const int ndims = array_dimension(type);

      p += checked_sprintf(p, eptr - p, "[");
      for (int i = 0; i < ndims; i++) {
         int stride2 = stride;
         for (int j = i + 1; j < ndims; j++) {
            range_t r = range_of(type, j);

            int64_t low, high;
            range_bounds(r, &low, &high);

            stride2 *= (high - low) + 1;
         }

         const int index = offset / stride2;
         p += checked_sprintf(p, eptr - p, "%s%d", (i > 0) ? "," : "", index);
         if ((length / stride2) > 1)
            p += checked_sprintf(p, eptr - p, "..%d",
                                 index + (length / stride2) - 1);
         offset %= stride2;
      }
      p += checked_sprintf(p, eptr - p, "]");

      type = type_elem(type);
   }

   return buf;
}

static const char *fmt_net(netid_t nid)
{
   return fmt_group(&(groups[netdb_lookup(netdb, nid)]));
}

static const char *fmt_values(const void *values, int length)
{
   const size_t len = (length * 2) + 1;
   char *vbuf = get_fmt_buf(len);

   char *p = vbuf;
   for (int i = 0; i < length; i++)
      p += checked_sprintf(p, vbuf + len - p, "%02x",
                           ((uint8_t *)values)[i]);

   return vbuf;
}

static inline uint64_t heap_key(uint64_t when, event_kind_t kind)
{
   // Use the bottom two bits of the key to encode the kind: the kind
   // with the highest priority has the lowest enumeration value
   return (when << 2) | (kind & 3);
}
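
// Assuming the event queue heap pops the smallest key first, events at
// the same time are popped in kind order: E_TIMEOUT, then E_DRIVER, then
// E_PROCESS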

static void from_rt_loc(const rt_loc_t *rt, loc_t *loc)
{
   // This function can be expensive: only call it when loc_t is required
   loc->file = ident_new(rt->file);
   loc->first_line = rt->first_line;
   loc->last_line = rt->last_line;
   loc->first_column = rt->first_column;
   loc->last_column = rt->last_column;
   loc->linebuf = NULL;
}

static void rt_show_trace(void)
{
   jit_trace_t *trace;
   size_t count;
   jit_trace(&trace, &count);

   for (size_t i = 0; i < count; i++)
      note_at(&(trace[i].loc), "in subprogram %s",
              istr(tree_ident(trace[i].tree)));

   free(trace);
}

////////////////////////////////////////////////////////////////////////////////
// Runtime support functions

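// The following DLLEXPORT functions are the interface called directly by
// the compiled VHDL code; _tmp_stack and _tmp_alloc below are shared with
// that code to implement the temporary allocation stack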
DLLEXPORT void     *_tmp_stack;
DLLEXPORT uint32_t  _tmp_alloc;

DLLEXPORT
void _sched_process(int64_t delay)
{
   TRACE("_sched_process delay=%s", fmt_time(delay));
   deltaq_insert_proc(delay, active_proc);
}

DLLEXPORT
void _sched_waveform_s(void *_nids, uint64_t scalar,
                       int64_t after, int64_t reject)
{
   const int32_t *nids = _nids;

   TRACE("_sched_waveform_s %s value=%"PRIx64" after=%s reject=%s",
         fmt_net(nids[0]), scalar, fmt_time(after), fmt_time(reject));

   if (unlikely(active_proc->postponed && (after == 0)))
      fatal("postponed process %s cannot cause a delta cycle",
            istr(tree_ident(active_proc->source)));

   const netid_t nid = nids[0];
   if (likely(nid != NETID_INVALID)) {
      netgroup_t *g = &(groups[netdb_lookup(netdb, nid)]);

      value_t *values_copy = rt_alloc_value(g);
      values_copy->qwords[0] = scalar;

      if (!rt_sched_driver(g, after, reject, values_copy))
         deltaq_insert_driver(after, g, active_proc);
   }
}

DLLEXPORT
void _sched_waveform(void *_nids, void *values, int32_t n,
                     int64_t after, int64_t reject)
{
   const int32_t *nids = _nids;

   TRACE("_sched_waveform %s values=%s n=%d after=%s reject=%s",
         fmt_net(nids[0]),
         fmt_values(values, n * groups[netdb_lookup(netdb, nids[0])].size),
         n, fmt_time(after), fmt_time(reject));

   if (unlikely(active_proc->postponed && (after == 0)))
      fatal("postponed process %s cannot cause a delta cycle",
            istr(tree_ident(active_proc->source)));

   const uint8_t *vp = values;
   int offset = 0;
   while (offset < n) {
      const netid_t nid = nids[offset];
      if (likely(nid != NETID_INVALID)) {
         netgroup_t *g = &(groups[netdb_lookup(netdb, nid)]);

         value_t *values_copy = rt_alloc_value(g);
         memcpy(values_copy->data, vp, g->size * g->length);

         if (!rt_sched_driver(g, after, reject, values_copy))
            deltaq_insert_driver(after, g, active_proc);

         vp += g->size * g->length;
         offset += g->length;
      }
      else
         offset++;
   }

   RT_ASSERT(offset == n);
}

DLLEXPORT
void _sched_event(void *_nids, int32_t n, int32_t flags)
{
   const int32_t *nids = _nids;

   TRACE("_sched_event %s n=%d flags=%d proc %s", fmt_net(nids[0]), n,
         flags, istr(tree_ident(active_proc->source)));

   netgroup_t *g0 = &(groups[netdb_lookup(netdb, nids[0])]);

   if (g0->length == n) {
      rt_sched_event(&(g0->pending), NETID_INVALID, NETID_INVALID,
                     active_proc, flags & SCHED_STATIC);
   }
   else {
      const bool global = !!(flags & SCHED_SEQUENTIAL);
      if (global) {
         // Place on the global pending list
         rt_sched_event(&pending, nids[0], nids[n - 1], active_proc,
                        flags & SCHED_STATIC);
      }

      int offset = 0;
      netgroup_t *g = g0;
      for (;;) {
         if (global)
            g->flags |= NET_F_GLOBAL;
         else {
            // Place on the net group's pending list
            rt_sched_event(&(g->pending), NETID_INVALID, NETID_INVALID,
                           active_proc, flags & SCHED_STATIC);
         }

         offset += g->length;
         if (offset < n)
            g = &(groups[netdb_lookup(netdb, nids[offset])]);
         else
            break;
      }
   }
}

DLLEXPORT
void _alloc_driver(const int32_t *all_nets, int32_t all_length,
                   const int32_t *driven_nets, int32_t driven_length,
                   const void *init)
{
   TRACE("_alloc_driver all=%s+%d driven=%s+%d", fmt_net(all_nets[0]),
         all_length, fmt_net(driven_nets[0]), driven_length);

   const char *initp = init;

   int offset = 0;
   while (offset < driven_length) {
      netgroup_t *g = &(groups[netdb_lookup(netdb, driven_nets[offset])]);
      offset += g->length;

      // Try to find this process in the list of existing drivers
      int driver;
      for (driver = 0; driver < g->n_drivers; driver++) {
         if (likely(g->drivers[driver].proc == active_proc))
            break;
      }

      // Allocate memory for drivers on demand
      if (driver == g->n_drivers) {
         if ((g->n_drivers == 1) && (g->resolution == NULL))
            fatal_at(tree_loc(g->sig_decl), "group %s has multiple drivers "
                     "but no resolution function", fmt_group(g));

         const size_t driver_sz = sizeof(struct driver);
         g->drivers = xrealloc(g->drivers, (driver + 1) * driver_sz);
         memset(&g->drivers[g->n_drivers], '\0',
                (driver + 1 - g->n_drivers) * driver_sz);
         g->n_drivers = driver + 1;

         TRACE("allocate driver %s %d %s", fmt_group(g), driver,
               istr(tree_ident(active_proc->source)));

         driver_t *d = &(g->drivers[driver]);
         d->proc = active_proc;

         const void *src = (init == NULL) ? g->resolved : initp;

         // Assign the initial value of the driver
         waveform_t *dummy = rt_alloc(waveform_stack);
         dummy->when   = 0;
         dummy->next   = NULL;
         dummy->values = rt_alloc_value(g);
         memcpy(dummy->values->data, src, g->length * g->size);

         d->waveforms = dummy;
      }

      initp += g->length * g->size;
   }
}

DLLEXPORT
void _private_stack(void)
{
   TRACE("_private_stack %p %d %d", active_proc->tmp_stack,
         active_proc->tmp_alloc, _tmp_alloc);

   if (active_proc->tmp_stack == NULL && _tmp_alloc > 0) {
      active_proc->tmp_stack = _tmp_stack;

      proc_tmp_stack = mmap_guarded(PROC_TMP_STACK_SZ,
                                    istr(tree_ident(active_proc->source)));
   }

   active_proc->tmp_alloc = _tmp_alloc;
}

DLLEXPORT
void *_resolved_address(int32_t nid)
{
   groupid_t gid = netdb_lookup(netdb, nid);
   netgroup_t *g = &(groups[gid]);
   TRACE("_resolved_address %d %p", nid, g->resolved);
   return g->resolved;
}

DLLEXPORT
void _needs_last_value(const int32_t *nids, int32_t n)
{
   TRACE("_needs_last_value %s n=%d", fmt_net(nids[0]), n);

   int offset = 0;
   while (offset < n) {
      netgroup_t *g = &(groups[netdb_lookup(netdb, nids[offset])]);
      g->flags |= NET_F_LAST_VALUE;

      offset += g->length;
   }
}

DLLEXPORT
void _set_initial(int32_t nid, const uint8_t *values,
                  const size_list_t *size_list, int32_t nparts,
                  const char *name)
{
   tree_t decl = rt_recall_decl(name);
   RT_ASSERT(tree_kind(decl) == T_SIGNAL_DECL);

   TRACE("_set_initial %s values=%s nparts=%d", name,
         fmt_values(values, size_list[0].count * size_list[0].size), nparts);

   int total_size = 0;
   for (int i = 0; i < nparts; i++)
      total_size += size_list[i].size * size_list[i].count;

   uint8_t *res_mem  = xmalloc(total_size * 2);
   uint8_t *last_mem = res_mem + total_size;

   type_t type = tree_type(decl);

   const uint8_t *src = values;
   int offset = 0, part = 0, remain = size_list[0].count;
   while (part < nparts) {
      groupid_t gid = netdb_lookup(netdb, nid + offset);
      netgroup_t *g = &(groups[gid]);

      const int size = size_list[part].size;

      RT_ASSERT(g->sig_decl == NULL);
      RT_ASSERT(remain >= g->length);

      res_memo_t *memo = NULL;
      if (size_list[part].resolution != NULL) {
         memo = rt_memo_resolution_fn(type, size_list[part].resolution);
         memo->flags |= size_list[part].flags;
         memo->ileft = size_list[part].ileft;

         if (size_list[part].flags & R_BOUNDARY)
            g->flags |= NET_F_BOUNDARY;
      }

      g->sig_decl   = decl;
      g->resolution = memo;
      g->size       = size;
      g->resolved   = res_mem;
      g->last_value = last_mem;

      if (offset == 0)
         g->flags |= NET_F_OWNS_MEM;

      const int nbytes = g->length * size;

      res_mem += nbytes;
      last_mem += nbytes;

      memcpy(g->resolved, src, nbytes);
      memcpy(g->last_value, src, nbytes);

      offset += g->length;
      src    += nbytes;
      remain -= g->length;

      if (remain == 0) {
         part++;
         if (part < nparts)   // Guard against reading past the end of size_list
            remain = size_list[part].count;
      }
   }
}

DLLEXPORT
void _set_initial_1(int32_t nid, const uint8_t *values, uint32_t size,
                    uint32_t count, void *resolution, int32_t ileft,
                    const char *name)
{
   const size_list_t size_list = {
      .size       = size,
      .count      = count,
      .resolution = resolution,
      .flags      = 0,
      .ileft      = ileft
   };

   _set_initial(nid, values, &size_list, 1, name);
}

DLLEXPORT
void _assert_fail(const uint8_t *msg, int32_t msg_len, int8_t severity,
                  int8_t is_report, const rt_loc_t *where)
{
   // LRM 93 section 8.2
   // The error message consists of at least
   // a) An indication that this message is from an assertion
   // b) The value of the severity level
   // c) The value of the message string
   // d) The name of the design unit containing the assertion

   RT_ASSERT(severity <= SEVERITY_FAILURE);

   const char *levels[] = {
      "Note", "Warning", "Error", "Failure"
   };

   if (init_side_effect != SIDE_EFFECT_ALLOW) {
      init_side_effect = SIDE_EFFECT_OCCURRED;
      return;
   }

   rt_show_trace();

   loc_t loc;
   from_rt_loc(where, &loc);

   void (*fn)(const loc_t *loc, const char *fmt, ...) = fatal_at;

   switch (severity) {
   case SEVERITY_NOTE:    fn = note_at; break;
   case SEVERITY_WARNING: fn = warn_at; break;
   case SEVERITY_ERROR:
   case SEVERITY_FAILURE: fn = error_at; break;
   }

   if (severity >= exit_severity)
      fn = fatal_at;

   (*fn)(&loc, "%s+%d: %s %s: %.*s\r\tProcess %s",
         fmt_time(now), iteration,
         (is_report ? "Report" : "Assertion"),
         levels[severity],
         msg_len, msg,
         ((active_proc == NULL) ? "(init)"
          : istr(tree_ident(active_proc->source))));
}

DLLEXPORT
void _bounds_fail(int32_t value, int32_t min, int32_t max, int32_t kind,
                  rt_loc_t *where, const char *hint)
{
   rt_show_trace();

   loc_t loc;
   from_rt_loc(where, &loc);

   char *copy LOCAL = xstrdup(hint ?: "");
   const char *prefix = copy, *suffix = copy;
   char *sep = strchr(copy, '|');
   if (sep != NULL) {
      suffix = sep + 1;
      *sep = '\0';
   }

   const char *spacer = hint ? " " : "";

   switch ((bounds_kind_t)kind) {
   case BOUNDS_ARRAY_TO:
      fatal_at(&loc, "array index %d outside bounds %d to %d%s%s",
               value, min, max, spacer, suffix);
      break;
   case BOUNDS_ARRAY_DOWNTO:
      fatal_at(&loc, "array index %d outside bounds %d downto %d%s%s",
               value, max, min, spacer, suffix);
      break;

   case BOUNDS_ENUM:
      fatal_at(&loc, "value %d outside %s bounds %d to %d%s%s",
               value, prefix, min, max, spacer, suffix);
      break;

   case BOUNDS_TYPE_TO:
      fatal_at(&loc, "value %d outside bounds %d to %d%s%s",
               value, min, max, spacer, suffix);
      break;

   case BOUNDS_TYPE_DOWNTO:
      fatal_at(&loc, "value %d outside bounds %d downto %d%s%s",
               value, max, min, spacer, suffix);
      break;

   case BOUNDS_ARRAY_SIZE:
      fatal_at(&loc, "length of target %d does not match length of value "
               "%d%s%s", min, max, spacer, suffix);
      break;

   case BOUNDS_INDEX_TO:
      fatal_at(&loc, "index %d violates constraint bounds %d to %d",
               value, min, max);
      break;

   case BOUNDS_INDEX_DOWNTO:
      fatal_at(&loc, "index %d violates constraint bounds %d downto %d",
               value, max, min);
      break;
   }
}

DLLEXPORT
int64_t _value_attr(const uint8_t *raw_str, int32_t str_len,
                    image_map_t *map, const rt_loc_t *where)
{
   const char *p = (const char *)raw_str;
   const char *endp = p + str_len;

   while (p < endp && isspace((int)*p))
      ++p;

   loc_t loc = LOC_INVALID;
   int64_t value = INT64_MIN;

   switch (map->kind) {
   case IMAGE_INTEGER:
      {
         bool is_negative = p < endp && *p == '-';
         int num_digits = 0;

         if (is_negative) {
            ++p;
         }

         value = 0;   // Accumulate digits from zero, not the INT64_MIN sentinel
         while (p < endp && (isdigit((int)*p) || *p == '_')) {
            if (*p != '_') {
               value *= 10;
               value += (*p - '0');
               num_digits++;
            }
            ++p;
         }
         if (is_negative) {
            value = -value;
         }

         if (num_digits == 0) {
            from_rt_loc(where, &loc);
            fatal_at(&loc, "invalid integer value "
                     "\"%.*s\"", str_len, (const char *)raw_str);
         }
      }
      break;

   case IMAGE_REAL:
      from_rt_loc(where, &loc);
      fatal_at(&loc, "real values not yet supported in 'VALUE");
      break;

   case IMAGE_PHYSICAL:
      from_rt_loc(where, &loc);
      fatal_at(&loc, "physical values not yet supported in 'VALUE");
      break;

   case IMAGE_ENUM:
      for (int i = 0; value < 0 && i < map->count; i++) {
         const char *elem = map->elems + (i * map->stride);
         bool match_case = false;
         for (int j = 0; j < map->stride && p + j < endp; j++) {
            if (elem[j] != p[j]
                && (match_case || tolower((int)elem[j]) != tolower((int)p[j])))
               break;
            else if (elem[j + 1] == '\0') {
               value = i;
               p += j + 1;
               break;
            }
            else if (elem[j] == '\'')
               match_case = !match_case;
         }
      }

      if (value < 0) {
         from_rt_loc(where, &loc);
         fatal_at(&loc, "\"%.*s\" is not a valid enumeration value",
                  str_len, (const char *)raw_str);
      }
      break;
   }

   while (p < endp && *p != '\0') {
      if (!isspace((int)*p)) {
         from_rt_loc(where, &loc);
         fatal_at(&loc, "found invalid characters \"%.*s\" after value "
                  "\"%.*s\"", (int)(endp - p), p, str_len,
                  (const char *)raw_str);
      }
      p++;
   }

   return value;
}

DLLEXPORT
void _div_zero(const rt_loc_t *where)
{
   loc_t loc;
   from_rt_loc(where, &loc);
   fatal_at(&loc, "division by zero");
}

DLLEXPORT
void _null_deref(const rt_loc_t *where)
{
   loc_t loc;
   from_rt_loc(where, &loc);
   fatal_at(&loc, "null access dereference");
}

DLLEXPORT
int64_t _std_standard_now(void)
{
   return now;
}

DLLEXPORT
void _nvc_env_stop(int32_t finish, int32_t have_status, int32_t status)
{
   if (have_status)
      notef("%s called with status %d", finish ? "FINISH" : "STOP", status);
   else
      notef("%s called", finish ? "FINISH" : "STOP");

   exit(status);
}

DLLEXPORT
void *_vec_load(const int32_t *nids, void *where,
                int32_t low, int32_t high, int32_t last)
{
   TRACE("_vec_load %s where=%p low=%d high=%d last=%d",
         fmt_net(nids[0]), where, low, high, last);

   int offset = low;

   groupid_t gid = netdb_lookup(netdb, nids[offset]);
   netgroup_t *g = &(groups[gid]);
   int skip = nids[offset] - g->first;

   RT_ASSERT((g->flags & NET_F_LAST_VALUE) || !last);

   if (offset + g->length - skip > high) {
      // If the signal data is already contiguous return a pointer to
      // that rather than copying into the user buffer
      void *r = unlikely(last) ? g->last_value : g->resolved;
      return (uint8_t *)r + (skip * g->size);
   }

   uint8_t *p = where;
   for (;;) {
      const int to_copy = MIN(high - offset + 1, g->length - skip);
      const int bytes   = to_copy * g->size;

      const void *src = unlikely(last) ? g->last_value : g->resolved;

      memcpy(p, (uint8_t *)src + (skip * g->size), bytes);

      offset += g->length - skip;
      p += bytes;

      if (offset > high)
         break;

      gid = netdb_lookup(netdb, nids[offset]);
      g = &(groups[gid]);
      skip = nids[offset] - g->first;
   }

   // Signal data was non-contiguous so return the user buffer
   return where;
}

DLLEXPORT
void _image(int64_t val, image_map_t *map, struct uarray *u)
{
   char *buf = NULL;
   size_t len = 0;

   switch (map->kind) {
   case IMAGE_INTEGER:
      buf = rt_tmp_alloc(16);
      len = checked_sprintf(buf, 16, "%"PRIi64, val);
      break;

   case IMAGE_ENUM:
      buf = rt_tmp_alloc(map->stride);
      strncpy(buf, map->elems + (val * map->stride), map->stride);
      len = strlen(buf);
      break;

   case IMAGE_REAL:
      {
         union {
            double  d;
            int64_t i;
         } u = { .i = val };
         buf = rt_tmp_alloc(32);
         len = checked_sprintf(buf, 32, "%.*g", 17, u.d);
      }
      break;

   case IMAGE_PHYSICAL:
      buf = rt_tmp_alloc(20 + map->stride);
      len = checked_sprintf(buf, 20 + map->stride, "%"PRIi64" %s",
                            val, map->elems + (0 * map->stride));
      break;
   }

   u->ptr = buf;
   u->dims[0].left  = 1;
   u->dims[0].right = len;
   u->dims[0].dir   = RANGE_TO;
}

DLLEXPORT
void _bit_shift(int32_t kind, const uint8_t *data, int32_t len,
                int8_t dir, int32_t shift, struct uarray *u)
{
   if (shift < 0) {
      kind  = kind ^ 1;
      shift = -shift;
   }

   shift %= len;

   uint8_t *buf = rt_tmp_alloc(len);

   for (int i = 0; i < len; i++) {
      switch (kind) {
      case BIT_SHIFT_SLL:
         buf[i] = (i < len - shift) ? data[i + shift] : 0;
         break;
      case BIT_SHIFT_SRL:
         buf[i] = (i >= shift) ? data[i - shift] : 0;
         break;
      case BIT_SHIFT_SLA:
         buf[i] = (i < len - shift) ? data[i + shift] : data[len - 1];
         break;
      case BIT_SHIFT_SRA:
         buf[i] = (i >= shift) ? data[i - shift] : data[0];
         break;
      case BIT_SHIFT_ROL:
         buf[i] = (i < len - shift) ? data[i + shift] : data[(i + shift) % len];
         break;
      case BIT_SHIFT_ROR:
         buf[i] = (i >= shift) ? data[i - shift] : data[len + i - shift];
         break;
      }
   }

   u->ptr = buf;
   u->dims[0].left  = (dir == RANGE_TO) ? 0 : len - 1;
   u->dims[0].right = (dir == RANGE_TO) ? len - 1 : 0;
   u->dims[0].dir   = dir;
}

DLLEXPORT
void _bit_vec_op(int32_t kind, const uint8_t *left, int32_t left_len,
                 int8_t left_dir, const uint8_t *right, int32_t right_len,
                 int8_t right_dir, struct uarray *u)
{
   if ((kind != BIT_VEC_NOT) && (left_len != right_len))
      fatal("arguments to bit vector operation are not the same length");

   uint8_t *buf = rt_tmp_alloc(left_len);

   switch (kind) {
   case BIT_VEC_NOT:
      for (int i = 0; i < left_len; i++)
         buf[i] = !left[i];
      break;

   case BIT_VEC_AND:
      for (int i = 0; i < left_len; i++)
         buf[i] = left[i] && right[i];
      break;

   case BIT_VEC_OR:
      for (int i = 0; i < left_len; i++)
         buf[i] = left[i] || right[i];
      break;

   case BIT_VEC_XOR:
      for (int i = 0; i < left_len; i++)
         buf[i] = left[i] ^ right[i];
      break;

   case BIT_VEC_XNOR:
      for (int i = 0; i < left_len; i++)
         buf[i] = !(left[i] ^ right[i]);
      break;

   case BIT_VEC_NAND:
      for (int i = 0; i < left_len; i++)
         buf[i] = !(left[i] && right[i]);
      break;

   case BIT_VEC_NOR:
      for (int i = 0; i < left_len; i++)
         buf[i] = !(left[i] || right[i]);
      break;
   }

   u->ptr = buf;
   u->dims[0].left  = (left_dir == RANGE_TO) ? 0 : left_len - 1;
   u->dims[0].right = (left_dir == RANGE_TO) ? left_len - 1 : 0;
   u->dims[0].dir   = left_dir;
}

DLLEXPORT
void _debug_out(int32_t val, int32_t reg)
{
   printf("DEBUG: r%d val=%"PRIx32"\n", reg, val);
}

DLLEXPORT
void _debug_dump(const uint8_t *ptr, int32_t len)
{
   printf("---- %p ----\n", ptr);

   if (ptr != NULL) {
      for (int i = 0; i < len; i++)
         printf("%02x%c", ptr[i], (i % 8 == 7) ? '\n' : ' ');
      if (len % 8 != 0)
         printf("\n");
   }
}

DLLEXPORT
int64_t _last_event(const int32_t *nids, int32_t n)
{
   //TRACE("_last_event %s n=%d %d", fmt_net(nids[0]), n);

   int64_t last = INT64_MAX;
   int offset = 0;
   while (offset < n) {
      netgroup_t *g = &(groups[netdb_lookup(netdb, nids[offset])]);
      if (g->last_event < now)
         last = MIN(last, now - g->last_event);

      offset += g->length;
   }

   return last;
}

DLLEXPORT
int32_t _test_net_flag(const int32_t *nids, int32_t n, int32_t flag)
{
   //TRACE("_test_net_flag %s n=%d flag=%d", fmt_net(nids[0]), n, flag);

   int offset = 0;
   while (offset < n) {
      netgroup_t *g = &(groups[netdb_lookup(netdb, nids[offset])]);

      if (g->flags & flag)
         return 1;

      offset += g->length;
   }

   return 0;
}

DLLEXPORT
void _file_open(int8_t *status, void **_fp, uint8_t *name_bytes,
                int32_t name_len, int8_t mode)
{
   FILE **fp = (FILE **)_fp;
   if (*fp != NULL) {
      if (status != NULL) {
         *status = 1;   // STATUS_ERROR
         return;
      }
      else
         // This is to support closing a file implicitly when the
         // design is reset
         fclose(*fp);
   }

   char *fname = xmalloc(name_len + 1);
   memcpy(fname, name_bytes, name_len);
   fname[name_len] = '\0';

   TRACE("_file_open %s fp=%p mode=%d", fname, fp, mode);

   const char *mode_str[] = {
      "rb", "wb", "w+b"
   };
   RT_ASSERT(mode < ARRAY_LEN(mode_str));

   if (status != NULL)
      *status = 0;   // OPEN_OK

   if (strcmp(fname, "STD_INPUT") == 0)
      *fp = stdin;
   else if (strcmp(fname, "STD_OUTPUT") == 0)
      *fp = stdout;
   else
      *fp = fopen(fname, mode_str[mode]);

   if (*fp == NULL) {
      if (status == NULL)
         fatal_errno("failed to open %s", fname);
      else {
         switch (errno) {
         case ENOENT:
            *status = 2;   // NAME_ERROR
            break;
         case EPERM:
            *status = 3;   // MODE_ERROR
            break;
         default:
            fatal_errno("%s", fname);
         }
      }
   }

   free(fname);
}

DLLEXPORT
void _file_write(void **_fp, uint8_t *data, int32_t len)
{
   FILE **fp = (FILE **)_fp;

   TRACE("_file_write fp=%p data=%p len=%d", fp, data, len);

   if (*fp == NULL)
      fatal("write to closed file");

   fwrite(data, 1, len, *fp);
}

DLLEXPORT
void _file_read(void **_fp, uint8_t *data, int32_t len, int32_t *out)
{
   FILE **fp = (FILE **)_fp;

   TRACE("_file_read fp=%p data=%p len=%d", fp, data, len);

   if (*fp == NULL)
      fatal("read from closed file");

   size_t n = fread(data, 1, len, *fp);
   if (out != NULL)
      *out = n;
}

DLLEXPORT
void _file_close(void **_fp)
{
   FILE **fp = (FILE **)_fp;

   TRACE("_file_close fp=%p", fp);

   if (*fp == NULL)
      fatal("attempt to close already closed file");

   fclose(*fp);
   *fp = NULL;
}

DLLEXPORT
int8_t _endfile(void *_f)
{
   FILE *f = _f;

   if (f == NULL)
      fatal("ENDFILE called on closed file");

   int c = fgetc(f);
   if (c == EOF)
      return 1;
   else {
      ungetc(c, f);
      return 0;
   }
}

////////////////////////////////////////////////////////////////////////////////
// Simulation kernel

static void _tracef(const char *fmt, ...)
{
   va_list ap;
   va_start(ap, fmt);

   char buf[64];
   if (iteration < 0)
      fprintf(stderr, "TRACE (init): ");
   else
      fprintf(stderr, "TRACE %s+%d: ",
              fmt_time_r(buf, sizeof(buf), now), iteration);
   vfprintf(stderr, fmt, ap);
   fprintf(stderr, "\n");
   fflush(stderr);

   va_end(ap);
}

static void deltaq_insert(event_t *e)
{
   if (e->when == now) {
      event_t **chain = (e->kind == E_DRIVER) ? &delta_driver : &delta_proc;
      e->delta_chain = *chain;
      *chain = e;
   }
   else {
      e->delta_chain = NULL;
      heap_insert(eventq_heap, heap_key(e->when, e->kind), e);
   }
}

static void deltaq_insert_proc(uint64_t delta, rt_proc_t *wake)
{
   event_t *e = rt_alloc(event_stack);
   e->when       = now + delta;
   e->kind       = E_PROCESS;
   e->proc       = wake;
   e->wakeup_gen = wake->wakeup_gen;

   deltaq_insert(e);
}

static void deltaq_insert_driver(uint64_t delta, netgroup_t *group,
                                 rt_proc_t *driver)
{
   event_t *e = rt_alloc(event_stack);
   e->when       = now + delta;
   e->kind       = E_DRIVER;
   e->group      = group;
   e->proc       = driver;
   e->wakeup_gen = UINT32_MAX;

   deltaq_insert(e);
}

#if TRACE_DELTAQ > 0
static void deltaq_walk(uint64_t key, void *user, void *context)
{
   event_t *e = user;

   fprintf(stderr, "%s\t", fmt_time(e->when));
   switch (e->kind) {
   case E_DRIVER:
      fprintf(stderr, "driver\t %s\n", fmt_group(e->group));
      break;
   case E_PROCESS:
      fprintf(stderr, "process\t %s%s\n", istr(tree_ident(e->proc->source)),
              (e->wakeup_gen == e->proc->wakeup_gen) ? "" : " (stale)");
      break;
   case E_TIMEOUT:
      fprintf(stderr, "timeout\t %p %p\n", e->timeout_fn, e->timeout_user);
      break;
   }
}

static void deltaq_dump(void)
{
   for (event_t *e = delta_driver; e != NULL; e = e->delta_chain)
      fprintf(stderr, "delta\tdriver\t %s\n", fmt_group(e->group));

   for (event_t *e = delta_proc; e != NULL; e = e->delta_chain)
      fprintf(stderr, "delta\tprocess\t %s%s\n",
              istr(tree_ident(e->proc->source)),
              (e->wakeup_gen == e->proc->wakeup_gen) ? "" : " (stale)");

   heap_walk(eventq_heap, deltaq_walk, NULL);
}
#endif

static res_memo_t *rt_memo_resolution_fn(type_t type, resolution_fn_t fn)
{
   // Optimise some common resolution functions by memoising them

   res_memo_t *memo = hash_get(res_memo_hash, fn);
   if (memo != NULL)
      return memo;

   if (type_is_array(type))
      type = type_elem(type);

   memo = xmalloc(sizeof(res_memo_t));
   memo->fn    = fn;
   memo->flags = 0;

   hash_put(res_memo_hash, fn, memo);

   if (type_kind(type_base_recur(type)) != T_ENUM)
      return memo;

   int nlits;
   switch (type_kind(type)) {
   case T_ENUM:
      nlits = type_enum_literals(type);
      break;
   case T_SUBTYPE:
      {
         int64_t low, high;
         range_bounds(range_of(type, 0), &low, &high);
         nlits = high - low + 1;
      }
      break;
   default:
      return memo;
   }

   if (nlits > 16)
      return memo;

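   // Probing the resolution function must not have user-visible side
   // effects: while side effects are disallowed _assert_fail records
   // SIDE_EFFECT_OCCURRED instead of reporting, and in that case the
   // results below are not memoised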
   init_side_effect = SIDE_EFFECT_DISALLOW;

   // Memoise the function for all two value cases

   for (int i = 0; i < nlits; i++) {
      for (int j = 0; j < nlits; j++) {
         int8_t args[2] = { i, j };
         struct uarray u = {
            args, { { memo->ileft, memo->ileft + 1, RANGE_TO } }
         };
         memo->tab2[i][j] = (*fn)(&u);
      }
   }

   // Memoise the function for all single value cases and determine if the
   // function behaves like the identity function

   bool identity = true;
   for (int i = 0; i < nlits; i++) {
      int8_t args[1] = { i };
      struct uarray u = { args, { { memo->ileft, memo->ileft, RANGE_TO } } };
      memo->tab1[i] = (*fn)(&u);
      identity = identity && (memo->tab1[i] == i);
   }

   if (init_side_effect != SIDE_EFFECT_OCCURRED) {
      memo->flags |= R_MEMO;
      if (identity)
         memo->flags |= R_IDENT;
   }

   return memo;
}

static void rt_global_event(rt_event_t kind)
{
   callback_t *it = global_cbs[kind];
   if (unlikely(it != NULL)) {
      while (it != NULL) {
         callback_t *tmp = it->next;
         (*it->fn)(it->user);
         rt_free(callback_stack, it);
         it = tmp;
      }

      global_cbs[kind] = NULL;
   }
}

static value_t *rt_alloc_value(netgroup_t *g)
{
   if (g->free_values == NULL) {
      const size_t size = MAX(sizeof(uint64_t), g->size * g->length);
      value_t *v = xmalloc(sizeof(struct value) + size);
      v->next = NULL;
      return v;
   }
   else {
      value_t *v = g->free_values;
      g->free_values = v->next;
      v->next = NULL;
      return v;
   }
}

static void rt_free_value(netgroup_t *g, value_t *v)
{
   RT_ASSERT(v->next == NULL);
   v->next = g->free_values;
   g->free_values = v;
}

static void *rt_tmp_alloc(size_t sz)
{
   // Allocate sz bytes that will be freed by the active process

   uint8_t *ptr = (uint8_t *)_tmp_stack + _tmp_alloc;
   _tmp_alloc += sz;
   return ptr;
}

static void rt_sched_event(sens_list_t **list, netid_t first, netid_t last,
                           rt_proc_t *proc, bool is_static)
{
   // See if there is already a stale entry in the pending
   // list for this process
   sens_list_t *it = *list;
   int count = 0;
   for (; it != NULL; it = it->next, ++count) {
      if ((it->proc == proc)
          && (it->wakeup_gen != proc->wakeup_gen))
         break;
   }

   if (it == NULL) {
      sens_list_t *node = rt_alloc(sens_list_stack);
      node->proc       = proc;
      node->wakeup_gen = proc->wakeup_gen;
      node->next       = *list;
      node->first      = first;
      node->last       = last;
      node->reenq      = (is_static ? list : NULL);

      *list = node;
   }
   else {
      // Reuse the stale entry
      RT_ASSERT(!is_static);
      it->wakeup_gen = proc->wakeup_gen;
      it->first      = first;
      it->last       = last;
   }
}

#if TRACE_PENDING
static void rt_dump_pending(void)
{
   for (struct sens_list *it = pending; it != NULL; it = it->next) {
      printf("%d..%d\t%s%s\n", it->first, it->last,
             istr(tree_ident(it->proc->source)),
             (it->wakeup_gen == it->proc->wakeup_gen) ? "" : " (stale)");
   }
}
#endif  // TRACE_PENDING

static void rt_reset_group(groupid_t gid, netid_t first, unsigned length)
{
   netgroup_t *g = &(groups[gid]);
   memset(g, '\0', sizeof(netgroup_t));
   g->first       = first;
   g->length      = length;
   g->last_event  = INT64_MAX;
}

static void rt_free_delta_events(event_t *e)
{
   while (e != NULL) {
      event_t *tmp = e->delta_chain;
      rt_free(event_stack, e);
      e = tmp;
   }
}

static void rt_setup(tree_t top)
{
   now = 0;
   iteration = -1;
   active_proc = NULL;
   force_stop = false;
   can_create_delta = true;

   RT_ASSERT(resume == NULL);

   rt_free_delta_events(delta_proc);
   rt_free_delta_events(delta_driver);

   if (eventq_heap != NULL)
      heap_free(eventq_heap);
   eventq_heap = heap_new(512);

   if (netdb == NULL) {
      netdb = netdb_open(top);
      groups = xmalloc(sizeof(struct netgroup) * netdb_size(netdb));
   }

   if (procs == NULL) {
      n_procs = tree_stmts(top);
      procs   = xmalloc(sizeof(struct rt_proc) * n_procs);
   }

   const int ndecls = tree_decls(top);
   decl_hash = hash_new(next_power_of_2(ndecls * 2), true);
   for (int i = 0; i < ndecls; i++) {
      tree_t d = tree_decl(top, i);
      hash_put(decl_hash, tree_ident(d), d);
   }

   res_memo_hash = hash_new(128, true);

   netdb_walk(netdb, rt_reset_group);

   const int nstmts = tree_stmts(top);
   for (int i = 0; i < nstmts; i++) {
      tree_t p = tree_stmt(top, i);
      RT_ASSERT(tree_kind(p) == T_PROCESS);

      procs[i].source     = p;
      procs[i].proc_fn    = jit_find_symbol(istr(tree_ident(p)), true);
      procs[i].wakeup_gen = 0;
      procs[i].postponed  = !!(tree_flags(p) & TREE_F_POSTPONED);
      procs[i].tmp_stack  = NULL;
      procs[i].tmp_alloc  = 0;
      procs[i].pending    = false;
      procs[i].usage      = 0;
   }
}

static void rt_run(struct rt_proc *proc, bool reset)
{
   TRACE("%s process %s", reset ? "reset" : "run",
         istr(tree_ident(proc->source)));

   uint64_t start_clock = 0;
   if (profiling)
      start_clock = get_timestamp_us();

   if (reset) {
      _tmp_stack = global_tmp_stack;
      _tmp_alloc = global_tmp_alloc;
   }
   else if (proc->tmp_stack != NULL) {
      TRACE("using private stack at %p %d", proc->tmp_stack, proc->tmp_alloc);
      _tmp_stack = proc->tmp_stack;
      _tmp_alloc = proc->tmp_alloc;

      // Will be updated by _private_stack if suspending inside a procedure;
      // otherwise the stack is cleared when the process suspends
1643       proc->tmp_alloc = 0;
1644    }
1645    else {
1646       _tmp_stack = proc_tmp_stack;
1647       _tmp_alloc = 0;
1648    }
1649 
1650    active_proc = proc;
1651    (*proc->proc_fn)(reset ? 1 : 0);
1652 
1653    if (reset)
1654       global_tmp_alloc = _tmp_alloc;
1655 
1656    if (start_clock != 0)
1657       proc->usage += get_timestamp_us() - start_clock;
1658 }
1659 
rt_call_module_reset(ident_t name)1660 static void rt_call_module_reset(ident_t name)
1661 {
1662    char *buf LOCAL = xasprintf("%s_reset", istr(name));
1663 
1664    _tmp_stack = global_tmp_stack;
1665    _tmp_alloc = global_tmp_alloc;
1666 
1667    void (*reset_fn)(void) = jit_find_symbol(buf, false);
1668    if (reset_fn != NULL) {
1669       TRACE("reset module %s", istr(name));
1670       (*reset_fn)();
1671    }
1672 
1673    global_tmp_alloc = _tmp_alloc;
1674 }
1675 
rt_resolve_group(netgroup_t * group,int driver,void * values)1676 static int32_t rt_resolve_group(netgroup_t *group, int driver, void *values)
1677 {
1678    // Set driver to -1 for initial call to resolution function
1679 
1680    const size_t valuesz = group->size * group->length;
1681 
1682    void *resolved = NULL;
1683    if (unlikely(group->flags & NET_F_FORCED)) {
1684       resolved = group->forcing->data;
1685    }
1686    else if (group->resolution == NULL) {
1687       resolved = values;
1688    }
1689    else if ((group->resolution->flags & R_IDENT) && (group->n_drivers == 1)) {
1690       // Resolution function behaves like identity for a single driver
1691       resolved = values;
1692    }
1693    else if ((group->resolution->flags & R_MEMO) && (group->n_drivers == 1)) {
1694       // Resolution function has been memoised so do a table lookup
1695 
1696       resolved = alloca(valuesz);
1697 
1698       for (int j = 0; j < group->length; j++) {
1699          const int index = { ((const char *)values)[j] };
1700          const int8_t r = group->resolution->tab1[index];
1701          ((int8_t *)resolved)[j] = r;
1702       }
1703    }
1704    else if ((group->resolution->flags & R_MEMO) && (group->n_drivers == 2)) {
1705       // Resolution function has been memoised so do a table lookup
1706 
1707       resolved = alloca(valuesz);
1708 
1709       const char *p0 = group->drivers[0].waveforms->values->data;
1710       const char *p1 = group->drivers[1].waveforms->values->data;
1711 
1712       for (int j = 0; j < group->length; j++) {
1713          int driving[2] = { p0[j], p1[j] };
1714          if (likely(driver >= 0))
1715             driving[driver] = ((const char *)values)[j];
1716 
1717          const int8_t r = group->resolution->tab2[driving[0]][driving[1]];
1718          ((int8_t *)resolved)[j] = r;
1719       }
1720    }
   else if (group->resolution->flags & R_RECORD) {
      // Call resolution function for resolved record

      netid_t first = group->first, last = group->first + group->length - 1;

      for (const netgroup_t *it = group;
           it->resolution == group->resolution && it->first > 0
              && !(it->flags & NET_F_BOUNDARY);
           it = &(groups[netdb_lookup(netdb, it->first - 1)]),
              first = it->first)
         ;

      for (const netgroup_t *it = group;
           it->resolution == group->resolution
              && it->first + it->length < netdb_size(netdb)
              && (it == group
                  || !(it->flags & (NET_F_BOUNDARY | NET_F_OWNS_MEM)));
           it = &(groups[netdb_lookup(netdb, it->first + it->length)]),
              last = it->first + it->length - 1)
         ;

      size_t size = 0, group_off = 0;
      for (int offset = first; offset <= last;) {
         netgroup_t *g = &(groups[netdb_lookup(netdb, offset)]);
         size += g->size * g->length;
         if (offset < group->first)
            group_off = size;
         assert(g->n_drivers == group->n_drivers);

         offset += g->length;
      }

      uint8_t *inputs = alloca(size * group->n_drivers);

      size_t ptr = 0;
      for (int offset = first; offset <= last;) {
         netgroup_t *p = &(groups[netdb_lookup(netdb, offset)]);

         for (int i = 0; i < p->n_drivers; i++) {
            void *src = NULL;
            if (i == driver && p == group)
               src = p->drivers[i].waveforms->next->values->data;
            else
               src = p->drivers[i].waveforms->values->data;
            memcpy(inputs + ptr + (i * size),
                   src,
                   p->size * p->length);
         }
         ptr += p->size * p->length;

         offset += p->length;
      }

      struct uarray u = {
         inputs, { { group->resolution->ileft,
                     group->resolution->ileft + group->n_drivers - 1,
                     RANGE_TO } }
      };
      uint8_t *result = (uint8_t *)(*group->resolution->fn)(&u);
      resolved = result + group_off;
   }
   else {
      // Must actually call resolution function in general case

      resolved = alloca(valuesz);

      for (int j = 0; j < group->length; j++) {
#define CALL_RESOLUTION_FN(type) do {                                   \
            type vals[group->n_drivers];                                \
            for (int i = 0; i < group->n_drivers; i++) {                \
               const value_t *v = group->drivers[i].waveforms->values;  \
               vals[i] = ((const type *)v->data)[j];                    \
            }                                                           \
            if (likely(driver >= 0))                                    \
               vals[driver] = ((const type *)values)[j];                \
            type *r = (type *)resolved;                                 \
            struct uarray u = {                                         \
               vals, {                                                  \
                  { group->resolution->ileft,                           \
                    group->resolution->ileft + group->n_drivers - 1,    \
                    RANGE_TO } }                                        \
            };                                                          \
            r[j] = (*group->resolution->fn)(&u);                        \
         } while (0)

         FOR_ALL_SIZES(group->size, CALL_RESOLUTION_FN);
      }
   }

   int32_t new_flags = NET_F_ACTIVE;
   if (memcmp(group->resolved, resolved, valuesz) != 0)
      new_flags |= NET_F_EVENT;

   // LAST_VALUE is the same as the initial value when there have been
   // no events on the signal; otherwise only update it when there is
   // an event
   if (new_flags & NET_F_EVENT) {
      if (group->flags & NET_F_LAST_VALUE)
         memcpy(group->last_value, group->resolved, valuesz);
      memcpy(group->resolved, resolved, valuesz);

      group->last_event = now;
   }

   return new_flags;
}

static void rt_group_initial(groupid_t gid, netid_t first, unsigned length)
{
   netgroup_t *g = &(groups[gid]);
   if ((g->n_drivers == 1) && (g->resolution == NULL))
      rt_resolve_group(g, -1, g->drivers[0].waveforms->values->data);
   else if (g->n_drivers > 0)
      rt_resolve_group(g, -1, g->resolved);
}

static void rt_initial(tree_t top)
{
   // Initialisation is described in LRM 93 section 12.6.4

   const int ncontext = tree_contexts(top);
   for (int i = 0; i < ncontext; i++) {
      tree_t c = tree_context(top, i);
      ident_t unit_name = tree_ident(c);
      rt_call_module_reset(unit_name);
   }

   rt_call_module_reset(tree_ident(top));

   for (size_t i = 0; i < n_procs; i++)
      rt_run(&procs[i], true /* reset */);

   TRACE("calculate initial driver values");

   init_side_effect = SIDE_EFFECT_ALLOW;
   netdb_walk(netdb, rt_group_initial);

   TRACE("used %d bytes of global temporary stack", global_tmp_alloc);
}

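// Link a watch into the watching list of every net group making up the
// signal: the first pass adds the links and counts the groups, the
// second caches pointers to the groups in the watch itself.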
static void rt_watch_signal(watch_t *w)
{
   const int nnets = tree_nets(w->signal);
   int offset = 0;
   while (offset < nnets) {
      netid_t nid = tree_net(w->signal, offset);
      netgroup_t *g = &(groups[netdb_lookup(netdb, nid)]);

      watch_list_t *link = xmalloc(sizeof(watch_list_t));
      link->next  = g->watching;
      link->watch = w;

      g->watching = link;

      offset += g->length;
      (w->n_groups)++;
   }

   w->groups = xmalloc(sizeof(netgroup_t *) * w->n_groups);

   int ptr = 0;
   offset = 0;
   while (offset < nnets) {
      netid_t nid = tree_net(w->signal, offset);
      netgroup_t *g = &(groups[netdb_lookup(netdb, nid)]);
      w->groups[ptr++] = g;
      w->length += g->length;
      offset += g->length;
   }
}

static void rt_wakeup(sens_list_t *sl)
{
   // To avoid having each process keep a list of the signals it is
   // sensitive to, each process has a "wakeup generation" number which
   // is incremented after each wait statement and stored in the signal
   // sensitivity list. We then ignore any sensitivity list elements
   // where the generation doesn't match the current process wakeup
   // generation: these correspond to stale "wait on" statements that
   // have already resumed.

   if (sl->wakeup_gen == sl->proc->wakeup_gen || sl->reenq != NULL) {
      TRACE("wakeup process %s%s", istr(tree_ident(sl->proc->source)),
            sl->proc->postponed ? " [postponed]" : "");
      ++(sl->proc->wakeup_gen);

      if (unlikely(sl->proc->postponed)) {
         sl->next  = postponed;
         postponed = sl;
      }
      else {
         sl->next = resume;
         resume = sl;
      }

      sl->proc->pending = true;
   }
   else
      rt_free(sens_list_stack, sl);
}

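// Insert a new transaction into the waveform list of the driver owned
// by the active process. Pending transactions inside the pulse
// rejection interval with a different value are deleted, as are all
// transactions scheduled after the new one. Returns true if a
// transaction was already scheduled at the same time.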
static bool rt_sched_driver(netgroup_t *group, uint64_t after,
                            uint64_t reject, value_t *values)
{
   if (unlikely(reject > after))
      fatal("signal %s pulse reject limit %s is greater than "
            "delay %s", fmt_group(group), fmt_time(reject), fmt_time(after));

   int driver = 0;
   if (unlikely(group->n_drivers != 1)) {
      // Try to find this process in the list of existing drivers
      for (driver = 0; driver < group->n_drivers; driver++) {
         if (likely(group->drivers[driver].proc == active_proc))
            break;
      }

      RT_ASSERT(driver != group->n_drivers);
   }

   driver_t *d = &(group->drivers[driver]);

   const size_t valuesz = group->size * group->length;

   waveform_t *w = rt_alloc(waveform_stack);
   w->when   = now + after;
   w->next   = NULL;
   w->values = values;

   waveform_t *last = d->waveforms;
   waveform_t *it   = last->next;
   while ((it != NULL) && (it->when < w->when)) {
      // If the current transaction is within the pulse rejection interval
      // and the value is different to that of the new transaction then
      // delete the current transaction
      if ((it->when >= w->when - reject)
          && (memcmp(it->values->data, w->values->data, valuesz) != 0)) {
         waveform_t *next = it->next;
         last->next = next;
         rt_free_value(group, it->values);
         rt_free(waveform_stack, it);
         it = next;
      }
      else {
         last = it;
         it = it->next;
      }
   }
   w->next = NULL;
   last->next = w;

   // Delete all transactions later than this
   // We could remove this transaction from the deltaq as well but the
   // overhead of doing so is probably higher than the cost of waking
   // up for the empty event
   bool already_scheduled = false;
   while (it != NULL) {
      rt_free_value(group, it->values);

      if (it->when == w->when)
         already_scheduled = true;

      waveform_t *next = it->next;
      rt_free(waveform_stack, it);
      it = next;
   }

   return already_scheduled;
}

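// Apply new driver values to a group and wake up everything that cares
// about it: processes on the group's own pending list, processes on the
// global pending list whose net range overlaps this group, and any
// watch callbacks. The group is recorded in active_groups so that its
// ACTIVE and EVENT flags can be cleared at the end of the cycle.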
static void rt_update_group(netgroup_t *group, int driver, void *values)
{
   const size_t valuesz = group->size * group->length;

   TRACE("update group %s values=%s driver=%d",
         fmt_group(group), fmt_values(values, valuesz), driver);

   const int32_t new_flags = rt_resolve_group(group, driver, values);
   group->flags |= new_flags;

   if (unlikely(n_active_groups == n_active_alloc)) {
      n_active_alloc *= 2;
      const size_t newsz = n_active_alloc * sizeof(struct netgroup *);
      active_groups = xrealloc(active_groups, newsz);
   }
   active_groups[n_active_groups++] = group;

   // Wake up any processes sensitive to this group
   if (new_flags & NET_F_EVENT) {
      sens_list_t *it, *last = NULL, *next = NULL;

      // First wake up everything on the group specific pending list
      for (it = group->pending; it != NULL; it = next) {
         next = it->next;
         rt_wakeup(it);
         group->pending = next;
      }

      // Now check the global pending list
      if (group->flags & NET_F_GLOBAL) {
         for (it = pending; it != NULL; it = next) {
            next = it->next;

            const netid_t x = group->first;
            const netid_t y = group->first + group->length - 1;
            const netid_t a = it->first;
            const netid_t b = it->last;

            const bool hit = (x <= b) && (a <= y);

            if (hit) {
               rt_wakeup(it);
               if (last == NULL)
                  pending = next;
               else
                  last->next = next;
            }
            else
               last = it;
         }
      }

      // Schedule any callbacks to run
      for (watch_list_t *wl = group->watching; wl != NULL; wl = wl->next) {
         if (!wl->watch->pending) {
            wl->watch->chain_pending = callbacks;
            wl->watch->pending = true;
            callbacks = wl->watch;
         }
      }
   }
}

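// Handle an E_DRIVER event: advance the waveform list of the driver
// owned by proc if its next transaction matures now, or, when proc is
// NULL, reapply the forcing value of a forced group.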
static void rt_update_driver(netgroup_t *group, rt_proc_t *proc)
{
   if (likely(proc != NULL)) {
      // Find the driver owned by proc
      int driver;
      for (driver = 0; driver < group->n_drivers; driver++) {
         if (likely(group->drivers[driver].proc == proc))
            break;
      }
      RT_ASSERT(driver != group->n_drivers);

      waveform_t *w_now  = group->drivers[driver].waveforms;
      waveform_t *w_next = w_now->next;

      if (likely((w_next != NULL) && (w_next->when == now))) {
         rt_update_group(group, driver, w_next->values->data);
         group->drivers[driver].waveforms = w_next;
         rt_free_value(group, w_now->values);
         rt_free(waveform_stack, w_now);
      }
      else
         RT_ASSERT(w_now != NULL);
   }
   else if (group->flags & NET_F_FORCED)
      rt_update_group(group, -1, group->forcing->data);
}

static bool rt_stale_event(event_t *e)
{
   return (e->kind == E_PROCESS) && (e->wakeup_gen != e->proc->wakeup_gen);
}

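// The run queue buffers the events to be executed in the current
// simulation cycle. It grows geometrically and its indices are reset
// once it has been fully drained.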
static void rt_push_run_queue(event_t *e)
{
   if (unlikely(run_queue.wr == run_queue.alloc)) {
      if (run_queue.alloc == 0) {
         run_queue.alloc = 128;
         run_queue.queue = xmalloc(sizeof(event_t *) * run_queue.alloc);
      }
      else {
         run_queue.alloc *= 2;
         run_queue.queue = xrealloc(run_queue.queue,
                                    sizeof(event_t *) * run_queue.alloc);
      }
   }

   if (unlikely(rt_stale_event(e)))
      rt_free(event_stack, e);
   else {
      run_queue.queue[(run_queue.wr)++] = e;
      if (e->kind == E_PROCESS)
         ++(e->proc->wakeup_gen);
   }
}

static event_t *rt_pop_run_queue(void)
{
   if (run_queue.wr == run_queue.rd) {
      run_queue.wr = 0;
      run_queue.rd = 0;
      return NULL;
   }
   else
      return run_queue.queue[(run_queue.rd)++];
}

static void rt_iteration_limit(void)
{
   text_buf_t *buf = tb_new();
   tb_printf(buf, "Iteration limit of %d delta cycles reached. "
             "The following processes are active:\n",
             opt_get_int("stop-delta"));

   for (sens_list_t *it = resume; it != NULL; it = it->next) {
      tree_t p = it->proc->source;
      const loc_t *l = tree_loc(p);
      tb_printf(buf, "  %-30s %s line %d\n", istr(tree_ident(p)),
                istr(l->file), l->first_line);
   }

   tb_printf(buf, "You can increase this limit with --stop-delta");

   fatal("%s", tb_get(buf));
}

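// Run every process marked pending on a resume list. Entries with a
// reenq pointer are re-queued onto that sensitivity list for reuse;
// the others are returned to the allocator.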
static void rt_resume_processes(sens_list_t **list)
{
   sens_list_t *it = *list;
   while (it != NULL) {
      if (it->proc->pending) {
         rt_run(it->proc, false /* reset */);
         it->proc->pending = false;
      }

      sens_list_t *next = it->next;

      if (it->reenq == NULL)
         rt_free(sens_list_stack, it);
      else {
         it->next = *(it->reenq);
         *(it->reenq) = it;
      }

      it = next;
   }

   *list = NULL;
}

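// Fire any scheduled watch callbacks matching the postponed flag,
// unlinking each one from the pending chain as it runs.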
static void rt_event_callback(bool postponed)
{
   watch_t **last = &callbacks;
   watch_t *next = NULL, *it;
   for (it = callbacks; it != NULL; it = next) {
      next = it->chain_pending;
      if (it->postponed == postponed) {
         (*it->fn)(now, it->signal, it, it->user_data);
         it->pending = false;

         *last = it->chain_pending;
         it->chain_pending = NULL;
      }
      else
         last = &(it->chain_pending);
   }
}

static inline bool rt_next_cycle_is_delta(void)
{
   return (delta_driver != NULL) || (delta_proc != NULL);
}

static void rt_cycle(int stop_delta)
{
   // Simulation cycle is described in LRM 93 section 12.6.4

   const bool is_delta_cycle = (delta_driver != NULL) || (delta_proc != NULL);

   if (is_delta_cycle)
      iteration = iteration + 1;
   else {
      event_t *peek = heap_min(eventq_heap);
      while (unlikely(rt_stale_event(peek))) {
         // Discard stale events
         rt_free(event_stack, heap_extract_min(eventq_heap));
         if (heap_size(eventq_heap) == 0)
            return;
         else
            peek = heap_min(eventq_heap);
      }
      now = peek->when;
      iteration = 0;
   }

   TRACE("begin cycle");

#if TRACE_DELTAQ > 0
   if (trace_on)
      deltaq_dump();
#endif
#if TRACE_PENDING > 0
   if (trace_on)
      rt_dump_pending();
#endif

   if (is_delta_cycle) {
      for (event_t *e = delta_driver; e != NULL; e = e->delta_chain)
         rt_push_run_queue(e);

      for (event_t *e = delta_proc; e != NULL; e = e->delta_chain)
         rt_push_run_queue(e);

      delta_driver = NULL;
      delta_proc = NULL;
   }
   else {
      rt_global_event(RT_NEXT_TIME_STEP);

      for (;;) {
         rt_push_run_queue(heap_extract_min(eventq_heap));

         if (heap_size(eventq_heap) == 0)
            break;

         event_t *peek = heap_min(eventq_heap);
         if (peek->when > now)
            break;
      }
   }

   event_t *event;
   while ((event = rt_pop_run_queue())) {
      switch (event->kind) {
      case E_PROCESS:
         rt_run(event->proc, false /* reset */);
         break;
      case E_DRIVER:
         rt_update_driver(event->group, event->proc);
         break;
      case E_TIMEOUT:
         (*event->timeout_fn)(now, event->timeout_user);
         break;
      }

      rt_free(event_stack, event);
   }

   if (unlikely(now == 0 && iteration == 0)) {
      vcd_restart();
      lxt_restart();
      fst_restart();
   }
   else if (unlikely((stop_delta > 0) && (iteration == stop_delta)))
      rt_iteration_limit();

   // Run all non-postponed event callbacks
   rt_event_callback(false);

   // Run all processes that resumed because of signal events
   rt_resume_processes(&resume);
   rt_global_event(RT_END_OF_PROCESSES);

   for (unsigned i = 0; i < n_active_groups; i++) {
      netgroup_t *g = active_groups[i];
      g->flags &= ~(NET_F_ACTIVE | NET_F_EVENT);
   }
   n_active_groups = 0;

   if (!rt_next_cycle_is_delta()) {
      can_create_delta = false;
      rt_global_event(RT_LAST_KNOWN_DELTA_CYCLE);

      // Run any postponed processes
      rt_resume_processes(&postponed);

      // Execute all postponed event callbacks
      rt_event_callback(true);

      can_create_delta = true;
   }
}

static tree_t rt_recall_decl(const char *name)
{
   tree_t decl = hash_get(decl_hash, ident_new(name));
   if (decl != NULL)
      return decl;
   else
      fatal("cannot find name %s in elaborated design", name);
}

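// Free everything owned by a net group: the resolved value buffer when
// the group owns it, the forcing value, outstanding waveforms, the
// value free list, pending sensitivity entries and watch links.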
static void rt_cleanup_group(groupid_t gid, netid_t first, unsigned length)
{
   netgroup_t *g = &(groups[gid]);

   RT_ASSERT(g->first == first);
   RT_ASSERT(g->length == length);

   if (g->flags & NET_F_OWNS_MEM)
      free(g->resolved);

   free(g->forcing);

   for (int j = 0; j < g->n_drivers; j++) {
      while (g->drivers[j].waveforms != NULL) {
         waveform_t *next = g->drivers[j].waveforms->next;
         rt_free_value(g, g->drivers[j].waveforms->values);
         rt_free(waveform_stack, g->drivers[j].waveforms);
         g->drivers[j].waveforms = next;
      }
   }
   free(g->drivers);

   while (g->free_values != NULL) {
      value_t *next = g->free_values->next;
      free(g->free_values);
      g->free_values = next;
   }

   while (g->pending != NULL) {
      sens_list_t *next = g->pending->next;
      rt_free(sens_list_stack, g->pending);
      g->pending = next;
   }

   while (g->watching != NULL) {
      watch_list_t *next = g->watching->next;
      free(g->watching);
      g->watching = next;
   }
}

static void rt_cleanup(tree_t top)
{
   RT_ASSERT(resume == NULL);

   while (heap_size(eventq_heap) > 0)
      rt_free(event_stack, heap_extract_min(eventq_heap));

   rt_free_delta_events(delta_proc);
   rt_free_delta_events(delta_driver);

   heap_free(eventq_heap);
   eventq_heap = NULL;

   netdb_walk(netdb, rt_cleanup_group);
   netdb_close(netdb);

   hash_free(decl_hash);
   decl_hash = NULL;

   while (watches != NULL) {
      watch_t *next = watches->chain_all;
      free(watches->groups);
      rt_free(watch_stack, watches);
      watches = next;
   }

   while (pending != NULL) {
      sens_list_t *next = pending->next;
      rt_free(sens_list_stack, pending);
      pending = next;
   }

   for (int i = 0; i < RT_LAST_EVENT; i++) {
      while (global_cbs[i] != NULL) {
         callback_t *tmp = global_cbs[i]->next;
         rt_free(callback_stack, global_cbs[i]);
         global_cbs[i] = tmp;
      }
   }

   rt_alloc_stack_destroy(event_stack);
   rt_alloc_stack_destroy(waveform_stack);
   rt_alloc_stack_destroy(sens_list_stack);
   rt_alloc_stack_destroy(watch_stack);
   rt_alloc_stack_destroy(callback_stack);

   hash_free(res_memo_hash);
}

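// Simulation stops when there is nothing left in the delta queues and
// either the event heap is empty, rt_stop was called, or the next
// event lies beyond the requested stop time.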
static bool rt_stop_now(uint64_t stop_time)
{
   if ((delta_driver != NULL) || (delta_proc != NULL))
      return false;
   else if (heap_size(eventq_heap) == 0)
      return true;
   else if (force_stop)
      return true;
   else if (stop_time == UINT64_MAX)
      return false;
   else {
      event_t *peek = heap_min(eventq_heap);
      return peek->when > stop_time;
   }
}

static int rt_proc_usage_cmp(const void *lhs, const void *rhs)
{
   // Sort in descending order of usage; compare explicitly rather than
   // subtracting to avoid truncating the uint64_t difference to int
   const uint64_t l = ((const rt_proc_t *)lhs)->usage;
   const uint64_t r = ((const rt_proc_t *)rhs)->usage;
   return (l < r) ? 1 : ((l > r) ? -1 : 0);
}

static void rt_stats_print(void)
{
   nvc_rusage_t ru;
   nvc_rusage(&ru);

   if (profiling) {
      notef("top processes by CPU usage");

      qsort(procs, n_procs, sizeof(rt_proc_t), rt_proc_usage_cmp);

      const uint64_t ru_us = ru.ms * 1000;

      color_printf("$white$%10s %5s %s$$\n", "us", "%", "process");
      for (size_t i = 0; i < MIN(n_procs, 10); i++) {
         const double pc = ((double)procs[i].usage / ru_us) * 100.0;
         printf("%10"PRIu64" %5.1f %s\n", procs[i].usage, pc,
                istr(tree_ident(procs[i].source)));
      }
   }

   notef("setup:%ums run:%ums maxrss:%ukB", ready_rusage.ms, ru.ms, ru.rss);
}


static void rt_reset_coverage(tree_t top)
{
   int32_t *cover_stmts = jit_find_symbol("cover_stmts", false);
   if (cover_stmts != NULL) {
      const int ntags = tree_attr_int(top, ident_new("stmt_tags"), 0);
      memset(cover_stmts, '\0', sizeof(int32_t) * ntags);
   }

   int32_t *cover_conds = jit_find_symbol("cover_conds", false);
   if (cover_conds != NULL) {
      const int ntags = tree_attr_int(top, ident_new("cond_tags"), 0);
      memset(cover_conds, '\0', sizeof(int32_t) * ntags);
   }
}

static void rt_emit_coverage(tree_t top)
{
   const int32_t *cover_stmts = jit_find_symbol("cover_stmts", false);
   const int32_t *cover_conds = jit_find_symbol("cover_conds", false);
   if (cover_stmts != NULL)
      cover_report(top, cover_stmts, cover_conds);
}

static void rt_interrupt(void)
{
   if (active_proc != NULL)
      fatal_at(tree_loc(active_proc->source),
               "interrupted in process %s at %s+%d",
               istr(tree_ident(active_proc->source)), fmt_time(now), iteration);
   else
      fatal("interrupted");
}

#ifdef __MINGW32__
static BOOL rt_win_ctrl_handler(DWORD fdwCtrlType)
{
   switch (fdwCtrlType) {
   case CTRL_C_EVENT:
      rt_interrupt();
      return TRUE;

   default:
      return FALSE;
   }
}
#endif

void rt_start_of_tool(tree_t top)
{
   jit_init(top);

#if RT_DEBUG
   warnf("runtime debug assertions enabled");
#endif

#ifndef __MINGW32__
   struct sigaction sa;
   sa.sa_sigaction = (void*)rt_interrupt;
   sigemptyset(&sa.sa_mask);
   sa.sa_flags = SA_RESTART | SA_SIGINFO;

   sigaction(SIGINT, &sa, NULL);
#else
   if (!SetConsoleCtrlHandler(rt_win_ctrl_handler, TRUE))
      fatal_trace("SetConsoleCtrlHandler");
#endif

   trace_on = opt_get_int("rt_trace_en");
   profiling = opt_get_int("rt_profile");

   event_stack     = rt_alloc_stack_new(sizeof(event_t), "event");
   waveform_stack  = rt_alloc_stack_new(sizeof(waveform_t), "waveform");
   sens_list_stack = rt_alloc_stack_new(sizeof(sens_list_t), "sens_list");
   watch_stack     = rt_alloc_stack_new(sizeof(watch_t), "watch");
   callback_stack  = rt_alloc_stack_new(sizeof(callback_t), "callback");

   n_active_alloc = 128;
   active_groups = xmalloc(n_active_alloc * sizeof(struct netgroup *));

   global_tmp_stack = mmap_guarded(GLOBAL_TMP_STACK_SZ, "global temp stack");
   proc_tmp_stack   = mmap_guarded(PROC_TMP_STACK_SZ, "process temp stack");

   global_tmp_alloc = 0;

   rt_reset_coverage(top);

   nvc_rusage(&ready_rusage);
}

void rt_end_of_tool(tree_t top)
{
   rt_cleanup(top);
   rt_emit_coverage(top);

   jit_shutdown();

   if (opt_get_int("rt-stats") || profiling)
      rt_stats_print();
}

void rt_run_sim(uint64_t stop_time)
{
   const int stop_delta = opt_get_int("stop-delta");

   rt_global_event(RT_START_OF_SIMULATION);
   while (!rt_stop_now(stop_time))
      rt_cycle(stop_delta);
   rt_global_event(RT_END_OF_SIMULATION);
}

static void rt_interactive_fatal(void)
{
   aborted = true;
   longjmp(fatal_jmp, 1);
}

void rt_run_interactive(uint64_t stop_time)
{
   if (aborted)
      errorf("simulation has aborted and must be restarted");
   else if ((heap_size(eventq_heap) == 0) && (delta_proc == NULL))
      warnf("no future simulation events");
   else {
      set_fatal_fn(rt_interactive_fatal);

      if (setjmp(fatal_jmp) == 0)
         rt_run_sim(stop_time);

      set_fatal_fn(NULL);
   }
}

void rt_restart(tree_t top)
{
   rt_setup(top);
   rt_initial(top);
   aborted = false;
}

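// Schedule a one-shot callback at a time relative to now. Timeout
// events carry no process so they are never considered stale.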
void rt_set_timeout_cb(uint64_t when, timeout_fn_t fn, void *user)
{
   event_t *e = rt_alloc(event_stack);
   e->when         = now + when;
   e->kind         = E_TIMEOUT;
   e->group        = NULL;
   e->proc         = NULL;
   e->timeout_fn   = fn;
   e->timeout_user = user;
   e->wakeup_gen   = UINT32_MAX;

   deltaq_insert(e);
}

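// Register a callback to run when an event occurs on signal s. Passing
// fn == NULL instead disables the first existing watch matching the
// signal and user data.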
watch_t *rt_set_event_cb(tree_t s, sig_event_fn_t fn, void *user,
                         bool postponed)
{
   RT_ASSERT(tree_kind(s) == T_SIGNAL_DECL);

   if (fn == NULL) {
      // Find the first entry in the watch list and disable it
      for (watch_t *it = watches; it != NULL; it = it->chain_all) {
         if ((it->signal == s) && (it->user_data == user)) {
            it->pending = true;   // TODO: not a good way of doing this
            break;
         }
      }

      return NULL;
   }
   else {
      watch_t *w = rt_alloc(watch_stack);
      RT_ASSERT(w != NULL);
      w->signal        = s;
      w->fn            = fn;
      w->chain_all     = watches;
      w->chain_pending = NULL;
      w->pending       = false;
      w->groups        = NULL;
      w->n_groups      = 0;
      w->user_data     = user;
      w->length        = 0;
      w->postponed     = postponed;

      type_t type = tree_type(s);
      if (type_is_array(type))
         w->dir = direction_of(type, 0);
      else
         w->dir = RANGE_TO;

      watches = w;

      rt_watch_signal(w);
      return w;
   }
}

void rt_set_global_cb(rt_event_t event, rt_event_fn_t fn, void *user)
{
   RT_ASSERT(event < RT_LAST_EVENT);

   callback_t *cb = rt_alloc(callback_stack);
   cb->next = global_cbs[event];
   cb->fn   = fn;
   cb->user = user;

   global_cbs[event] = cb;
}

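// Copy the value of each group under a watch into buf, widening every
// element to uint64_t. When `last` is true the saved last value is
// read instead of the current one. At most max elements are written.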
size_t rt_watch_value(watch_t *w, uint64_t *buf, size_t max, bool last)
{
   int offset = 0;
   for (int i = 0; (i < w->n_groups) && (offset < max); i++) {
      netgroup_t *g = w->groups[i];

#define SIGNAL_VALUE_EXPAND_U64(type) do {                              \
         const type *sp = (type *)(last ? g->last_value : g->resolved); \
         for (int j = 0; (j < g->length) && (offset + j < max); j++)    \
            buf[offset + j] = sp[j];                                    \
      } while (0)

      FOR_ALL_SIZES(g->size, SIGNAL_VALUE_EXPAND_U64);

      offset += g->length;
   }

   return offset;
}

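// Format one group's resolved value into buf, translating each byte
// through the map table when one is given. end1 points one past the
// end of the buffer; the output is NUL terminated when space allows
// and the number of characters written is returned.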
static size_t rt_group_string(netgroup_t *group, const char *map,
                              char *buf, const char *end1)
{
   char *bp = buf;
   const char *vals = group->resolved;

   if (likely(map != NULL)) {
      for (int j = 0; j < group->length; j++) {
         if (bp + 1 < end1)
            *bp++ = map[(int)vals[j]];
      }
   }
   else {
      for (int j = 0; j < group->length; j++) {
         if (bp + 1 < end1)
            *bp++ = vals[j];
      }
   }

   if (bp < end1)
      *bp = '\0';

   return bp - buf;
}

size_t rt_watch_string(watch_t *w, const char *map, char *buf, size_t max)
{
   char *bp = buf;
   size_t offset = 0;
   for (int i = 0; i < w->n_groups; i++) {
      netgroup_t *g = w->groups[i];
      bp += rt_group_string(g, map, bp, buf + max);
      offset += g->length;
   }

   return offset + 1;
}

size_t rt_signal_string(tree_t s, const char *map, char *buf, size_t max)
{
   char *bp = buf;
   const int nnets = tree_nets(s);
   int offset = 0;
   while (offset < nnets) {
      netid_t nid = tree_net(s, offset);
      netgroup_t *g = &(groups[netdb_lookup(netdb, nid)]);
      bp += rt_group_string(g, map, bp, buf + max);
      offset += g->length;
   }

   return offset + 1;
}

size_t rt_signal_value(tree_t s, uint64_t *buf, size_t max)
{
   const int nnets = tree_nets(s);
   int offset = 0;
   while (offset < nnets) {
      netid_t nid = tree_net(s, offset);
      netgroup_t *g = &(groups[netdb_lookup(netdb, nid)]);

#define SIGNAL_READ_EXPAND_U64(type) do {                               \
         const type *sp = (type *)g->resolved;                          \
         for (int i = 0; (i < g->length) && (offset + i < max); i++)    \
            buf[offset + i] = sp[i];                                    \
      } while (0)

      FOR_ALL_SIZES(g->size, SIGNAL_READ_EXPAND_U64);

      offset += g->length;
   }

   return offset;
}

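// Force each group of signal s to the given values, copying them into
// the group's forcing buffer. When propagate is true a driver update
// is scheduled in the next delta cycle so the forced value takes
// effect. Returns true if count covered the whole signal exactly.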
bool rt_force_signal(tree_t s, const uint64_t *buf, size_t count,
                     bool propagate)
{
   TRACE("force signal %s to %s propagate=%d", istr(tree_ident(s)),
         fmt_values(buf, count * sizeof(uint64_t)), propagate);

   RT_ASSERT(!propagate || can_create_delta);

   const int nnets = tree_nets(s);
   int offset = 0;
   while (offset < nnets) {
      netid_t nid = tree_net(s, offset);
      netgroup_t *g = &(groups[netdb_lookup(netdb, nid)]);

      g->flags |= NET_F_FORCED;

      if (g->forcing == NULL)
         g->forcing = rt_alloc_value(g);

#define SIGNAL_FORCE_EXPAND_U64(type) do {                              \
         type *dp = (type *)g->forcing->data;                           \
         for (int i = 0; (i < g->length) && (offset + i < count); i++)  \
            dp[i] = buf[offset + i];                                    \
      } while (0)

      FOR_ALL_SIZES(g->size, SIGNAL_FORCE_EXPAND_U64);

      if (propagate)
         deltaq_insert_driver(0, g, NULL);

      offset += g->length;
   }

   return (offset == count);
}

bool rt_can_create_delta(void)
{
   return can_create_delta;
}

uint64_t rt_now(unsigned *deltas)
{
   if (deltas != NULL)
      *deltas = MAX(iteration, 0);
   return now;
}

void rt_stop(void)
{
   force_stop = true;
}

void rt_set_exit_severity(rt_severity_t severity)
{
   exit_severity = severity;
}
