/*
 * Copyright (c) 2001-2018 Stephen Williams (steve@icarus.com)
 *
 *    This source code is free software; you can redistribute it
 *    and/or modify it in source code form under the terms of the GNU
 *    General Public License as published by the Free Software
 *    Foundation; either version 2 of the License, or (at your option)
 *    any later version.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *    GNU General Public License for more details.
 *
 *    You should have received a copy of the GNU General Public License
 *    along with this program; if not, write to the Free Software
 *    Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

# include  "config.h"
# include  "schedule.h"
# include  "vthread.h"
# include  "vpi_priv.h"
# include  "vvp_net_sig.h"
# include  "slab.h"
# include  "compile.h"
# include  <new>
# include  <typeinfo>
# include  <csignal>
# include  <cstdlib>
# include  <cassert>
# include  <iostream>
#ifdef CHECK_WITH_VALGRIND
# include  "vvp_cleanup.h"
# include  "ivl_alloc.h"
#endif

unsigned long count_assign_events = 0;
unsigned long count_gen_events = 0;
unsigned long count_thread_events = 0;
  // Count the time events (each event_time_s cell created)
unsigned long count_time_events = 0;



/*
 * The event_s and event_time_s structures implement the Verilog
 * stratified event queue.
 *
 * The event_time_s objects are one per time step. Each time step in
 * turn contains a list of event_s objects that are the actual events.
 *
 * The event_s objects are base classes for the more specific sorts
 * of events.
 */
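/*
 * Each per-category event list (active, inactive, nbassign, etc.) is
 * kept as a circular, singly linked list. The stored pointer always
 * references the most recently appended event (the tail), so
 * tail->next is the oldest event and appending is an O(1) operation.
 */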
struct event_s {
      struct event_s*next;
      virtual ~event_s() { }
      virtual void run_run(void) =0;

	// Write something about the event to stderr
      virtual void single_step_display(void);

	// Fallback new/delete
      static void*operator new (size_t size) { return ::new char[size]; }
      static void operator delete(void*ptr)  { ::delete[]( (char*)ptr ); }
};
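/*
 * Most of the event types derived below replace these fallback
 * allocators with per-type slab pools. The generic new/delete above
 * covers the few types that have no dedicated pool, such as the
 * propagate_*_event_s classes.
 */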

void event_s::single_step_display(void)
{
      std::cerr << "event_s: Step into event " << typeid(*this).name() << std::endl;
}

struct event_time_s {
      event_time_s() {
	    count_time_events += 1;
	    start = 0;
	    active = 0;
	    inactive = 0;
	    nbassign = 0;
	    rwsync = 0;
	    rosync = 0;
	    del_thr = 0;
	    next = NULL;
      }
      vvp_time64_t delay;

      struct event_s*start;
      struct event_s*active;
      struct event_s*inactive;
      struct event_s*nbassign;
      struct event_s*rwsync;
      struct event_s*rosync;
      struct event_s*del_thr;

      struct event_time_s*next;

      static void* operator new (size_t);
      static void operator delete(void*obj, size_t s);
};
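/*
 * The delay in each event_time_s cell is relative to the cell before
 * it in the sched_list, not absolute. For example, events scheduled
 * at absolute times 10 and 15 are held in two cells with delays 10
 * and 5; stepping past the first cell leaves the second 5 ticks in
 * the future.
 */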

vvp_gen_event_s::~vvp_gen_event_s()
{
}

void vvp_gen_event_s::single_step_display(void)
{
      cerr << "vvp_gen_event_s: Step into event " << typeid(*this).name() << endl;
}

/*
 * Derived event types
 */
struct vthread_event_s : public event_s {
      vthread_t thr;
      void run_run(void);
      void single_step_display(void);

      static void* operator new(size_t);
      static void operator delete(void*);
};

void vthread_event_s::run_run(void)
{
      count_thread_events += 1;
      vthread_run(thr);
}

void vthread_event_s::single_step_display(void)
{
      __vpiScope*scope = vthread_scope(thr);
      cerr << "vthread_event: Resume thread"
	   << " scope=" << scope->vpi_get_str(vpiFullName)
	   << endl;
}

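/*
 * Pool allocators for the derived event types. Each chunk count
 * below sizes one slab chunk to a fixed byte budget (8192 bytes for
 * most pools, larger for the heavily used vector4 assign and generic
 * event pools), and the count_*_pool() functions report each pool's
 * footprint.
 */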
static const size_t VTHR_CHUNK_COUNT = 8192 / sizeof(struct vthread_event_s);
static slab_t<sizeof(vthread_event_s),VTHR_CHUNK_COUNT> vthread_event_heap;

inline void* vthread_event_s::operator new(size_t size)
{
      assert(size == sizeof(vthread_event_s));
      return vthread_event_heap.alloc_slab();
}

void vthread_event_s::operator delete(void*dptr)
{
      vthread_event_heap.free_slab(dptr);
}

struct del_thr_event_s : public event_s {
      vthread_t thr;
      void run_run(void);
      void single_step_display(void);
};

void del_thr_event_s::run_run(void)
{
      vthread_delete(thr);
}

void del_thr_event_s::single_step_display(void)
{
      __vpiScope*scope = vthread_scope(thr);
      cerr << "del_thr_event: Reap completed thread"
	   << " scope=" << scope->vpi_get_str(vpiFullName) << endl;
}

struct assign_vector4_event_s  : public event_s {
	/* The constructor. */
      explicit assign_vector4_event_s(const vvp_vector4_t&that) : val(that) {
	    base = 0;
	    vwid = 0;
      }

	/* Where to do the assign. */
      vvp_net_ptr_t ptr;
	/* Value to assign. */
      vvp_vector4_t val;
	/* Offset of the part into the destination. */
      unsigned base;
	/* Width of the destination vector. */
      unsigned vwid;
      void run_run(void);
      void single_step_display(void);

      static void* operator new(size_t);
      static void operator delete(void*);
};

void assign_vector4_event_s::run_run(void)
{
      count_assign_events += 1;
      if (vwid > 0)
	    vvp_send_vec4_pv(ptr, val, base, val.size(), vwid, 0);
      else
	    vvp_send_vec4(ptr, val, 0);
}

void assign_vector4_event_s::single_step_display(void)
{
      cerr << "assign_vector4_event: Propagate val=" << val
	   << ", vwid=" << vwid << ", base=" << base << endl;
}

static const size_t ASSIGN4_CHUNK_COUNT = 524288 / sizeof(struct assign_vector4_event_s);
static slab_t<sizeof(assign_vector4_event_s),ASSIGN4_CHUNK_COUNT> assign4_heap;

inline void* assign_vector4_event_s::operator new(size_t size)
{
      assert(size == sizeof(assign_vector4_event_s));
      return assign4_heap.alloc_slab();
}

void assign_vector4_event_s::operator delete(void*dptr)
{
      assign4_heap.free_slab(dptr);
}

unsigned long count_assign4_pool(void) { return assign4_heap.pool; }

struct assign_vector8_event_s  : public event_s {
      vvp_net_ptr_t ptr;
      vvp_vector8_t val;
      void run_run(void);
      void single_step_display(void);

      static void* operator new(size_t);
      static void operator delete(void*);
};

void assign_vector8_event_s::run_run(void)
{
      count_assign_events += 1;
      vvp_send_vec8(ptr, val);
}

void assign_vector8_event_s::single_step_display(void)
{
      cerr << "assign_vector8_event: Propagate val=" << val << endl;
}

static const size_t ASSIGN8_CHUNK_COUNT = 8192 / sizeof(struct assign_vector8_event_s);
static slab_t<sizeof(assign_vector8_event_s),ASSIGN8_CHUNK_COUNT> assign8_heap;

inline void* assign_vector8_event_s::operator new(size_t size)
{
      assert(size == sizeof(assign_vector8_event_s));
      return assign8_heap.alloc_slab();
}

void assign_vector8_event_s::operator delete(void*dptr)
{
      assign8_heap.free_slab(dptr);
}

unsigned long count_assign8_pool() { return assign8_heap.pool; }

struct assign_real_event_s  : public event_s {
      vvp_net_ptr_t ptr;
      double val;
      void run_run(void);
      void single_step_display(void);

      static void* operator new(size_t);
      static void operator delete(void*);
};

void assign_real_event_s::run_run(void)
{
      count_assign_events += 1;
      vvp_send_real(ptr, val, 0);
}

void assign_real_event_s::single_step_display(void)
{
      cerr << "assign_real_event: Propagate val=" << val << endl;
}

static const size_t ASSIGNR_CHUNK_COUNT = 8192 / sizeof(struct assign_real_event_s);
static slab_t<sizeof(assign_real_event_s),ASSIGNR_CHUNK_COUNT> assignr_heap;

inline void* assign_real_event_s::operator new (size_t size)
{
      assert(size == sizeof(assign_real_event_s));
      return assignr_heap.alloc_slab();
}

void assign_real_event_s::operator delete(void*dptr)
{
      assignr_heap.free_slab(dptr);
}

unsigned long count_assign_real_pool(void) { return assignr_heap.pool; }

struct assign_array_word_s  : public event_s {
      vvp_array_t mem;
      unsigned adr;
      vvp_vector4_t val;
      unsigned off;
      void run_run(void);

      static void* operator new(size_t);
      static void operator delete(void*);
};

void assign_array_word_s::run_run(void)
{
      count_assign_events += 1;
      mem->set_word(adr, off, val);
}

static const size_t ARRAY_W_CHUNK_COUNT = 8192 / sizeof(struct assign_array_word_s);
static slab_t<sizeof(assign_array_word_s),ARRAY_W_CHUNK_COUNT> array_w_heap;

inline void* assign_array_word_s::operator new (size_t size)
{
      assert(size == sizeof(assign_array_word_s));
      return array_w_heap.alloc_slab();
}

void assign_array_word_s::operator delete(void*ptr)
{
      array_w_heap.free_slab(ptr);
}

unsigned long count_assign_aword_pool(void) { return array_w_heap.pool; }

struct force_vector4_event_s  : public event_s {
	/* The constructor. */
      explicit force_vector4_event_s(const vvp_vector4_t&that): val(that) {
	    net = NULL;
	    base = 0;
	    vwid = 0;
      }
	/* Where to do the force. */
      vvp_net_t*net;
	/* Value to force. */
      vvp_vector4_t val;
	/* Offset of the part into the destination. */
      unsigned base;
	/* Width of the destination vector. */
      unsigned vwid;

      void run_run(void);
      void single_step_display(void);

      static void* operator new(size_t);
      static void operator delete(void*);
};

void force_vector4_event_s::run_run(void)
{
      count_assign_events += 1;

      unsigned wid = val.size();
      if ((base + wid) > vwid)
	    wid = vwid - base;

	// Make a mask of which bits are to be forced, 0 for unforced
	// bits and 1 for forced bits.
      vvp_vector2_t mask (vvp_vector2_t::FILL0, vwid);
      for (unsigned idx = 0 ; idx < wid ; idx += 1)
	    mask.set_bit(base+idx, 1);

      vvp_vector4_t tmp (vwid, BIT4_Z);

	// vvp_net_t::force_vec4 propagates all the bits of the
	// forced vector value, regardless of the mask. This
	// ensures the unforced bits retain their current value.
      vvp_signal_value*sig = dynamic_cast<vvp_signal_value*>(net->fil);
      assert(sig);
      sig->vec4_value(tmp);

      tmp.set_vec(base, val);
      net->force_vec4(tmp, mask);
}

void force_vector4_event_s::single_step_display(void)
{
      cerr << "force_vector4_event: Force val=" << val
	   << ", vwid=" << vwid << ", base=" << base << endl;
}

static const size_t FORCE4_CHUNK_COUNT = 8192 / sizeof(struct force_vector4_event_s);
static slab_t<sizeof(force_vector4_event_s),FORCE4_CHUNK_COUNT> force4_heap;

inline void* force_vector4_event_s::operator new(size_t size)
{
      assert(size == sizeof(force_vector4_event_s));
      return force4_heap.alloc_slab();
}

void force_vector4_event_s::operator delete(void*dptr)
{
      force4_heap.free_slab(dptr);
}

unsigned long count_force4_pool(void) { return force4_heap.pool; }

/*
 * This class supports the propagation of vec4 outputs from a
 * vvp_net_t object.
 */
struct propagate_vector4_event_s : public event_s {
	/* The constructor. */
      explicit propagate_vector4_event_s(const vvp_vector4_t&that) : val(that) {
	    net = NULL;
      }
	/* A constructor that makes the val directly. */
      propagate_vector4_event_s(const vvp_vector4_t&that, unsigned adr, unsigned wid)
      : val(that,adr,wid) {
	    net = NULL;
      }

	/* Propagate the output of this net. */
      vvp_net_t*net;
	/* value to propagate */
      vvp_vector4_t val;
	/* Action */
      void run_run(void);
      void single_step_display(void);
};

void propagate_vector4_event_s::run_run(void)
{
      net->send_vec4(val, 0);
}

void propagate_vector4_event_s::single_step_display(void)
{
      cerr << "propagate_vector4_event: Propagate val=" << val << endl;
}

/*
 * This class supports the propagation of real outputs from a
 * vvp_net_t object.
 */
struct propagate_real_event_s : public event_s {
	/* Propagate the output of this net. */
      vvp_net_t*net;
	/* value to propagate */
      double val;
	/* Action */
      void run_run(void);
      void single_step_display(void);
};

void propagate_real_event_s::run_run(void)
{
      net->send_real(val, 0);
}

void propagate_real_event_s::single_step_display(void)
{
      cerr << "propagate_real_event: Propagate val=" << val << endl;
}

struct assign_array_r_word_s  : public event_s {
      vvp_array_t mem;
      unsigned adr;
      double val;
      void run_run(void);

      static void* operator new(size_t);
      static void operator delete(void*);
};

void assign_array_r_word_s::run_run(void)
{
      count_assign_events += 1;
      mem->set_word(adr, val);
}
static const size_t ARRAY_R_W_CHUNK_COUNT = 8192 / sizeof(struct assign_array_r_word_s);
static slab_t<sizeof(assign_array_r_word_s),ARRAY_R_W_CHUNK_COUNT> array_r_w_heap;

inline void* assign_array_r_word_s::operator new(size_t size)
{
      assert(size == sizeof(assign_array_r_word_s));
      return array_r_w_heap.alloc_slab();
}

void assign_array_r_word_s::operator delete(void*ptr)
{
      array_r_w_heap.free_slab(ptr);
}

unsigned long count_assign_arword_pool(void) { return array_r_w_heap.pool; }

struct generic_event_s : public event_s {
      vvp_gen_event_t obj;
      bool delete_obj_when_done;
      void run_run(void);
      void single_step_display(void);

      static void* operator new(size_t);
      static void operator delete(void*);
};

void generic_event_s::run_run(void)
{
      count_gen_events += 1;
      if (obj) {
	    obj->run_run();
	    if (delete_obj_when_done)
		  delete obj;
      }
}

void generic_event_s::single_step_display(void)
{
      obj->single_step_display();
}

static const size_t GENERIC_CHUNK_COUNT = 131072 / sizeof(struct generic_event_s);
static slab_t<sizeof(generic_event_s),GENERIC_CHUNK_COUNT> generic_event_heap;

inline void* generic_event_s::operator new(size_t size)
{
      assert(size == sizeof(generic_event_s));
      return generic_event_heap.alloc_slab();
}

void generic_event_s::operator delete(void*ptr)
{
      generic_event_heap.free_slab(ptr);
}

unsigned long count_gen_pool(void) { return generic_event_heap.pool; }

/*
** These event_time_s will be required a lot, at high frequency.
** Once allocated, we never free them, but stash them away for next time.
*/


static const size_t TIME_CHUNK_COUNT = 8192 / sizeof(struct event_time_s);
static slab_t<sizeof(event_time_s),TIME_CHUNK_COUNT> event_time_heap;

inline void* event_time_s::operator new (size_t size)
{
      assert(size == sizeof(struct event_time_s));
      void*ptr = event_time_heap.alloc_slab();
      return ptr;
}

inline void event_time_s::operator delete(void*ptr, size_t)
{
      event_time_heap.free_slab(ptr);
}

unsigned long count_time_pool(void) { return event_time_heap.pool; }

/*
 * This is the head of the list of pending events. This includes all
 * the events that have not been executed yet, and reaches into the
 * future.
 */
static struct event_time_s* sched_list = 0;

/*
 * This is a list of initialization events. The setup puts
 * initializations in this list so that they happen before the
 * simulation as a whole starts. This prevents time-0 triggers of
 * certain events.
 */
static struct event_s* schedule_init_list = 0;

/*
 * This is the head of the list of final events.
 */
static struct event_s* schedule_final_list = 0;

/*
 * This flag is true until a VPI task or function finishes the
 * simulation.
 */
static bool schedule_runnable = true;
static bool schedule_stopped_flag  = false;
static bool schedule_single_step_flag = false;

void schedule_finish(int)
{
      schedule_runnable = false;
}

void schedule_stop(int)
{
      schedule_stopped_flag = true;
}

void schedule_single_step(int)
{
      schedule_single_step_flag = true;
}

bool schedule_finished(void)
{
      return !schedule_runnable;
}

bool schedule_stopped(void)
{
      return schedule_stopped_flag;
}

/*
 * This is the signal handling infrastructure. The SIGINT signal
 * leads to an implicit $stop. The SIGHUP and SIGTERM signals lead
 * to an implicit $finish.
 */
extern bool stop_is_finish;

extern "C" void signals_handler(int signum)
{
#ifdef __MINGW32__
	// Windows implements the original UNIX semantics for signal,
	// so we have to re-establish the signal handler each time a
	// signal is caught.
      signal(signum, &signals_handler);
#endif
      if (signum != SIGINT)
	    stop_is_finish = true;
      schedule_stopped_flag = true;
}

static void signals_capture(void)
{
#ifndef __MINGW32__
      signal(SIGHUP,  &signals_handler);
#endif
      signal(SIGINT,  &signals_handler);
      signal(SIGTERM, &signals_handler);
}

static void signals_revert(void)
{
#ifndef __MINGW32__
      signal(SIGHUP,  SIG_DFL);
#endif
      signal(SIGINT,  SIG_DFL);
      signal(SIGTERM, SIG_DFL);
}

/*
 * This function puts an event on the end of the pre-simulation event queue.
 */
static void schedule_init_event(struct event_s*cur)
{
      if (schedule_init_list == 0) {
            cur->next = cur;
      } else {
            cur->next = schedule_init_list->next;
            schedule_init_list->next = cur;
      }
      schedule_init_list = cur;
}

/*
 * This function puts an event on the end of the post-simulation event queue.
 */
static void schedule_final_event(struct event_s*cur)
{
      if (schedule_final_list == 0) {
            cur->next = cur;
      } else {
            cur->next = schedule_final_list->next;
            schedule_final_list->next = cur;
      }
      schedule_final_list = cur;
}

/*
 * This function does all the hard work of putting an event into the
 * event queue. The event delay is passed in as an argument, and the
 * event structure is placed in the right place in the queue.
 */
typedef enum event_queue_e { SEQ_START, SEQ_ACTIVE, SEQ_INACTIVE, SEQ_NBASSIGN,
			     SEQ_RWSYNC, SEQ_ROSYNC, DEL_THREAD } event_queue_t;
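/*
 * These queue selectors correspond to the lists held in each
 * event_time_s cell: SEQ_START for the cbAtStartOfSimTime style
 * events, SEQ_ACTIVE/SEQ_INACTIVE for the active and inactive
 * regions, SEQ_NBASSIGN for non-blocking assignments, SEQ_RWSYNC and
 * SEQ_ROSYNC for the read-write and read-only synchronization
 * callbacks, and DEL_THREAD for reaping finished threads.
 */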

static void schedule_event_(struct event_s*cur, vvp_time64_t delay,
			    event_queue_t select_queue)
{
      cur->next = cur;
      struct event_time_s*ctim = sched_list;

      if (sched_list == 0) {
	      /* Is the event_time list completely empty? Create the
		 first event_time object. */
	    ctim = new struct event_time_s;
	    ctim->delay = delay;
	    ctim->next  = 0;
	    sched_list = ctim;

      } else if (sched_list->delay > delay) {

	      /* Am I looking for an event before the first event_time?
		 If so, create a new event_time to go in front. */
	    struct event_time_s*tmp = new struct event_time_s;
	    tmp->delay = delay;
	    tmp->next = ctim;
	    ctim->delay -= delay;
	    ctim = tmp;
	    sched_list = ctim;

      } else {
	    struct event_time_s*prev = 0;

	    while (ctim->next && (ctim->delay < delay)) {
		  delay -= ctim->delay;
		  prev = ctim;
		  ctim = ctim->next;
	    }

	    if (ctim->delay > delay) {
		  struct event_time_s*tmp = new struct event_time_s;
		  tmp->delay = delay;
		  tmp->next  = prev->next;
		  prev->next = tmp;

		  tmp->next->delay -= delay;
		  ctim = tmp;

	    } else if (ctim->delay == delay) {

	    } else {
		  assert(ctim->next == 0);
		  struct event_time_s*tmp = new struct event_time_s;
		  tmp->delay = delay - ctim->delay;
		  tmp->next = 0;
		  ctim->next = tmp;

		  ctim = tmp;
	    }
      }

	/* By this point, ctim is the event_time structure that is to
	   receive the event at hand. Put the event into the
	   appropriate list for the kind of assign we have at hand. */

      struct event_s** q = 0;

      switch (select_queue) {
	  case SEQ_START:
	    q = &ctim->start;
	    break;

	  case SEQ_ACTIVE:
	    q = &ctim->active;
	    break;

	  case SEQ_INACTIVE:
	    assert(delay == 0);
	    q = &ctim->inactive;
	    break;

	  case SEQ_NBASSIGN:
	    q = &ctim->nbassign;
	    break;

	  case SEQ_RWSYNC:
	    q = &ctim->rwsync;
	    break;

	  case SEQ_ROSYNC:
	    q = &ctim->rosync;
	    break;

	  case DEL_THREAD:
	    q = &ctim->del_thr;
	    break;
      }

      if (q) {
	    if (*q) {
		  /* Put the cur event on the end of the queue. */
		  cur->next = (*q)->next;
		  (*q)->next = cur;
	    }
	    *q = cur;
      }
}

static void schedule_event_push_(struct event_s*cur)
{
      if ((sched_list == 0) || (sched_list->delay > 0)) {
	    schedule_event_(cur, 0, SEQ_ACTIVE);
	    return;
      }

      struct event_time_s*ctim = sched_list;

      if (ctim->active == 0) {
	    cur->next = cur;
	    ctim->active = cur;
	    return;
      }

      cur->next = ctim->active->next;
      ctim->active->next = cur;
}
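/*
 * Note that the push case above links the event in as the head of
 * the current active list (the list pointer still references the
 * tail), so a pushed event runs before any active events that were
 * already scheduled for this time.
 */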

void schedule_vthread(vthread_t thr, vvp_time64_t delay, bool push_flag)
{
      struct vthread_event_s*cur = new vthread_event_s;

      cur->thr = thr;
      vthread_mark_scheduled(thr);

      if (push_flag && (delay == 0)) {
	      /* Special case: If the delay is 0, the push_flag means
		 I can push this event in front of everything. This is
		 used by the %fork statement, for example, to perform
		 task calls. */
	    schedule_event_push_(cur);

      } else {
	    schedule_event_(cur, delay, SEQ_ACTIVE);
      }
}

void schedule_t0_trigger(vvp_net_ptr_t ptr)
{
      vvp_vector4_t bit (1, BIT4_X);
      struct assign_vector4_event_s*cur = new struct assign_vector4_event_s(bit);
      cur->ptr = ptr;
      schedule_event_(cur, 0, SEQ_INACTIVE);
}

void schedule_inactive(vthread_t thr)
{
      struct vthread_event_s*cur = new vthread_event_s;

      cur->thr = thr;
      vthread_mark_scheduled(thr);
      schedule_event_(cur, 0, SEQ_INACTIVE);
}

void schedule_init_vthread(vthread_t thr)
{
      struct vthread_event_s*cur = new vthread_event_s;

      cur->thr = thr;
      vthread_mark_scheduled(thr);

      schedule_init_event(cur);
}

void schedule_final_vthread(vthread_t thr)
{
      struct vthread_event_s*cur = new vthread_event_s;

      cur->thr = thr;
      vthread_mark_scheduled(thr);

      schedule_final_event(cur);
}

void schedule_assign_vector(vvp_net_ptr_t ptr,
			    unsigned base, unsigned vwid,
			    const vvp_vector4_t&bit,
			    vvp_time64_t delay)
{
      struct assign_vector4_event_s*cur = new struct assign_vector4_event_s(bit);
      cur->ptr = ptr;
      cur->base = base;
      cur->vwid = vwid;
      schedule_event_(cur, delay, SEQ_NBASSIGN);
}

void schedule_force_vector(vvp_net_t*net,
			    unsigned base, unsigned vwid,
			    const vvp_vector4_t&bit,
			    vvp_time64_t delay)
{
      struct force_vector4_event_s*cur = new struct force_vector4_event_s(bit);
      cur->net = net;
      cur->base = base;
      cur->vwid = vwid;
      schedule_event_(cur, delay, SEQ_NBASSIGN);
}

void schedule_propagate_vector(vvp_net_t*net,
			       vvp_time64_t delay,
			       const vvp_vector4_t&src)
{
      struct propagate_vector4_event_s*cur
	    = new struct propagate_vector4_event_s(src);
      cur->net = net;
      schedule_event_(cur, delay, SEQ_NBASSIGN);
}

void schedule_assign_array_word(vvp_array_t mem,
				unsigned word_addr,
				unsigned off,
				vvp_vector4_t val,
				vvp_time64_t delay)
{
      struct assign_array_word_s*cur = new struct assign_array_word_s;
      cur->mem = mem;
      cur->adr = word_addr;
      cur->off = off;
      cur->val = val;
      schedule_event_(cur, delay, SEQ_NBASSIGN);
}

void schedule_assign_array_word(vvp_array_t mem,
				unsigned word_addr,
				double val,
				vvp_time64_t delay)
{
      struct assign_array_r_word_s*cur = new struct assign_array_r_word_s;
      cur->mem = mem;
      cur->adr = word_addr;
      cur->val = val;
      schedule_event_(cur, delay, SEQ_NBASSIGN);
}

void schedule_set_vector(vvp_net_ptr_t ptr, const vvp_vector4_t&bit)
{
      struct assign_vector4_event_s*cur = new struct assign_vector4_event_s(bit);
      cur->ptr = ptr;
      cur->base = 0;
      cur->vwid = 0;
      schedule_event_(cur, 0, SEQ_ACTIVE);
}

void schedule_set_vector(vvp_net_ptr_t ptr, vvp_vector8_t bit)
{
      struct assign_vector8_event_s*cur = new struct assign_vector8_event_s;
      cur->ptr = ptr;
      cur->val = bit;
      schedule_event_(cur, 0, SEQ_ACTIVE);
}

void schedule_set_vector(vvp_net_ptr_t ptr, double bit)
{
      struct assign_real_event_s*cur = new struct assign_real_event_s;
      cur->ptr = ptr;
      cur->val = bit;
      schedule_event_(cur, 0, SEQ_ACTIVE);
}

void schedule_init_vector(vvp_net_ptr_t ptr, vvp_vector4_t bit)
{
      struct assign_vector4_event_s*cur = new struct assign_vector4_event_s(bit);
      cur->ptr = ptr;
      cur->base = 0;
      cur->vwid = 0;
      schedule_init_event(cur);
}

void schedule_init_vector(vvp_net_ptr_t ptr, vvp_vector8_t bit)
{
      struct assign_vector8_event_s*cur = new struct assign_vector8_event_s;
      cur->ptr = ptr;
      cur->val = bit;
      schedule_init_event(cur);
}

void schedule_init_vector(vvp_net_ptr_t ptr, double bit)
{
      struct assign_real_event_s*cur = new struct assign_real_event_s;
      cur->ptr = ptr;
      cur->val = bit;
      schedule_init_event(cur);
}

void schedule_init_propagate(vvp_net_t*net, vvp_vector4_t bit)
{
      struct propagate_vector4_event_s*cur = new struct propagate_vector4_event_s(bit);
      cur->net = net;
      schedule_init_event(cur);
}

void schedule_init_propagate(vvp_net_t*net, double bit)
{
      struct propagate_real_event_s*cur = new struct propagate_real_event_s;
      cur->net = net;
      cur->val = bit;
      schedule_init_event(cur);
}

void schedule_del_thr(vthread_t thr)
{
      struct del_thr_event_s*cur = new del_thr_event_s;

      cur->thr = thr;

      schedule_event_(cur, 0, DEL_THREAD);
}

void schedule_generic(vvp_gen_event_t obj, vvp_time64_t delay,
		      bool sync_flag, bool ro_flag, bool delete_when_done)
{
      struct generic_event_s*cur = new generic_event_s;

      cur->obj = obj;
      cur->delete_obj_when_done = delete_when_done;
      schedule_event_(cur, delay,
		      sync_flag? (ro_flag?SEQ_ROSYNC:SEQ_RWSYNC) : SEQ_ACTIVE);

      if (sync_flag)
	    vthread_delay_delete();
}

static bool sim_started;

void schedule_functor(vvp_gen_event_t obj)
{
      struct generic_event_s*cur = new generic_event_s;

      cur->obj = obj;
      cur->delete_obj_when_done = false;
      if (!sim_started) {
            schedule_init_event(cur);
      } else {
            schedule_event_(cur, 0, SEQ_ACTIVE);
      }
}

void schedule_at_start_of_simtime(vvp_gen_event_t obj, vvp_time64_t delay)
{
      struct generic_event_s*cur = new generic_event_s;

      cur->obj = obj;
      cur->delete_obj_when_done = false;
      schedule_event_(cur, delay, SEQ_START);
}

/*
 * In the vvp runtime of Icarus Verilog, the SEQ_RWSYNC time step is
 * after all of the non-blocking assignments, so is effectively the
 * same as the ReadWriteSync time.
 */
void schedule_at_end_of_simtime(vvp_gen_event_t obj, vvp_time64_t delay)
{
      struct generic_event_s*cur = new generic_event_s;

      cur->obj = obj;
      cur->delete_obj_when_done = false;
      schedule_event_(cur, delay, SEQ_RWSYNC);
}

static vvp_time64_t schedule_time;
vvp_time64_t schedule_simtime(void)
{ return schedule_time; }

extern void vpiEndOfCompile();
extern void vpiStartOfSim();
extern void vpiPostsim();
extern void vpiNextSimTime(void);

static bool sim_at_rosync = false;
bool schedule_at_rosync(void)
{ return sim_at_rosync; }

/*
 * The scheduler uses this function to drain the rosync events of the
 * current time. The ctim object is still in the event queue, because
 * it is legal for a rosync callback to create other rosync
 * callbacks. It is *not* legal for them to create any other kinds of
 * events, and that is why the rosync is treated specially.
 *
 * Once all the rosync callbacks are done we can safely delete any
 * threads that finished during this time step.
 */
static void run_rosync(struct event_time_s*ctim)
{
      sim_at_rosync = true;
      while (ctim->rosync) {
	    struct event_s*cur = ctim->rosync->next;
	    if (cur->next == cur) {
		  ctim->rosync = 0;
	    } else {
		  ctim->rosync->next = cur->next;
	    }

	    cur->run_run();
	    delete cur;
      }
      sim_at_rosync = false;

      while (ctim->del_thr) {
	    struct event_s*cur = ctim->del_thr->next;
	    if (cur->next == cur) {
		  ctim->del_thr = 0;
	    } else {
		  ctim->del_thr->next = cur->next;
	    }

	    cur->run_run();
	    delete cur;
      }

      if (ctim->active || ctim->inactive || ctim->nbassign || ctim->rwsync) {
	    cerr << "SCHEDULER ERROR: read-only sync events "
		 << "created RW events!" << endl;
      }
}

void schedule_simulate(void)
{
      bool run_finals;
      sim_started = false;

      schedule_time = 0;

      if (verbose_flag) {
	    vpi_mcd_printf(1, " ...execute EndOfCompile callbacks\n");
      }

      // Execute end of compile callbacks
      vpiEndOfCompile();

      if (verbose_flag) {
	    vpi_mcd_printf(1, " ...propagate initialization events\n");
      }

	// Execute initialization events.
      while (schedule_init_list) {
	    struct event_s*cur = schedule_init_list->next;
	    if (cur->next == cur) {
		  schedule_init_list = 0;
	    } else {
		  schedule_init_list->next = cur->next;
	    }
	    cur->run_run();
	    delete cur;
      }

      if (verbose_flag) {
	    vpi_mcd_printf(1, " ...execute StartOfSim callbacks\n");
      }

      // Execute start of simulation callbacks
      vpiStartOfSim();

      sim_started = true;

      signals_capture();

      if (verbose_flag) {
	    vpi_mcd_printf(1, " ...run scheduler\n");
      }

      // If there were no compiletf, etc. errors then we are going to
      // process events and when done run the final blocks.
      run_finals = schedule_runnable;

      if (schedule_runnable) while (sched_list) {

	    if (schedule_stopped_flag) {
		  schedule_stopped_flag = false;
		  stop_handler(0);
		  // You can finish from the debugger without a time change.
		  if (!schedule_runnable) break;
		  continue;
	    }

	      /* ctim is the current time step. */
	    struct event_time_s* ctim = sched_list;

	      /* If the time is advancing, then first run the
		 postponed sync events. Run them all. */
	    if (ctim->delay > 0) {

		  if (!schedule_runnable) break;
		  schedule_time += ctim->delay;
		    /* When the design is being traced (we are emitting
		     * file/line information) also print any time changes. */
		  if (show_file_line) {
			cerr << "Advancing to simulation time: "
			     << schedule_time << endl;
		  }
		  ctim->delay = 0;

		  vpiNextSimTime();
		    // Process the cbAtStartOfSimTime callbacks.
		  while (ctim->start) {
			struct event_s*cur = ctim->start->next;
			if (cur->next == cur) {
			      ctim->start = 0;
			} else {
			      ctim->start->next = cur->next;
			}
			cur->run_run();
			delete (cur);
		  }
	    }


	      /* If there are no more active events, advance the event
		 queues. If there are no events at all, then release
		 the event_time object. */
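	      /* The promotion order below (inactive, then non-blocking
		 assignments, then read-write sync) walks the stratified
		 regions of the current time step in order. */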
	    if (ctim->active == 0) {
		  ctim->active = ctim->inactive;
		  ctim->inactive = 0;

		  if (ctim->active == 0) {
			ctim->active = ctim->nbassign;
			ctim->nbassign = 0;

			if (ctim->active == 0) {
			      ctim->active = ctim->rwsync;
			      ctim->rwsync = 0;

				/* If out of rw events, then run the rosync
				   events and delete this time step. This also
				   deletes threads as needed. */
			      if (ctim->active == 0) {
				    run_rosync(ctim);
				    sched_list = ctim->next;
				    delete ctim;
				    continue;
			      }
			}
		  }
	    }

	      /* Pull the first item off the list. If this is the last
		 cell in the list, then clear the list. Execute that
		 event type, and delete it. */
	    struct event_s*cur = ctim->active->next;
	    if (cur->next == cur) {
		  ctim->active = 0;
	    } else {
		  ctim->active->next = cur->next;
	    }

	    if (schedule_single_step_flag) {
		  cur->single_step_display();
		  schedule_stopped_flag = true;
		  schedule_single_step_flag = false;
	    }

	    cur->run_run();

	    delete (cur);
      }

	// Execute final events.
      schedule_runnable = run_finals;
      while (schedule_runnable && schedule_final_list) {
	    struct event_s*cur = schedule_final_list->next;
	    if (cur->next == cur) {
		  schedule_final_list = 0;
	    } else {
		  schedule_final_list->next = cur->next;
	    }
	    cur->run_run();
	    delete cur;
      }

      signals_revert();

      if (verbose_flag) {
	    vpi_mcd_printf(1, " ...execute Postsim callbacks\n");
      }

      // Execute post-simulation callbacks
      vpiPostsim();
#ifdef CHECK_WITH_VALGRIND
      schedule_delete();
#endif
}

#ifdef CHECK_WITH_VALGRIND
void schedule_delete(void)
{
      vthread_event_heap.delete_pool();
      assign4_heap.delete_pool();
      assign8_heap.delete_pool();
      assignr_heap.delete_pool();
      array_w_heap.delete_pool();
      array_r_w_heap.delete_pool();
      generic_event_heap.delete_pool();
      event_time_heap.delete_pool();
}
#endif