1 /*
2  * Copyright (c) 2005-2016 Stephen Williams <steve@icarus.com>
3  *
4  *    This source code is free software; you can redistribute it
5  *    and/or modify it in source code form under the terms of the GNU
6  *    General Public License as published by the Free Software
7  *    Foundation; either version 2 of the License, or (at your option)
8  *    any later version.
9  *
10  *    This program is distributed in the hope that it will be useful,
11  *    but WITHOUT ANY WARRANTY; without even the implied warranty of
12  *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
13  *    GNU General Public License for more details.
14  *
15  *    You should have received a copy of the GNU General Public License
16  *    along with this program; if not, write to the Free Software
17  *    Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
18  */
19 
20 #include "delay.h"
21 #include "schedule.h"
22 #include "vpi_priv.h"
23 #include "config.h"
24 #ifdef CHECK_WITH_VALGRIND
25 #include "vvp_cleanup.h"
26 #endif
27 #include <iostream>
28 #include <cstdlib>
29 #include <list>
30 #include <cassert>
31 #include <cmath>
32 #include "ivl_alloc.h"
33 
calculate_min_delay_()34 void vvp_delay_t::calculate_min_delay_()
35 {
36       min_delay_ = rise_;
37       if (fall_ < min_delay_)
38 	    min_delay_ = fall_;
39       if (ignore_decay_) decay_ = min_delay_;
40       else if (decay_ < min_delay_)
41 	    min_delay_ = decay_;
42 }
43 
vvp_delay_t(vvp_time64_t rise,vvp_time64_t fall)44 vvp_delay_t::vvp_delay_t(vvp_time64_t rise, vvp_time64_t fall)
45 {
46       rise_ = rise;
47       fall_ = fall;
48       decay_= fall < rise? fall : rise;
49       min_delay_ = decay_;
50       ignore_decay_ = false;
51 }
52 
vvp_delay_t(vvp_time64_t rise,vvp_time64_t fall,vvp_time64_t decay)53 vvp_delay_t::vvp_delay_t(vvp_time64_t rise, vvp_time64_t fall, vvp_time64_t decay)
54 {
55       rise_ = rise;
56       fall_ = fall;
57       decay_= decay;
58       ignore_decay_ = false;
59 
60       calculate_min_delay_();
61 }
62 
// Slave the decay time to the minimum of the rise/fall times from
// now on. calculate_min_delay_() overwrites decay_ when
// ignore_decay_ is set, so recompute immediately.
void vvp_delay_t::set_ignore_decay()
{
      ignore_decay_ = true;

      calculate_min_delay_();
}
69 
// No owned resources; destructor is intentionally empty.
vvp_delay_t::~vvp_delay_t()
{
}
73 
get_delay(vvp_bit4_t from,vvp_bit4_t to)74 vvp_time64_t vvp_delay_t::get_delay(vvp_bit4_t from, vvp_bit4_t to)
75 {
76       switch (from) {
77 	  case BIT4_0:
78 	    switch (to) {
79 		case BIT4_0: return 0;
80 		case BIT4_1: return rise_;
81 		case BIT4_X: return min_delay_;
82 		case BIT4_Z: return decay_;
83 	    }
84 	    break;
85 	  case BIT4_1:
86 	    switch (to) {
87 		case BIT4_0: return fall_;
88 		case BIT4_1: return 0;
89 		case BIT4_X: return min_delay_;
90 		case BIT4_Z: return decay_;
91 	    }
92 	    break;
93 	  case BIT4_X:
94 	    switch (to) {
95 		case BIT4_0: return fall_;
96 		case BIT4_1: return rise_;
97 		case BIT4_X: return 0;
98 		case BIT4_Z: return decay_;
99 	    }
100 	    break;
101 	  case BIT4_Z:
102 	    switch (to) {
103 		case BIT4_0: return fall_;
104 		case BIT4_1: return rise_;
105 		case BIT4_X: return min_delay_;
106 		case BIT4_Z: return 0;
107 	    }
108 	    break;
109       }
110 
111       assert(0);
112       return 0;
113 }
114 
// Return the cached minimum of the rise/fall/decay delays.
vvp_time64_t vvp_delay_t::get_min_delay() const
{
      return min_delay_;
}
119 
set_rise(vvp_time64_t val)120 void vvp_delay_t::set_rise(vvp_time64_t val)
121 {
122       rise_ = val;
123       if (val < min_delay_) {
124 	    min_delay_ = val;
125 	    if (ignore_decay_) decay_ = val;
126       } else
127 	    calculate_min_delay_();
128 }
129 
set_fall(vvp_time64_t val)130 void vvp_delay_t::set_fall(vvp_time64_t val)
131 {
132       fall_ = val;
133       if (val < min_delay_) {
134 	    min_delay_ = val;
135 	    if (ignore_decay_) decay_ = val;
136       } else
137 	    calculate_min_delay_();
138 }
139 
set_decay(vvp_time64_t val)140 void vvp_delay_t::set_decay(vvp_time64_t val)
141 {
142       assert(!ignore_decay_);
143 
144       decay_ = val;
145       if (val < min_delay_)
146 	    min_delay_ = val;
147       else
148 	    calculate_min_delay_();
149 }
150 
vvp_fun_delay(vvp_net_t * n,unsigned width,const vvp_delay_t & d)151 vvp_fun_delay::vvp_fun_delay(vvp_net_t*n, unsigned width, const vvp_delay_t&d)
152 : net_(n), delay_(d)
153 {
154       cur_real_ = 0.0;
155       if (width > 0) {
156             cur_vec4_ = vvp_vector4_t(width, BIT4_X);
157             cur_vec8_ = vvp_vector8_t(cur_vec4_, 6, 6);
158             schedule_init_propagate(net_, cur_vec4_);
159       } else {
160             schedule_init_propagate(net_, cur_real_);
161       }
162       list_ = 0;
163       type_ = UNKNOWN_DELAY;
164       initial_ = true;
165 	// Calculate the values used when converting variable delays
166 	// to simulation time units.
167       __vpiScope*scope = vpip_peek_current_scope();
168 
169       int pow = scope->time_units - scope->time_precision;
170       round_ = 1;
171       for (int lp = 0; lp < pow; lp += 1) round_ *= 10;
172 
173       pow = scope->time_precision - vpip_get_time_precision();
174       scale_ = 1;
175       for (int lp = 0; lp < pow; lp += 1) scale_ *= 10;
176 }
177 
~vvp_fun_delay()178 vvp_fun_delay::~vvp_fun_delay()
179 {
180       while (struct event_*cur = dequeue_())
181 	    delete cur;
182 }
183 
clean_pulse_events_(vvp_time64_t use_delay,const vvp_vector4_t & bit)184 bool vvp_fun_delay::clean_pulse_events_(vvp_time64_t use_delay,
185                                         const vvp_vector4_t&bit)
186 {
187       if (list_ == 0) return false;
188 
189 	/* If the most recent event and the new event have the same
190 	 * value then we need to skip the new event. */
191       if (list_->next->ptr_vec4.eeq(bit)) return true;
192 
193       clean_pulse_events_(use_delay);
194       return false;
195 }
196 
clean_pulse_events_(vvp_time64_t use_delay,const vvp_vector8_t & bit)197 bool vvp_fun_delay::clean_pulse_events_(vvp_time64_t use_delay,
198                                         const vvp_vector8_t&bit)
199 {
200       if (list_ == 0) return false;
201 
202 	/* If the most recent event and the new event have the same
203 	 * value then we need to skip the new event. */
204       if (list_->next->ptr_vec8.eeq(bit)) return true;
205 
206       clean_pulse_events_(use_delay);
207       return false;
208 }
209 
clean_pulse_events_(vvp_time64_t use_delay,double bit)210 bool vvp_fun_delay::clean_pulse_events_(vvp_time64_t use_delay,
211                                         double bit)
212 {
213       if (list_ == 0) return false;
214 
215 	/* If the most recent event and the new event have the same
216 	 * value then we need to skip the new event. */
217       if (list_->next->ptr_real == bit) return true;
218 
219       clean_pulse_events_(use_delay);
220       return false;
221 }
222 
// Remove pending events that would land after the event about to be
// scheduled (a pulse narrower than the propagation delay).
// NOTE(review): list_ appears to be a circular singly-linked list
// with list_->next the head; list_ == cur then means "one element
// left" — confirm against enqueue_()/dequeue_() in the header.
void vvp_fun_delay::clean_pulse_events_(vvp_time64_t use_delay)
{
      assert(list_ != 0);

      do {
	    struct event_*cur = list_->next;
	      /* If this event is far enough from the event I'm about
	         to create, then that scheduled event is not a pulse
	         to be eliminated, so we're done. (Adding use_delay to
	         both sides keeps the comparison symmetric under
	         wrap-around; it is otherwise cur->sim_time <= now.) */
	    if (cur->sim_time+use_delay <= use_delay+schedule_simtime())
		  break;

	      // Unlink and free the pulse event.
	    if (list_ == cur)
		  list_ = 0;
	    else
		  list_->next = cur->next;
	    delete cur;
      } while (list_);
}
242 
243 /*
244  * FIXME: this implementation currently only uses the maximum delay
245  * from all the bit changes in the vectors. If there are multiple
246  * changes with different delays, then the results would be
247  * wrong. What should happen is that if there are multiple changes,
248  * multiple vectors approaching the result should be scheduled.
249  */
// Receive a vec4 value. Port 0 is the data input; ports 1-3 carry
// variable delay values (rise/fall/decay) expressed as bit vectors.
void vvp_fun_delay::recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit,
                              vvp_context_t)
{
      if (port.port() > 0) {
	      // Get the integer value of the bit vector, or 0 if
	      // there are X or Z bits.
	    vvp_time64_t bval = 0;
	      // The following does not work correctly for negative values.
	      // They should be sign extended to 64 bits (1364-2001 9.7.1).
	    vector4_to_value(bit, bval);
	      // Integer values do not need to be rounded so just scale them.
	    vvp_time64_t val = bval * round_ * scale_;

	      // Ports 1, 2 and 3 update the rise, fall and decay
	      // delays respectively.
	    switch (port.port()) {
		case 1:
		  delay_.set_rise(val);
		  return;
		case 2:
		  delay_.set_fall(val);
		  return;
		case 3:
		  delay_.set_decay(val);
		  return;
	    }
	    return;
      }

      vvp_time64_t use_delay;
	/* This is an initial value so it needs to be compared to all the
	   bits (the order the bits are changed is not deterministic). */
      if (initial_) {
	      // First data input locks the node into vec4 mode and
	      // clears the vec8 image down to a 1-bit X placeholder.
	    type_ = VEC4_DELAY;
	    cur_vec8_ = vvp_vector8_t(vvp_vector4_t(0, BIT4_X), 6, 6);
	    vvp_bit4_t cur_val = cur_vec4_.value(0);
	    use_delay = delay_.get_delay(cur_val, bit.value(0));
	    for (unsigned idx = 1 ;  idx < bit.size() ;  idx += 1) {
		  vvp_time64_t tmp;
		  tmp = delay_.get_delay(cur_val, bit.value(idx));
		  if (tmp > use_delay) use_delay = tmp;
	    }
      } else {
	    assert(type_ == VEC4_DELAY);

	      // Use as a reference for calculating the delay the
	      // current value of the output. Detect and handle the
	      // special case that the event list contains the current
	      // value as a zero-delay-remaining event.
	    const vvp_vector4_t&use_vec4 = (list_ && list_->next->sim_time == schedule_simtime())? list_->next->ptr_vec4 : cur_vec4_;

	      /* How many bits to compare? */
	    unsigned use_wid = use_vec4.size();
	    if (bit.size() < use_wid) use_wid = bit.size();

	      /* Scan the vectors looking for delays. Select the maximum
	         delay encountered. */
	    use_delay = delay_.get_delay(use_vec4.value(0), bit.value(0));

	    for (unsigned idx = 1 ;  idx < use_wid ;  idx += 1) {
		  vvp_time64_t tmp;
		  tmp = delay_.get_delay(use_vec4.value(idx), bit.value(idx));
		  if (tmp > use_delay) use_delay = tmp;
	    }
      }

      /* what *should* happen here is we check to see if there is a
         transaction in the queue. This would be a pulse that needs to be
         eliminated. */
      if (clean_pulse_events_(use_delay, bit)) return;

      vvp_time64_t use_simtime = schedule_simtime() + use_delay;

	/* And propagate it. Zero delay with an empty queue is sent
	   immediately; anything else is queued for run_run(). */
      if (use_delay == 0 && list_ == 0) {
	    cur_vec4_ = bit;
	    initial_ = false;
	    net_->send_vec4(cur_vec4_, 0);
      } else {
	    struct event_*cur = new struct event_(use_simtime);
	    cur->run_run_ptr = &vvp_fun_delay::run_run_vec4_;
	    cur->ptr_vec4 = bit;
	    enqueue_(cur);
	    schedule_generic(this, use_delay, false);
      }
}
334 
// Part-select vec4 input: delegate to the common part-select helper,
// which reassembles the full vector and feeds it back through
// recv_vec4().
void vvp_fun_delay::recv_vec4_pv(vvp_net_ptr_t ptr, const vvp_vector4_t&bit,
			         unsigned base, unsigned wid, unsigned vwid,
                                 vvp_context_t ctx)
{
      recv_vec4_pv_(ptr, bit, base, wid, vwid, ctx);
}
341 
342 /* See the recv_vec4 comment above. */
/* See the recv_vec4 comment above. Strength-aware variant: only the
   data port is legal here; variable delays arrive as vec4/real. */
void vvp_fun_delay::recv_vec8(vvp_net_ptr_t port, const vvp_vector8_t&bit)
{
      assert(port.port() == 0);

      vvp_time64_t use_delay;
	/* This is an initial value so it needs to be compared to all the
	   bits (the order the bits are changed is not deterministic). */
      if (initial_) {
	      // First data input locks the node into vec8 mode and
	      // clears the vec4 image down to a 1-bit X placeholder.
	    type_ = VEC8_DELAY;
	    cur_vec4_ = vvp_vector4_t(0, BIT4_X);
	    vvp_bit4_t cur_val = cur_vec8_.value(0).value();
	    use_delay = delay_.get_delay(cur_val, bit.value(0).value());
	    for (unsigned idx = 1 ;  idx < bit.size() ;  idx += 1) {
		  vvp_time64_t tmp;
		  tmp = delay_.get_delay(cur_val, bit.value(idx).value());
		  if (tmp > use_delay) use_delay = tmp;
	    }
      } else {
	    assert(type_ == VEC8_DELAY);

	      // Use as a reference for calculating the delay the
	      // current value of the output. Detect and handle the
	      // special case that the event list contains the current
	      // value as a zero-delay-remaining event.
	    const vvp_vector8_t&use_vec8 = (list_ && list_->next->sim_time == schedule_simtime())? list_->next->ptr_vec8 : cur_vec8_;

	      /* How many bits to compare? */
	    unsigned use_wid = use_vec8.size();
	    if (bit.size() < use_wid) use_wid = bit.size();

	      /* Scan the vectors looking for delays. Select the maximum
	         delay encountered. Delays are based on the 4-state
	         value of each bit; strength is ignored here. */
	    use_delay = delay_.get_delay(use_vec8.value(0).value(),
	                                 bit.value(0).value());

	    for (unsigned idx = 1 ;  idx < use_wid ;  idx += 1) {
		  vvp_time64_t tmp;
		  tmp = delay_.get_delay(use_vec8.value(idx).value(),
		                         bit.value(idx).value());
		  if (tmp > use_delay) use_delay = tmp;
	    }
      }

      /* what *should* happen here is we check to see if there is a
         transaction in the queue. This would be a pulse that needs to be
         eliminated. */
      if (clean_pulse_events_(use_delay, bit)) return;

      vvp_time64_t use_simtime = schedule_simtime() + use_delay;

	/* And propagate it. Zero delay with an empty queue is sent
	   immediately; anything else is queued for run_run(). */
      if (use_delay == 0 && list_ == 0) {
	    cur_vec8_ = bit;
	    initial_ = false;
	    net_->send_vec8(cur_vec8_);
      } else {
	    struct event_*cur = new struct event_(use_simtime);
	    cur->ptr_vec8 = bit;
	    cur->run_run_ptr = &vvp_fun_delay::run_run_vec8_;
	    enqueue_(cur);
	    schedule_generic(this, use_delay, false);
      }
}
406 
// Part-select vec8 input: delegate to the common part-select helper,
// which reassembles the full vector and feeds it back through
// recv_vec8().
void vvp_fun_delay::recv_vec8_pv(vvp_net_ptr_t ptr, const vvp_vector8_t&bit,
			         unsigned base, unsigned wid, unsigned vwid)
{
      recv_vec8_pv_(ptr, bit, base, wid, vwid);
}
412 
// Receive a real value. Port 0 is the data input; ports 1-3 carry
// variable delay values (rise/fall/decay) expressed as reals.
void vvp_fun_delay::recv_real(vvp_net_ptr_t port, double bit,
                              vvp_context_t)
{
      if (port.port() > 0) {
	    /* If the port is not 0, then this is a delay value that
	    should be rounded and converted to an integer delay. */
	    vvp_time64_t val = 0;
	    if (bit > -0.5) {
		    // Non-negative (after rounding): round to
		    // nearest, then scale to precision units.
		  val = (vvp_time64_t) (bit * round_ + 0.5) * scale_;
	    } else if (bit != bit) {
		    // For a NaN we use the default (0).
	    } else {
		    // Negative delay: wrap it through a bit vector so
		    // the conversion matches the vector4 delay path.
		  vvp_vector4_t vec4(8*sizeof(vvp_time64_t),
		                     floor(-bit * round_ + 0.5) * -1 * scale_);
		  vector4_to_value(vec4, val);
	    }

	      // Ports 1, 2 and 3 update the rise, fall and decay
	      // delays respectively.
	    switch (port.port()) {
		case 1:
		  delay_.set_rise(val);
		  return;
		case 2:
		  delay_.set_fall(val);
		  return;
		case 3:
		  delay_.set_decay(val);
		  return;
	    }
	    return;
      }

	// First data input locks the node into real mode and clears
	// the vector images down to 1-bit X placeholders.
      if (initial_) {
	    type_ = REAL_DELAY;
	    cur_vec4_ = vvp_vector4_t(0, BIT4_X);
	    cur_vec8_ = vvp_vector8_t(cur_vec4_, 6, 6);
      } else assert(type_ == REAL_DELAY);

	// Reals have no per-edge delays; always use the minimum.
      vvp_time64_t use_delay;
      use_delay = delay_.get_min_delay();

      /* Eliminate glitches. */
      if (clean_pulse_events_(use_delay, bit)) return;

      /* This must be done after cleaning pulses to avoid propagating
       * an incorrect value. */
      if (cur_real_ == bit) return;

      vvp_time64_t use_simtime = schedule_simtime() + use_delay;

	// Zero delay with an empty queue is sent immediately;
	// anything else is queued for run_run().
      if (use_delay == 0 && list_ == 0) {
	    cur_real_ = bit;
	    initial_ = false;
	    net_->send_real(cur_real_, 0);
      } else {
	    struct event_*cur = new struct event_(use_simtime);
	    cur->run_run_ptr = &vvp_fun_delay::run_run_real_;
	    cur->ptr_real = bit;
	    enqueue_(cur);

	    schedule_generic(this, use_delay, false);
      }
}
475 
run_run()476 void vvp_fun_delay::run_run()
477 {
478       vvp_time64_t sim_time = schedule_simtime();
479       if (list_ == 0 || list_->next->sim_time > sim_time)
480 	    return;
481 
482       struct event_*cur = dequeue_();
483       if (cur == 0)
484 	    return;
485 
486       (this->*(cur->run_run_ptr))(cur);
487       initial_ = false;
488       delete cur;
489 }
490 
// Deliver a matured vec4 transaction to the output net.
void vvp_fun_delay::run_run_vec4_(struct event_*cur)
{
      cur_vec4_ = cur->ptr_vec4;
      net_->send_vec4(cur_vec4_, 0);
}
496 
// Deliver a matured vec8 transaction to the output net.
void vvp_fun_delay::run_run_vec8_(struct vvp_fun_delay::event_*cur)
{
      cur_vec8_ = cur->ptr_vec8;
      net_->send_vec8(cur_vec8_);
}
502 
// Deliver a matured real transaction to the output net.
void vvp_fun_delay::run_run_real_(struct vvp_fun_delay::event_*cur)
{
      cur_real_ = cur->ptr_real;
      net_->send_real(cur_real_, 0);
}
508 
// Construct a module-path delay node of the given width. The output
// starts out all-X; source probes are attached later via
// add_modpath_src().
vvp_fun_modpath::vvp_fun_modpath(vvp_net_t*net, unsigned width)
: net_(net), src_list_(0), ifnone_list_(0)
{
      cur_vec4_ = vvp_vector4_t(width, BIT4_X);
      schedule_init_propagate(net_, cur_vec4_);
}
515 
~vvp_fun_modpath()516 vvp_fun_modpath::~vvp_fun_modpath()
517 {
518 	// Delete the source probes.
519       while (src_list_) {
520 	    vvp_fun_modpath_src*tmp = src_list_;
521 	    src_list_ = tmp->next_;
522 	    delete tmp;
523       }
524       while (ifnone_list_) {
525 	    vvp_fun_modpath_src*tmp = ifnone_list_;
526 	    ifnone_list_ = tmp->next_;
527 	    delete tmp;
528       }
529 }
530 
add_modpath_src(vvp_fun_modpath_src * that,bool ifnone)531 void vvp_fun_modpath::add_modpath_src(vvp_fun_modpath_src*that, bool ifnone)
532 {
533       assert(that->next_ == 0);
534       if (ifnone) {
535 	    that->next_ = ifnone_list_;
536 	    ifnone_list_ = that;
537       } else {
538 	    that->next_ = src_list_;
539 	    src_list_ = that;
540       }
541 }
542 
// Map a bit transition a->b to one of the 12 edge delays in <array>.
// NOTE(review): the table is indexed directly by vvp_bit4_t values,
// so the row/column order (0, 1, z, x) assumes BIT4_0=0, BIT4_1=1,
// BIT4_Z=2, BIT4_X=3 — confirm against the vvp_bit4_t declaration.
static vvp_time64_t delay_from_edge(vvp_bit4_t a, vvp_bit4_t b,
                                           vvp_time64_t array[12])
{
      typedef delay_edge_t bit4_table4[4];
      static const bit4_table4 edge_table[4] = {
	    { DELAY_EDGE_01, DELAY_EDGE_01, DELAY_EDGE_0z, DELAY_EDGE_0x },
	    { DELAY_EDGE_10, DELAY_EDGE_10, DELAY_EDGE_1z, DELAY_EDGE_1x },
	    { DELAY_EDGE_z0, DELAY_EDGE_z1, DELAY_EDGE_z0, DELAY_EDGE_zx },
	    { DELAY_EDGE_x0, DELAY_EDGE_x1, DELAY_EDGE_xz, DELAY_EDGE_x0 }
      };

      return array[ edge_table[a][b] ];
}
556 
// Receive a new value on the modpath data input, choose the delay
// source whose probe fired most recently, and schedule the output.
void vvp_fun_modpath::recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit,
                                vvp_context_t)
{
	/* Only the first port is used. */
      if (port.port() > 0)
	    return;

	// Ignore inputs that do not change the output.
      if (cur_vec4_.eeq(bit))
	    return;

	/* Select a time delay source that applies. Notice that there
	   may be multiple delay sources that apply, so collect all
	   the candidates into a list first. The candidates are the
	   enabled sources with the latest wake_time_. */
      list<vvp_fun_modpath_src*>candidate_list;
      vvp_time64_t candidate_wake_time = 0;
      for (vvp_fun_modpath_src*cur = src_list_ ;  cur ;  cur=cur->next_) {
	      /* Skip paths that are disabled by conditions. */
	    if (cur->condition_flag_ == false)
		  continue;

	    if (candidate_list.empty()) {
		  candidate_list.push_back(cur);
		  candidate_wake_time = cur->wake_time_;
	    } else if (cur->wake_time_ == candidate_wake_time) {
		    // Tie: keep both candidates.
		  candidate_list.push_back(cur);
	    } else if (cur->wake_time_ > candidate_wake_time) {
		    // Strictly later: replace all candidates.
		  candidate_list.assign(1, cur);
		  candidate_wake_time = cur->wake_time_;
	    } else {
		  continue; /* Skip this entry. */
	    }
      }

	/* Only add the ifnone delay if it has a later wake_time_ or
	 * if there are no normal delays. */
      vvp_time64_t ifnone_wake_time = candidate_wake_time;
      for (vvp_fun_modpath_src*cur = ifnone_list_ ;  cur ;  cur=cur->next_) {
	    if (candidate_list.empty()) {
		  candidate_list.push_back(cur);
		  ifnone_wake_time = cur->wake_time_;
	    } else if (cur->wake_time_ == ifnone_wake_time &&
	               ifnone_wake_time > candidate_wake_time) {
		  candidate_list.push_back(cur);
	    } else if (cur->wake_time_ > ifnone_wake_time) {
		  candidate_list.assign(1, cur);
		  ifnone_wake_time = cur->wake_time_;
	    } else {
		  continue; /* Skip this entry. */
	    }
      }

	/* Handle the special case that there are no delays that
	   match. This may happen, for example, if the set of
	   conditional delays is incomplete, leaving some cases
	   uncovered. In that case, just pass the data without delay */
      if (candidate_list.empty()) {
	    cur_vec4_ = bit;
	    schedule_generic(this, 0, false);
	    return;
      }

	/* Now given that we have a list of candidate delays, find for
	   each if the 12 numbers the minimum from all the
	   candidates. This minimum set becomes the chosen delay to
	   use. Each delay is measured from the source's wake time,
	   so convert it to a remaining delay relative to now. */
      vvp_time64_t out_at[12];
      vvp_time64_t now = schedule_simtime();

      typedef list<vvp_fun_modpath_src*>::const_iterator iter_t;

      iter_t cur = candidate_list.begin();
      vvp_fun_modpath_src*src = *cur;

      for (unsigned idx = 0 ;  idx < 12 ;  idx += 1) {
	    out_at[idx] = src->wake_time_ + src->delay_[idx];
	    if (out_at[idx] <= now)
		  out_at[idx] = 0;
	    else
		  out_at[idx] -= now;
      }

      for (++ cur ; cur != candidate_list.end() ; ++ cur ) {
	    src = *cur;
	    for (unsigned idx = 0 ;  idx < 12 ;  idx += 1) {
		  vvp_time64_t tmp = src->wake_time_ + src->delay_[idx];
		  if (tmp <= now)
			tmp = 0;
		  else
			tmp -= now;
		  if (tmp < out_at[idx])
			out_at[idx] = tmp;
	    }
      }

	/* Given the scheduled output time, create an output event. */
      vvp_time64_t use_delay = delay_from_edge(cur_vec4_.value(0),
					       bit.value(0),
					       out_at);

	/* FIXME: This bases the edge delay on only the least
	   bit. This is WRONG! I need to find all the possible delays,
	   and schedule an event for each partial change. Hard! */
      for (unsigned idx = 1 ;  idx < bit.size() ;  idx += 1) {
	    vvp_time64_t tmp = delay_from_edge(cur_vec4_.value(idx),
					       bit.value(idx),
					       out_at);
	      /* If the current and new bit values match then no delay
	       * is needed for this bit. */
	    if (cur_vec4_.value(idx) == bit.value(idx)) continue;
	    assert(tmp == use_delay);
      }

      cur_vec4_ = bit;
      schedule_generic(this, use_delay, false);
}
672 
// Scheduled callback: propagate the stored output value.
void vvp_fun_modpath::run_run()
{
      net_->send_vec4(cur_vec4_, 0);
}
677 
vvp_fun_modpath_src(vvp_time64_t del[12])678 vvp_fun_modpath_src::vvp_fun_modpath_src(vvp_time64_t del[12])
679 {
680       for (unsigned idx = 0 ;  idx < 12 ;  idx += 1)
681 	    delay_[idx] = del[idx];
682 
683       next_ = 0;
684       wake_time_ = 0;
685       condition_flag_ = true;
686 }
687 
// No owned resources; destructor is intentionally empty.
vvp_fun_modpath_src::~vvp_fun_modpath_src()
{
}
691 
get_delay12(vvp_time64_t val[12]) const692 void vvp_fun_modpath_src::get_delay12(vvp_time64_t val[12]) const
693 {
694       for (unsigned idx = 0 ;  idx < 12 ;  idx += 1)
695 	    val[idx] = delay_[idx];
696 }
697 
put_delay12(const vvp_time64_t val[12])698 void vvp_fun_modpath_src::put_delay12(const vvp_time64_t val[12])
699 {
700       for (unsigned idx = 0 ;  idx < 12 ;  idx += 1)
701 	    delay_[idx] = val[idx];
702 }
703 
recv_vec4(vvp_net_ptr_t port,const vvp_vector4_t & bit,vvp_context_t)704 void vvp_fun_modpath_src::recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit,
705                                     vvp_context_t)
706 {
707       if (port.port() == 0) {
708 	      // The modpath input...
709 	    if (test_vec4(bit))
710 		  wake_time_ = schedule_simtime();
711 
712       } else if (port.port() == 1) {
713 	      // The modpath condition input...
714 	    if (bit.value(0) == BIT4_1)
715 		  condition_flag_ = true;
716 	    else
717 		  condition_flag_ = false;
718       }
719 }
720 
// Base-class edge test: every input change qualifies as a wake-up.
// vvp_fun_modpath_edge overrides this to filter by edge direction.
bool vvp_fun_modpath_src::test_vec4(const vvp_vector4_t&)
{
      return true;
}
725 
vvp_fun_modpath_edge(vvp_time64_t del[12],bool pos,bool neg)726 vvp_fun_modpath_edge::vvp_fun_modpath_edge(vvp_time64_t del[12],
727 					   bool pos, bool neg)
728 : vvp_fun_modpath_src(del)
729 {
730       old_value_ = BIT4_X;
731       posedge_ = pos;
732       negedge_ = neg;
733 }
734 
test_vec4(const vvp_vector4_t & bit)735 bool vvp_fun_modpath_edge::test_vec4(const vvp_vector4_t&bit)
736 {
737       vvp_bit4_t tmp = old_value_;
738       old_value_ = bit.value(0);
739 
740       int edge_flag = edge(tmp, old_value_);
741       if (edge_flag > 0) return posedge_;
742       if (edge_flag < 0) return negedge_;
743       return false;
744 }
745 
746 
/*
 * The routines below that begin with modpath_src_* implement the
 * internal behavior of a vpiModPathIn object. They are used to
 * perform specific delay-path operations.
 */
// vpi_get stub for modpath source objects: validates the handle and
// reports 0 for every property.
static int modpath_src_get(int, vpiHandle ref)
{
      struct __vpiModPathSrc*obj =dynamic_cast<__vpiModPathSrc*>(ref);
      assert(obj);
      return 0;
}
760 
// vpi_get_value stub: validates the handle; modpath sources have no
// readable value, so the output structure is left untouched.
static void modpath_src_get_value(vpiHandle ref, p_vpi_value)
{
      struct __vpiModPathSrc* modpathsrc = dynamic_cast<__vpiModPathSrc*>(ref);
      assert(modpathsrc);
      return;
}
767 
// vpi_put_value stub: validates the handle; modpath sources accept
// no value, so nothing is stored and no event handle is returned.
static vpiHandle modpath_src_put_value(vpiHandle ref, s_vpi_value *, int )
{
      struct __vpiModPathSrc* modpathsrc = dynamic_cast<__vpiModPathSrc*>(ref);
      assert(modpathsrc);
      return 0;
}
774 
modpath_src_get_handle(int code,vpiHandle ref)775 static vpiHandle modpath_src_get_handle(int code, vpiHandle ref)
776 {
777       struct __vpiModPathSrc*rfp = dynamic_cast<__vpiModPathSrc*>(ref);
778       assert(rfp);
779 
780       switch (code) {
781 
782 	case vpiScope:
783 	  return rfp->dest->scope;
784 
785 	  case vpiModule:
786 	      { __vpiScope*scope = rfp->dest->scope;
787 		while (scope && scope->get_type_code() != vpiModule)
788 		      scope = scope->scope;
789 		assert(scope);
790 		return scope;
791 	      }
792 
793 	    // Handles to path term objects should really be obtained via
794 	    // the vpi_iterate and vpi_scan functions. Continue to allow
795 	    // them to be obtained here for backwards compatibility with
796 	    // older versions of Icarus Verilog.
797 
798 	  case vpiModPathIn:
799 	    return &rfp->path_term_in;
800 
801 	  case vpiModPathOut:
802 	    return &rfp->dest->path_term_out;
803       }
804       return 0;
805 }
806 
modpath_src_iterate(int code,vpiHandle ref)807 static vpiHandle modpath_src_iterate(int code, vpiHandle ref)
808 {
809       struct __vpiModPathSrc*rfp = dynamic_cast<__vpiModPathSrc*>(ref);
810       assert(rfp);
811 
812 	// Module paths with multiple sources or destinations are
813 	// currently represented by a separate modpath object for
814 	// each source/destination combination, so there is only
815 	// ever one input path term and one output path term.
816       switch (code) {
817 	  case vpiModPathIn: {
818 	    vpiHandle*args = (vpiHandle*)calloc(1, sizeof(vpiHandle*));
819 	    args[0] = &rfp->path_term_in;
820 	    return vpip_make_iterator(1, args, true);
821 	  }
822 	  case vpiModPathOut: {
823 	    vpiHandle*args = (vpiHandle*)calloc(1, sizeof(vpiHandle*));
824 	    args[0] = &rfp->dest->path_term_out;
825 	    return vpip_make_iterator(1, args, true);
826 	  }
827       }
828       return 0;
829 }
830 
// vpi_handle_by_index stub: path terms are not indexable; only
// validate the handle type.
static vpiHandle modpath_src_index ( vpiHandle ref, int)
{
      assert(ref->get_type_code() == vpiModPathIn);
      return 0;
}
836 
837 
/*
 * This routine stores a set of delay values into the vpiModPathIn
 * object referenced by the given vpiHandle. The supplied delays
 * (1, 2, 3, 6 or 12 values) are mapped onto the full 12-entry
 * edge delay table.
 */
// vpi_put_delays: expand the caller's 1/2/3/6/12 delay values into
// the full 12-entry edge table and install it in the modpath source.
static void modpath_src_put_delays (vpiHandle ref, p_vpi_delay delays)
{
      vvp_time64_t tmp[12];
      int idx;
      struct __vpiModPathSrc * src = dynamic_cast<__vpiModPathSrc*>(ref) ;
      assert(src) ;

      vvp_fun_modpath_src *fun = dynamic_cast<vvp_fun_modpath_src*>(src->net->fun);
      assert( fun );

	// Each map translates an edge-table slot (0..11) to an index
	// into the caller's delay array for that delay count.
      typedef unsigned char map_array_t[12];
	// Only the first six entries are used for the less than twelve maps.
      static const map_array_t map_1 = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
      static const map_array_t map_2 = {0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0};
      static const map_array_t map_3 = {0, 1, 2, 0, 2, 1, 0, 0, 0, 0, 0, 0};
      static const map_array_t map_6 = {0, 1, 2, 3, 4, 5, 0, 0, 0, 0, 0, 0};
      static const map_array_t map12 = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11};

      const map_array_t*use_map = 0;
      switch (delays->no_of_delays) {
	  case 1:
	    use_map = &map_1;
	    break;
	  case 2:
	    use_map = &map_2;
	    break;
	  case 3:
	    use_map = &map_3;
	    break;
	  case 6:
	    use_map = &map_6;
	    break;
	  case 12:
	    use_map = &map12;
	    break;
	  default:
	    assert(0);
	    break;
      }

	// use_map[0][idx] dereferences the pointer-to-array, i.e. it
	// is (*use_map)[idx].
      if (delays->time_type == vpiSimTime) {
	    for (idx = 0 ; idx < 12 ; idx += 1) {
		  tmp[idx] = vpip_timestruct_to_time(delays->da+use_map[0][idx]);
	    }
      } else {
	      // You cannot create a modpath with a negative delay so set it
	      // to zero per 1364-2005 section 14.3.1.
	    for (idx = 0 ; idx < delays->no_of_delays ; idx += 1) {
		  if (delays->da[idx].real < 0.0) delays->da[idx].real = 0.0;
	    }
	    for (idx = 0 ; idx < 12 ; idx += 1) {
		  tmp[idx] = vpip_scaled_real_to_time64(delays->da[use_map[0][idx]].real,
							src->dest->scope);
	    }
      }

      /* Now define the to-from-x delays if needed. */
      if (delays->no_of_delays <= 6) {
	      /* 0->x is the minimum of 0->z and 0->1. */
	    tmp[DELAY_EDGE_0x] = tmp[DELAY_EDGE_0z] < tmp[DELAY_EDGE_01] ?
	                           tmp[DELAY_EDGE_0z] : tmp[DELAY_EDGE_01];
	      /* x->1 is the maximum of z->1 and 0->1. */
	    tmp[DELAY_EDGE_x1] = tmp[DELAY_EDGE_z1] > tmp[DELAY_EDGE_01] ?
	                           tmp[DELAY_EDGE_z1] : tmp[DELAY_EDGE_01];
	      /* 1->x is the minimum of 1->z and 1->0. */
	    tmp[DELAY_EDGE_1x] = tmp[DELAY_EDGE_1z] < tmp[DELAY_EDGE_10] ?
	                           tmp[DELAY_EDGE_1z] : tmp[DELAY_EDGE_10];
	      /* x->0 is the maximum of z->0 and 1->0. */
	    tmp[DELAY_EDGE_x0] = tmp[DELAY_EDGE_z0] > tmp[DELAY_EDGE_10] ?
	                           tmp[DELAY_EDGE_z0] : tmp[DELAY_EDGE_10];
	      /* x->z is the maximum of 1->z and 0->z. */
	    tmp[DELAY_EDGE_xz] = tmp[DELAY_EDGE_1z] > tmp[DELAY_EDGE_0z] ?
	                           tmp[DELAY_EDGE_1z] : tmp[DELAY_EDGE_0z];
	      /* z->x is the minimum of z->1 and z->0. */
	    tmp[DELAY_EDGE_zx] = tmp[DELAY_EDGE_z1] < tmp[DELAY_EDGE_z0] ?
	                           tmp[DELAY_EDGE_z1] : tmp[DELAY_EDGE_z0];
      }

      fun->put_delay12(tmp);
}
924 
/*
 * This routine retrieves the delay[12] values of a vpiHandle. In
 * this case, it reads the specific delay values out of a
 * vpiModPathIn object.
 */
932 
modpath_src_get_delays(vpiHandle ref,p_vpi_delay delays)933 static void modpath_src_get_delays ( vpiHandle ref, p_vpi_delay delays )
934 {
935       struct __vpiModPathSrc*src = dynamic_cast<__vpiModPathSrc*>(ref) ;
936       assert(src);
937 
938       vvp_fun_modpath_src *fun = dynamic_cast<vvp_fun_modpath_src*>(src->net->fun);
939       assert(fun);
940 
941       int idx;
942       vvp_time64_t tmp[12];
943       fun->get_delay12(tmp);
944 
945       switch (delays->no_of_delays) {
946 	  case 1:
947 	  case 2:
948 	  case 3:
949 	  case 6:
950 	  case 12:
951 	    break;
952 
953 	  default:
954 	    assert(0);
955 	    break;
956       }
957 
958       if (delays->time_type == vpiSimTime) {
959 	    for (idx = 0; idx < delays->no_of_delays; idx += 1) {
960 		  vpip_time_to_timestruct(delays->da+idx, tmp[idx]);
961 	    }
962       } else {
963 	    for (idx = 0; idx < delays->no_of_delays; idx += 1) {
964 		  delays->da[idx].real = vpip_time_to_scaled_real(tmp[idx], src->dest->scope);
965 	    }
966       }
967 }
968 
pathterm_get(int code,vpiHandle ref)969 static int pathterm_get(int code, vpiHandle ref)
970 {
971       struct __vpiModPathTerm*obj = dynamic_cast<__vpiModPathTerm*>(ref);
972       assert(obj);
973 
974       switch (code) {
975 	  case vpiEdge:
976 	    return obj->edge;
977 	  default:
978 	    return 0;
979       }
980 }
981 
pathterm_get_handle(int code,vpiHandle ref)982 static vpiHandle pathterm_get_handle(int code, vpiHandle ref)
983 {
984       struct __vpiModPathTerm*obj = dynamic_cast<__vpiModPathTerm*>(ref);
985       assert(obj);
986 
987       switch (code) {
988 	  case vpiExpr:
989 	    return obj->expr;
990 	  default:
991 	    return 0;
992       }
993 }
994 
/*
 * The __vpiModPathSrc class is what the VPI client sees as a
 * vpiModPath object. The __vpiModPath structure contains items that
 * are common to a bunch of modpaths, including the destination term.
 */
// Trivial constructor; members are filled in by vpip_make_modpath_src.
inline __vpiModPathSrc::__vpiModPathSrc()
{ }

// A modpath source presents itself to VPI clients as a vpiModPath.
int __vpiModPathSrc::get_type_code(void) const
{ return vpiModPath; }

// The methods below delegate to the file-local modpath_src_* helpers.
int __vpiModPathSrc::vpi_get(int code)
{ return modpath_src_get(code, this); }

void __vpiModPathSrc::vpi_get_value(p_vpi_value val)
{ modpath_src_get_value(this, val); }

vpiHandle __vpiModPathSrc::vpi_put_value(p_vpi_value val, int flags)
{ return modpath_src_put_value(this, val, flags); }

vpiHandle __vpiModPathSrc::vpi_handle(int code)
{ return modpath_src_get_handle(code, this); }

vpiHandle __vpiModPathSrc::vpi_iterate(int code)
{ return modpath_src_iterate(code, this); }

vpiHandle __vpiModPathSrc:: vpi_index(int idx)
{ return modpath_src_index(this, idx); }

void __vpiModPathSrc::vpi_get_delays(p_vpi_delay del)
{ modpath_src_get_delays(this, del); }

void __vpiModPathSrc::vpi_put_delays(p_vpi_delay del)
{ modpath_src_put_delays(this, del); }
1029 
// Destroy a modpath source handle. Always reports success (1) to
// the VPI layer.
static int modpath_src_free_object( vpiHandle ref )
{
      delete ref;
      return 1 ;
}

// Hand the free function back to the generic __vpiHandle machinery.
__vpiHandle::free_object_fun_t __vpiModPathSrc::free_object_fun(void)
{ return &modpath_src_free_object; }
1038 
1039 
// Trivial constructor; members are filled in by initialize_path_term.
inline __vpiModPathTerm::__vpiModPathTerm()
{ }

int __vpiModPathTerm::get_type_code(void) const
{ return vpiPathTerm; }

// Delegate to the file-local pathterm_* helpers.
int __vpiModPathTerm::vpi_get(int code)
{ return pathterm_get(code, this); }

vpiHandle __vpiModPathTerm::vpi_handle(int code)
{ return pathterm_get_handle(code, this); }
1051 
initialize_path_term(struct __vpiModPathTerm & obj)1052 static void initialize_path_term(struct __vpiModPathTerm&obj)
1053 {
1054       obj.expr = 0;
1055       obj.edge = vpiNoEdge;
1056 }
1057 
/*
 * This function constructs a vpiModPath object for the given
 * "net", pointing at its respective functor.
 */
1063 
1064 #ifdef CHECK_WITH_VALGRIND
1065 static struct __vpiModPath**mp_list = 0;
1066 static unsigned mp_count = 0;
1067 #endif
1068 
vpip_make_modpath(vvp_net_t * net)1069 struct __vpiModPath* vpip_make_modpath(vvp_net_t *net)
1070 {
1071       struct __vpiModPath*obj = new __vpiModPath;
1072       obj->scope = vpip_peek_current_scope ( );
1073 
1074       initialize_path_term(obj->path_term_out);
1075       obj->input_net = net ;
1076 
1077 #ifdef CHECK_WITH_VALGRIND
1078       mp_count += 1;
1079       mp_list = (struct __vpiModPath **) realloc(mp_list,
1080                 mp_count*sizeof(struct __vpiModPath **));
1081       mp_list[mp_count-1] = obj;
1082 #endif
1083       return obj;
1084 }
1085 
1086 #ifdef CHECK_WITH_VALGRIND
// Valgrind-only cleanup: free every modpath object recorded by
// vpip_make_modpath, then release the bookkeeping list itself so
// nothing shows up as leaked at shutdown.
void modpath_delete()
{
      for (unsigned idx = 0; idx < mp_count; idx += 1) {
	    delete mp_list[idx];
      }
      free(mp_list);
      mp_list = 0;
      mp_count = 0;
}
1096 #endif
1097 
/*
 * This function constructs a vpiModPathIn
 * (struct __vpiModPathSrc) object, which holds the delay[12]
 * values and points at the specified functor.
 */
1104 
vpip_make_modpath_src(struct __vpiModPath * path,vvp_net_t * net)1105 struct __vpiModPathSrc* vpip_make_modpath_src(struct __vpiModPath*path,
1106                                               vvp_net_t *net)
1107 {
1108       struct __vpiModPathSrc *obj = new __vpiModPathSrc;
1109 
1110       obj->dest = path;
1111       obj->type = 0;
1112       obj->net = net;
1113       initialize_path_term(obj->path_term_in);
1114 
1115       return obj;
1116 }
1117