1 /* Copyright (c) 1991-2007 Pragmatic C Software Corp. */
2 
3 /*
4    This program is free software; you can redistribute it and/or modify it
5    under the terms of the GNU General Public License as published by the
6    Free Software Foundation; either version 2 of the License, or (at your
7    option) any later version.
8 
9    This program is distributed in the hope that it will be useful, but
10    WITHOUT ANY WARRANTY; without even the implied warranty of
11    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
12    General Public License for more details.
13 
14    You should have received a copy of the GNU General Public License along
15    with this program; if not, write to the Free Software Foundation, Inc.,
16    59 Temple Place, Suite 330, Boston, MA, 02111-1307.
17 
18    We are selling our new Verilog compiler that compiles to X86 Linux
19    assembly language.  It is at least two times faster for accurate gate
20    level designs and much faster for procedural designs.  The new
21    commercial compiled Verilog product is called CVC.  For more information
22    on CVC visit our website at www.pragmatic-c.com/cvc.htm or contact
23    Andrew at avanvick@pragmatic-c.com
24 
25  */
26 
27 
28 /*
29  * Verilog simulation routines
30  */
31 
32 #include <stdio.h>
33 #include <stdlib.h>
34 #include <string.h>
35 #include <time.h>
36 
37 #include <signal.h>
38 
39 #ifdef __DBMALLOC__
40 #include "../malloc.h"
41 #endif
42 
43 #include "v.h"
44 #include "cvmacros.h"
45 
46 
47 /* local prototypes */
48 static void process_pnd0s(void);
49 static void exec_slotend_dv(void);
50 static void process_all_netchgs(void);
51 static void free_chgedvars(void);
52 static void eval_netchg_lds(register struct net_t *, int32, int32, int32);
53 static void process_mipd_nchg_ev(register struct tev_t *);
54 static void emit_nchglds_trmsg(struct net_t *, struct net_pin_t *);
55 static int32 is2_chg_match(int32, int32, int32);
56 static void std_chg_logic_gate(register struct gate_t *, register word32);
57 static void acc_chg_bufnot(struct gate_t *, word32);
58 static void acc_stichg_bufnot(register struct gate_t *, word32);
59 static void acc_chg_4igate(register struct gate_t *, word32);
60 static void acc_stichg_4igate(register struct gate_t *, word32);
61 static void trace_gunchg(struct gate_t *, word32);
62 static void std_chg_udp_gate(register struct gate_t *, register word32);
63 static void std_chg_bufif_gate(register struct gate_t *, register word32);
64 static void std_chg_mos_gate(register struct gate_t *, register word32);
65 static void std_chg_cmos_gate(register struct gate_t *, register word32);
66 static void prop_gatechg(register struct gate_t *, register int32, int32);
67 static void evtr_prop_gatechg(register struct gate_t *, register word32,
68  int32);
69 static word32 get_showcancele_val(struct gate_t *);
70 static i_tev_ndx schedule_1gev(register struct gate_t *, word64, word64, int32);
71 static i_tev_ndx reschedule_1gev(i_tev_ndx, word64, word64, word32, int32);
72 static void emit_pulsewarn(struct gate_t *, struct tev_t *, word64 *,
73  word64 *, char *);
74 static void process_gatechg_ev(register struct tev_t *);
75 static void acc_evchg_gate_outwire(register struct gate_t *);
76 static void emit_gev_trace(struct gate_t *, struct tev_t *);
77 static void change_gate_outwire(register struct gate_t *);
78 static void trace_chg_gateout(struct gate_t *, struct expr_t *);
79 static void gate_st_bit(union pck_u, int32, int32, int32, register word32,
80  register word32);
81 static void gate_st_scalval(register word32 *, register word32, register word32);
82 static int32 chg_mos_instate(register struct gate_t *, word32);
83 static int32 chg_cmos_instate(register struct gate_t *, word32);
84 static void eval_tranif_onoff(struct gate_t *);
85 static void evtr_eval_conta_rhs_ld(struct net_pin_t *);
86 static void schedule_1caev(struct conta_t *, word64, word64, struct xstk_t *);
87 static void reschedule_1caev(i_tev_ndx, word64, word64, struct xstk_t *);
88 static void process_conta_ev(register struct tev_t *tevp);
89 static void process_wire_ev(register struct tev_t *tevp);
90 static void process_nbpa_ev(struct tev_t *tevp);
91 static int32 force_inhibit_wireassign(struct net_t *, register int32,
92  struct itree_t *);
93 static void process_trpthdst_ev(register struct tev_t *);
94 static int32 filter_edge_expr(register struct dce_expr_t *, register word32 *,
95  register word32 *);
96 static void trigger_evctrl(struct delctrl_t *, register i_tev_ndx);
97 static int32 stfilter_dce_chg(register struct net_t *,
98  register struct dcevnt_t *, word32 *, word32 *, int32);
99 static int32 vccb_vec_standval_filter(register struct net_t *,
100  register struct dcevnt_t *, word32 *, word32 *, int32);
101 static int32 vccb_scal_standval_filter(register struct net_t *,
102  register struct dcevnt_t *, word32 *, word32 *, int32);
103 static int32 filter_dce_chg(register struct net_t *, register struct dcevnt_t *,
104  word32 *, word32 *, int32);
105 static int32 scal_stfilter_dce_chg(register struct net_t *,
106  register struct dcevnt_t *, word32 *, word32 *, int32);
107 static void bld_xmrsrc_ref(char *, struct net_t *);
108 static void bld_srcfilter_ref(char *, word32, word32, struct expr_t *);
109 static int32 filter_bitchange(register word32, register byte *,
110  register word32, struct expr_t *);
111 static void add_tchk_chged(struct chktchg_t *);
112 static void process_all_tchk_violations(void);
113 static void process_notify(struct net_t *);
114 static char *bld_tchk_srcdump(char *, struct tchk_t *, word64 *, word64 *,
115  word64 *, word64 *);
116 static void init_stime(void);
117 static void reinit_stime(void);
118 static void init_wires(void);
119 static void init_itinsts(struct itree_t *);
120 static void init_sched_thd(struct mod_t *);
121 static void gate_initeval(struct gate_t *);
122 static void init_udp(struct gate_t *);
123 static void init_logic_gate(struct gate_t *);
124 static void init_bufif_gate(struct gate_t *);
125 static void init_tranif_gate(struct gate_t *);
126 static void conta_initeval(struct conta_t *, struct conta_t *);
127 static int32 move_to_time0(void);
128 static int32 move_time(void);
129 static void chk_event_consist(void);
130 static void add_ovfetim(word64, i_tev_ndx, struct tev_t *);
131 static struct bt_t *alloc_btnod(int32);
132 static struct bt_t *find_fringe(word64);
133 static struct bt_t *insert_fringe(struct bt_t *, word64, i_tev_ndx);
134 static void splitinsert_nonfringe(void);
135 static void ovflow_into_wheel(void);
136 static void divide_fringe_node(struct bt_t *);
137 static void divide_internal_node(struct bt_t *);
138 static void remove_empty_upwards(void);
139 static void mv_subtree_towheel(struct bt_t *);
140 static void mv_to_wheel(word64, struct telhdr_t *);
141 static struct telhdr_t *tfind_btnode_after(struct bt_t *, word64);
142 static void dmp_twheel(void);
143 static int32 dmp_events(register i_tev_ndx);
144 static void dmp_btree(struct bt_t *);
145 static void dmp2_btree(struct bt_t *, int32);
146 static void dmp_btnode(struct bt_t *, int32);
147 static char *to_evtronam(char *, char *, struct itree_t *, struct task_t *);
148 /* SJM UNUSED static void chk_schd_dces(void); */
149 static void chk_tev_list(register i_tev_ndx);
150 
151 /* extern prototypes defined elsewhere */
152 extern void __pv_sim(void);
153 extern void __get_cor_range(register int32, union intptr_u, register int32 *,
154  register int32 *);
155 extern void __set_gchg_func(struct gate_t *);
156 extern void __vpi_set_chg_proc(struct gate_t *);
157 extern int32 __gate_is_acc(struct gate_t *);
158 extern void __add_nchglst_el(register struct net_t *);
159 extern void __add_dmpv_chglst_el(struct net_t *);
160 extern void __eval_conta_rhs_ld(register struct net_pin_t *);
161 extern void __eval_tranif_ld(register struct gate_t *, register int32);
162 extern void __add_select_nchglst_el(register struct net_t *, register int32,
163  register int32);
164 extern void __wakeup_delay_ctrls(register struct net_t *, register int32,
165  register int32);
166 extern void __process_npp_timofchg(struct net_t *,
167  register struct net_pin_t *);
168 
169 extern char *__to_evtrcanam(char *, struct conta_t *, struct itree_t *);
170 extern void __evtr_resume_msg(void);
171 extern void __process_getpat(struct conta_t *);
172 extern char *__to_evtrwnam(char *, struct net_t *, int32, int32,
173  struct itree_t *);
174 extern char *__to_evtrpnam(char *, struct mod_pin_t *, int32,
175  struct itree_t *);
176 extern void __init_sim(void);
177 extern void __reinit_sim(void);
178 extern void __insert_event(register i_tev_ndx);
179 extern void __free_btree(struct bt_t *);
180 extern void __free_telhdr_tevs(register struct telhdr_t *);
181 extern void __free_1tev(i_tev_ndx);
182 extern void __free_xtree(struct expr_t *);
183 extern void __call_misctfs_simstart(void);
184 extern void __vpi_startsim_trycall(void);
185 extern void __call_misctfs_endreset(void);
186 extern void __vpi_endreset_trycall(void);
187 extern void __do_interactive_loop(void);
188 extern void __process_thrd_ev(register struct tev_t *);
189 extern char *__to_timstr(char *, word64 *);
190 extern void __setdel_call_misctf(i_tev_ndx);
191 extern void __process_putpdel_ev(i_tev_ndx);
192 extern void __process_vpidrv_ev(i_tev_ndx);
193 extern void __process_vpi_varputv_ev(i_tev_ndx);
194 extern void __delay_callback(i_tev_ndx);
195 extern void __sync_call_misctf(struct tev_t *);
196 extern void __exec_monit(struct dceauxlst_t *, int32);
197 extern void __exec_fmonits(void);
198 extern void __exec_strobes(void);
199 extern void __exec_rosync_misctf(void);
200 extern void __vpi_del_rosync_call(void);
201 extern void __vpi_del_nxtsimtim_trycall(void);
202 extern void __do_dmpvars_baseline(char *);
203 extern void __turnoff_all_dumpvars(void);
204 extern void __turnon_all_dumpvars(void);
205 extern void __do_dmpvars_chg(void);
206 extern int32 __tilde_creat(char *);
207 extern int32 __my_creat(char *);
208 extern void __setup_dmpvars(void);
209 extern int32 __move_to_npprefloc(struct net_pin_t *);
210 extern char *__to_wtnam(char *, struct net_t *);
211 extern char *__to_tetyp(char *, word32);
212 extern char *__var_tostr(char *, struct net_t *, int32, int32, int32);
213 extern char *__to_npptyp(char *, struct net_pin_t *);
214 extern int32 __eval_logic_gate(struct gate_t *, word32, int32 *);
215 extern void __ld_bit(register word32 *, register word32 *,
216  register struct net_t *, int32);
217 extern int32 __correct_forced_newwireval(struct net_t *, word32 *, word32 *);
218 extern int32 __forced_inhibit_bitassign(struct net_t *, struct expr_t *,
219  struct expr_t *);
220 extern void __chg_st_bit(struct net_t *, int32, register word32, register word32);
221 extern word32 __wrd_redxor(word32);
222 extern int32 __eval_udp(register struct gate_t *, word32, int32 *, int32);
223 extern int32 __eval_bufif_gate(register struct gate_t *, word32, int32 *);
224 extern void __eval_pmos_gate(register word32);
225 extern void __eval_rpmos_gate(register word32);
226 extern void __eval_nmos_gate(register word32);
227 extern void __eval_rnmos_gate(register word32);
228 extern void __eval_cmos_gate(struct gate_t *);
229 extern int32 __get_acc_class(struct gate_t *);
230 extern void __hizstrengate_getdel(word64 *, register struct gate_t *);
231 extern void __get_del(register word64 *, register union del_u, word32);
232 extern int32 __em_suppr(int32);
233 extern char *__to_ginam(char *, struct gate_t *, word32, int32);
234 extern char *__to_gonam(char *, struct gate_t *, word32);
235 extern char *__msg_blditree(char *, struct itree_t *, struct task_t *);
236 extern char *__schop(char *, char *);
237 extern char *__msg2_blditree(char *, struct itree_t *);
238 extern void __mdr_assign_or_sched(register struct expr_t *);
239 extern void __exec_conta_assign(struct expr_t *, register word32 *,
240  register word32 *, int32);
241 extern char *__gstate_tostr(char *, struct gate_t *, int32);
242 extern void __immed_eval_trifchan(struct gate_t *);
243 extern char *__to_gassign_str(char *, struct expr_t *);
244 extern char *__msgexpr_tostr(char *, struct expr_t *);
245 extern void __lhsbsel(register word32 *, register int32, word32);
246 extern struct xstk_t *__ndst_eval_xpr(struct expr_t *);
247 extern struct xstk_t *__eval2_xpr(register struct expr_t *);
248 extern void __sgn_xtnd_widen(struct xstk_t *, int32);
249 extern void __sizchg_widen(register struct xstk_t *, int32);
250 extern void __narrow_sizchg(register struct xstk_t *, int32);
251 extern void __fix_widened_toxs(register struct xstk_t *, int32);
252 extern void __st_perinst_val(union pck_u, int32, register word32 *,
253  register word32 *);
254 extern void __grow_xstk(void);
255 extern void __grow_tevtab(void);
256 extern void __chg_xstk_width(struct xstk_t *, int32);
257 extern void __st_standval(register byte *, register struct xstk_t *, byte);
258 extern void __stren_exec_ca_concat(struct expr_t *, byte *, int32);
259 extern void __exec_ca_concat(struct expr_t *, register word32 *,
260  register word32 *, int32);
261 extern int32 __wide_vval_is0(register word32 *, int32);
262 extern int32 __vval_is1(register word32 *, int32);
263 extern char *__regab_tostr(char *, word32 *, word32 *, int32, int32, int32);
264 extern char *__st_regab_tostr(char *, byte *, int32);
265 extern char *__bld_lineloc(char *, word32, int32);
266 extern void __my_free(char *, int32);
267 extern char *__my_malloc(int32);
268 extern char *__my_realloc(char *, int32 , int32);
269 extern char *__to_vvnam(char *, word32);
270 extern char *__xregab_tostr(char *, word32 *, word32 *, int32, struct expr_t *);
271 extern void __exec2_proc_assign(struct expr_t *, register word32 *,
272  register word32 *);
273 extern int32 __unnormalize_ndx(struct net_t *, int32);
274 extern char *__to_mpnam(char *, char *);
275 extern void __eval_tran_1bit(register struct net_t *, register int32);
276 extern int32 __match_push_targ_to_ref(word32, struct gref_t *);
277 extern void __assign_qcaf(struct dcevnt_t *);
278 extern void __pvc_call_misctf(struct dcevnt_t *);
279 extern void __cbvc_callback(struct dcevnt_t *, struct cbrec_t *, struct h_t *);
280 extern void __exec_vpi_gateoutcbs(int32);
281 extern void __add_ev_to_front(register i_tev_ndx);
282 extern int32 __get_dcewid(struct dcevnt_t *, struct net_t *);
283 extern void __ld_wire_sect(word32 *, word32 *, struct net_t *, register int32,
284  register int32);
285 extern void __ld_wire_val(register word32 *, register word32 *, struct net_t *);
286 extern char *__to_tcnam(char *, word32);
287 extern void __vpi_tchkerr_trycall(struct tchk_t *, struct itree_t *);
288 extern void __chg_st_val(struct net_t *, register word32 *, register word32 *);
289 extern void __adds(char *);
290 extern void __chg_xprline_size(int32);
291 extern char *__to_edgenam(char *, word32);
292 extern void __trunc_exprline(int32, int32);
293 extern char *__pv_stralloc(char *);
294 extern void __init_interactive(void);
295 extern void __my_fclose(FILE *);
296 extern void __init_all_trchans(void);
297 extern void __init_instdownport_contas(struct itree_t *, struct itree_t *);
298 extern void __init_instupport_contas(struct itree_t *);
299 extern struct thread_t *__alloc_thrd(void);
300 extern void __ld_perinst_val(register word32 *, register word32 *, union pck_u,
301  int32);
302 extern int32 __sim_sigint_handler(void);
303 extern void __dmp_event_tab(void);
304 extern void __dmp1_nplstel(struct mod_t *, struct net_t *, struct net_pin_t *);
305 extern void __push_wrkitstk(struct mod_t *, int32);
306 extern void __pop_wrkitstk(void);
307 extern void __sched_mipd_nchg(struct net_t *, int32, struct mipd_t *);
308 extern void __exec_var_decl_init_assigns(void);
309 
310 extern void __cv_msg(char *, ...);
311 extern void __cvsim_msg(char *, ...);
312 extern void __tr_msg(char *, ...);
313 extern void __dbg_msg(char *, ...);
314 extern void __pv_err(int32, char *, ...);
315 extern void __pv_warn(int32, char *,...);
316 extern void __gfwarn(int32, word32, int32, char *, ...);
317 extern void __pv_terr(int32, char *, ...);
318 extern void __arg_terr(char *, int32);
319 extern void __case_terr(char *, int32);
320 extern void __misc_terr(char *, int32);
321 extern void __ia_warn(int32, char *, ...);
322 extern void __my_fprintf(FILE *, char *, ...);
323 
324 extern word32 __masktab[];
325 extern byte __stren_map_tab[];
326 extern byte __hizstren_del_tab[];
327 
328 /*
329  * ROUTINES TO RUN SIMULATION
330  */
331 
332 /*
333  * actually run the simulation
334  * need better way to handle p0 (events that must go at end) queue
335  *
336  * LOOKATME - set up so never check entering interact unless one event
337  * processed (except -s entry before start of sim)
338  */
339 extern void __pv_sim(void)
340 {
341  register struct tev_t *tevp;
342  i_tev_ndx tevp2i;
343  struct tev_t *tevp2;
344 
345  /* unless quiet mode, need blank line before sim writing */
346  __cv_msg("\n");
347  /* possible that no events scheduled at 0, must really move time to 0 */
348  /* this assumes wrap-around works? */
349  __simtime = 0xffffffffffffffffULL;
350  __cur_twi = -1;
351 
352  /* move time to 0 */
353  move_to_time0();
354 
355  /* now have timing wheel - can run vpi sim controls that are like */
356  /* system task execs */
357  __can_exec = TRUE;
358 
359  /* do not call vpiStartOfSim routine if resetting */
360  if (__now_resetting)
361   {
362    __now_resetting = FALSE;
363    if (__tfrec_hdr != NULL) __call_misctfs_endreset();
364    if (__have_vpi_actions) __vpi_endreset_trycall();
365 
366    /* if no events after reset - nothing to do so terminate */
367    if (__num_twhevents == 0 && __btqroot == NULL && __cur_te_hdri == -1
368      && __p0_te_hdri == -1)
369     {
370      __pv_warn(614,
371      "no pending statements or events after reset to time 0 - nothing to do");
372      return;
373     }
374   }
375  else
376   {
377    /* no sim (variables) d.s. and time before here */
378    /* notice these routines cannot cause inside entry of debugger */
379    /* so can call from here - just scan and register */
380    if (__tfrec_hdr != NULL) __call_misctfs_simstart();
381    if (__have_vpi_actions) __vpi_startsim_trycall();
382 
383    /* if no events after initialization and PLI start of sim - nothing to do */
384    if (__num_twhevents == 0 && __btqroot == NULL && __cur_te_hdri == -1
385      && __p0_te_hdri == -1)
386     {
387      __pv_warn(614,
388       "no pending statements or events after initialization - nothing to do");
389      return;
390     }
391   }
392 
393  /* enter immediately if -s option - here just starts sim */
394  __cur_tevpi = -1;
395 
396  /* if this is at least 100 entering from debugger reset */
397  if (__dbg_stop_before >= 100)
398   {
399    if (__dbg_stop_before != 101) { __dbg_stop_before = 0; goto no_stop; }
400    __dbg_stop_before = 0;
401    goto stop;
402   }
403 
404  /* else use -s option to decide if stop before sim */
405  if (__stop_before_sim)
406   {
407    /* if no interactive ignore stop before sim with warning */
408    if (__no_iact)
409     {
410      __pv_warn(628,
411       "-s option ignored - +nointeractive disabled interactive mode");
412      goto no_stop;
413     }
414 stop:
415    /* interactive loop expects interrupt (^c) signal to be ignored */
416    __do_interactive_loop();
417   }
418 
419 no_stop:
420  /* set up during-simulation control c signal handler - it can only set a flag */
421 #if defined(INTSIGS)
422  signal(SIGINT, __sim_sigint_handler);
423 #else
424  signal(SIGINT, (void (*)()) __sim_sigint_handler);
425 #endif
426 
427  /* SJM 09/30/04 - execute all new Verilog 2001 variable decl assigns */
428  /* as the first step in simulation - do not need any events */
429  /* SJM 09/30/04 - LOOKATME - could build and schedule separate init block */
430  __exec_var_decl_init_assigns();
431 
432  /* repeat this loop for every time */
433  __processing_pnd0s = FALSE;
434  for (;;)
435   {
436    /* execute events until current time event list empty */
437    /* events never added here (maybe to pound 0) */
438    __cur_tevpi = __cur_te_hdri;
439    for (; __cur_tevpi != -1; __cur_tevpi = __tevtab[__cur_tevpi].tenxti)
440     {
441      tevp = &(__tevtab[__cur_tevpi]);
442      /* canceled because of inertial delay reschedule */
443      if (tevp->te_cancel)
444       { __num_cancel_tevents++; __num_twhevents--; continue; }
445 
446      /* every event has associated itree element */
447      __push_itstk(tevp->teitp);
448      /* notice before event executed, cur. itp set from event */
449      switch ((byte) tevp->tetyp) {
450       case TE_THRD:
451        __process_thrd_ev(tevp);
452        break;
453       /* for gates and 1 bit continuous assigns */
454       case TE_CA: process_conta_ev(tevp); break;
455       case TE_G: process_gatechg_ev(tevp); break;
456       case TE_WIRE: process_wire_ev(tevp); break;
457       case TE_BIDPATH: process_trpthdst_ev(tevp); break;
458       case TE_MIPD_NCHG: process_mipd_nchg_ev(tevp); break;
459       case TE_NBPA:
460        /* 10/27/00 SJM - this is rhs delay that has elapsed - never rep form */
461        /* non blocking proc assign, jump to #0 queue to process */
462        alloc_tev_(tevp2i, TE_NBPA, tevp->teitp, __simtime);
463        /* this moves entire nb records - since not needed here */
464        tevp2 = &(__tevtab[tevp2i]);
465        /* if present ptr to constant index lhs expr. copy also copied */
466        tevp2->tu.tenbpa = tevp->tu.tenbpa;
467        tevp->tu.tenbpa = NULL;
468        __num_proc_tevents--;
469        /* notice tevp not counted and contents freed */
470        /* AIV 06/28/05 - if option set add to the end of the nb #0 list */
471        if (!__nb_sep_queue)
472         {
473          if (__p0_te_hdri == -1) __p0_te_hdri = __p0_te_endi = tevp2i;
474          else
475           {
476            __tevtab[__p0_te_endi].tenxti = tevp2i;
477            __p0_te_endi = tevp2i;
478           }
479         }
480        else
481         {
482          /* AIV 07/05/05 - to match XL need nb te list that only processed */
483          /* when all pnd 0 done effectively adds another section to current */
484          /* time event queue */
485          if (__nb_te_hdri == -1) __nb_te_hdri = __nb_te_endi = tevp2i;
486          else
487           { __tevtab[__nb_te_endi].tenxti = tevp2i; __nb_te_endi = tevp2i; }
488         }
489        break;
490       case TE_TFSETDEL:
491        /* RELEASE remove ---
492        if (__debug_flg && __ev_tracing)
493         {
494          __tr_msg("-- processing tf_ set delay misctf call at %s\n",
495   	  __to_timstr(__xs, &__simtime));
496         }
497        --- */
498        __setdel_call_misctf(__cur_tevpi);
499        break;
500       case TE_TFPUTPDEL: __process_putpdel_ev(__cur_tevpi); break;
501       case TE_VPIPUTVDEL: __process_vpi_varputv_ev(__cur_tevpi); break;
502       case TE_VPIDRVDEL: __process_vpidrv_ev(__cur_tevpi); break;
503       case TE_VPICBDEL: __delay_callback(__cur_tevpi); break;
504       /* sync event only pound 0 */
505       default: __case_terr(__FILE__, __LINE__);
506      }
507      __num_proc_tevents++;
508      __num_twhevents--;
509      __pop_itstk();
510 
511      /* DBG remove - notice between events itree stack must be empty ---
512      if (__itspi != -1) __misc_terr(__FILE__, __LINE__);
513      --- */
514 
515      /* notice adding to front adds after current which is really done */
516      /* see if pending control c, if add to front, next is right header */
517      if (__pending_enter_iact) __do_interactive_loop();
518     }
519 
520    /* done with normal events, free and remove from time wheel */
521    if (__cur_te_hdri != -1)
522     {
523      __tevtab[__cur_te_endi].tenxti = __tefreelsti;
524      __tefreelsti = __cur_te_hdri;
525      __twheel[__cur_twi]->te_hdri = -1;
526      __twheel[__cur_twi]->num_events = 0;
527      __cur_te_hdri = __cur_te_endi = -1;
528     }
529 
530    process_pnd0s();
531 
532    /* AIV 07/05/05 - added processing of separate non blocking event list */
533    /* after all pnd0's done - processing nb events can add new pnd0's that */
534    /* are processed as added, and new nb's that are saved and processed */
535    /* after all current level nbs and pnd0s done */
536    if (__nb_te_hdri != -1)
537     {
538      for (;;)
539       {
540        __p0_te_hdri = __nb_te_hdri;
541        __p0_te_endi = __nb_te_endi;
542        __nb_te_hdri = __nb_te_endi = -1;
543 
544        /* notice the events moved to the pnd 0 queue are added to the */
545        /* free list in the process pnd0 routine */
546        process_pnd0s();
547        if (__nb_te_hdri == -1) break;
548      }
549     }
550    /* --- DBG remove
551    if (__debug_flg && __ev_tracing
552     && (__nchg_futhdr != NULL || __p0_te_hdri != -1))
553      __tr_msg("-- processing #0 end of slot events\n");
554    --- */
555 
556    /* final step do slot end timing checks and monitoring */
557    /* cannot schedule any events from here */
558    if (__slotend_action != 0)
559     {
560      if ((__slotend_action & SE_TCHK_VIOLATION) != 0)
561       process_all_tchk_violations();
562      if ((__slotend_action & SE_DUMPVARS) != 0) exec_slotend_dv();
563      if (__monit_active && ((__slotend_action
564       & (SE_MONIT_TRIGGER | SE_MONIT_CHG)) != 0))
565       __exec_monit(__monit_dcehdr,
566        (int32) (__slotend_action & SE_MONIT_CHG) != 0);
567 
568      /* LOOKATME - should monitoroff (on) effect fmonitor (think no) */
569      if ((__slotend_action & SE_FMONIT_TRIGGER) != 0) __exec_fmonits();
570      if ((__slotend_action & SE_STROBE) != 0) __exec_strobes();
571      if ((__slotend_action & SE_TFROSYNC) != 0) __exec_rosync_misctf();
572      if ((__slotend_action & SE_VPIROSYNC) != 0) __vpi_del_rosync_call();
573      __slotend_action = 0;
574     }
575    /* control c here serviced at beginning of next time slot */
576    /* if no more events done */
577    if (!move_time()) break;
578 
579    /* call backs from vpi cb NextSimTime (after debugger entered) */
580    if (__have_vpi_actions) __vpi_del_nxtsimtim_trycall();
581   }
582 }
583 
584 /*
585  * process all net changes - this can only enter pnd0 events at current time
586  * next process net changes; if any new pnd0's, process them
587  * may then add new net changes that in turn can add pnd0's
588  * if no 0 delay loop (must catch) will eventually terminate
589  * can add normal delays but will occur in future
590  */
591 static void process_pnd0s(void)
592 {
593  register struct tev_t *tevp;
594 
595  for (__processing_pnd0s = TRUE, __cur_tevpi = -1;;)
596   {
597    if (__nchg_futhdr != NULL) process_all_netchgs();
598 
599    /* needed in case PLI tf_dostop or vpi_control(vpiStop) called */
600    if (__pending_enter_iact) __do_interactive_loop();
601 
602    /* no pending net changes and no more pound 0 events, can move time */
603    if (__p0_te_hdri == -1) break;
604 
605    /* every event has associated itree element */
606    __cur_tevpi = __p0_te_hdri;
607    for (; __cur_tevpi != -1; __cur_tevpi = __tevtab[__cur_tevpi].tenxti)
608     {
609      tevp = &(__tevtab[__cur_tevpi]);
610 
611      /* canceled because interactive thread disabled */
612      /* but pound 0 events not counted as timing wheel events */
613      if (tevp->te_cancel) { __num_cancel_tevents++; continue; }
614 
615      /* notice, pnd0 never canceled since can just replace guts */
616      __push_itstk(tevp->teitp);
617 
618      /* notice before event executed, cur. itp set from event */
619      switch ((byte) tevp->tetyp) {
620       case TE_THRD:
621        __process_thrd_ev(tevp);
622        break;
624       case TE_CA: process_conta_ev(tevp); break;
625       /* for gates and 1 bit continuous assigns */
626       case TE_G: process_gatechg_ev(tevp); break;
627       case TE_WIRE: process_wire_ev(tevp); break;
628       case TE_BIDPATH: process_trpthdst_ev(tevp); break;
629       /* #0 here is normal 0 delay - start as no delay */
630       case TE_MIPD_NCHG: process_mipd_nchg_ev(tevp); break;
631       case TE_NBPA: process_nbpa_ev(tevp); break;
632       case TE_TFSETDEL: __setdel_call_misctf(__cur_tevpi); break;
633 
634       case TE_TFPUTPDEL: __process_putpdel_ev(__cur_tevpi); break;
635       case TE_VPIPUTVDEL: __process_vpi_varputv_ev(__cur_tevpi); break;
636       case TE_VPIDRVDEL: __process_vpidrv_ev(__cur_tevpi); break;
637       case TE_VPICBDEL: __delay_callback(__cur_tevpi); break;
638       case TE_SYNC: __sync_call_misctf(tevp); break;
639       default: __case_terr(__FILE__, __LINE__);
640       }
641      __num_proc_tevents++;
642      /* when put into pnd0 list, no inc. of number of twheel events */
643      __pop_itstk();
644      /* here cur_tevpi done so any add to front after it */
645      if (__pending_enter_iact) __do_interactive_loop();
646     }
647 
648    /* all #0 events for this time slot processed but may be new net chgs */
649    if (__p0_te_hdri != -1)
650     {
651      __tevtab[__p0_te_endi].tenxti = __tefreelsti;
652      __tefreelsti = __p0_te_hdri;
653      __p0_te_hdri = __p0_te_endi = -1;
654     }
655    __cur_tevpi = -1;
656   }
657  __processing_pnd0s = FALSE;
658 }
659 
660 /*
661  * routine to exec dump vars functions
662  *
663  * notice only normal dumpvars will change to over file size limit state
664  * from then on until flush or limit change will not do any dumping
665  * or for that matter recording.
666  */
667 static void exec_slotend_dv(void)
668 {
669  /* must only emit time once in all processing */
670  __dv_time_emitted = FALSE;
671  /* first execute any dumpall */
672  if ((__slotend_action & SE_DUMPALL) != 0)
673   {
674    /* if over limit silently do nothing */
675    /* dump all is independent of normal dumpvars processing */
676    if (__dv_state != DVST_OVERLIMIT)
677     __do_dmpvars_baseline("$dumpall");
678   }
679 
680  switch ((byte) __dv_state) {
681   case DVST_DUMPING:
682    /* if encountered dumpoff, handle here */
683    if ((__slotend_action & SE_DUMPOFF) != 0)
684     {
685      /* remove any pending changes */
686      if (__dv_chgnethdr != NULL) free_chgedvars();
687      __do_dmpvars_baseline("$dumpoff");
688      __dv_state = DVST_NOTDUMPING;
689      __turnoff_all_dumpvars();
690      break;
691     }
692    /* if no changes this time slot, nothing to do */
693    /* on file over dump limit, will return */
694    if (__dv_chgnethdr != NULL) __do_dmpvars_chg();
695    break;
696   case DVST_NOTDUMPING:
697    /* if not dumping and no dumpon action, do nothing here */
698    if ((__slotend_action & SE_DUMPON) != 0)
699     {
700      /* start with baseline dump */
701      __do_dmpvars_baseline("$dumpon");
702      __dv_state = DVST_DUMPING;
703      /* turn on dump change recording and dumping for next time slot */
704      __turnon_all_dumpvars();
705     }
706    break;
707   /* if not set up, only way for pending to be on is if setup still needed */
708   case DVST_NOTSETUP:
709 
710    /* try to open the file */
711    if ((__dv_fd = __tilde_creat(__dv_fnam)) == -1)
712     {
713      if (strcmp(__dv_fnam, DFLTDVFNAM) == 0)
714       {
715 bad_dvfnam:
716        __pv_err(759,
717 	"cannot open $dumpvars output file at %s - $dumpvars not executed",
718 	 __to_timstr(__xs, &__simtime));
719        /* not setup but dv seen stops any future setup */
720        __dv_seen = TRUE;
721        __dv_state = DVST_NOTSETUP;
722        return;
723       }
724      else
725       {
726        __pv_warn(589, "cannot open $dumpvars output file %s trying %s",
727 	__dv_fnam, DFLTDVFNAM);
728        strcpy(__dv_fnam, DFLTDVFNAM);
729        if ((__dv_fd = __my_creat(__dv_fnam)) == -1) goto bad_dvfnam;
730       }
731     }
732    /* write the file reference header and setup dv "events" on wires */
733    __setup_dmpvars();
734    __do_dmpvars_baseline("$dumpvars");
735    if (__verbose)
736     {
737      __cv_msg(
738       "  $dumpvars setup complete at %s - variables dumped to file %s.\n",
739       __to_timstr(__xs, &__simtime), __dv_fnam);
740     }
741    if (__dv_state != DVST_OVERLIMIT)
742     {
743      __dv_state = DVST_DUMPING;
744      /* enable dmpv change recording */
745     }
746    /* more setup now disabled */
747    __dv_seen = TRUE;
748    break;
749   case DVST_OVERLIMIT:
750    if (__dv_chgnethdr != NULL) __misc_terr(__FILE__, __LINE__);
751    break;
752   default: __case_terr(__FILE__, __LINE__);
753  }
754 }
755 
756 /*
757  * reset and free all changed vars when dump all needed
758  *
759  * in case of dump all - from dv state change may be some changed
760  * vars that need to be reset
761  */
762 static void free_chgedvars(void)
763 {
764  register struct dvchgnets_t *dvchgnp;
765  struct dvchgnets_t *dvchg_last;
766 
767  dvchg_last = NULL;
768  /* need to find end to free because putting on front */
769  for (dvchgnp = __dv_chgnethdr; dvchgnp != NULL; dvchgnp = dvchgnp->dvchgnxt)
770   dvchg_last = dvchgnp;
771  /* nothing was on list if last nil */
772  if (dvchg_last != NULL)
773   {
774    dvchg_last->dvchgnxt = __dv_netfreelst;
775    __dv_netfreelst = __dv_chgnethdr;
776    __dv_chgnethdr = NULL;
777   }
778 }
779 
780 /*
781  * AFTER CHANGE PROPAGATION FROM RHS ROUTINES
782  */
783 
784 /*
785  * go through processing all nets (maybe a select) that changed
786  * a pass may create more net changes that are in turn processed
787  * until list empty
788  * this is a heuristic to try to cause breadth first processing
789  * when done future net change list empty
790  *
791  * if net (probably reg) has no loads and no dcelst els, it is not added to chg list
792  *
793  * SJM - 06/19/00 - now only save up and process structural net changes
794  * event controls must be checked (and for force/release) done immediately
795  */
796 static void process_all_netchgs(void)
797 {
798  register struct net_t *np;
799  register struct nchglst_t *nchglp, *last_nchglp;
800  struct nchglst_t *sav_nchglp;
801  int32 num_this_pass, num_passes, total_num;
802 
803  num_passes = 0;
804  total_num = 0;
805  for (;;)
806   {
807    if (__nchg_futhdr == NULL) break;
808 
809    /* point nchglp to all pending net chg elements for processing */
810    nchglp = __nchg_futhdr;
811    /* save head so can free at end of pass */
812    sav_nchglp = nchglp;
813    /* empty future so all net change elements added here will be put on */
814    /* end of list and processed when all these done - breadth first */
815    __nchg_futend = __nchg_futhdr = NULL;
816    last_nchglp = NULL;
817    for (num_this_pass = 0; nchglp != NULL; nchglp = nchglp->nchglnxt)
818     {
819      np = nchglp->chgnp;
820      /* must eval. in definition itree loc. */
821      __push_itstk(nchglp->nchg_itp);
822      /* SJM 04/19/01 - must turn off all changed to allow load propagation */
823      /* switch channel changes to be added to next pass change list */
824      /* turn off all changed - if get here know has n lds */
825      np->nchgaction[__inum] &= (~NCHG_ALL_CHGED);
826 
827      /* DEBUG remove ---
828      {
829       struct net_pin_t *npp;
830 
831       __dbg_msg("*** dumping loads for net %s (itp=%s)***\n",
832        np->nsym->synam, __msg2_blditree(__xs, __inst_ptr));
833       for (npp = np->nlds; npp != NULL; npp = npp->npnxt)
834        {
835         __dmp1_nplstel(__inst_mod, np, npp);
836        }
837       __dbg_msg("*** end of loads ***\n");
838      }
839     --- */
840 
841      /* SJM 07/24/00 - propagate changes to dces for wires at end of queue */
842      /* new algorithm - for regs immediate propagate, for wires end of queue */
843      /* LOOKATME - think event controls should be before lds */
844      if (np->ntyp < NONWIRE_ST && np->dcelst != NULL)
845       __wakeup_delay_ctrls(np, nchglp->bi1, nchglp->bi2);
846 
847      /* SJM 07/24/00 - for wires with no lds but only dces still need */
848      /* to record nothing to do here */
849      if (np->nlds != NULL)
850       eval_netchg_lds(np, nchglp->bi1, nchglp->bi2, nchglp->delayed_mipd);
851 
852      last_nchglp = nchglp;
853      num_this_pass++;
854      __pop_itstk();
855     }
856    total_num += num_this_pass;
857    if (++num_passes > 1000 && (num_passes % 1000) == 0)
858     {
859      if (__pending_enter_iact)
860       {
861        __ia_warn(1604,
862          "interactive mode probably entered from zero delay oscillation - no scheduling");
863        __do_interactive_loop();
864       }
865     }
866 
867    /* know last nchglp set since routine not called unless at least one element */
868    /* SJM 08/02/01 - add if to keep lint happy */
869    if (last_nchglp != NULL) last_nchglp->nchglnxt = __nchgfreelst;
870    __nchgfreelst = sav_nchglp;
871    /* LINUX DBG - add me */
872    /* chk_nchgnlst(__nchgfreelst); */
873    /* --- */
874   }
875  if (__debug_flg && __ev_tracing)
876   {
877    __tr_msg("-- net change event scheduling %d processed in %d passes\n",
878     total_num, num_passes);
879   }
880  __num_netchges += total_num;
881 }
882 
883 /*
884  * check nchg free list
885  */
886 /* UNUSED LINUX DEBUG ---
887 void chk_nchgnlst(struct nchglst_t *hdr)
888 {
889  register struct nchglst_t *nchglp;
890  int32 ndx;
891 
892  ndx = 0;
893  for (nchglp = hdr; nchglp != NULL; nchglp = nchglp->nchglnxt)
894   {
895    if ((void *) nchglp > (void *) 0x13257400)
896     {
897      __tr_msg("problem at index %d\n", ndx);
898      __misc_terr(__FILE__, __LINE__);
899     }
900    if (nchglp->nchglnxt > (void *) 0x13257400)
901     {
902      __tr_msg("problem at index %d\n", ndx);
903      __misc_terr(__FILE__, __LINE__);
904     }
905    ndx++;
906   }
907 }
908 --- */
909 
910 /*
911  * after changed net (wire or reg) go through loads evaluating the
912  * load net's drivers assigning to the load net a new value
913  *
914  * bit range passed and used to eliminate fan-out for other bit here
915  * all ranges here normalized high to low form
916  * notice will never get to event trigger through this path (through cause)
917  * this is called with current itstk set to wire targ. (maybe target of xmr)
918  *
919  * the driver evaluations caused by this cause any changed wires to be
920  * added to a list which is then used to provide the next pass of wire
921  * loads
922  */
923 static void eval_netchg_lds(register struct net_t *np, int32 chgi1, int32 chgi2,
924  int32 is_delayed_mipd)
925 {
926  register struct net_pin_t *npp;
927  register struct npaux_t *npauxp;
928  register int32 bi;
929  int32 nd_itpop;
930  struct mod_t *downmdp;
931  struct mod_pin_t *mpp;
932  struct itree_t *itp;
933  struct inst_t *ip;
934  struct gate_t *gp;
935  struct mipd_t *mipdp;
936 
937  if (__ev_tracing)
938   {
939    __evtr_resume_msg();
940    __tr_msg("-- evaluating loads of reg/wire %s\n",
941     __to_evtrwnam(__xs, np, chgi1, chgi2, __inst_ptr));
942   }
943 
944  /* must process all loads on net */
945  for (npp = np->nlds; npp != NULL; npp = npp->npnxt)
946   {
947    /* immediately filter out - npp's that require particular inst. */
948    if (npp->npproctyp == NP_PROC_FILT
949     && npp->npaux->npu.filtitp != __inst_ptr)
950     continue;
951 
952    /* first need non empty union with 2 ranges */
953    /* case 1: all bits of changed or driven bits unknown */
954    if ((npauxp = npp->npaux) == NULL || npauxp->nbi1 == -1 || chgi1 == -1)
955     goto got_match;
956    /* case 2: range of npp is IS form */
957    if (npauxp->nbi1 == -2)
958     {
959      if (is2_chg_match(npauxp->nbi2.xvi, chgi1, chgi2))
960       goto got_match;
961      continue;
962     }
963    /* case 3: must check to see if net chg in range */
964    if (chgi1 < npauxp->nbi2.i || chgi2 > npauxp->nbi1) continue;
965 
966 got_match:
967    /* process various xmr special cases */
968    /* know any instance filtering done before here */
969    /* move from definition target xmr loc. back to ref. loc */
970    /* for vpi_ just pushed same inst. on to stack again since no ref. loc. */
971    if (npp->npproctyp != NP_PROC_INMOD)
972     {
973      /* SJM 04/17/03 - if XMR path does not match, do not eval */
974      if (!__move_to_npprefloc(npp)) continue;
975      nd_itpop = TRUE;
976     }
977    else nd_itpop = FALSE;
978 
979    /* maybe some tracing info */
980    if (__ev_tracing) emit_nchglds_trmsg(np, npp);
981 
982    switch ((byte) npp->npntyp) {
983     case NP_ICONN:
984      /* notice iconn load (rhs) causes assign to down lhs mdprt */
985      /* but iconn driver assigns from down rhs to iconn lhs for out port */
986      __immed_assigns++;
987      /* SJM 09/08/01 - can now remove this consistency check */
988      /* DBG remove ---
989      if (npp->elnpp.eii >= __inst_ptr->itip->imsym->el.emdp->minum)
990       __misc_terr(__FILE__, __LINE__);
991      --- */
992 
993      itp = &(__inst_ptr->in_its[npp->elnpp.eii]);
994      ip = itp->itip;
995      downmdp = ip->imsym->el.emdp;
996      /* SJM 09/08/01 - can now remove this consistency check */
997      /* DBG remove ---
998      if (npp->obnum >= downmdp->mpnum) __misc_terr(__FILE__, __LINE__);
999      --- */
1000      mpp = &(downmdp->mpins[npp->obnum]);
1001      /* assign from rhs up rhs iconn to lhs down mpp ref. for input port */
1002      /* notice down always take only 4 args, down do not have first mpp */
1003      (*mpp->mpaf.mpp_downassgnfunc)(mpp->mpref, ip->ipins[npp->obnum], itp);
1004      break;
1005     case NP_PB_ICONN:
1006      /* notice iconn load (rhs) causes assign to down lhs mdprt */
1007      /* but iconn driver assigns from down rhs to iconn lhs for out port */
1008      __immed_assigns++;
1009      /* SJM 09/08/01 - can now remove this consistency check */
1010      /* DBG remove ---
1011      if (npp->elnpp.eii >= __inst_ptr->itip->imsym->el.emdp->minum)
1012       __misc_terr(__FILE__, __LINE__);
1013      --- */
1014      itp = &(__inst_ptr->in_its[npp->elnpp.eii]);
1015      ip = itp->itip;
1016      downmdp = ip->imsym->el.emdp;
1017      /* SJM 09/08/01 - can now remove this consistency check */
1018      /* DBG remove ---
1019      if (npp->obnum >= downmdp->mpnum) __misc_terr(__FILE__, __LINE__);
1020      --- */
1021      mpp = &(downmdp->mpins[npp->obnum]);
1022      mpp = &(mpp->pbmpps[npp->pbi]);
1023      /* assign from rhs up rhs iconn to lhs down mpp ref. for input port */
1024      /* notice down always take only 4 args, down do not have first mpp */
1025      (*mpp->mpaf.mpp_downassgnfunc)(mpp->mpref,
1026       ip->pb_ipins_tab[npp->obnum][npp->pbi], itp);
1027      break;
1028     case NP_MDPRT:
1029      /* top of itstk determines which module inst this is and */
1030      /* which up instance port to assign to for output port */
1031      __immed_assigns++;
1032      downmdp = npp->elnpp.emdp;
1033      /* DBG remove --- */
1034      if (npp->obnum >= downmdp->mpnum) __misc_terr(__FILE__, __LINE__);
1035      /* --- */
1036      mpp = &(downmdp->mpins[npp->obnum]);
1037      itp = __inst_ptr->up_it;
1038      /* DBG remove - bug if trying to assign output of top module --- */
1039      if (itp == NULL) __misc_terr(__FILE__, __LINE__);
1040      /* --- */
1041      /* assign from rhs down mpp ref. to up lhs iconn for output port */
1042      /* for input port, assign from rhs up iconn to down mod port */
1043      /* notice up always take only 3 args, down have extra 1st arg mpp */
1044      (*mpp->mpaf.mpp_upassgnfunc)(__inst_ptr->itip->ipins[npp->obnum],
1045       mpp->mpref, itp);
1046      break;
1047     case NP_PB_MDPRT:
1048      /* top of itstk determines which module inst this is and */
1049      /* which up instance port to assign to for output port */
1050      __immed_assigns++;
1051      downmdp = npp->elnpp.emdp;
1052      mpp = &(downmdp->mpins[npp->obnum]);
1053      mpp = &(mpp->pbmpps[npp->pbi]);
1054      itp = __inst_ptr->up_it;
1055      (*mpp->mpaf.mpp_upassgnfunc)(
1056       __inst_ptr->itip->pb_ipins_tab[npp->obnum][npp->pbi],
1057       mpp->mpref, itp);
1058      break;
1059     case NP_GATE:
1060      /* evaluate load that is gate input and probably schedule gate chg */
1061      gp = npp->elnpp.egp;
1062      (*gp->gchg_func)(gp, npp->obnum);
1063      break;
1064     case NP_CONTA:
1065      /* know input that changed is always port 0 (only input) */
1066      /* SJM - 09/18/02 - for per bit rhs concat form same net pin type */
1067      __eval_conta_rhs_ld(npp);
1068      break;
1069     case NP_TRANIF:
1070      __eval_tranif_ld(npp->elnpp.egp, (int32) npp->obnum);
1071      break;
1072     case NP_TCHG:
1073      __process_npp_timofchg(np, npp);
1074      break;
1075     case NP_MIPD_NCHG:
1076      /* SJM 07/09/01 - for MIPD inserted between net(s) that connect to port */
1077      /* and loads, net pin t that causes schedule before processing rest */
1078 
1079      /* DBG remove --- */
1080      if (np->nlds != npp) __misc_terr(__FILE__, __LINE__);
1081      /* --- */
1082 
1083      /* SJM 07/13/01 if mipd net load processing already delayed, skip sched */
1084      /* and stop processing, algorithm is to store port conn net val and */
1085      /* sched ev, then ev processing routine puts on nchg list as if store */
1086      /* had happened after del - can't propagate MIPD nchges until normal */
1087      /* nchg loop */
1088 
1089      if (is_delayed_mipd) break;
1090 
1091      /* scalar is special case */
1092      if (!np->n_isavec)
1093       {
1094        mipdp = &(npp->elnpp.emipdbits[0]);
1095        if (mipdp->no_mipd) break;
1096        __sched_mipd_nchg(np, -1, mipdp);
1097       }
1098      else if (chgi1 == -1)
1099       {
1100        for (bi = np->nwid - 1; bi >= 0; bi--)
1101         {
1102          mipdp = &(npp->elnpp.emipdbits[bi]);
1103          /* SJM 07/24/05 - must process all bits even if middle no mipd */
1104          if (mipdp->no_mipd) continue;
1105          __sched_mipd_nchg(np, bi, mipdp);
1106         }
1107       }
1108      else
1109       {
1110        for (bi = chgi1; bi >= chgi2; bi--)
1111         {
1112          mipdp = &(npp->elnpp.emipdbits[bi]);
1113          /* SJM 07/24/05 - must process all bits even if middle no mipd */
1114          if (mipdp->no_mipd) continue;
1115          __sched_mipd_nchg(np, bi, mipdp);
1116         }
1117       }
1118      if (nd_itpop) __pop_itstk();
1119      /* notice must return since because of MIPD the net wasn't really changed */
1120      /* works because mipd npp always first on list */
1121      return;
1122      /* pull driver only illegal here */
1123     default: __case_terr(__FILE__, __LINE__);
1124    }
1125    if (nd_itpop) __pop_itstk();
1126   }
1127 }
1128 
1129 /*
1130  * process MIPD event
1131  *
1132  * simple proc that just processes all but first MIPD delay schedule NPP
1133  * routine here since same as eval nchgs
1134  *
1135  * all MIPD events do is delay propagation from changed net(s) connected
1136  * to input or inout ports to its loads by delay amount - tricky part is
1137  * path-src delay value calculation
1138  */
1139 static void process_mipd_nchg_ev(struct tev_t *tevp)
1140 {
1141  register struct net_t *np;
1142  register struct mipd_t *mipdp;
1143  int32 bi, bi2;
1144 
1145  np = tevp->tu.tenp->tenu.np;
1146  bi = tevp->tu.tenp->nbi;
1147 
1148  if (__ev_tracing)
1149   {
1150    __evtr_resume_msg();
1151    __tr_msg("-- tracing MIPD event for %s\n",
1152     __to_evtrwnam(__xs, np, bi, bi, __inst_ptr));
1153   }
1154 
1155  /* turn off mipd scheduled event */
1156  bi2 = (bi == -1) ? 0 : bi;
1157  mipdp = &(np->nlds->elnpp.emipdbits[bi2]);
1158  mipdp->mipdschd_tevs[__inum] = -1;
1159 
1160  /* add to nchg list if net not already all changed */
1161  /* dmpvars bits chg not turned on since dumpvar happens when net changes */
1162  /* also can't use normal macro, because never need reg dce wakeup */
1163  if (!np->n_isavec)
1164   {
1165    if ((np->nchgaction[__inum] & NCHG_ALL_CHGED) == 0)
1166     {
1167      __add_nchglst_el(np);
1168      /* SJM 19/01/02 - T because this is 2nd delayed event one so must */
1169      /* not schedule */
1170      /* BEWARE - this assumes last element added to end of list */
1171      __nchg_futend->delayed_mipd = TRUE;
1172     }
1173   }
1174  else
1175   {
1176    if ((np->nchgaction[__inum] & NCHG_ALL_CHGED) == 0)
1177     {
1178      __add_select_nchglst_el(np, bi, bi);
1179      /* SJM 19/01/02 - T because this is 2nd delayed event one so must */
1180      /* not schedule */
1181      /* BEWARE - this assumes last element added to end of list */
1182      __nchg_futend->delayed_mipd = TRUE;
1183     }
1184   }
1185 
1186  /* free mipd event auxiliary field here since bit and wire extracted */
1187  __my_free((char *) tevp->tu.tenp, sizeof(struct tenp_t));
1188  tevp->tu.tenp = NULL;
1189 }
1190 
1191 /*
1192  * emit eval netchg lds trace message
1193  */
1194 static void emit_nchglds_trmsg(struct net_t *np, struct net_pin_t *npp)
1195 {
1196  int32 i1, i2;
1197  struct npaux_t *npauxp;
1198  char s1[RECLEN], s2[RECLEN];
1199 
1200  __evtr_resume_msg();
1201  if (__debug_flg)
1202   {
1203    if ((npauxp = npp->npaux) == NULL) i1 = i2 = -1;
1204    else __get_cor_range(npauxp->nbi1, npauxp->nbi2, &i1, &i2);
1205    __tr_msg("-- after %s %s changed to %s processing %s\n",
1206     __to_wtnam(s1, np), __to_evtrwnam(__xs, np, i1, i2, __inst_ptr),
1207    __var_tostr(__xs2, np, i1, i2, BHEX), __to_npptyp(s2, npp));
1208   }
1209 }
1210 
1211 /*
1212  * return T if the IS2 form bit range (depends on itree place) matches
1213  * T if bit inside npi1..npi2
1214  * will overlap unless either high changed below low of range
1215  * or low changed above high of range
1216  *
1217  * SJM 10/12/04 - changed to pass contab ndx instead of ptr since contab
1218  * realloced
1219  */
1220 static int32 is2_chg_match(int32 nbi2_xvi, int32 npi1, int32 npi2)
1221 {
1222  int32 i1;
1223  word32 *wp;
1224 
1225  wp = &(__contab[nbi2_xvi]);
1226  wp = &(wp[2*__inum]);
1227  /* if value x - force match since unknown - only can happen for procedural */
1228  if (wp[1] != 0L) return(TRUE);
1229  i1 = (int32) wp[0];
1230  /* know form here h:0 */
1231  return(npi1 >= i1 && npi2 <= i1);
1232 }
1233 
1234 /*
1235  * get a possibly corrected (for IS2 form) bit index (part select always split)
1236  */
1237 extern void __get_cor_range(register int32 oi1, union intptr_u oi2,
1238  register int32 *i1, register int32 *i2)
1239 {
1240  register word32 *wp;
1241 
1242  if (oi1 != -2) { *i1 = oi1; *i2 = oi2.i; }
1243  else
1244   {
1245    /* SJM 10/12/04 - because contab realloced, must be ndx base of IS */
1246    wp = &(__contab[oi2.xvi]);
1247    *i1 = *i2 = (int32) wp[2*__inum];
1248   }
1249 }
1250 
1251 /*
1252  * DECLARATIVE EVENT SCHEDULING/PROCESSING ROUTINES
1253  */
1254 
1255 /*
1256  * GATE EVENT ROUTINES
1257  */
1258 
1259 /*
1260  * GENERAL COMMENTS FOR ALL ROUTINES:
1261  *
1262  * evaluate a gate because gate input changed and the gate input is
1263  * a load of the changed wire
1264  *
1265  * when gate gp input i of inst. cur. itp changes, eval. gate and maybe
1266  * schedule output change if has delay and output changed
1267  * after changing input in gate/inst. state vector
1268  *
1269  * notice 1 bit conta's are transformed to gates during fix up
1270  * so changes handled here
1271  *
1272  * after these, if inputs differ, all of old gate value, new gate val,
1273  * old gate strength and new gate strength are set
1274  */
1275 
1276 /* --- unused non proc call gate eval routine
1277 static void eval_gatein_ld(struct gate_t *gp, int32 i)
1278 {
1279  switch ((byte) gp->g_class) {
1280   case GC_LOGIC: std_chg_logic_gate(gp, i); break;
1281   case GC_UDP: std_chg_udp_gate(gp, i); break;
1282   case GC_BUFIF: std_chg_bufif_gate(gp, i); break;
1283   case GC_MOS: std_chg_mos_gate(gp, i); break;
1284   case GC_CMOS: std_chg_cmos_gate(gp, i); break;
1285   default: __case_terr(__FILE__, __LINE__);
1286  }
1287 }
1288 --- */
1289 
1290 /*
1291  * evaluate a logic gate - std not optimized version
1292  */
1293 static void std_chg_logic_gate(register struct gate_t *gp, register word32 i)
1294 {
1295  int32 out_chg;
1296 
1297  if (!__eval_logic_gate(gp, i, &out_chg))
1298   { if (__ev_tracing) trace_gunchg(gp, i); return; }
1299 
1300  if (__ev_tracing) evtr_prop_gatechg(gp, i, out_chg);
1301  else
1302   {
1303    if (gp->g_delrep == DT_NONE) { if (out_chg) change_gate_outwire(gp); }
1304    else prop_gatechg(gp, out_chg, FALSE);
1305   }
1306 }
1307 
1308 /*
1309  * accelerated 2 input gate (all XL style simple expressions)
1310  *
1311  * if no delay and not ev trace does all inline, if delay call normal prop
1312  * both ports must be constant bit select or scalar
1313  * inputs can not be strength for this most optimized routine
1314  */
1315 static void acc_chg_bufnot(struct gate_t *gp, word32 i)
1316 {
1317  register word32 ouwrd, uwrd, ngav, ngbv;
1318  int32 out_chg, gatid, biti;
1319  word32 igav, igbv;
1320  struct expr_t *xp;
1321  struct net_t *np;
1322 
1323  xp = gp->gpins[1];
1324  if (xp->optyp == ID)
1325   ld_scalval_(&igav, &igbv, xp->lu.sy->el.enp->nva.bp);
1326  else
1327   {
1328    __ld_bit(&igav, &igbv, xp->lu.x->lu.sy->el.enp,
1329     (int32) __contab[xp->ru.x->ru.xvi]);
1330   }
1331 
1332  __new_inputval = igav | (igbv << 1);
1333  /* eval changed input and store in gstate if needed */
1334  /* 12/19/99 SJM - notice buf or not still packed into 1 byte but vars */
1335  /* only packed into word32 as smallest */
1336  ouwrd = (word32) gp->gstate.bp[__inum];
1337 
1338  /* input for not is bits 0 and 2 */
1339  uwrd = ouwrd & ~(0x5L);
1340  uwrd |= (igav | (igbv << 2));
1341  /* input change did not change gate */
1342  if (uwrd == ouwrd) { if (__ev_tracing) trace_gunchg(gp, i); return; }
1343  gp->gstate.bp[__inum] = (byte) uwrd;
1344 
1345  /* value for not is bits 1 and 3 */
1346  __old_gateval = ((uwrd >> 1) & 1L) | ((uwrd >> 2) & 2L);
1347 
1348  /* evaluate - not and buf always convert z to x */
1349  gatid = gp->gmsym->el.eprimp->gateid;
1350  ngbv = (uwrd >> 2) & 1L;
1351  if (gatid == G_NOT) ngav = !(uwrd & 1L) | ngbv;
1352  else if (gatid == G_BUF) ngav = (uwrd & 1L) | ngbv;
1353  /* but cont. ASSIGN passes z */
1354  else ngav = (uwrd & 1L);
1355 
1356  __new_gateval = ngav | (ngbv << 1);
1357  /* set to T (non 0) if not equal, i.e. if changed (different) */
1358  out_chg = (__old_gateval != __new_gateval);
1359  /* if tracing must use std trace store-propagate routine */
1360  if (__ev_tracing) { evtr_prop_gatechg(gp, i, out_chg); return; }
1361  /* handle delay case using normal gate chg */
1362  /* third param means acc possible because called from acc routine */
1363  if (gp->g_delrep != DT_NONE) { prop_gatechg(gp, out_chg, TRUE); return; }
1364 
1365  /* immediate fast assign for accelerated */
1366  /* inline steps in store gate output value */
1367  if (!out_chg) return;
1368 
1369  /* g pdst on if wire driven by gate is path dest. or has delay */
1370  if (gp->g_pdst) { change_gate_outwire(gp); return; }
1371 
1372  /* non delay acc immediate assign code */
1373  xp = gp->gpins[0];
1374  /* update state with computed output value is bits 1 and 3 */
1375  uwrd = uwrd & ~(0x2L) & ~(0x8L);
1376  uwrd |= ((ngav << 1) | (ngbv << 3));
1377  gp->gstate.bp[__inum] = (byte) uwrd;
1378 
1379  /* accelerated assign to pin 0 (output) */
1380  if (xp->optyp == ID)
1381   {
1382    np = xp->lu.sy->el.enp;
1383    if (np->frc_assgn_allocated)
1384     {
1385      igav = ngav; igbv = ngbv;
1386      if (!__correct_forced_newwireval(np, &igav, &igbv))
1387       goto try_trace;
1388     }
1389    /* here since avoiding value store, need to add net change el. */
1390    chg_st_scalval_(np->nva.bp, ngav, ngbv);
1391    /* not 0 for mask ands is T */
1392    /* if lhs chged and no lds/dces and not entire inst changed, record it */
1393    if (__lhs_changed) record_nchg_(np);
1394   }
1395  else
1396   {
1397    np = xp->lu.x->lu.sy->el.enp;
1398    biti = (int32) __contab[xp->ru.x->ru.xvi];
1399 
1400    /* if the 1 bit is forced nothing to do else normal assign */
1401    if (np->frc_assgn_allocated
1402     && __forced_inhibit_bitassign(np, xp->lu.x, xp->ru.x)) goto try_trace;
1403    /* notice this adds the net chg element if needed */
1404    __chg_st_bit(np, biti, ngav, ngbv);
1405   }
1406 try_trace:
1407  if (__ev_tracing) trace_chg_gateout(gp, xp);
1408 }
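
/*
 * Illustrative sketch (not part of the simulator): the accelerated buf/not
 * code above works on the 2 bit (a,b) encoding of 4 state values, where
 * (a,b) = (0,0) is 0, (1,0) is 1, (0,1) is z and (1,1) is x.  The helper
 * below shows the same z-to-x converting buf/not evaluation on a value
 * packed as "a | (b << 1)"; the name and interface are hypothetical and
 * only demonstrate the technique.
 */
static unsigned sketch_eval_bufnot(unsigned in2, int is_not)
{
 unsigned av, bv, ngav, ngbv;

 av = in2 & 1u;
 bv = (in2 >> 1) & 1u;
 /* any z or x input (b bit set) produces x on the output */
 ngbv = bv;
 ngav = (is_not ? (av ^ 1u) : av) | ngbv;
 return(ngav | (ngbv << 1));
}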
1409 
1410 /*
1411  * accelerated 1 input (maybe stren) gate (all XL style simple expressions)
1412  *
1413  * if no delay and not ev trace does all inline, if delay call normal prop
1414  * both ports must be constant bit select or scalar
 * here inputs may be strength wires (strength stripped on load), but the
 * output cannot drive a strength wire
 *
 * the only difference from acc_chg_bufnot is the slower branch that loads
 * the input value from a strength wire
1418  */
static void acc_stichg_bufnot(register struct gate_t *gp, word32 i)
1420 {
1421  register struct expr_t *xp;
1422  register word32 ouwrd, uwrd, ngav, ngbv;
1423  register struct net_t *np;
1424  int32 out_chg, gatid, biti;
1425  word32 igav, igbv;
1426 
1427  xp = gp->gpins[1];
1428  if (xp->optyp == ID)
1429   {
1430    np = xp->lu.sy->el.enp;
1431    if (np->n_stren)
1432     {
1433      uwrd = (word32) np->nva.bp[__inum];
1434      igav = uwrd & 1L;
1435      igbv = (uwrd >> 1) & 1L;
1436     }
1437    else ld_scalval_(&igav, &igbv, np->nva.bp);
1438   }
1439  else __ld_bit(&igav, &igbv, xp->lu.x->lu.sy->el.enp,
1440   (int32) __contab[xp->ru.x->ru.xvi]);
1441 
1442  __new_inputval = igav | (igbv << 1);
1443  /* eval changed input and store in gstate if needed */
1444  ouwrd = (word32) gp->gstate.bp[__inum];
1445  /* input for not is bits 0 and 2 */
1446  uwrd = ouwrd & ~(0x5L);
1447  uwrd |= (igav | (igbv << 2));
1448  /* input change did not change gate */
1449  if (uwrd == ouwrd) { if (__ev_tracing) trace_gunchg(gp, i); return; }
1450  gp->gstate.bp[__inum] = (byte) uwrd;
1451 
1452  /* value for not is bits 1 and 3 */
1453  __old_gateval = ((uwrd >> 1) & 1L) | ((uwrd >> 2) & 2L);
1454 
1455  /* evaluate - not and buf always convert z to x */
1456  gatid = gp->gmsym->el.eprimp->gateid;
1457  ngbv = (uwrd >> 2) & 1L;
1458  if (gatid == G_NOT) ngav = !(uwrd & 1L) | ngbv;
1459  else if (gatid == G_BUF) ngav = (uwrd & 1L) | ngbv;
1460  /* but cont. ASSIGN passes z */
1461  else ngav = (uwrd & 1L);
1462 
1463  __new_gateval = ngav | (ngbv << 1);
 /* set to T (non-zero) if old and new gate values differ */
1465  out_chg = (__old_gateval != __new_gateval);
1466  /* if tracing must use std trace store-propagate routine */
1467  if (__ev_tracing) { evtr_prop_gatechg(gp, i, out_chg); return; }
1468  /* handle delay case using normal gate chg */
 /* third param T because called from acc routine */
1470  if (gp->g_delrep != DT_NONE) { prop_gatechg(gp, out_chg, TRUE); return; }
1471 
1472  /* immediate fast assign for accelerated */
1473  /* inline steps in store gate output value */
1474  if (!out_chg) return;
1475 
1476  /* g pdst on if wire driven by gate is path dest. or has delay */
1477  if (gp->g_pdst) { change_gate_outwire(gp); return; }
1478 
1479  /* non delay acc assign code */
1480  xp = gp->gpins[0];
 /* update state with computed output value in bits 1 and 3 */
1482  uwrd = uwrd & ~(0x2L) & ~(0x8L);
1483  uwrd |= ((ngav << 1) | (ngbv << 3));
1484  gp->gstate.bp[__inum] = (byte) uwrd;
1485  /* accelerated assign to pin 0 (output) */
1486  if (xp->optyp == ID)
1487   {
1488    np = xp->lu.sy->el.enp;
1489    if (np->frc_assgn_allocated)
1490     {
1491      igav = ngav; igbv = ngbv;
1492      if (!__correct_forced_newwireval(np, &igav, &igbv))
1493       goto try_trace;
1494     }
1495    /* here since avoiding value store, need to add net change el. */
1496    chg_st_scalval_(np->nva.bp, ngav, ngbv);
1497 
1498    /* not 0 for mask ands is T */
1499    /* if lhs chged and no lds/dces and not entire inst changed, record it */
1500    if (__lhs_changed) record_nchg_(np);
1501   }
1502  else
1503   {
1504    np = xp->lu.x->lu.sy->el.enp;
1505    biti = (int32) __contab[xp->ru.x->ru.xvi];
1506    /* if the 1 bit is forced nothing to do else normal assign */
1507    if (np->frc_assgn_allocated
1508     && __forced_inhibit_bitassign(np, xp->lu.x, xp->ru.x)) goto try_trace;
1509    /* notice this adds the net chg element if needed */
1510    __chg_st_bit(np, biti, ngav, ngbv);
1511   }
1512 try_trace:
1513  if (__ev_tracing) trace_chg_gateout(gp, xp);
1514 }
1515 
1516 /*
 * accelerated up to 3 input (4 pins) gate with no delay
1518  *
1519  * could unwind to separate for each gate type
1520  * if no delay and not ev trace does all inline, if delay call normal prop
1521  * both ports must be constant bit select or scalar
1522  * inputs must not be stren wires
1523  * degenerate 2 input gate not accelerated
1524  */
static void acc_chg_4igate(register struct gate_t *gp, word32 i)
1526 {
1527  register struct expr_t *xp;
1528  register word32 ouwrd, uwrd, ngav, ngbv, gwid;
1529  struct net_t *np;
1530  int32 out_chg, biti, bi;
1531  word32 gav, gbv, mask;
1532 
1533  xp = gp->gpins[i];
1534  if (xp->optyp == ID) ld_scalval_(&gav, &gbv, xp->lu.sy->el.enp->nva.bp);
1535  else __ld_bit(&gav, &gbv, xp->lu.x->lu.sy->el.enp,
1536   (int32) __contab[xp->ru.x->ru.xvi]);
1537  bi = i - 1;
1538  gwid = gp->gpnum;
1539  __new_inputval = gav | (gbv << 1);
1540 
1541  /* eval changed input and store in gstate if needed */
1542  ouwrd = (word32) gp->gstate.bp[__inum];
1543  uwrd = ouwrd & ~(1L << bi) & ~(1L << (gwid + bi));
1544  uwrd |= ((gav << bi) | (gbv << (gwid + bi)));
1545  /* input change did not change gate */
1546  if (uwrd == ouwrd) { if (__ev_tracing) trace_gunchg(gp, i); return; }
1547  gp->gstate.bp[__inum] = (byte) uwrd;
1548 
1549  /* mask off a/b output bit - now gav/gbv all inputs */
1550  mask = __masktab[gwid - 1];
1551  gav = uwrd & mask;
1552  gbv = (uwrd >> gwid) & mask;
 /* works since num inputs at least 1 - b part shifts 1 less, into b bit */
1554  __old_gateval = ((uwrd >> (gwid - 1)) & 1L) | ((uwrd >> (2*gwid - 2)) & 2L);
1555 
1556  /* evaluate gate */
1557  /* LOOKATME - could split and copy for each pin/gate combination */
1558  ngav = ngbv = 1L;
1559  switch ((byte) gp->gmsym->el.eprimp->gateid) {
1560   case G_BITREDAND:
1561    /* if even 1 0 value in any used bit, result is 0 */
1562    if (gbv == 0L) { ngav = (gav != mask) ? 0L : 1L; ngbv = 0L; }
1563    else if ((gav | gbv) != mask) ngav = ngbv = 0L;
1564    break;
1565   case G_NAND:
1566    /* if even 1 0 value in any used bit, result is 1 */
1567    if (gbv == 0L) { ngav = (gav != mask) ? 1L : 0L; ngbv = 0L; }
1568    else if ((gav | gbv) != mask) ngbv = 0L;
1569    break;
1570   case G_BITREDOR:
1571    /* if even 1 1 value in any used bit, result is 1 */
1572    if (gbv == 0L) { ngav = (gav != 0L) ? 1L : 0L; ngbv = 0L; }
1573    else if ((gav & ~gbv) != 0L) ngbv = 0L;
1574    break;
1575   case G_NOR:
1576    /* if even 1 1 value in any used bit, result is 0 */
1577    if (gbv == 0L) { ngav = (gav != 0L) ? 0L : 1L; ngbv = 0L; }
1578    else if ((gav & ~gbv) != 0L) ngav = ngbv = 0L;
1579    break;
1580   case G_BITREDXOR:
1581    if (gbv == 0L) { ngbv = 0L; ngav = __wrd_redxor(gav); }
1582    break;
1583   case G_REDXNOR:
1584    if (gbv == 0L) { ngbv = 0L; ngav = !__wrd_redxor(gav); }
1585    break;
1586   default: __case_terr(__FILE__, __LINE__);
1587  }
1588  __new_gateval = ngav | (ngbv << 1);
 /* set to T (non-zero) if old and new gate values differ */
1590  out_chg = (__old_gateval != __new_gateval);
1591  /* if tracing must use std trace store-propagate routine */
1592  if (__ev_tracing) { evtr_prop_gatechg(gp, i, out_chg); return; }
1593  /* handle delay case using normal gate chg */
 /* third param T because called from acc routine */
1595  if (gp->g_delrep != DT_NONE) { prop_gatechg(gp, out_chg, TRUE); return; }
1596 
1597  /* immediate fast assign for accelerated */
1598  /* inline steps in store gate output value */
1599  if (!out_chg) return;
1600 
1601  /* g pdst on if wire driven by gate is path dest. or has delay */
1602  if (gp->g_pdst) { change_gate_outwire(gp); return; }
1603 
1604  /* non delay acc immediate assign code */
1605  xp = gp->gpins[0];
1606  /* mask off separated value bits to update output value in uwrd */
1607  uwrd = uwrd & ~(1L << (gwid - 1)) & ~(1L << (2*gwid - 1));
1608  /* works because ngav and ngbv exactly 1 low bit */
1609  uwrd |= ((ngav << (gwid - 1)) | (ngbv << (2*gwid - 1)));
1610  /* must store twice because update of input may not change output */
1611  gp->gstate.bp[__inum] = (byte) uwrd;
1612  /* accelerated assign to pin 0 (output) */
1613  if (xp->optyp == ID)
1614   {
1615    np = xp->lu.sy->el.enp;
1616    if (np->frc_assgn_allocated)
1617     {
1618      gav = ngav; gbv = ngbv;
1619      if (!__correct_forced_newwireval(np, &gav, &gbv))
1620       goto try_trace;
1621     }
1622    /* here since avoiding value store, need to add net change el. */
1623    chg_st_scalval_(np->nva.bp, ngav, ngbv);
1624    if (__lhs_changed) record_nchg_(np);
1625   }
1626  else
1627   {
1628    np = xp->lu.x->lu.sy->el.enp;
1629    biti = (int32) __contab[xp->ru.x->ru.xvi];
1630    /* if the 1 bit is forced nothing to do else normal assign */
1631    if (np->frc_assgn_allocated
1632     && __forced_inhibit_bitassign(np, xp->lu.x, xp->ru.x)) goto try_trace;
1633    /* this adds the nchg el if needed */
1634    __chg_st_bit(np, biti, ngav, ngbv);
1635   }
1636 try_trace:
1637  if (__ev_tracing) trace_chg_gateout(gp, xp);
1638 }
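
/*
 * Illustrative sketch (not part of the simulator): the reduction switch above
 * works on whole input planes at once - gav holds the "a" bits of all inputs,
 * gbv the "b" (z/x) bits, and mask covers the used input positions.  The
 * hypothetical helper below restates the and-style case: any known 0 input
 * forces 0, otherwise any z/x input forces x, otherwise the result is 1.
 */
static void sketch_redand(unsigned gav, unsigned gbv, unsigned mask,
 unsigned *ngav, unsigned *ngbv)
{
 /* start at x (a=1, b=1) exactly as the routine above does */
 *ngav = *ngbv = 1u;
 if (gbv == 0u)
  {
   /* all inputs known - result is 1 only if every used a bit is 1 */
   *ngav = (gav != mask) ? 0u : 1u;
   *ngbv = 0u;
  }
 else if ((gav | gbv) != mask)
  {
   /* some position has a=0 and b=0, i.e. a known 0 input - result is 0 */
   *ngav = *ngbv = 0u;
  }
}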
1639 
1640 /*
1641  * accelerated up to 4 input gate
1642  *
1643  * could unwind to separate for each gate type
1644  * if no delay and not ev trace does all inline, if delay call normal prop
1645  * both ports must be constant bit select or scalar
1646  *
 * inputs can be strength wires (strength removed on load) but the output
 * cannot drive a strength wire
1648  */
static void acc_stichg_4igate(register struct gate_t *gp, word32 i)
1650 {
1651  register struct expr_t *xp;
1652  register word32 ouwrd, uwrd, ngav, ngbv, gwid;
1653  struct net_t *np;
1654  int32 out_chg, biti, bi;
1655  word32 gav, gbv, mask;
1656 
1657  xp = gp->gpins[i];
1658  if (xp->optyp == ID)
1659   {
1660    np = xp->lu.sy->el.enp;
1661    if (np->n_stren)
1662     {
1663      uwrd = (word32) np->nva.bp[__inum];
1664      gav = uwrd & 1L;
1665      gbv = (uwrd >> 1) & 1L;
1666     }
1667    else ld_scalval_(&gav, &gbv, np->nva.bp);
1668   }
1669  else __ld_bit(&gav, &gbv, xp->lu.x->lu.sy->el.enp,
1670    (int32) __contab[xp->ru.x->ru.xvi]);
1671  bi = i - 1;
1672  gwid = gp->gpnum;
1673  __new_inputval = gav | (gbv << 1);
1674 
1675  /* eval changed input and store in gstate if needed */
1676  ouwrd = (word32) gp->gstate.bp[__inum];
1677  uwrd = ouwrd & ~(1L << bi) & ~(1L << (gwid + bi));
1678  uwrd |= ((gav << bi) | (gbv << (gwid + bi)));
1679  /* input change did not change gate */
1680  if (uwrd == ouwrd) { if (__ev_tracing) trace_gunchg(gp, i); return; }
1681  gp->gstate.bp[__inum] = (byte) uwrd;
1682 
1683  /* mask off a/b output bit - now gav/gbv all inputs */
1684  mask = __masktab[gwid - 1];
1685  gav = uwrd & mask;
1686  gbv = (uwrd >> gwid) & mask;
 /* works since num inputs at least 1 - b part shifts 1 less, into b bit */
1688  __old_gateval = ((uwrd >> (gwid - 1)) & 1L) | ((uwrd >> (2*gwid - 2)) & 2L);
1689 
1690  /* evaluate gate */
1691  /* LOOKATME - could split and copy for each pin/gate combination */
1692  ngav = ngbv = 1L;
1693  switch ((byte) gp->gmsym->el.eprimp->gateid) {
1694   case G_BITREDAND:
1695    /* if even 1 0 value in any used bit, result is 0 */
1696    if (gbv == 0L) { ngav = (gav != mask) ? 0L : 1L; ngbv = 0L; }
1697    else if ((gav | gbv) != mask) ngav = ngbv = 0L;
1698    break;
1699   case G_NAND:
1700    /* if even 1 0 value in any used bit, result is 1 */
1701    if (gbv == 0L) { ngav = (gav != mask) ? 1L : 0L; ngbv = 0L; }
1702    else if ((gav | gbv) != mask) ngbv = 0L;
1703    break;
1704   case G_BITREDOR:
1705    /* if even 1 1 value in any used bit, result is 1 */
1706    if (gbv == 0L) { ngav = (gav != 0L) ? 1L : 0L; ngbv = 0L; }
1707    else if ((gav & ~gbv) != 0L) ngbv = 0L;
1708    break;
1709   case G_NOR:
1710    /* if even 1 1 value in any used bit, result is 0 */
1711    if (gbv == 0L) { ngav = (gav != 0L) ? 0L : 1L; ngbv = 0L; }
1712    else if ((gav & ~gbv) != 0L) ngav = ngbv = 0L;
1713    break;
1714   case G_BITREDXOR:
1715    if (gbv == 0L) { ngbv = 0L; ngav = __wrd_redxor(gav); }
1716    break;
1717   case G_REDXNOR:
1718    if (gbv == 0L) { ngbv = 0L; ngav = !__wrd_redxor(gav); }
1719    break;
1720   default: __case_terr(__FILE__, __LINE__);
1721  }
1722  __new_gateval = ngav | (ngbv << 1);
 /* set to T (non-zero) if old and new gate values differ */
1724  out_chg = (__old_gateval != __new_gateval);
1725  /* if tracing must use std trace store-propagate routine */
1726  if (__ev_tracing) { evtr_prop_gatechg(gp, i, out_chg); return; }
1727  /* handle delay case using normal gate chg */
 /* third param T because called from acc routine */
1729  if (gp->g_delrep != DT_NONE) { prop_gatechg(gp, out_chg, TRUE); return; }
1730 
1731  /* immediate fast assign for accelerated */
1732  /* inline steps in store gate output value */
1733  if (!out_chg) return;
1734 
1735  /* g pdst on if wire driven by gate is path dest. or has delay */
1736  if (gp->g_pdst) { change_gate_outwire(gp); return; }
1737 
1738  /* non delay acc immediate assign code */
1739  xp = gp->gpins[0];
1740  /* mask off separated value bits to update output value in uwrd */
1741  uwrd = uwrd & ~(1L << (gwid - 1)) & ~(1L << (2*gwid - 1));
1742  /* works because ngav and ngbv exactly 1 low bit */
1743  uwrd |= ((ngav << (gwid - 1)) | (ngbv << (2*gwid - 1)));
1744  /* must store twice because update of input may not change output */
1745  gp->gstate.bp[__inum] = (byte) uwrd;
1746  /* accelerated assign to pin 0 (output) */
1747  if (xp->optyp == ID)
1748   {
1749    np = xp->lu.sy->el.enp;
1750    if (np->frc_assgn_allocated)
1751     {
1752      gav = ngav; gbv = ngbv;
1753      if (!__correct_forced_newwireval(np, &gav, &gbv))
1754       goto try_trace;
1755     }
1756    /* here since avoiding value store, need to add net change el. */
1757    chg_st_scalval_(np->nva.bp, ngav, ngbv);
1758    if (__lhs_changed) record_nchg_(np);
1759   }
1760  else
1761   {
1762    np = xp->lu.x->lu.sy->el.enp;
1763    biti = (int32) __contab[xp->ru.x->ru.xvi];
1764    /* if the 1 bit is forced nothing to do else normal assign */
1765    if (np->frc_assgn_allocated
1766     && __forced_inhibit_bitassign(np, xp->lu.x, xp->ru.x)) goto try_trace;
1767    /* this adds the nchg el if needed */
1768    __chg_st_bit(np, biti, ngav, ngbv);
1769   }
1770 try_trace:
1771  if (__ev_tracing) trace_chg_gateout(gp, xp);
1772 }
1773 
1774 /*
1775  * write gate value unchanged when input changes trace msg
1776  */
static void trace_gunchg(struct gate_t *gp, word32 i)
1778 {
1779  char s1[RECLEN];
1780 
1781  __tr_msg("-- %s %s %s input %u value unchanged\n",
1782   gp->gmsym->synam, (gp->g_class == GC_UDP) ? "udp" : "gate",
1783   to_evtronam(s1, gp->gsym->synam, __inst_ptr, (struct task_t *) NULL), i);
1784 }
1785 
1786 /*
1787  * evaluate a udp - std not optimized version
1788  */
static void std_chg_udp_gate(register struct gate_t *gp, register word32 i)
1790 {
1791  register int32 is_edge;
1792  int32 out_chg;
1793 
1794  __cur_udp = gp->gmsym->el.eudpp;
1795  is_edge = (__cur_udp->utyp == U_EDGE) ? TRUE : FALSE;
1796  if (!__eval_udp(gp, i, &out_chg, is_edge))
1797   { if (__ev_tracing) trace_gunchg(gp, i); return; }
1798 
1799  if (__ev_tracing) evtr_prop_gatechg(gp, i, out_chg);
1800  else
1801   {
1802    if (gp->g_delrep == DT_NONE)
1803     { if (out_chg) change_gate_outwire(gp); return; }
1804    prop_gatechg(gp, out_chg, FALSE);
1805   }
1806 }
1807 
1808 /*
1809  * evaluate a bufif gate - std not optimized version
1810  */
static void std_chg_bufif_gate(register struct gate_t *gp, register word32 i)
1812 {
1813  int32 out_chg;
1814 
1815  /* this sets __new_gateval to strength if out changed T */
1816  if (!__eval_bufif_gate(gp, i, &out_chg))
1817   { if (__ev_tracing) trace_gunchg(gp, i); return; }
1818 
1819  if (__ev_tracing) evtr_prop_gatechg(gp, i, out_chg);
1820  else
1821   {
1822    if (gp->g_delrep == DT_NONE)
1823     { if (out_chg) change_gate_outwire(gp); return; }
1824    prop_gatechg(gp, out_chg, FALSE);
1825   }
1826 }
1827 
1828 /*
1829  * evaluate a mos gate - std not optimized version
1830  *
1831  * g resist here is for real resistive gate not flag for acc
1832  */
static void std_chg_mos_gate(register struct gate_t *gp, register word32 i)
1834 {
1835  register int32 out_chg, gid;
1836 
1837  /* this sets __new_gateval to strength if out changed T */
1838  if (!chg_mos_instate(gp, i))
1839   { if (__ev_tracing) trace_gunchg(gp, i); return; }
1840 
1841  out_chg = TRUE;
1842  gid = gp->gmsym->el.eprimp->gateid;
1843  switch (gid) {
1844   case G_NMOS: __eval_nmos_gate(gp->gstate.wp[__inum]); break;
1845   case G_RNMOS: __eval_rnmos_gate(gp->gstate.wp[__inum]); break;
1846   case G_PMOS: __eval_pmos_gate(gp->gstate.wp[__inum]); break;
1847   case G_RPMOS: __eval_rpmos_gate(gp->gstate.wp[__inum]); break;
1848   default: __case_terr(__FILE__, __LINE__);
1849  }
1850  if (__new_gateval == __old_gateval) out_chg = FALSE;
1851 
1852  if (__ev_tracing) evtr_prop_gatechg(gp, i, out_chg);
1853  else
1854   {
1855    if (gp->g_delrep == DT_NONE)
1856     { if (out_chg) change_gate_outwire(gp); return; }
1857    prop_gatechg(gp, out_chg, FALSE);
1858   }
1859 }
1860 
1861 /*
1862  * evaluate a cmos gate - std not optimized version
1863  */
static void std_chg_cmos_gate(register struct gate_t *gp, register word32 i)
1865 {
1866  register int32 out_chg;
1867 
1868  if (!chg_cmos_instate(gp, i))
1869   { if (__ev_tracing) trace_gunchg(gp, i); return; }
1870 
1871  /* this sets __new_gateval to strength if out changed T */
1872  __eval_cmos_gate(gp);
1873  if (__new_gateval == __old_gateval) out_chg = FALSE; else out_chg = TRUE;
1874 
1875  if (__ev_tracing) evtr_prop_gatechg(gp, i, out_chg);
1876  else
1877   {
1878    if (gp->g_delrep == DT_NONE)
1879     { if (out_chg) change_gate_outwire(gp); return; }
1880    prop_gatechg(gp, out_chg, FALSE);
1881   }
1882 }
1883 
1884 /*
1885  * routine used during prep to determine and set gate in change routine
1886  * called for all including udp but not trans
1887  */
extern void __set_gchg_func(struct gate_t *gp)
1889 {
1890  int32 acc_class;
1891 
1892  switch ((byte) gp->g_class) {
1893   case GC_LOGIC:
1894    /* accelerate class is 2 for buf/not and 3 for any up to 4 logic gate */
1895    /* 0 for cannot accelerate */
1896    if (!__accelerate) gp->gchg_func = std_chg_logic_gate;
1897    else
1898     {
1899      acc_class = __get_acc_class(gp);
1900      switch ((byte) acc_class) {
1901       case ACC_STD: gp->gchg_func = std_chg_logic_gate; break;
1902       case ACC_BUFNOT: gp->gchg_func = acc_chg_bufnot; break;
1903       case ACC_STIBUFNOT: gp->gchg_func = acc_stichg_bufnot; break;
1904       case ACC_4IGATE: gp->gchg_func = acc_chg_4igate; break;
1905       case ACC_ST4IGATE: gp->gchg_func = acc_stichg_4igate; break;
1906       default: __case_terr(__FILE__, __LINE__);
1907      }
1908     }
1909    break;
1910   case GC_UDP: gp->gchg_func = std_chg_udp_gate; break;
1911   case GC_BUFIF: gp->gchg_func = std_chg_bufif_gate; break;
1912   case GC_MOS: gp->gchg_func = std_chg_mos_gate; break;
1913   case GC_CMOS: gp->gchg_func = std_chg_cmos_gate; break;
1914   default: __case_terr(__FILE__, __LINE__);
1915  }
1916 }
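
/*
 * Illustrative sketch (not part of the simulator): __set_gchg_func above
 * selects a per gate evaluation routine once during prep so the event code
 * can presumably dispatch through the gchg_func pointer without re-testing
 * the gate class or acceleration level on every input change.  The miniature
 * below only shows the pattern; the names are hypothetical.
 */
struct sketch_gate_t {
 void (*chg_func)(struct sketch_gate_t *, unsigned);
};

static void sketch_dispatch_chg(struct sketch_gate_t *gp, unsigned pin)
{
 /* one indirect call replaces a switch on gate class/acceleration */
 (*gp->chg_func)(gp, pin);
}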
1917 
1918 /*
1919  * routine to turn off acceleration for logic gates when gate out terminal
1920  * value change call back registered
1921  */
extern void __logic_acc_off(struct gate_t *gp)
1923 {
1924  if (gp->g_class == GC_LOGIC && gp->gchg_func != std_chg_logic_gate)
1925   gp->gchg_func = std_chg_logic_gate;
1926 }
1927 
1928 /*
 * for a gate that because of an added vpi driver needs to be changed to fi>1
1930  * set the standard unoptimized gate assign routine
1931  */
extern void __vpi_set_chg_proc(struct gate_t *gp)
1933 {
1934  /* if non logic gate, never optimized so can just use the std */
1935  if (gp->g_class == GC_LOGIC)
1936   {
1937    if (gp->gchg_func != std_chg_logic_gate)
1938     {
1939      gp->gchg_func = std_chg_logic_gate;
1940     }
1941   }
1942 }
1943 
1944 /*
1945  * return T if gate has accelerated action routine
1946  */
extern int32 __gate_is_acc(struct gate_t *gp)
1948 {
1949  if (gp->gchg_func == acc_chg_bufnot || gp->gchg_func == acc_stichg_bufnot
1950   || gp->gchg_func == acc_chg_4igate || gp->gchg_func == acc_stichg_4igate)
1951   return(TRUE);
1952  return(FALSE);
1953 }
1954 
1955 /*
1956  * propagate the gate change - normal version called when event tracing off
1957  * complicated because of spike analysis
1958  * this works for strength
1959  * this assumed old and new gateval globals set before here
1960  *
1961  * if gate or udp drives highz[01] strength used to access delay but gate
1962  * must drive actual value which is converted when assigned to wire
1963  * outchg for highz[01] not changed since same hiz will be same 0 or 1
1964  *
1965  * show cancel e analysis uses gate output not possible hiz wire since 0/1
1966  * glitch will just map to hiz spikes
1967  *
1968  * only called if know has delay and know old gateval and new gateval
1969  * globals set
1970  */
static void prop_gatechg(register struct gate_t *gp, register int32 outchg,
1972  int32 is_acc)
1973 {
1974  i_tev_ndx tevpi;
1975  word64 gdel, schtim;
1976  struct tev_t *tevp;
1977 
1978  /* no pending scheduled event */
1979  if ((tevpi = gp->schd_tevs[__inum]) == -1)
1980   {
1981    /* case 1a: output changed */
1982    if (outchg)
1983     {
1984      /* if 0 or 1 and hiz strength gate need to use to hiz delay */
1985      if (gp->g_hasst && (__new_gateval & 2) == 0
1986       && __hizstren_del_tab[gp->g_stval] == 1)
1987       __hizstrengate_getdel(&gdel, gp);
1988      else __get_del(&gdel, gp->g_du, gp->g_delrep);
1989 
1990      schtim = __simtime + gdel;
1991      schedule_1gev(gp, gdel, schtim, is_acc);
1992     }
1993    /* if output did not change, nothing to do */
1994    return;
1995   }
1996 
1997  /* need time of new value scheduled change for this analysis */
1998  if (gp->g_hasst && (__new_gateval & 2) == 0
1999   && __hizstren_del_tab[gp->g_stval] == 1) __hizstrengate_getdel(&gdel, gp);
2000  else __get_del(&gdel, gp->g_du, gp->g_delrep);
2001 
2002  schtim = __simtime + gdel;
2003 
2004  /* pending event */
2005  tevp = &(__tevtab[tevpi]);
2006  /* new and old same - scheduled different - real pulse/glitch */
2007  if (!outchg)
2008   {
2009    if (__warn_cancel_e && !__no_warns && !__em_suppr(592))
2010     emit_pulsewarn(gp, tevp, &(tevp->etime), &schtim, "drives glitch");
2011 
2012    /* if spike, suppress future but schedule to x at currently scheduled */
2013    if (__show_cancel_e)
2014     {
2015      if (__showe_onevent)
2016       { tevp->outv = get_showcancele_val(gp); return; }
2017 
2018      /* immediate assign then cancel */
2019 on_detect_show_x:
2020      __new_gateval = get_showcancele_val(gp);
2021      if (tevp->gev_acc) acc_evchg_gate_outwire(gp);
2022      else change_gate_outwire(gp);
2023      /* newly scheduled to same so no event */
2024      tevp->te_cancel = TRUE;
2025      __inertial_cancels++;
2026      gp->schd_tevs[__inum] = -1;
2027      return;
2028     }
2029    /* newly scheduled to same so no event */
2030    tevp->te_cancel = TRUE;
2031    __inertial_cancels++;
2032    gp->schd_tevs[__inum] = -1;
2033    return;
2034   }
2035  /* new schedule to same value case */
2036  /* know that delay same and later so just discard new event */
2037  /* done silently here - trace message only */
2038  if (tevp->outv == (byte) __new_gateval) return;
2039 
2040  /* normal inertial reschedule */
2041  if (__warn_cancel_e && !__no_warns && !__em_suppr(592))
2042   emit_pulsewarn(gp, tevp, &(tevp->etime), &schtim, "unstable");
2043 
2044  if (__show_cancel_e)
2045   {
2046    if (__showe_onevent) { tevp->outv = get_showcancele_val(gp); return; }
2047    goto on_detect_show_x;
2048   }
2049  /* reschedule - handles cancel */
2050  tevpi = reschedule_1gev(tevpi, gdel, schtim, __new_gateval, is_acc);
2051 }
2052 
2053 /*
2054  * propagate the gate change
2055  * complicated because of show cancel e analysis
2056  *
2057  * notice new gateval and old gateval set and has strength value if present
2058  * see prop gatechg routines for more comments
2059  * unlike prop_gatechg, this must be called for DT_NONE assigns
2060  * all events scheduled from here must not set event accelerate bit
2061  *
2062  * SJM 11/27/00 - this always calls or schedules change gate outwire where
2063  * the gate terminal call back is checked so do not need separate code
2064  */
static void evtr_prop_gatechg(register struct gate_t *gp, register word32 i,
2066  int32 outchg)
2067 {
2068  i_tev_ndx tevpi;
2069  word64 gdel, schtim;
2070  struct tev_t *tevp;
2071  char vs1[10], vs2[10], vs3[10];
2072  char s1[RECLEN], s2[RECLEN];
2073 
2074  __tr_msg("-- %s gate %s input %d changed to %s:\n",
2075   gp->gmsym->synam, to_evtronam(s1, gp->gsym->synam, __inst_ptr,
2076   (struct task_t *) NULL), i, __to_ginam(vs1, gp, __new_inputval, i));
2077 
2078  /* case 0: gate has no delay - not even #0 */
2079  if (gp->g_delrep == DT_NONE)
2080   {
2081    if (!outchg) { __tr_msg(" NODEL, NOCHG\n"); return; }
2082 
2083    /* this assigns or schedules the 1 bit net change */
2084    __tr_msg(" NODEL <OV=%s, NV=%s>\n",
2085     __to_gonam(vs1, gp, __old_gateval), __to_gonam(vs2, gp, __new_gateval));
2086    change_gate_outwire(gp);
2087    return;
2088   }
2089 
2090  /* need time of new value scheduled change for this analysis */
2091  if (gp->g_hasst && (__new_gateval & 2) == 0
2092   && __hizstren_del_tab[gp->g_stval] == 1) __hizstrengate_getdel(&gdel, gp);
2093  else __get_del(&gdel, gp->g_du, gp->g_delrep);
2094  schtim = __simtime + gdel;
2095 
2096  /* case 1: no pending scheduled event */
2097  if ((tevpi = gp->schd_tevs[__inum]) == -1)
2098   {
2099    /* output did not change */
2100    if (!outchg)
2101     {
2102      /* no net change and must cancel any already scheduled event */
2103      __tr_msg(" DEL, NOCHG <OV=%s>\n",
2104       __to_gonam(vs1, gp, __old_gateval));
2105      return;
2106     }
2107    __tr_msg(" DEL, SCHD AT %s <OV=%s, NSV=%s>\n",
2108     __to_timstr(s1, &schtim), __to_gonam(vs1, gp, __old_gateval),
2109     __to_gonam(vs2, gp, __new_gateval));
2110    /* schedule */
2111    schedule_1gev(gp, gdel, schtim, FALSE);
2112    return;
2113   }
2114  /* pending event */
2115  tevp = &(__tevtab[tevpi]);
 /* new and old same - scheduled different - real pulse/glitch */
2117  if (!outchg)
2118   {
2119    if (__warn_cancel_e && !__no_warns && !__em_suppr(592))
2120     emit_pulsewarn(gp, tevp, &(tevp->etime), &schtim, "drives glitch");
2121 
2122    /* if spike on set to x at time of previous change not inertial */
2123    if (__show_cancel_e)
2124     {
2125      if (__showe_onevent) sprintf(s2, "%s (on event)", __to_timstr(__xs,
2126       &(tevp->etime)));
2127      else sprintf(s2, "%s (on detect)", __to_timstr(__xs, &__simtime));
2128      __tr_msg(
2129       " DEL, PEND AT %s PULSE <OV=NSV=%s, OSV=%s SHOWING X AT %s MAYBE SWITCHED>\n",
2130       __to_timstr(s1, &(tevp->etime)), __to_gonam(vs1, gp, __old_gateval),
2131       __to_gonam(vs2, gp, tevp->outv), s2);
2132 
2133      if (__showe_onevent)
2134       { tevp->outv = get_showcancele_val(gp); return; }
2135 
2136      /* immediate assign then cancel */
2137 on_detect_show_x:
2138      __new_gateval = get_showcancele_val(gp);
2139      if (tevp->gev_acc) acc_evchg_gate_outwire(gp);
2140      else change_gate_outwire(gp);
2141      /* newly scheduled to same so no event */
2142      tevp->te_cancel = TRUE;
2143      __inertial_cancels++;
2144      gp->schd_tevs[__inum] = -1;
2145      return;
2146     }
2147    /* newly scheduled to same (pulse) so no event */
2148    tevp->te_cancel = TRUE;
2149    __inertial_cancels++;
2150    gp->schd_tevs[__inum] = -1;
2151    /* SJM 01/21/02 - msg unclear since new sched value was missing */
2152    __tr_msg(" DEL, PEND, PULSE, INERTIAL CANCEL AT %s <OV=%s, OSV=%s NSV=%s>\n",
2153     __to_timstr(s1, &(tevp->etime)), __to_gonam(vs1, gp, __old_gateval),
2154     __to_gonam(vs2, gp, tevp->outv), __to_gonam(vs3, gp, __new_gateval));
2155    return;
2156   }
2157 
2158  /* new schedule to same value case */
2159  /* know that delay same and later so just discard new event */
2160  /* done silently here - trace message only */
2161  if (tevp->outv == (byte) __new_gateval)
2162   {
2163    __tr_msg(
    " DEL, MODEL ANOMALY IGNORE SCHED TO SAME <OSV=NSV=%s> OLD AT %s NEW %s\n",
2165     __to_gonam(vs1, gp, __new_gateval), __to_timstr(s1, &(tevp->etime)),
2166     __to_timstr(s2, &schtim));
2167    return;
2168   }
2169 
2170  /* normal inertial reschedule */
2171  if (__warn_cancel_e && !__no_warns && !__em_suppr(592))
2172   emit_pulsewarn(gp, tevp, &(tevp->etime), &schtim, "unstable");
2173 
2174  if (__show_cancel_e)
2175   {
2176    if (__showe_onevent) __to_timstr(s2, &schtim);
2177    else __to_timstr(s2, &__simtime);
2178    if (__showe_onevent) sprintf(s1, "%s (on event)", __to_timstr(__xs,
2179     &(tevp->etime)));
2180    else sprintf(s1, "%s (on detect)", __to_timstr(s1, &__simtime));
2181    __tr_msg(
2182     " DEL, PEND AT %s, UNSTABLE <OV=%s, OSV=%s, NSV=%s SHOWING X AT %s MAYBE SWITCHED>\n",
2183     __to_timstr(s1, &(tevp->etime)), __to_gonam(vs1, gp, __old_gateval),
2184     __to_gonam(vs2, gp, tevp->outv), __to_gonam(vs3, gp, __new_gateval), s2);
2185 
2186    if (__showe_onevent) { tevp->outv = get_showcancele_val(gp); return; }
2187    goto on_detect_show_x;
2188   }
2189 
2190  __tr_msg(" DEL, PEND, UNSTABLE RESCHD <OV=%s, OSV=%s AT %s, NSV=%s AT %s>\n",
2191   __to_gonam(vs1, gp, __old_gateval),
2192   __to_gonam(vs2, gp, tevp->outv), __to_timstr(s1, &(tevp->etime)),
2193   __to_gonam(vs3, gp, __new_gateval), __to_timstr(s2, &schtim));
2194  tevpi = reschedule_1gev(tevpi, gdel, schtim, __new_gateval, FALSE);
2195 }
2196 
2197 /*
2198  * compute show cancel x value depending on gate class and strength
2199  *
2200  * idea is to change event change to value but time still start of
2201  * region after place where gate may or may not have switched
2202  */
static word32 get_showcancele_val(struct gate_t *gp)
2204 {
2205  int32 nd_stren = FALSE;
2206 
2207  switch ((byte) gp->g_class) {
2208   /* these never have strength value */
2209   case GC_LOGIC: case GC_UDP:
2210    if (gp->g_hasst) nd_stren = TRUE;
2211    break;
2212   case GC_BUFIF: nd_stren = TRUE; break;
2213   case GC_MOS: case GC_CMOS:
2214    /* LOOKATME - since mos gates pass strength for now driving strong x */
2215    /* maybe could take strength from input? and leave z */
2216    return(ST_STRONGX);
2217   /* for tranif input spike sched. only, x is turned off (0) */
2218   case GC_TRANIF: return(0);
2219   default: __case_terr(__FILE__, __LINE__);
2220  }
2221  /* notice no need to correct for possible highz strength since value x */
2222  /* SJM 08/07/01 - this works because with val x need both 0 and 1 strens */
2223  if (nd_stren) return((gp->g_stval << 2) | 3);
2224  return(3);
2225 }
2226 
2227 /*
2228  * emit an object name for tracing with path
2229  * cannot use __xs in here
2230  */
static char *to_evtronam(char *s, char *onam, struct itree_t *teitp,
2232  struct task_t *tskp)
2233 {
2234  char s1[RECLEN], s2[RECLEN];
2235 
2236  sprintf(s, "%s.%s", __msg_blditree(s2, teitp, tskp), __schop(s1, onam));
2237  return(s);
2238 }
2239 
2240 /*
2241  * schedule 1 gate event
2242  * expects __new_gateval to contain value to schedule to
2243  */
static i_tev_ndx schedule_1gev(register struct gate_t *gp, word64 gdel,
2245  word64 schtim, int32 is_acc)
2246 {
2247  register i_tev_ndx tevpi;
2248  register struct tev_t *tevp;
2249 
2250  alloc_tev_(tevpi, TE_G, __inst_ptr, schtim);
2251  if (gdel == 0ULL)
2252   {
2253    /* this is #0, but must still build tev */
2254    if (__p0_te_hdri == -1) __p0_te_hdri = __p0_te_endi = tevpi;
2255    else { __tevtab[__p0_te_endi].tenxti = tevpi; __p0_te_endi = tevpi; }
2256   }
2257  else __insert_event(tevpi);
2258 
2259  gp->schd_tevs[__inum] = tevpi;
2260  tevp = &(__tevtab[tevpi]);
2261  tevp->tu.tegp = gp;
2262  /* if logic or udp, no strength, event processing assign will handle */
2263  /* if bufif, mos or cmos, know has strength, if tranif conducting state */
2264  tevp->outv = (byte) __new_gateval;
2265  /* acc. store only if gate is acc. and no conn. wire delay (resist off) */
2266  /* is acc T only if called from acc routine because acc routine never */
2267  /* possible for real resistive mos or tran */
2268  /* for mos with delay, can get here with g pdst on but is acc off */
2269  tevp->gev_acc = (is_acc && !gp->g_pdst);
2270  return(tevpi);
2271 }
2272 
2273 /*
2274  * take event and new value and either update if time same or cancel and
2275  * create new event if later
2276  */
static i_tev_ndx reschedule_1gev(i_tev_ndx tevpi, word64 gdel, word64 newtim,
2278  word32 newoutv, int32 is_acc)
2279 {
2280  struct tev_t *tevp;
2281 
2282  tevp = &(__tevtab[tevpi]);
2283  /* if del == 0 (pnd0), will always be same time reschedule */
2284  if (gdel == 0ULL)
2285   {
2286    /* new value replaces old - must also be in event */
2287    __newval_rescheds++;
2288    tevp->outv = (byte) newoutv;
2289    return(tevpi);
2290   }
2291 
2292  /* case 3c-3 - more in future, cancel and reschedule */
2293  tevp->te_cancel = TRUE;
2294  __inertial_cancels++;
2295  tevpi = schedule_1gev(tevp->tu.tegp, gdel, newtim, is_acc);
2296  tevp->outv = (byte) newoutv;
2297  return(tevpi);
2298 }
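
/*
 * Illustrative sketch (not part of the simulator): the prop_gatechg and
 * reschedule_1gev logic above implements inertial delay - a pending output
 * transition is cancelled or retargeted when the output is recomputed before
 * the event matures, so pulses narrower than the gate delay never reach the
 * wire.  The standalone model below shows only the decision structure and
 * ignores #0 and show-cancel handling; the names and pend record are
 * hypothetical.
 */
struct sketch_pend_t { int valid; unsigned long long time; unsigned val; };

static void sketch_inertial_sched(struct sketch_pend_t *pend,
 unsigned long long now, unsigned long long delay, unsigned curval,
 unsigned newval)
{
 if (!pend->valid)
  {
   /* no pending event - schedule only if the output actually changed */
   if (newval == curval) return;
   pend->valid = 1; pend->time = now + delay; pend->val = newval;
   return;
  }
 if (newval == curval)
  {
   /* pending different value but output back to old - glitch, cancel */
   pend->valid = 0;
   return;
  }
 if (newval == pend->val) return;     /* already scheduled to this value */
 /* unstable - inertial reschedule to the new value and time */
 pend->time = now + delay;
 pend->val = newval;
}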
2299 
2300 /*
2301  * emit the pulse (inertial reschedule) warning if not turned off
 * types are "drives glitch" for a normal spike and "unstable" for a change
 * that does nothing but interfere with the transition
2304  */
static void emit_pulsewarn(struct gate_t *gp, struct tev_t *tevp,
2306  word64 *etim, word64 *newetim, char *sptnam)
2307 {
2308  char s1[RECLEN], s2[RECLEN], s3[10], s4[10], s5[10];
2309 
2310  /* must turn on spike analysis */
2311  if (__show_cancel_e)
2312   {
2313    if (__showe_onevent) strcpy(s1, " - edge event to x");
2314    else strcpy(s1, " - now detect to x");
2315   }
2316  else strcpy(s1, "");
2317 
2318  sprintf(s2, "old %s, scheduled %s, new %s%s",
2319   __to_gonam(s3, gp, __old_gateval), __to_gonam(s4, gp, tevp->outv),
2320   __to_gonam(s5, gp, __new_gateval), s1);
2321  /* notice spike means new and old the same */
2322  __gfwarn(592, gp->gsym->syfnam_ind, gp->gsym->sylin_cnt,
2323   "%s gate %s.%s %s (edge at %s replaced by new at %s) - %s",
2324   gp->gmsym->synam, __msg2_blditree(s1, tevp->teitp), gp->gsym->synam,
2325    sptnam, __to_timstr(__xs, etim), __to_timstr(__xs2, newetim), s2);
2326 }
2327 
2328 word32 __pow3tab[] = { 1, 3, 9, 27, 81, 243, 729, 2187, 6561, 19683, 59049 };
2329 
2330 /*
2331  * process a gate change event - new level has reached output
2332  * know thing connected to gate output is 1 bit (bit select if needed)
2333  * but maybe has strength
2334  */
static void process_gatechg_ev(register struct tev_t *tevp)
2336 {
2337  register struct gate_t *gp;
2338 
2339  /* notice event here emitted in change gate outwire */
2340  gp = tevp->tu.tegp;
2341  __new_gateval = tevp->outv;
2342  if (__ev_tracing) emit_gev_trace(gp, tevp);
2343  gp->schd_tevs[__inum] = -1;
2344  /* this handle added the 1 bit net change */
2345  if (tevp->gev_acc) acc_evchg_gate_outwire(gp);
2346  else change_gate_outwire(gp);
2347 }
2348 
2349 /*
2350  * accelerated routine for assigning a xl type simple gate event
2351  *
2352  * if assigned to net fi>1 (maybe from run time vpi_put_value) or has
 * delay, cannot use accelerated event processing
2354  * this requires new value in new gateval global
2355  */
static void acc_evchg_gate_outwire(register struct gate_t *gp)
2357 {
2358  register struct expr_t *xp;
2359  register word32 uwrd, ngav, ngbv;
2360  register struct net_t *lhsnp;
2361  int32 biti, gwid;
2362  word32 igav, igbv;
2363 
2364  __immed_assigns++;
2365  ngav = __new_gateval & 1L;
2366  ngbv = __new_gateval >> 1;
2367  xp = gp->gpins[0];
2368  uwrd = (word32) gp->gstate.bp[__inum];
2369  if ((gwid = gp->gpnum) < 3)
2370   {
   /* update state with computed output value in bits 1 and 3 */
2372    uwrd = uwrd & ~(0xaL);
2373    uwrd |= ((ngav << 1) | (ngbv << 3));
2374   }
2375  else
2376   {
2377    /* mask off separated value bits to update output value in uwrd */
2378    uwrd = uwrd & ~(1L << (gwid - 1)) & ~(1L << (2*gwid - 1));
2379    /* works because ngav and ngbv exactly 1 low bit */
2380    uwrd |= ((ngav << (gwid - 1)) | (ngbv << (2*gwid - 1)));
2381   }
2382  gp->gstate.bp[__inum] = (byte) uwrd;
2383 
2384  /* accelerated assign to pin 0 (output) */
2385  if (xp->optyp == ID)
2386   {
2387    lhsnp = xp->lu.sy->el.enp;
2388    if (lhsnp->frc_assgn_allocated)
2389     {
2390      igav = ngav; igbv = ngbv;
2391      if (!__correct_forced_newwireval(lhsnp, &igav, &igbv))
2392       goto try_trace;
2393     }
2394    /* here since avoiding value store, need to add net change el. */
2395    chg_st_scalval_(lhsnp->nva.bp, ngav, ngbv);
2396    if (__lhs_changed) record_nchg_(lhsnp);
2397   }
2398  else
2399   {
2400    lhsnp = xp->lu.x->lu.sy->el.enp;
2401    biti = (int32) __contab[xp->ru.x->ru.xvi];
2402    /* if the 1 bit is forced nothing to do else normal assign */
2403    if (lhsnp->frc_assgn_allocated
2404     && __forced_inhibit_bitassign(lhsnp, xp->lu.x, xp->ru.x)) goto try_trace;
2405    /* notice this adds the net chg element if needed */
2406    __chg_st_bit(lhsnp, biti, ngav, ngbv);
2407   }
2408 try_trace:
2409  if (__ev_tracing) trace_chg_gateout(gp, xp);
2410 }
2411 
2412 /*
2413  * emit gate event process trace message
2414  */
static void emit_gev_trace(struct gate_t *gp, struct tev_t *tevp)
2416 {
2417  char s1[RECLEN], s2[RECLEN], vs1[10];
2418 
2419  __evtr_resume_msg();
2420  if (gp->gpins[0]->x_multfi) strcpy(s2, "this driver of multiple:");
2421  else strcpy(s2, "the fi=1 driver:");
2422  __tr_msg("-- %s gate %s processing store event to output, %s %s\n",
2423   gp->gmsym->synam, to_evtronam(s1, gp->gsym->synam, tevp->teitp,
2424   (struct task_t *) NULL), s2, __to_gonam(vs1, gp, __new_gateval));
2425 }
2426 
2427 /*
 * add a net change record when entire net changes (usually scalar)
2429  *
2430  * add to end of next pass, netchg list elements
2431  * notice this needs itstk of target wire for xmr
2432  */
extern void __add_nchglst_el(register struct net_t *np)
2434 {
2435  register struct nchglst_t *nchglp;
2436 
2437  if (__nchgfreelst == NULL)
2438   nchglp = (struct nchglst_t *) __my_malloc(sizeof(struct nchglst_t));
2439  else
2440   {
2441    nchglp = __nchgfreelst;
2442    __nchgfreelst = __nchgfreelst->nchglnxt;
2443 
2444    /* DBG LINUX ADDME ??? */
2445    /* chk_nchgnlst(__nchgfreelst); */
2446    /* --- */
2447   }
2448 
2449  /* only turn on bit if all changed, each subrange goes on by itself */
2450  nchglp->chgnp = np;
2451  nchglp->nchg_itp = __inst_ptr;
2452  nchglp->bi1 = -1;
2453  nchglp->delayed_mipd = FALSE;
2454 
 /* all needed change info for this time slot is now recorded */
2456  np->nchgaction[__inum] |= NCHG_ALL_CHGED;
2457 
2458  nchglp->nchglnxt = NULL;
2459  /* LOOKATME - maybe add dummy list element on front to avoid comparison */
2460  if (__nchg_futend != NULL)
2461   { __nchg_futend->nchglnxt = nchglp; __nchg_futend = nchglp; }
2462  else __nchg_futhdr = __nchg_futend = nchglp;
2463 
2464  /* DBG remove --- */
2465  if (__debug_flg && __ev_tracing)
2466   {
2467    __tr_msg("-- added net change element %s\n",
2468     __to_evtrwnam(__xs, np, -1, -1, __inst_ptr));
2469   }
2470  /* --- */
2471 }
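
/*
 * Illustrative sketch (not part of the simulator): the net change lists above
 * recycle their elements through a singly linked free list instead of calling
 * the allocator for every change in every time slot.  The hypothetical
 * helpers below show the pattern with a generic node type.
 */
struct sketch_node_t { struct sketch_node_t *nxt; };

static struct sketch_node_t *sketch_freelst = NULL;

/* pop a recycled node if one is available, else caller must allocate one */
static struct sketch_node_t *sketch_get_node(void)
{
 struct sketch_node_t *p;

 if (sketch_freelst == NULL) return(NULL);
 p = sketch_freelst;
 sketch_freelst = p->nxt;
 return(p);
}

/* push a finished node back for reuse in a later time slot */
static void sketch_free_node(struct sketch_node_t *p)
{
 p->nxt = sketch_freelst;
 sketch_freelst = p;
}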
2472 
2473 /*
2474  * add a net change record select range (usually bit) changed
2475  *
2476  * add to end of next pass, netchg list elements
2477  * notice this needs inst. loc of target wire for xmr
2478  */
extern void __add_select_nchglst_el(register struct net_t *np, register int32 i1,
2480  register int32 i2)
2481 {
2482  register struct nchglst_t *nchglp;
2483 
2484  if (__nchgfreelst == NULL)
2485   nchglp = (struct nchglst_t *) __my_malloc(sizeof(struct nchglst_t));
2486  else
2487   {
2488    nchglp = __nchgfreelst;
2489    __nchgfreelst = __nchgfreelst->nchglnxt;
2490    /* DBG LINUX ADDME */
2491    /* chk_nchgnlst(__nchgfreelst); */
2492    /* --- */
2493   }
2494 
2495  /* only turn on bit if all changed, each subrange goes on by itself */
2496  nchglp->chgnp = np;
2497  nchglp->nchg_itp = __inst_ptr;
2498  nchglp->bi1 = i1;
2499  nchglp->bi2 = i2;
2500  /* AIV 04/30/07 - was not init the delay_mipd flag */
2501  nchglp->delayed_mipd = FALSE;
2502 
2503  /* here since range not marked as all changed so will match ranges */
2504 
2505  /* link on end since good heuristic to process in change order */
2506  nchglp->nchglnxt = NULL;
2507  /* LOOKATME - maybe add dummy list element on front to avoid comparison */
2508  if (__nchg_futend != NULL)
2509   { __nchg_futend->nchglnxt = nchglp; __nchg_futend = nchglp; }
2510  else __nchg_futhdr = __nchg_futend = nchglp;
2511 
2512  /* DBG remove ---
2513  if (__debug_flg && __ev_tracing)
2514   {
2515    __tr_msg("-- added net change element %s\n",
2516     __to_evtrwnam(__xs, np, i1, i2, __inst_ptr));
2517   }
2518  --- */
2519 }
2520 
2521 /*
2522  * add a dumpvars change element
2523  * only called first time for entire net in time slot
2524  */
extern void __add_dmpv_chglst_el(struct net_t *np)
2526 {
2527  register struct dvchgnets_t *dvchgp;
2528 
2529  if (__dv_netfreelst == NULL)
2530    dvchgp = (struct dvchgnets_t *) __my_malloc(sizeof(struct dvchgnets_t));
2531  else
2532   {
2533    dvchgp = __dv_netfreelst;
2534    __dv_netfreelst = __dv_netfreelst->dvchgnxt;
2535   }
2536  /* indicate for this time slot inst of var already changed once */
2537  np->nchgaction[__inum] &= ~(NCHG_DMPVNOTCHGED);
2538  /* set the net and link the change on the front */
2539  dvchgp->dvchg_np = np;
2540  dvchgp->dvchg_itp = __inst_ptr;
2541 
2542  /* link on front since order does not matter */
2543  dvchgp->dvchgnxt = __dv_chgnethdr;
2544  __dv_chgnethdr = dvchgp;
2545  /* must indicate need for end of slot dv processing */
2546  __slotend_action |= SE_DUMPVARS;
2547 }
2548 
2549 /*
2550  * assign gate output to driven wire
2551  * this assumes __new_gateval previously set
2552  * works for both 8 bit strength and 2 bit non strength values
2553  *
2554  * notice simultaneously when changing wire (or scheduling if wire has delay)
2555  * must store gate state output - needed since when evaluating driver for
2556  * gate with delay need previous until actual store
2557  */
static void change_gate_outwire(register struct gate_t *gp)
2559 {
2560  register word32 *wp;
2561  register struct expr_t *xp;
2562  register int32 bi, wi;
2563  int32 schd_wire, nins, srep;
2564  hword *hwp;
2565  word32 av, bv;
2566  byte sb2, *sbp;
2567  word32 uwrd;
2568  struct xstk_t *xsp;
2569 
2570  __immed_assigns++;
2571  xp = gp->gpins[0];
2572  switch ((byte) gp->g_class) {
2573   case GC_LOGIC:
2574    /* SJM 02/07/01 - remove st gstate out routine since only for logic */
2575    /* this removes inner loop case stmt */
2576 
2577    /* must store new gate value into state here - value does not have stren */
2578    nins = gp->gpnum - 1;
2579    if (nins > 15) srep = SR_VEC; else srep = SR_PVEC;
2580    /* FIXME - why are these not wrd? */
2581    av = ((word32) __new_gateval) & 1;
2582    bv = ((word32) __new_gateval) >> 1;
2583    gate_st_bit(gp->gstate, nins + 1, nins, srep, av, bv);
2584    break;
2585   case GC_UDP:
2586    /* new gateval for udp does not have strength - maybe added in store */
2587    /* tricky part for wide udp's - must update running signature if present */
2588    __cur_udp = gp->gmsym->el.eudpp;
2589    nins = __cur_udp->numins;
2590    bi = 2*nins;
2591    /* comb means no state - edge always has state */
2592    if (__cur_udp->u_wide)
2593     {
2594      wp = &(gp->gstate.wp[2*__inum]);
2595      /* update running 2nd state signature word32 */
2596      if (__cur_udp->utyp != U_COMB)
2597       {
2598        /* need to access old gate value (out about to change) */
2599        /* since for event schedule will not be stored */
2600        /* during initialize this will be meaningless 0 */
2601        __old_gateval = (wp[0] >> (2*nins)) & 3L;
2602 
2603        /* correct running index of output since part of state if not comb. */
2604        /* num ins is index of state */
2605        /* subtract off old contribution of state output */
2606        wp[1] -= ((__old_gateval == 3) ? 2 : __old_gateval)*__pow3tab[nins];
2607        /* add in new contribution of state output */
2608        wp[1] += ((__new_gateval == 3) ? 2 : __new_gateval)*__pow3tab[nins];
2609       }
2610      /* update first value word32 */
2611      wp[0] &= ~(3L << bi);
2612      wp[0] |= (__new_gateval << bi);
2613     }
   /* do the gate state output store */
2615    /* udp state stored as nins 2 bit vals + out/state 2 bits */
2616    else
2617     {
2618      hwp = &(gp->gstate.hwp[__inum]);
     /* -- RELEASE remove
2620      if (__debug_flg && __ev_tracing)
2621       __tr_msg("-- st udp out old %x\n", *hwp);
2622      -- */
2623      *hwp &= ~(3 << bi);
2624      *hwp |= (hword) (__new_gateval << bi);
2625      /* -- RELEASE remove ---
2626      if (__debug_flg && __ev_tracing)
2627       __tr_msg(" new %x\n", *hwp);
2628      -- */
2629     }
2630    break;
2631   case GC_BUFIF:
2632    /* store new gate val into bufif state - here must merge in stren */
2633    hwp = &(gp->gstate.hwp[__inum]);
2634    hwp[0] &= ~(0xff << 4);
2635    hwp[0] |= ((hword) (__new_gateval << 4));
2636    goto do_hasstren_assign;
2637   case GC_MOS:
2638    /* store new gate val into mos state */
2639    wp = &(gp->gstate.wp[__inum]);
2640    wp[0] &= ~(0xff << 16);
2641    wp[0] |= (__new_gateval << 16);
2642    goto do_hasstren_assign;
2643   case GC_CMOS:
2644    /* store new gate val into cmos state */
2645    wp = &(gp->gstate.wp[__inum]);
2646    wp[0] &= ~(0xff << 24);
2647    wp[0] |= (__new_gateval << 24);
2648 
2649 do_hasstren_assign:
2650    /* then assign - here strength variable and new gateval has strength */
2651    if (xp->x_multfi) __mdr_assign_or_sched(xp);
2652    else
2653     {
2654      if (xp->lhsx_ndel && !__wire_init) schd_wire = TRUE;
2655      else schd_wire = FALSE;
2656 
2657      /* 07/08/00 - if gate (always 1 bit) drives wider vec must initialize */
2658      /* other bits to z since only has 1 driver */
2659      push_xstk_(xsp, 4*xp->szu.xclen);
2660      sbp = (byte *) xsp->ap;
2661      set_byteval_(sbp, xp->szu.xclen, ST_HIZ);
2662      /* set the low bit */
2663      sbp[0] = (byte) __new_gateval;
2664      __exec_conta_assign(xp, (word32 *) sbp, (word32 *) NULL, schd_wire);
2665      __pop_xstk();
2666     }
2667    goto done;
2668   case GC_TRANIF:
2669    /* out wire here is conducting state from 3rd input */
2670    /* SJM 12/13/00 - serious malloc bug was using bit ofset */
2671    wi = get_wofs_(2*__inum);
2672    bi = get_bofs_(2*__inum);
2673    /* 2 bits give conducting state */
2674    if (__new_gateval == 2) __new_gateval = 3;
2675    gp->gstate.wp[wi] &= ~(3L << bi);
2676    gp->gstate.wp[wi] |= (__new_gateval << bi);
2677    if (__ev_tracing)
2678     {
2679      __tr_msg("-- relaxing %s in switch channel\n",
2680       __gstate_tostr(__xs, gp, TRUE));
2681     }
2682    /* tranif enable changed must evaluate channel */
2683    /* LOOKATME think this only needs to be called if from/to 0, x same as 1 */
2684    /* SJM 04/11/00 - put back so immediately perturb both terminal vertices */
2685    __immed_eval_trifchan(gp);
2686    return;
2687   /* tran can never get here */
2688   default: __case_terr(__FILE__, __LINE__);
2689  }
2690 
2691  /* store gate or udp where value maybe needs constant strength added */
2692  /* gate state now updated, must assign to wire or schedule assign */
2693  if (xp->x_multfi) __mdr_assign_or_sched(xp);
2694  else
2695   {
2696    /* even though decl. no need for z extend since 1 bit max. wide */
2697    if (xp->lhsx_ndel && !__wire_init) schd_wire = TRUE;
2698    else schd_wire = FALSE;
2699    /* notice lhs cannot be concat here and know source and dest 1 bit */
2700    if (xp->x_stren)
2701     {
2702      /* here any strength constant */
2703      /* notice always need to add strength logic gate cannot drive z */
2704      if (__new_gateval == 2) sb2 = 2;
2705      else
2706       {
2707        /* notice tran that uses g st val for mark never goes here */
2708        uwrd = __new_gateval | (gp->g_stval << 2);
2709        uwrd = (word32) __stren_map_tab[uwrd];
2710        sb2 = (byte) uwrd;
2711       }
2712      /* 07/08/00 - if gate (always 1 bit) drives wider vec must initialize */
2713      /* other bits to z since only has 1 driver */
2714      push_xstk_(xsp, 4*xp->szu.xclen);
2715      sbp = (byte *) xsp->ap;
2716      set_byteval_(sbp, xp->szu.xclen, ST_HIZ);
2717      /* set the low bit */
2718      sbp[0] = sb2;
2719      __exec_conta_assign(xp, (word32 *) sbp, (word32 *) NULL, schd_wire);
2720      __pop_xstk();
2721     }
2722    else
2723     {
2724      av = __new_gateval & 1L;
2725      bv = __new_gateval >> 1;
2726      /* assign needed although only bit select or 1 bit wire, can be xmr */
2727      __exec_conta_assign(xp, &av, &bv, schd_wire);
2728     }
2729   }
2730 
2731 done:
2732  /* SJM 11/27/00 - know out changed, state has been updated and strength */
2733  /* competition done to set new wire value - this call back monitors the */
 /* gate state so it does not matter if after wire changed */
2735  if (__have_vpi_gateout_cbs)
2736   {
2737    int32 gi, tevpi;
2738 
2739    gi = gp - __inst_mod->mgates;
2740    if (__inst_mod->mgateout_cbs != NULL && __inst_mod->mgateout_cbs[gi] != NULL
2741     && (tevpi = __inst_mod->mgateout_cbs[gi][__inum]) != -1)
2742     {
2743      __exec_vpi_gateoutcbs(tevpi);
2744     }
2745   }
2746 
2747  if (__ev_tracing) trace_chg_gateout(gp, xp);
2748 }
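
/*
 * Illustrative sketch (not part of the simulator): the wide udp case above
 * keeps a running base 3 signature in the second state word so a table index
 * can be updated incrementally - each input or state value is one base 3
 * digit, and changing one position only subtracts that digit's old
 * contribution and adds the new one (the role of __pow3tab).  The helper
 * below restates the update; its name and interface are hypothetical.
 */
static word32 sketch_upd_base3_sig(word32 sig, int32 pos, word32 oldval,
 word32 newval)
{
 static word32 pow3[] = { 1, 3, 9, 27, 81, 243, 729, 2187, 6561, 19683,
  59049 };

 /* the 2 bit codes 0 and 1 map to digits 0 and 1, z (2) and x (3) to 2 */
 sig -= ((oldval == 3) ? 2 : oldval)*pow3[pos];
 sig += ((newval == 3) ? 2 : newval)*pow3[pos];
 return(sig);
}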
2749 
2750 /*
2751  * trace message after change gate outwire
2752  */
static void trace_chg_gateout(struct gate_t *gp, struct expr_t *xp)
2754 {
2755  char s1[RECLEN], s2[RECLEN], vs1[10], vs2[10];
2756 
2757  if (xp->lhsx_ndel && !__wire_init) strcpy(vs1, "schedule");
2758  else strcpy(vs1, "assign");
2759  if (gp->g_class == GC_UDP) strcpy(vs2, "udp"); else strcpy(vs2, "gate");
2760  __tr_msg("-- %s %s event output %s, %s to %s\n", vs1,
2761   __gstate_tostr(__xs, gp, TRUE), vs2, __to_gassign_str(s1, xp),
2762   __msgexpr_tostr(s2, xp));
2763 }
2764 
2765 /* SJM 02/07/01 - removed st gstate out routine - moved only used logic */
2766 
2767 /*
2768  * store into coded wp of length blen at biti for current instance
2769  * that is stored according to srep format from low 2 bits of rgap
2770  * notice bits are separated into a and b parts in gate representation
2771  *
2772  * this differs from lhs bit select in accessing value for current instance
2773  * and adjusting place to select from according to storage representation
2774  * cannot be used to access array or strength value and called
 * with a known good index (not -1)
2776  * this is for logic gates only
2777  */
static void gate_st_bit(union pck_u pckv, int32 blen, int32 biti, int32 srep,
2779  register word32 av, register word32 bv)
2780 {
2781  register word32 uwrd, ouwrd;
2782  word32 *rap;
2783  int32 wlen;
2784 
2785  /* this is same as full value store - biti 0 or will not get here */
2786  switch ((byte) srep) {
2787   case SR_SCAL: gate_st_scalval(pckv.wp, av, bv); return;
2788   case SR_VEC:
2789    wlen = wlen_(blen);
2790    /* rap is base of vector for current inst */
2791    rap = &(pckv.wp[2*wlen*__inum]);
2792    __lhsbsel(rap, biti, av);
2793    __lhsbsel(&(rap[wlen]), biti, bv);
2794    return;
2795   case SR_PVEC:
2796    /* SJM 12/19/99 - notice gates still packed into bp, hwp, wp not just word32 */
2797    ouwrd = get_packintowrd_(pckv, __inum, blen);
2798    uwrd = ouwrd & ~(1L << biti) & ~(ouwrd & (1L << (blen + biti)));
2799    uwrd |= ((av & 1L) << biti) | ((bv & 1L) << (blen + biti));
2800    if (uwrd != ouwrd)
2801     {
2802      st_packintowrd_(pckv, __inum, uwrd, blen);
2803     }
2804    return;
2805   default: __case_terr(__FILE__, __LINE__);
2806  }
2807 }
2808 
2809 /*
2810  * store a gate packed into 2 bits scalar
2811  * coded as 2 contiguous bits per instance
2812  * assuming shift by 0 legal and gets right answer in C
2813  */
static void gate_st_scalval(register word32 *wp, register word32 av,
2815  register word32 bv)
2816 {
2817  register int32 bi;
2818  int32 dbi, dwi;
2819 
2820  bi = 2*__inum;
2821  dwi = get_wofs_(bi);
2822  dbi = get_bofs_(bi);
2823  wp[dwi] &= ~(3L << dbi);
2824  wp[dwi] |= ((av | (bv << 1)) << dbi);
2825 }
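
/*
 * Illustrative sketch (not part of the simulator): gate_st_scalval above
 * packs one 2 bit (a,b) value per instance into an array of 32 bit words;
 * get_wofs_ and get_bofs_ presumably turn the absolute bit index 2*__inum
 * into a word index and a bit offset.  An open coded version of that
 * addressing, with a hypothetical name:
 */
static void sketch_st_2bit(word32 *wp, word32 inst, word32 val2)
{
 word32 bi, wi, bofs;

 bi = 2u*inst;          /* absolute bit index of this instance's value */
 wi = bi >> 5;          /* containing 32 bit word */
 bofs = bi & 0x1fu;     /* bit offset within that word */
 wp[wi] &= ~(3u << bofs);
 wp[wi] |= (val2 << bofs);
}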
2826 
2827 /*
2828  * change input i part of gstate vector for mos style gate
2829  * strens passed thru so must load with stren even if driver no stren
2830  * returns false if new input value is same as old
2831  * this requires correct cur. itp
2832  */
static int32 chg_mos_instate(register struct gate_t *gp, word32 i)
2834 {
2835  register word32 uwrd;
2836  register struct expr_t *ndp;
2837  register byte *sbp;
2838  struct xstk_t *xsp;
2839 
2840  uwrd = gp->gstate.wp[__inum];
2841  /* split because change scheduled or assigned if only strength changed */
2842  if (i == 1)
2843   {
2844    ndp = gp->gpins[1];
2845    /* if port a reg cannot have strength, this will add strong */
2846    xsp = __ndst_eval_xpr(ndp);
2847    sbp = (byte *) xsp->ap;
2848    __new_inputval = sbp[0];
2849    __pop_xstk();
2850    __old_inputval = uwrd & 0xffL;
2851    if (__new_inputval == __old_inputval) return(FALSE);
2852    uwrd &= ~0xffL;
2853    uwrd |= __new_inputval;
2854   }
2855  else
2856   {
2857    /* control input should not have strength but if does ignored */
2858    xsp = __eval_xpr(gp->gpins[2]);
2859    __new_inputval = (xsp->ap[0] & 1L) | ((xsp->bp[0] & 1L) << 1);
2860    __pop_xstk();
2861    __old_inputval = (uwrd >> 8) & 3L;
2862    if (__new_inputval == __old_inputval) return(FALSE);
2863    uwrd &= ~(3L << 8);
2864    uwrd |= (__new_inputval << 8);
2865   }
2866  gp->gstate.wp[__inum] = uwrd;
2867  return(TRUE);
2868 }
2869 
2870 /*
2871  * change input i part of gstate vector for cmos 2 ctrl input style gate
2872  * strens passed thru so must load with stren even if driver no stren
2873  * returns false if new input value is same as old
2874  * this requires correct cur. itp
2875  *
 * format is 4 8 bit fields (0th input data, 1 nmos in, 2 pmos in, 3 output)
2877  * but only 2 bits of 1st and 2nd control inputs used
2878  * notice input starts at 1 because output is pos. 0
2879  */
2880 static int32 chg_cmos_instate(register struct gate_t *gp, word32 i)
2881 {
2882  register word32 uwrd;
2883  register byte *sbp;
2884  register struct expr_t *ndp;
2885  register struct xstk_t *xsp;
2886 
2887  uwrd = gp->gstate.wp[__inum];
2888  /* split because change scheduled or assigned if only strength changed */
2889  if (i == 1)
2890   {
2891    ndp = gp->gpins[1];
2892    xsp = __ndst_eval_xpr(ndp);
2893    sbp = (byte *) xsp->ap;
2894    __new_inputval = sbp[0];
2895    __pop_xstk();
2896    __old_inputval = uwrd & 0xffL;
2897    if (__new_inputval == __old_inputval) return(FALSE);
2898    uwrd &= ~0xffL;
2899    uwrd |= __new_inputval;
2900   }
2901  else
2902   {
2903    /* control inputs should not have strength but if they do it is removed */
2904    xsp = __eval_xpr(gp->gpins[i]);
2905    __new_inputval = (xsp->ap[0] & 1L) | ((xsp->bp[0] & 1L) << 1);
2906    __pop_xstk();
2907    if (i == 2)
2908     {
2909      /* n ctrl bits 15-8 */
2910      __old_inputval = (uwrd >> 8) & 3L;
2911      if (__new_inputval == __old_inputval) return(FALSE);
2912      uwrd &= ~(3L << 8);
2913      uwrd |= (__new_inputval << 8);
2914     }
2915    else
2916     {
2917      /* p ctrl bits 23-16 */
2918      __old_inputval = (uwrd >> 16) & 3L;
2919      if (__new_inputval == __old_inputval) return(FALSE);
2920      uwrd &= ~(3L << 16);
2921      uwrd |= (__new_inputval << 16);
2922     }
2923   }
2924  gp->gstate.wp[__inum] = uwrd;
2925  return(TRUE);
2926 }
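
/*
 * layout sketch of the per instance cmos gstate word as implied by the
 * shifts above (a reading aid only, no new behavior):
 *   bits  7-0   data input value/strength byte      (gpins[1], i == 1)
 *   bits 15-8   nmos control, only low 2 bits used  (gpins[2], i == 2)
 *   bits 23-16  pmos control, only low 2 bits used  (gpins[3], i == 3)
 */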
2927 
2928 /*
2929  * TRANIF EVALUATION ROUTINES
2930  */
2931 
2932 /*
2933  * evaluate tranif when third enable port changes
2934  */
2935 extern void __eval_tranif_ld(register struct gate_t *gp, register int32 i)
2936 {
2937  int32 out_chg;
2938 
2939  /* DBG remove */
2940  if (i != 2) __arg_terr(__FILE__, __LINE__);
2941  /* --- */
2942  /* third in chged - schedule (if needed) conducting state chg */
2943  /* must always go through scheduling code for spike analysis */
2944  eval_tranif_onoff(gp);
2945  /* out_chg T if conducting state changed */
2946  out_chg = (__new_gateval != __old_gateval);
2947  /* must evaluate both sides */
2948  if (gp->g_delrep == DT_NONE)
2949   { if (out_chg) change_gate_outwire(gp); return; }
2950  prop_gatechg(gp, out_chg, FALSE);
2951 }
2952 
2953 /*
2954  * handle on (conducting) state checking and change for input state
2955  *
2956  * here for delay case gate value is pending conducting state (1 on, 0 off)
2957  * actual current conducting state is same as stored output wire value
2958  *
2959  * stored conducting state corrected for tranif1 and tranif0 (value
2960  * computed then reversed depending on if1 or if0)
2961  */
2962 static void eval_tranif_onoff(struct gate_t *gp)
2963 {
2964  register int32 wi, bi;
2965  register word32 cval;
2966  register struct xstk_t *xsp;
2967  int32 gateid;
2968 
2969  wi = get_wofs_(2*__inum);
2970  bi = get_bofs_(2*__inum);
2971  /* step 1: access old value */
2972  cval = gp->gstate.wp[wi];
2973  /* this is conducting state independent of if0 or if1 */
2974  __old_gateval = (cval >> bi) & 3L;
2975  /* step 2: compute new conducting value */
2976  xsp = __eval_xpr(gp->gpins[2]);
2977  __new_inputval = (xsp->ap[0] & 1L) | ((xsp->bp[0] & 1L) << 1);
2978  if (__new_inputval == 2) __new_inputval = 3;
2979  __pop_xstk();
2980  gateid = gp->gmsym->el.eprimp->gateid;
2981  /* exactly 4 types of tranif gates */
2982  if (gateid == G_TRANIF1 || gateid == G_RTRANIF1)
2983   __new_gateval = __new_inputval;
2984  else
2985   { __new_gateval = (__new_inputval == 0) ? 1
2986    : ((__new_inputval == 1) ? 0 : __new_inputval);
2987   }
2988 }
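
/*
 * conduction mapping sketch for the code above: the enable value is first
 * normalized (z treated as x), then for tranif1/rtranif1 it is used as is,
 * while for tranif0/rtranif0 it is inverted:
 *   enable 0 -> conducting (1),  enable 1 -> off (0),  x/z -> unknown (3)
 */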
2989 
2990 /*
2991  * CONTINUOUS ASSIGN EVENT ROUTINES
2992  */
2993 
2994 /*
2995  * load of changed wire that is a >1 bit conta - rhs needs to be evaluated
2996  * and, if there is a delay, scheduled, else assigned and lhs added to net changes
2997  *
2998  * current itree element never changes in here
2999  * for now evaluating and scheduling - no optimization
3000  *
3001  * know rhs real is illegal here since cannot assign a real to a wire
3002  *
3003  * all computations done with non strength values since strength
3004  * added from conta type if needed when assigning to strength wire
3005  *
3006  * no pulse/glitch analysis here just inertial algorithm because
3007  * continuous assigns do not correspond to silicon rather modeling convention
3008  */
3009 extern void __eval_conta_rhs_ld(register struct net_pin_t *npp)
3010 {
3011  register struct xstk_t *xsp, *xsp2;
3012  register struct conta_t *cap;
3013  int32 cv, cv2, schd_wire;
3014  int32 lhswlen, orhslen, lhslen;
3015  byte *sbp;
3016  word64 cadel, schtim;
3017  i_tev_ndx tevpi;
3018  struct expr_t *lhsxp;
3019  struct xstk_t *xsp3;
3020  struct conta_t *mast_cap;
3021 
3022  if (__ev_tracing) { evtr_eval_conta_rhs_ld(npp); return; }
3023 
3024  /* SJM 09/18/02 - no separate per bit NP type, just check for pb sim on */
3025  mast_cap = npp->elnpp.ecap;
3026  /* get pattern never decomposed per bit */
3027  if (mast_cap->lhsx->getpatlhs) { __process_getpat(mast_cap); return; }
3028 
3029  if (mast_cap->ca_pb_sim) cap = &(mast_cap->pbcau.pbcaps[npp->pbi]);
3030  else cap = mast_cap;
3031  /* know getpat never has delay */
3032  lhsxp = cap->lhsx;
3033  lhslen = cap->lhsx->szu.xclen;
3034  /* this pushes rhs new maybe to be scheduled value onto expression stack */
3035  xsp = __eval_xpr(cap->rhsx);
3036 
3037  /* here rhs must be z with fixed conta strength merged in if present */
3038  if (xsp->xslen != lhslen)
3039   {
3040    orhslen = xsp->xslen;
3041 
3042    /* SJM 09/29/03 - change to handle sign extension and separate types */
3043    if (xsp->xslen > lhslen) __narrow_sizchg(xsp, lhslen);
3044    else if (xsp->xslen < lhslen)
3045     {
3046      if (cap->rhsx->has_sign) __sgn_xtnd_widen(xsp, lhslen);
3047      else __sizchg_widen(xsp, lhslen);
3048     }
3049 
3050    /* SJM 07/09/03 - now understand XL algorithm to mimic - for conta */
3051    /* semantics requires rhs non stren eval with 0 widening then add stren */
3052    /* SJM 05/10/04 init x widening not related to new signed widening */
3053    if (__wire_init) __fix_widened_toxs(xsp, orhslen);
3054   }
3055 
3056  /* case 1: no delay assign */
3057  /* since 1 bit handled as gate, not checking for no change - just assign */
3058  /* also rhs eval. is driver (i.e. not stored) */
3059  if (mast_cap->ca_delrep == DT_NONE)
3060   {
3061    __immed_assigns++;
3062    /* in here deals with saved driver - if any lhs bits fi>1 all must be */
3063    /* SJM 09/28/02 - know if master fi>1 all per bit will be */
3064    if (lhsxp->x_multfi)
3065     {
3066      /* this packs if possible */
3067      __st_perinst_val(cap->ca_drv_wp, lhslen, xsp->ap, xsp->bp);
3068      __mdr_assign_or_sched(lhsxp);
3069     }
3070    else
3071     {
3072      /* here do not need drv and do not need schd driver, rhs is driver */
3073      if (lhsxp->lhsx_ndel && !__wire_init) schd_wire = TRUE;
3074      else schd_wire = FALSE;
3075      if (lhsxp->x_stren)
3076       {
3077        /* convert to strength bytes forms - add in driven from ca */
3078        push_xstk_(xsp2, 4*lhslen);
3079        sbp = (byte *) xsp2->ap;
3080        __st_standval(sbp, xsp, cap->ca_stval);
3081        if (lhsxp->optyp == LCB) __stren_exec_ca_concat(lhsxp, sbp, schd_wire);
3082        else __exec_conta_assign(lhsxp, xsp2->ap, xsp2->bp, schd_wire);
3083        __pop_xstk();
3084       }
3085      else
3086       {
3087        if (lhsxp->optyp == LCB)
3088         __exec_ca_concat(lhsxp, xsp->ap, xsp->bp, schd_wire);
3089        else __exec_conta_assign(lhsxp, xsp->ap, xsp->bp, schd_wire);
3090       }
3091     }
3092    __pop_xstk();
3093    return;
3094   }
3095 
3096  /* case 2: has delay */
3097  lhswlen = wlen_(lhslen);
3098  /* xsp2 is currently driving (old) value and must exist */
3099  /* DBG remove ---
3100  if (cap->ca_drv_wp.wp == NULL) __arg_terr(__FILE__, __LINE__);
3101  -- */
3102 
3103  push_xstk_(xsp2, lhslen);
3104  __ld_perinst_val(xsp2->ap, xsp2->bp, cap->ca_drv_wp, lhslen);
3105  tevpi = cap->caschd_tevs[__inum];
3106  cv = memcmp(xsp2->ap, xsp->ap, 2*lhswlen*WRDBYTES);
3107  __pop_xstk();
3108  /* case 2a: short circuit case no event and new and old same */
3109  if (tevpi == -1 && cv == 0) { __pop_xstk(); return; }
3110 
3111  /* compute delay - know at least one bit changed */
3112  __new_gateval = 1L;
3113  /* if 4v delay, must set new_gateval for use in delay selection */
3114  /* notice modified LRM: if left hand side is all x's, minimum delay is used */
3115  if (mast_cap->ca_4vdel)
3116   {
3117    if (mast_cap->ca_pb_sim)
3118     {
3119      struct xstk_t *mast_xsp;
3120 
3121      /* if 4v delay, must always eval entire conta rhs to select delay */
3122      mast_xsp = __eval_xpr(mast_cap->rhsx);
3123      if (vval_is0_(mast_xsp->ap, lhslen))
3124       {
3125        if (vval_is0_(mast_xsp->bp, lhslen)) __new_gateval = 0L;
3126        else if (__vval_is1(mast_xsp->bp, lhslen)) __new_gateval = 2L;
3127       }
3128      else if (__vval_is1(mast_xsp->ap, lhslen)
3129       && __vval_is1(mast_xsp->bp, lhslen)) { __new_gateval = 3L; }
3130      __pop_xstk();
3131     }
3132    else
3133     {
3134      if (vval_is0_(xsp->ap, lhslen))
3135       {
3136        if (vval_is0_(xsp->bp, lhslen)) __new_gateval = 0L;
3137        else if (__vval_is1(xsp->bp, lhslen)) __new_gateval = 2L;
3138       }
3139      else if (__vval_is1(xsp->ap, lhslen) && __vval_is1(xsp->bp, lhslen))
3140       { __new_gateval = 3L; }
3141     }
3142   }
3143  /* this may use new gateval global to select delay */
3144  /* SJM 09/28/02 - delay same for all so stored in master */
3145  __get_del(&cadel, mast_cap->ca_du, mast_cap->ca_delrep);
3146  schtim = __simtime + cadel;
3147 
3148  /* case 2b: no pending event and different */
3149  if (tevpi == -1)
3150   {
3151    /* case 1b: new value to schedule */
3152    /* know xsp is lhs width */
3153    schedule_1caev(cap, cadel, schtim, xsp);
3154    __pop_xstk();
3155    return;
3156   }
3157 
3158  push_xstk_(xsp3, lhslen);
3159  __ld_perinst_val(xsp3->ap, xsp3->bp, cap->schd_drv_wp, lhslen);
3160  /* compare currently scheduled value to new to be scheduled value */
3161  /* if same do nothing since already scheduled to right value and know */
3162  /* value will be later */
3163  cv2 = memcmp(xsp3->ap, xsp->ap, 2*lhswlen*WRDBYTES);
3164  __pop_xstk();
3165  if (cv2 == 0) { __pop_xstk(); return; }
3166 
3167  /* case 2c: pending event - no spike analysis for >1 bit contas */
3168  /* case 2c-1 - new and old the same - cancel */
3169  if (cv == 0)
3170   {
3171    __tevtab[tevpi].te_cancel = TRUE;
3172     __inertial_cancels++;
3173    cap->caschd_tevs[__inum] = -1;
3174   }
3175  /* case 2c-2 - new and old differ - reschedule latest input change */
3176  /* notice even for modeling anomaly where latest input change leads to */
3177  /* earlier output event, use latest input change */
3178  else reschedule_1caev(tevpi, cadel, schtim, xsp);
3179 
3180  __pop_xstk();
3181 }
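
/*
 * inertial timing example (illustrative only): with a #5 conta delay, an
 * rhs change at time 10 schedules the new value for time 15; a second rhs
 * change at time 12 back to the old driving value cancels the pending
 * event (case 2c-1 above), while a change to some third value cancels and
 * reschedules for time 17 (case 2c-2) - only the latest rhs change survives
 */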
3182 
3183 /*
3184  * event tracing version of eval conta rhs
3185  */
3186 static void evtr_eval_conta_rhs_ld(struct net_pin_t *npp)
3187 {
3188  int32 cv, schd_wire;
3189  int32 lhswlen, orhslen, lhslen;
3190  byte *sbp;
3191  word64 cadel, schtim;
3192  i_tev_ndx tevpi;
3193  struct tev_t *tevp;
3194  struct conta_t *cap, *mast_cap;
3195  struct xstk_t *xsp, *xsp2, *xsp3;
3196  struct expr_t *lhsxp;
3197  char s1[RECLEN], s2[RECLEN], s3[RECLEN], s4[RECLEN], s5[RECLEN];
3198 
3199  /* SJM 09/18/02 - no separate per bit NP type, just check for pb tab */
3200  mast_cap = npp->elnpp.ecap;
3201  /* know getpat never has delay and never decomposed per bit */
3202  if (mast_cap->lhsx->getpatlhs) { __process_getpat(mast_cap); return; }
3203 
3204  if (mast_cap->ca_pb_sim) cap = &(mast_cap->pbcau.pbcaps[npp->pbi]);
3205  else cap = mast_cap;
3206  lhsxp = cap->lhsx;
3207 
3208  if (mast_cap->ca_pb_sim)
3209   {
3210    __tr_msg("-- %s RHS bit %d changed:\n",
3211     __to_evtrcanam(__xs2, mast_cap, __inst_ptr), npp->pbi);
3212   }
3213  else
3214   {
3215    __tr_msg("-- %s RHS changed:\n",
3216     __to_evtrcanam(__xs2, mast_cap, __inst_ptr));
3217   }
3218  lhslen = cap->lhsx->szu.xclen;
3219  xsp = __eval_xpr(cap->rhsx);
3220 
3221  /* here rhs must be z with fixed conta strength merged in if present */
3222  if (xsp->xslen != lhslen)
3223   {
3224    orhslen = xsp->xslen;
3225 
3226    /* SJM 09/29/03 - change to handle sign extension and separate types */
3227    if (xsp->xslen > lhslen) __narrow_sizchg(xsp, lhslen);
3228    else if (xsp->xslen < lhslen)
3229     {
3230      if (cap->rhsx->has_sign) __sgn_xtnd_widen(xsp, lhslen);
3231      else __sizchg_widen(xsp, lhslen);
3232     }
3233    /* SJM 07/09/03 - now understand XL algorithm to mimic - if rhs is reg */
3234    /* or reg type expr, must widen with 0's (automatic) if net with zs */
3235    /* wire init is special case */
3236    /* SJM 05/10/04 init x widening not related to new signed widening */
3237    if (__wire_init) __fix_widened_toxs(xsp, orhslen);
3238   }
3239 
3240  /* case 1: no delay assign */
3241  /* since 1 bit handled as gate, not checking for no change - just assign */
3242  if (mast_cap->ca_delrep == DT_NONE)
3243   {
3244    __immed_assigns++;
3245    __tr_msg(" NODEL <NV=%s>\n",
3246     __regab_tostr(s1, xsp->ap, xsp->bp, lhslen, BHEX, FALSE));
3247    /* in here deals with saved driver */
3248 
3249    /* SJM 09/28/02 - know if master fi>1 all per bit will be */
3250    if (lhsxp->x_multfi)
3251     {
3252      __st_perinst_val(cap->ca_drv_wp, lhslen, xsp->ap, xsp->bp);
3253      __mdr_assign_or_sched(lhsxp);
3254     }
3255    else
3256     {
3257      /* here do not need drv and do not need schd driver, rhs is driver */
3258      if (lhsxp->lhsx_ndel && !__wire_init) schd_wire = TRUE;
3259      else schd_wire = FALSE;
3260      if (lhsxp->x_stren)
3261       {
3262        /* convert to strength bytes forms - add in driven from ca */
3263        push_xstk_(xsp2, 4*lhslen);
3264        sbp = (byte *) xsp2->ap;
3265        /* stren val also in PB */
3266        __st_standval(sbp, xsp, cap->ca_stval);
3267        if (lhsxp->optyp == LCB) __stren_exec_ca_concat(lhsxp, sbp, schd_wire);
3268        /* SJM 03/30/99 - was storing value without strength added */
3269        else __exec_conta_assign(lhsxp, xsp2->ap, xsp2->bp, schd_wire);
3270        __pop_xstk();
3271       }
3272      else
3273       {
3274        if (lhsxp->optyp == LCB)
3275         __exec_ca_concat(lhsxp, xsp->ap, xsp->bp, schd_wire);
3276        else __exec_conta_assign(lhsxp, xsp->ap, xsp->bp, schd_wire);
3277       }
3278     }
3279    __pop_xstk();
3280    return;
3281   }
3282 
3283  /* case 2: has delay */
3284  /* this is current (old) driving value */
3285  lhswlen = wlen_(lhslen);
3286  push_xstk_(xsp2, lhslen);
3287  __ld_perinst_val(xsp2->ap, xsp2->bp, cap->ca_drv_wp, lhslen);
3288  tevpi = cap->caschd_tevs[__inum];
3289  cv = memcmp(xsp2->ap, xsp->ap, 2*lhswlen*WRDBYTES);
3290  /* case 2a: short circuit case no event and new and old same */
3291  if (tevpi == -1 && cv == 0)
3292   {
3293    __tr_msg(" DEL, NOCHG <OV=%s>\n",
3294     __regab_tostr(s1, xsp2->ap, xsp2->bp, lhslen, BHEX, FALSE));
3295    __pop_xstk();
3296    __pop_xstk();
3297    return;
3298   }
3299 
3300  /* compute delay */
3301  __new_gateval = 1L;
3302  /* if 4v delay, must set new gateval for use in delay selection */
3303  /* SJM 09/28/02 - now match non evtr 4v case */
3304  if (mast_cap->ca_4vdel)
3305   {
3306    if (mast_cap->ca_pb_sim)
3307     {
3308      struct xstk_t *mast_xsp;
3309 
3310      /* if 4v delay, must always eval entire conta rhs to select delay */
3311      mast_xsp = __eval_xpr(mast_cap->rhsx);
3312      if (vval_is0_(mast_xsp->ap, lhslen))
3313       {
3314        if (vval_is0_(mast_xsp->bp, lhslen)) __new_gateval = 0L;
3315        else if (__vval_is1(mast_xsp->bp, lhslen)) __new_gateval = 2L;
3316       }
3317      else if (__vval_is1(mast_xsp->ap, lhslen)
3318       && __vval_is1(mast_xsp->bp, lhslen)) { __new_gateval = 3L; }
3319      __pop_xstk();
3320     }
3321    else
3322     {
3323      if (vval_is0_(xsp->ap, lhslen))
3324       {
3325        if (vval_is0_(xsp->bp, lhslen)) __new_gateval = 0L;
3326        else if (__vval_is1(xsp->bp, lhslen)) __new_gateval = 2L;
3327       }
3328      else if (__vval_is1(xsp->ap, lhslen) && __vval_is1(xsp->bp, lhslen))
3329       { __new_gateval = 3L; }
3330     }
3331   }
3332  /* this may use new gateval global to select delay */
3333  __get_del(&cadel, mast_cap->ca_du, mast_cap->ca_delrep);
3334  schtim = __simtime + cadel;
3335 
3336  /* case 2b: no pending event and different */
3337  if (tevpi == -1)
3338   {
3339    /* case 1b: new value to schedule */
3340    __tr_msg(" DEL, SCHD AT %s <OV=%s, NSV=%s>\n",
3341     __to_timstr(s1, &schtim),
3342     __regab_tostr(s2, xsp2->ap, xsp2->bp, lhslen, BHEX, FALSE),
3343     __regab_tostr(s3, xsp->ap, xsp->bp, lhslen, BHEX, FALSE));
3344 
3345    /* know xsp is lhs width */
3346    schedule_1caev(cap, cadel, schtim, xsp);
3347    __pop_xstk();
3348    __pop_xstk();
3349    return;
3350   }
3351 
3352  /* case 2c: pending event - no spike analysis for >1 bit contas */
3353  tevp = &(__tevtab[tevpi]);
3354  push_xstk_(xsp3, lhslen);
3355  __ld_perinst_val(xsp3->ap, xsp3->bp, cap->schd_drv_wp, lhslen);
3356 
3357  /* compare currently scheduled value to new to be scheduled value */
3358  /* if same do nothing since already scheduled to right value and know */
3359  /* value will be later */
3360  if (memcmp(xsp3->ap, xsp->ap, 2*lhswlen*WRDBYTES) == 0)
3361   {
3362    __tr_msg(
3363     " DEL, MODEL ANOMALLY IGNORE SCHED TO SAME <OSV=NSV=%s> OLD AT %s NEW %s\n",
3364     __regab_tostr(s1, xsp->ap, xsp->bp, lhslen, BHEX, FALSE),
3365     __to_timstr(s2, &(tevp->etime)), __to_timstr(s3, &schtim));
3366    goto done;
3367   }
3368 
3369  /* case 2c-1-a - new and old the same - remove inertial pulse */
3370  if (cv == 0)
3371   {
3372    /* cancel pending and return */
3373    __tr_msg(" DEL, PEND, SAME <OV=NSV=%s, OSV=%s AT %s INERTIAL CANCEL>\n",
3374     __regab_tostr(s1, xsp->ap, xsp->bp, lhslen, BHEX, FALSE),
3375     __regab_tostr(s2, xsp3->ap, xsp3->bp, lhslen, BHEX, FALSE),
3376     __to_timstr(s3, &(tevp->etime)));
3377    /* cancel */
3378    tevp->te_cancel = TRUE;
3379    __inertial_cancels++;
3380    cap->caschd_tevs[__inum] = -1;
3381    goto done;
3382   }
3383  /* case 2c-1-b - new and old differ */
3384  __tr_msg(
3385   " DEL, PEND, RESCHD <OV=%s, OSV=%s AT %s, NSV=%s AT %s REPLACES>\n",
3386   __regab_tostr(s1, xsp2->ap, xsp2->bp, lhslen, BHEX, FALSE),
3387   __regab_tostr(s2, xsp3->ap, xsp3->bp, lhslen, BHEX, FALSE),
3388   __to_timstr(s5, &(tevp->etime)),
3389   __regab_tostr(s3, xsp->ap, xsp->bp, lhslen, BHEX, FALSE),
3390   __to_timstr(s4, &schtim));
3391 
3392  /* reschedule by replacing (if same time) or cancelling */
3393  reschedule_1caev(tevpi, cadel, schtim, xsp);
3394 
3395 done:
3396  __pop_xstk();
3397  __pop_xstk();
3398  __pop_xstk();
3399 }
3400 
3401 /*
3402  * emit a continuous assign locator
3403  * must make sure s at least IDLEN + MSG TRUNC LEN
3404  */
3405 extern char *__to_evtrcanam(char *s, struct conta_t *cap,
3406  struct itree_t *teitp)
3407 {
3408  char s1[RECLEN], s2[RECLEN];
3409 
3410  sprintf(s, "continuous assign in %s %s", __msg2_blditree(s1, teitp),
3411   __bld_lineloc(s2, cap->casym->syfnam_ind, cap->casym->sylin_cnt));
3412  return(s);
3413 }
3414 
3415 /*
3416  * schedule 1 conta event
3417  * know schd_xsp width is exactly lhs width
3418  */
3419 static void schedule_1caev(struct conta_t *cap, word64 cadel,
3420  word64 schtim, struct xstk_t *schd_xsp)
3421 {
3422  register i_tev_ndx tevpi;
3423 
3424  alloc_tev_(tevpi, TE_CA, __inst_ptr, schtim);
3425  if (cadel == 0ULL)
3426   {
3427    /* this is #0, but must still build tev */
3428    if (__p0_te_hdri == -1) __p0_te_hdri = __p0_te_endi = tevpi;
3429    else { __tevtab[__p0_te_endi].tenxti = tevpi; __p0_te_endi = tevpi; }
3430   }
3431  else __insert_event(tevpi);
3432 
3433  cap->caschd_tevs[__inum] = tevpi;
3434  __tevtab[tevpi].tu.tecap = cap;
3435  __st_perinst_val(cap->schd_drv_wp, schd_xsp->xslen, schd_xsp->ap,
3436   schd_xsp->bp);
3437 }
3438 
3439 /*
3440  * take ca event and new value and either update if time same or cancel and
3441  * create new event if later
3442  */
3443 static void reschedule_1caev(i_tev_ndx tevpi, word64 cadel,
3444  word64 newtim, struct xstk_t *schd_xsp)
3445 {
3446  struct tev_t *tevp;
3447  struct conta_t *cap;
3448 
3449  tevp = &(__tevtab[tevpi]);
3450  /* if del == 0 (pnd0), will always be same time reschedule */
3451  cap = tevp->tu.tecap;
3452  if (cadel == 0ULL)
3453   {
3454    /* new scheduled value replaces old */
3455    __newval_rescheds++;
3456    /* know length must be the same */
3457    __st_perinst_val(cap->schd_drv_wp, schd_xsp->xslen, schd_xsp->ap,
3458     schd_xsp->bp);
3459    return;
3460   }
3461  /* cancel */
3462  tevp->te_cancel = TRUE;
3463  __inertial_cancels++;
3464  /* this will change the scheduled field so no need to set to nil */
3465  schedule_1caev(cap, cadel, newtim, schd_xsp);
3466 }
3467 
3468 /*
3469  * process a continuous assign actual assignment (end of delay ev triggered)
3470  * 1 bit continuous assigns are processed as gates per Verilog semantics
3471  * and not seen here
3472  *
3473  * will only get here if delay >= 0 (maybe #0)
3474  * SJM 09/28/02 - for rhs concat decomposed into PB, event ptr is PB
3475  */
3476 static void process_conta_ev(register struct tev_t *tevp)
3477 {
3478  register struct xstk_t *xsp, *xsp2;
3479  register struct conta_t *cap;
3480  int32 schd_wire, lhslen;
3481  byte *sbp;
3482  struct expr_t *lhsxp;
3483 
3484  cap = tevp->tu.tecap;
3485  lhsxp = cap->lhsx;
3486  lhslen = cap->lhsx->szu.xclen;
3487 
3488  push_xstk_(xsp, lhslen);
3489  __ld_perinst_val(xsp->ap, xsp->bp, cap->schd_drv_wp, lhslen);
3490  if (__ev_tracing)
3491   {
3492    struct conta_t *cap2;
3493 
3494    __evtr_resume_msg();
3495    if (cap->ca_pb_el) cap2 = cap->pbcau.mast_cap; else cap2 = cap;
3496    __to_evtrcanam(__xs, cap2, tevp->teitp);
3497    __regab_tostr(__xs2, xsp->ap, xsp->bp, lhslen, BHEX, FALSE);
3498    if (lhsxp->x_multfi)
3499     __tr_msg("-- %s event this driver of multiple:\n   %s\n", __xs, __xs2);
3500    else __tr_msg("-- %s event the fi=1 driver: %s\n", __xs, __xs2);
3501   }
3502  /* move value from scheduled to driver wp - never store constant strength */
3503  /* has delay so will always have ca drv wp */
3504  /* store scheduled value into driving value */
3505  __st_perinst_val(cap->ca_drv_wp, lhslen, xsp->ap, xsp->bp);
3506 
3507  if (lhsxp->x_multfi) __mdr_assign_or_sched(lhsxp);
3508  else
3509   {
3510    /* here do not need drv and do not need schd driver, rhs is driver */
3511    if (lhsxp->lhsx_ndel && !__wire_init) schd_wire = TRUE;
3512    else schd_wire = FALSE;
3513 
3514    if (lhsxp->x_stren)
3515     {
3516      /* convert to strength bytes forms - add in driven from ca */
3517      /* know all widths exactly  required lhs width */
3518      push_xstk_(xsp2, 4*lhslen);
3519      sbp = (byte *) xsp2->ap;
3520      /* notice stren value also in each per bit, just not delay */
3521      __st_standval(sbp, xsp, cap->ca_stval);
3522      if (lhsxp->optyp == LCB) __stren_exec_ca_concat(lhsxp, sbp, schd_wire);
3523      else __exec_conta_assign(lhsxp, xsp2->ap, xsp2->bp, schd_wire);
3524      __pop_xstk();
3525     }
3526    else
3527     {
3528      if (lhsxp->optyp == LCB)
3529       __exec_ca_concat(lhsxp, xsp->ap, xsp->bp, schd_wire);
3530      else __exec_conta_assign(lhsxp, xsp->ap, xsp->bp, schd_wire);
3531     }
3532   }
3533  __pop_xstk();
3534  cap->caschd_tevs[__inum] = -1;
3535  /* can just leave scheduled wire value - nothing to free */
3536 }
3537 
3538 /*
3539  * process a wire delay event - know this is always 1 bit
3540  * non inout path dest. scheduled wire changes processed here also
3541  *
3542  * since r,f or path delays will have different delays and inertial
3543  * conditions for every bit
3544  * know for scalar bi 0 not -1
3545  *
3546  * could possibly optimize one delay form
3547  * will only get here if wire has delay > 0 (or #0)
3548  * also know path source or destination can never have wire delay
3549  *
3550  * notice when wire changes must see if really changes by doing
3551  * fi>1 competition of right type using scheduled plus current
3552  * then know scheduled value really changed
3553  */
3554 static void process_wire_ev(register struct tev_t *tevp)
3555 {
3556  register int32 bi;
3557  register struct net_t *np;
3558  register byte *sbp;
3559  word32 nval;
3560  struct rngdwir_t *dwirp;
3561 
3562  np = tevp->tu.tenp->tenu.np;
3563  bi = tevp->tu.tenp->nbi;
3564  /* DBG remove ---
3565  if (bi < 0) __misc_terr(__FILE__, __LINE__);
3566  --- */
3567 
3568  /* free wire event auxiliary field here since bit and wire extracted */
3569  __my_free((char *) tevp->tu.tenp, sizeof(struct tenp_t));
3570  tevp->tu.tenp = NULL;
3571 
3572  nval = tevp->outv;
3573  if (__ev_tracing)
3574   {
3575    char s1[RECLEN], s2[RECLEN];
3576 
3577    __evtr_resume_msg();
3578    if (np->n_isapthdst) strcpy(s2, " (path destination)");
3579    else strcpy(s2, "");
3580    __tr_msg("-- processing delay wire %s%s store event, value %s\n",
3581     __to_evtrwnam(__xs, np, bi, bi, tevp->teitp), s2,
3582      __to_vvnam(s1, (word32) nval));
3583   }
3584  dwirp = np->nu.rngdwir;
3585  dwirp->wschd_pbtevs[np->nwid*tevp->teitp->itinum + bi] = -1;
3586 
3587  /* inhibit if active force */
3588  if (np->frc_assgn_allocated && force_inhibit_wireassign(np, bi, tevp->teitp))
3589   return;
3590 
3591  __push_itstk(tevp->teitp);
3592 
3593  /* store bit into wire - value is after any multi-fi competition */
3594  /* this add net chg element if needed */
3595  if (np->n_stren)
3596   {
3597    if (tevp->te_trdecay)
3598     __gfwarn(649, np->nsym->syfnam_ind, np->nsym->sylin_cnt,
3599      "charge on node %s.%s has decayed", __msg2_blditree(__xs, __inst_ptr),
3600      np->nsym->synam);
3601 
3602    /* get strength wire address */
3603    get_stwire_addr_(sbp, np);
3604    if (sbp[bi] != nval)
3605     {
3606      sbp[bi] = nval;
3607      /* know change, record if needed */
3608      record_sel_nchg_(np, bi, bi);
3609     }
3610   }
3611  else __chg_st_bit(np, bi, nval & 1L, (nval >> 1) & 1L);
3612  __pop_itstk();
3613 }
3614 
3615 /*
3616  * process a non blocking procedural assign event - do the assign
3617  * here just assign - no inertial - each just overwrites
3618  *
3619  * SJM 08/08/99 - fixed so lhs indices evaluated at schedule time not
3620  * event proces time to match LRM and XL
3621  */
3622 static void process_nbpa_ev(struct tev_t *tevp)
3623 {
3624  register word32 *wp;
3625  register struct expr_t *con_lhsxp;
3626  register struct st_t *stp;
3627  int32 wlen;
3628  struct tenbpa_t *tenbp;
3629 
3630  __push_itstk(tevp->teitp);
3631  tenbp = tevp->tu.tenbpa;
3632  wp = tenbp->nbawp;
3633  stp = tenbp->nbastp;
3634 
3635  /* SJM 08/08/99 - need to assign to copied lhs expr with select indices */
3636  /* (possibly many if lhs concatenate) replaced by constants */
3637  /* SJM PUTMEBACK */
3638  con_lhsxp = tenbp->nblhsxp;
3639  if (con_lhsxp == NULL) con_lhsxp = stp->st.spra.lhsx;
3640 
3641  wlen = wlen_(con_lhsxp->szu.xclen);
3642  if (__ev_tracing)
3643   {
3644    char s1[RECLEN], s2[RECLEN], s3[RECLEN], s4[RECLEN];
3645 
3646    __tr_msg("-- nb event assign in %s %s of %s to %s at %s\n",
3647     __msg2_blditree(s1, tevp->teitp), __bld_lineloc(s2,
3648      tenbp->nbastp->stfnam_ind, tenbp->nbastp->stlin_cnt),
3649     __xregab_tostr(s3, wp, &wp[wlen], con_lhsxp->szu.xclen, stp->st.spra.rhsx),
3650     __msgexpr_tostr(s4, con_lhsxp), __to_timstr(__xs, &__simtime));
3651   }
3652 
3653  /* SJM 05/19/04 - complex procedural assign needs stmt file/line context */
3654  /* for error messages such as out of range - do not need to save since */
3655  /* no statement context in event processing - this is exception */
3656  __slin_cnt = stp->stlin_cnt;
3657  __sfnam_ind = stp->stfnam_ind;
3658 
3659  /* do assign - know if lhs expr copied, width still same */
3660  __exec2_proc_assign(con_lhsxp, wp, &(wp[wlen]));
3661 
3662  /* final step is freeing contents */
3663  __my_free((char *) wp, 2*wlen*WRDBYTES);
3664  /* if needed to copy lhs expr., now free */
3665  if (tenbp->nblhsxp != NULL) __free_xtree(tenbp->nblhsxp);
3666 
3667  __my_free((char *) tevp->tu.tenbpa, sizeof(struct tenbpa_t));
3668  tevp->tu.tenbpa = NULL;
3669  __pop_itstk();
3670 }
3671 
3672 /*
3673  * print out event trace time header - time is not in each event trace message
3674  */
3675 extern void __evtr_resume_msg(void)
3676 {
3677  char s1[RECLEN];
3678 
3679  if (__last_evtrtime != __simtime)
3680   {
3681    /* this should go through time format ? */
3682    __tr_msg("\n<<< event tracing at time %s\n", __to_timstr(s1, &__simtime));
3683    __last_evtrtime = __simtime;
3684   }
3685 }
3686 
3687 /*
3688  * process a special getpattern form continuous assignment
3689  * know left of rhsx is $getpattern symbol and right is var. array index
3690  *
3691  * notice this is a special immediate overriding assign and does not handle
3692  * multi-fi wire properties or wire delays or strengths
3693  *
3694  * should probably try to optimize special 64 bit case too
3695  * point of this is to optimized for known fixed form
3696  *
3697  * could keep old getpattern value and build index of change and bit
3698  * select to change those
3699  * cannot be xmr
3700  */
3701 extern void __process_getpat(struct conta_t *cap)
3702 {
3703  register int32 bi;
3704  register struct expr_t *catx;
3705  register word32 cbita, cbitb;
3706  int32 i, wlen, ubits;
3707  word32 tmpa, tmpb;
3708  struct expr_t *idndp, *lhsxp, *rhsxp;
3709  struct xstk_t *xsp;
3710  struct net_t *np;
3711 
3712  /* know rhs is variable array index */
3713  /* rhs is get pattern function call */
3714  lhsxp = cap->lhsx;
3715  rhsxp = cap->rhsx;
3716  xsp = __eval_xpr(rhsxp->ru.x->lu.x);
3717  /* this is an exception - no conversion to lhs width: extra array bits are */
3718  /* ignored or lhs just not filled */
3719 
3720  /* if out of range or x, value will be changed to x */
3721 
3722  if (__ev_tracing)
3723   {
3724    __evtr_resume_msg();
3725    __tr_msg("-- $getpattern %s processed\n",
3726     __to_evtrcanam(__xs, cap, __inst_ptr));
3727   }
3728 
3729  /* wider than 1 word32 case */
3730  wlen = wlen_(lhsxp->szu.xclen);
3731  ubits = ubits_(lhsxp->szu.xclen);
3732  catx = lhsxp->ru.x;
3733  bi = (ubits == 0) ? WBITS - 1: ubits - 1;
3734  for (i = wlen - 1; i >= 0; i--)
3735   {
3736    tmpa = xsp->ap[i];
3737    tmpb = xsp->bp[i];
3738    /* know need prop. turned off after last propagation (off here) */
3739    for (; bi >= 0; catx = catx->ru.x, bi--)
3740     {
3741      if (catx == NULL) goto done;
3742 
3743      idndp = catx->lu.x;
3744      np = idndp->lu.sy->el.enp;
3745 
3746      cbita = (tmpa >> bi) & 1L;
3747      cbitb = (tmpb >> bi) & 1L;
3748      chg_st_scalval_(np->nva.bp, cbita, cbitb);
3749 
3750      /* know lhs get pat concat elements are scalars */
3751      if (__lhs_changed) record_nchg_(np);
3752     }
3753    bi = WBITS - 1;
3754    if (catx == NULL) break;
3755   }
3756 done:
3757  __immed_assigns++;
3758  __pop_xstk();
3759 }
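
/*
 * bit ordering sketch (assuming WBITS is 32): for a 35 bit lhs concat,
 * wlen is 2 and ubits is 3, so the walk starts at bit 2 of the upper
 * array word and moves down the concat list, then restarts at bit 31 of
 * the low word - i.e. the left most concat scalar receives the most
 * significant bit of the indexed array element value
 */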
3760 
3761 /*
3762  * emit a net name for tracing with path if needed
3763  * for ev. know never task/func. part of xmr reference
3764  */
3765 extern char *__to_evtrwnam(char *s, struct net_t *np, int32 bi1, int32 bi2,
3766  struct itree_t *teitp)
3767 {
3768  char s1[RECLEN], s2[RECLEN];
3769 
3770  __msg2_blditree(s1, teitp);
3771  strcat(s1, ".");
3772  strcat(s1, __schop(s2, np->nsym->synam));
3773 
3774  if (bi1 == -1 || !np->n_isavec) strcpy(s, s1);
3775  else if (bi1 == bi2) sprintf(s, "%s[%d]", s1, __unnormalize_ndx(np, bi1));
3776  else sprintf(s, "%s[%d:%d]", s1, __unnormalize_ndx(np, bi1),
3777   __unnormalize_ndx(np, bi2));
3778  return(s);
3779 }
3780 
3781 /*
3782  * emit an MIPD port name for tracing with path if needed
3783  *
3784  * port can be only 1 bit, always numbered from high to low, so no normalize
3785  */
3786 extern char *__to_evtrpnam(char *s, struct mod_pin_t *mpp, int32 bi,
3787  struct itree_t *teitp)
3788 {
3789  char s1[RECLEN], s2[RECLEN];
3790 
3791  __msg2_blditree(s1, teitp);
3792  strcat(s1, ".");
3793  __schop(s2, __to_mpnam(__xs, mpp->mpsnam));
3794  strcat(s1, s2);
3795 
3796  if (bi == -1 || mpp->mpwide == 1) strcpy(s, s1);
3797  else sprintf(s, "%s[%d]", s1, bi);
3798  return(s);
3799 }
3800 
3801 /*
3802  * for one bit, know some bits forced inhibit assign if this bit forced
3803  * here do not need to worry about some bits only forced from range
3804  * this is only for wire where 1 bit per bit*inst product
3805  */
3806 static int32 force_inhibit_wireassign(struct net_t *np, register int32 biti,
3807  struct itree_t *itp)
3808 {
3809  register struct qcval_t *frc_qcp;
3810  int32 nd_itpop, rv;
3811 
3812  if (itp != NULL) { __push_itstk(itp); nd_itpop = TRUE; }
3813  else nd_itpop = FALSE;
3814  frc_qcp =  &(np->nu2.qcval[__inum*np->nwid + biti]);
3815  if (frc_qcp->qc_active) rv = TRUE; else rv = FALSE;
3816  if (nd_itpop) __pop_itstk();
3817  return(rv);
3818 }
3819 
3820 /*
3821  * process a path dest. tran (inout) wire delay internal hard driver
3822  * change event
3823  *
3824  * for inout path. know the one driver changed previously and path delayed
3825  * update of hard driver internal tran channel value for the wire
3826  * any other change of path will cause event cancel and new schedule
3827  * so fact that the drivers if evaled will be new value still works
3828  *
3829  * since r,f or path delays will have different delays and inertial
3830  * conditions for every bit, know for scalar bi 0 not -1
3831  */
3832 static void process_trpthdst_ev(register struct tev_t *tevp)
3833 {
3834  register int32 bi;
3835  register struct net_t *np;
3836  register byte *sbp;
3837  struct traux_t *trap;
3838  word32 nval, av, bv;
3839  struct rngdwir_t *dwirp;
3840  struct xstk_t *xsp;
3841 
3842  /* notice event here emitted in change gate outwire */
3843  np = tevp->tu.tenp->tenu.np;
3844  bi = tevp->tu.tenp->nbi;
3845  /* DBG remove ---
3846  if (bi < 0) __misc_terr(__FILE__, __LINE__);
3847  --- */
3848 
3849  /* free wire event auxiliary field here since bit and wire extracted */
3850  __my_free((char *) tevp->tu.tenp, sizeof(struct tenp_t));
3851  tevp->tu.tenp = NULL;
3852 
3853  nval = tevp->outv;
3854  if (__ev_tracing)
3855   {
3856    char s1[RECLEN];
3857 
3858    __evtr_resume_msg();
3859    __tr_msg(
3860     "-- processing inout path dest. %s driven value update event, value %s\n",
3861     __to_evtrwnam(__xs, np, bi, bi, tevp->teitp),
3862      __to_vvnam(s1, (word32) nval));
3863   }
3864  dwirp = np->nu.rngdwir;
3865  dwirp->wschd_pbtevs[np->nwid*tevp->teitp->itinum + bi] = -1;
3866 
3867  trap = np->ntraux;
3868  __push_itstk(tevp->teitp);
3869  /* update hard driver stored value and re-eval tran channel if needed */
3870  if (np->n_stren)
3871   {
3872    /* get strength wire address */
3873    sbp = &(trap->trnva.bp[__inum*np->nwid]);
3874    if (sbp[bi] == nval) goto done;
3875    sbp[bi] = nval;
3876   }
3877  else
3878   {
3879    if (!np->n_isavec)
3880     {
3881      ld_scalval_(&av, &bv, trap->trnva.bp);
3882      if (nval == (av | (bv << 1))) goto done;
3883      /* SJM 07/16/01 - typo was storing old val so tr chan value never chgs */
3884      /* need to store new non stren value not old */
3885      /* ??? wrong - st_scalval_(trap->trnva.bp, av, bv); */
3886      st2_scalval_(trap->trnva.bp, nval);
3887     }
3888    else
3889     {
3890      push_xstk_(xsp, np->nwid);
3891      __ld_perinst_val(xsp->ap, xsp->bp, trap->trnva, np->nwid);
3892      av = rhsbsel_(xsp->ap, bi);
3893      bv = rhsbsel_(xsp->bp, bi);
3894      if (nval == (av | (bv << 1))) { __pop_xstk(); goto done; }
3895      __lhsbsel(xsp->ap, bi, (nval & 1L));
3896      __lhsbsel(xsp->bp, bi, ((nval >> 1) & 1L));
3897      __st_perinst_val(trap->trnva, np->nwid, xsp->ap, xsp->bp);
3898      __pop_xstk();
3899     }
3900   }
3901  /* if some but not this bit in tran channel, just assign */
3902  /* SJM - 03/15/01 - know bit not -1 since schedules as 0 for scalar */
3903  __eval_tran_1bit(np, bi);
3904 done:
3905  __pop_itstk();
3906 }
3907 
3908 /*
3909  * ROUTINES TO PROCESS BEHAVIORAL EVENTS
3910  */
3911 
3912 /* table for converting 4 bit (oonn) edge pair to edge value byte */
3913 /* table treats edge with z as x here */
3914 byte __epair_tab[] =
3915  { NOEDGE, EDGE01, EDGE0X, EDGE0X, EDGE10, NOEDGE, EDGE1X, EDGE1X,
3916    EDGEX0, EDGEX1, NOEDGE, NOEDGE, EDGEX0, EDGEX1, NOEDGE, NOEDGE };
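
/*
 * indexing sketch: callers form the index as (nval | (oval << 2)) where
 * each value uses the 2 bit a|(b<<1) encoding (0, 1, 2 = z, 3 = x), so
 * for example oval 0 with nval 1 selects entry 1 (EDGE01) and oval 3
 * with nval 0 selects entry 12 (EDGEX0); z rows map to the same edge as x
 */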
3917 
3918 /*
3919  * after net (wire or reg) changed, propagate to all dces the wire drives
3920  * bit range passed and used to eliminate fan-out for other bits here
3921  * all ranges here normalized high to low form
3922  * notice will never get to event trigger through this path (through cause)
3923  *
3924  * inst. ptr here is place np changed (i.e. for XMR define itree loc)
3925  * know npi1 >= npi2 since normalized internally
3926  */
3927 extern void __wakeup_delay_ctrls(register struct net_t *np, register int32 npi1,
3928  register int32 npi2)
3929 {
3930  register struct dcevnt_t *dcep, *dcep2;
3931  register word32 *wp;
3932  int32 nd_itpop, oneinst, tevpi, i1;
3933  word32 oval, nval;
3934  byte emask;
3935  struct delctrl_t *dctp;
3936  struct fmonlst_t *fmonp;
3937  struct fmselst_t *fmsep;
3938  struct dce_expr_t *dcexp;
3939 
3940  for (dcep = np->dcelst; dcep != NULL; )
3941   {
3942    /* --- DBG remove ---
3943    if (__inst_ptr == NULL) __misc_terr(__FILE__, __LINE__);
3944    --- */
3945 
3946    /* filter one instance forms before case */
3947    if (dcep->dce_1inst && dcep->dce_matchitp != __inst_ptr)
3948     { dcep = dcep->dcenxt; continue; }
3949 
3950    /* SJM 10/06/06 - for vpi vc call back, may free the dcep so must save */
3951    dcep2 = dcep->dcenxt;
3952    switch ((byte) dcep->dce_typ) {
3953     case DCE_RNG_INST:
3954      /* SJM 11/25/02 - notice can't be turned off/on */
3955      dctp = dcep->st_dctrl;
3956      /* all of wire changed match */
3957      if (npi1 == -1) goto do_event_ctrl;
3958      /* dce is range DCE range, know dci1 cannot be -1 */
3959      if (dcep->dci1 == -2)
3960       {
3961        /* SJM 10/12/04 - because contab realloced, must be ndx base of IS */
3962        wp = &(__contab[dcep->dci2.xvi]);
3963        i1 = (int32) wp[2*__inum];
3964        /* change must be inside range to match */
3965        if (i1 > npi1 || i1 < npi2) break;
3966       }
3967      else
3968       {
3969        /* SJM 06/26/04 - FIXME ??? ### isn't else needed here ??? */
3970        /* eliminate if changed bit do not overlap range */
3971        /* if low chged above high or high chged below low, eliminate */
3972        if (npi2 > dcep->dci1 || npi1 < dcep->dci2.i) break;
3973       }
3974      goto do_event_ctrl;
3975     case DCE_INST:
3976      /* notice dce that is entire wire always matches changed range */
3977      dctp = dcep->st_dctrl;
3978 
3979 do_event_ctrl:
3980      nd_itpop = FALSE;
3981      /* first see if variable really changed (plus edge filtering) */
3982      oval = nval = 3;
3983      /* if no chg record, then array or reg entire wire so know changed */
3984      /* know for any wire even scalar, will exist */
3985      /* LOOKATME - filtering even for DOWN XMR insts that do not match? */
3986      if (dcep->prevval.wp != NULL)
3987       {
3988        /* also eliminate if event control range or wire did not change */
3989        /* for xmr and/or collpase - look in target itree where dce attached */
3990        /* for non dce expr form, sets old and new values for edge detection */
3991        oneinst = (dcep->dce_1inst) ? TRUE : FALSE;
3992        if (!np->n_isavec)
3993         {
3994          /* SJM 06/29/04 - simplified - always use stren version for scalar */
3995          if (!scal_stfilter_dce_chg(np, dcep, &oval, &nval, oneinst))
3996           goto dce_done;
3997         }
3998        else
3999         {
4000          if (!np->n_stren)
4001           {
4002            if (!filter_dce_chg(np, dcep, &oval, &nval, oneinst))
4003             goto dce_done;
4004           }
4005          else
4006           {
4007            if (!stfilter_dce_chg(np, dcep, &oval, &nval, oneinst))
4008             goto dce_done;
4009           }
4010         }
4011       }
4012      /*
4013       * idea for normal event control dce:
4014       * during prep: first for xmr trace from ref to target (place where wire)
4015       *
4016       * here when triggering (i.e. wakeup and schedule thread)
4017       * if xmr to on target place to wake up, for multiply
4018       * instantiated down will be one dce for each down inst. - when up wire
4019       * changes need to match one right dce itree loc which is done by
4020       * comparing against when match move down to xmr move from target back
4021       * to xmr ref.
4022       */
4023      /* if one inst form (know matches) move to reference itree loc. */
4024      if (dcep->dce_1inst)
4025       { __push_itstk(dcep->dce_refitp); nd_itpop = TRUE; }
4026      /* for xmr know target wire changed @(i1.i2.i3.i4.w)  w in dest. */
4027      else if (dcep->dce_xmrtyp != XNP_LOC)
4028       {
4029        /* SJM 04/17/03 - if not right instance do not process */
4030        if (!__match_push_targ_to_ref(dcep->dce_xmrtyp, dcep->dceu.dcegrp))
4031          break;
4032        nd_itpop = TRUE;
4033       }
4034 
4035      /* if armed (i.e. evctrl active) normal processing */
4036      /* notice current thread (init/always) may differ from dctp thread */
4037      /* so current thread must not be used here */
4038      if ((tevpi = dctp->dceschd_tevs[__inum]) != -1)
4039       {
4040        /* RELEASE remove ---
4041        {
4042         struct tev_t *tevp;
4043 
4044         tevp = &(__tevtab[tevpi]);
4045         if (tevp->tetyp < 1 || tevp->tetyp > 14)
4046          __misc_terr(__FILE__, __LINE__);
4047        }
4048        --- */
4049        /* filter out if wrong edge - know if has edge prev val set */
4050        /* unless dce expr also set (non nul) */
4051        if (dcep->dce_edge)
4052         {
4053          /* eval. expr - know at ref. (not var. chg) itree loc. */
4054          if ((dcexp = dcep->dce_expr) != NULL)
4055           {
4056            /* if XMR dce, already at ref itree loc*/
4057            if (!filter_edge_expr(dcexp, &oval, &nval)) goto dce_done;
4058            /* nval and oval set for edge checking below */
4059           }
4060 
4061          /* even though only pos and neg legal here use general signature */
4062          /* dce edgval is 1 bit per edge type table - epair tab maps to bit */
4063          emask = __epair_tab[nval | (oval << 2)];
4064          /* if no bits in common, no match */
4065          if (((byte) dcep->dce_edgval & emask) == 0) goto dce_done;
4066         }
4067        /* last: after move to ref inst, need to match itree loc for iact */
4068        /* need edge check before here because update old eval */
4069        if (dctp->dc_iact && dcep->iact_itp != __inst_ptr) goto dce_done;
4070 
4071        /* 10/27/00 SJM - if repeat form check and decrement repeat count */
4072        /* and if not counted down to 0 yet, do nothing (filter out) */
4073        if (dctp->repcntx != NULL)
4074         {
4075          /* REMOVEME AGAIN 04/01/02 ??? */
4076          /* DBG remove --- */
4077          if (__debug_flg)
4078           {
4079            __dbg_msg(
4080             "+++ now %s repeated edge for net %s (inst %s) count %d\n",
4081             __to_timstr(__xs2, &__simtime), np->nsym->synam,
4082             __msg2_blditree(__xs, __inst_ptr),
4083             (int32) dctp->dce_repcnts[__inum] - 1);
4084           }
4085          /* --- */
4086          /* SJM 04/02/02 since word32, any positive still do not trigger */
4087          if (--dctp->dce_repcnts[__inum] != 0) goto dce_done;
4088         }
4089        trigger_evctrl(dctp, tevpi);
4090       }
4091 
4092 dce_done:
4093      if (nd_itpop) __pop_itstk();
4094      break;
4095     case DCE_RNG_MONIT:
4096      /* no -2 IS form since 1 active monit from 1 itree place only */
4097      /* if entire wire changed, always match */
4098      if (npi1 != -1 && (npi1 < dcep->dci2.i || npi2 > dcep->dci1)) break;
4099      /*FALLTHRU */
4100     case DCE_MONIT:
4101      /* SJM 11/25/02 - only check off for ones that can be off */
4102      if (dcep->dce_off) break;
4103 
4104      /* notice these work by storing old and new values */
4105      if (dcep->dce_matchitp != __inst_ptr) break;
4106      /* fmon nil for the one monitor in design */
4107      if (dcep->dceu2.dce_fmon == NULL) __slotend_action |= SE_MONIT_TRIGGER;
4108      else
4109       {
4110        fmonp = dcep->dceu2.dce_fmon;
4111        /* if already activated, nothing to do */
4112        if (fmonp->fmse_trig == NULL)
4113         {
4114          /* allocate new se fmon */
4115          if (__fmse_freelst == NULL)
4116           fmsep = (struct fmselst_t *) __my_malloc(sizeof(struct fmselst_t));
4117          else
4118           {
4119            fmsep = __fmse_freelst;
4120            __fmse_freelst = __fmse_freelst->fmsenxt;
4121           }
4122          fmsep->fmsenxt = NULL;
4123          fmsep->fmon = fmonp;
4124 
4125          /* link it on list */
4126          if (__fmonse_hdr == NULL) __fmonse_hdr = fmsep;
4127          else __fmonse_end->fmsenxt = fmsep;
4128          __fmonse_end = fmsep;
4129          /* mark triggered */
4130          fmonp->fmse_trig = fmsep;
4131          __slotend_action |= SE_FMONIT_TRIGGER;
4132         }
4133       }
4134      break;
4135     case DCE_RNG_QCAF:
4136      if (dcep->dce_off) break;
4137      /* no -2 IS form since 1 active from 1 itree place only */
4138      /* if entire wire changed, always match */
4139      if (npi1 != -1 && (npi1 < dcep->dci2.i || npi2 > dcep->dci1)) break;
4140      /*FALLTHRU */
4141     case DCE_QCAF:
4142      if (dcep->dce_off) break;
4143      if (dcep->dce_matchitp != __inst_ptr) break;
4144      /* do not care which rhs wire changed must eval and assign all */
4145      __assign_qcaf(dcep);
4146      break;
4147     case DCE_RNG_PVC:
4148      /* SJM 07/24/00 - must turn off PLI 1.0 PV dces from inside self */
4149      if (dcep->dce_off) break;
4150 
4151      /* no -2 IS form since 1 active from 1 itree place only */
4152      /* if entire wire changed, always match */
4153      if (npi1 != -1 && (npi1 < dcep->dci2.i || npi2 > dcep->dci1)) break;
4154      /*FALLTHRU */
4155     case DCE_PVC:
4156      /* SJM 07/24/00 - must turn off PLI 1.0 PV dces from inside self */
4157      if (dcep->dce_off) break;
4158 
4159      /* notice tf PV change always per instance */
4160      if (dcep->dce_matchitp != __inst_ptr) break;
4161 
4162      /* must check to make sure psel assign changed bits in actual range */
4163      oval = nval = 3;
4164      /* if no chg record, then array or reg entire wire so know changed */
4165      /* one dce for each different inst and location of _tf call */
4166      if (dcep->prevval.wp != NULL)
4167       {
4168        if (np->n_stren)
4169         { if (!stfilter_dce_chg(np, dcep, &oval, &nval, TRUE)) break; }
4170        else
4171         { if (!filter_dce_chg(np, dcep, &oval, &nval, TRUE)) break; }
4172       }
4173      /* do not care which rhs wire changed must eval and assign all */
4174      __pvc_call_misctf(dcep);
4175      break;
4176     case DCE_RNG_CBVC:
4177      /* SJM 07/24/00 - must turn off PLI 1.0 PV dces from inside self */
4178      if (dcep->dce_off) break;
4179 
4180      /* callback value change but dce contents differ */
4181      /* no -2 IS form since 1 active from 1 itree place only */
4182      /* if entire wire changed, always match */
4183      if (npi1 != -1 && (npi1 < dcep->dci2.i || npi2 > dcep->dci1)) break;
4184      /*FALLTHRU */
4185     case DCE_CBVC:
4186      /* SJM 07/24/00 - must turn off PLI 1.0 PV dces from inside self */
4187      if (dcep->dce_off) break;
4188 
4189      if (dcep->dce_matchitp != __inst_ptr) break;
4190      /* DBG remove ---
4191      if (__debug_flg && np->n_stren)
4192       {
4193        int32 dwid;
4194        byte *sbp;
4195        char s1[RECLEN];
4196 
4197        get_stwire_addr_(sbp, np);
4198        -* SJM 06/03/02 - was wrongly checking dci2 *-
4199        if (dcep->dci1 == -2) __misc_terr(__FILE__, __LINE__);
4200 
4201        if (dcep->dci1 != -1)
4202         {
4203          sbp = &(sbp[dcep->dci2.i]);
4204          dwid = (dcep->dci1 - dcep->dci2.i) + 1;
4205          sprintf(s1, "%s[%d:%d]", np->nsym->synam, dcep->dci1, dcep->dci2.i);
4206         }
4207        else { dwid = np->nwid; strcpy(s1, np->nsym->synam); }
4208        __dbg_msg("CBVC: %s strength value %s (old %s)\n", s1,
4209         __st_regab_tostr(__xs, sbp, dwid),
4210         __st_regab_tostr(__xs2, dcep->prevval.bp, dwid));
4211       }
4212      else
4213       {
4214        struct xstk_t *xsp, *xsp2;
4215 
4216        push_xstk_(xsp, np->nwid);
4217        __ld_wire_val(xsp->ap, xsp->bp, np);
4218 
4219        if (dcep->prevval.wp != NULL)
4220         {
4221          -* know have current instance here *-
4222          push_xstk_(xsp2, np->nwid);
4223          __ld_perinst_val(xsp2->ap, xsp2->bp, dcep->prevval, np->nwid);
4224          __regab_tostr(__xs2, xsp2->ap, xsp2->bp, xsp2->xslen, BHEX, FALSE);
4225          __pop_xstk();
4226         }
4227        else strcpy(__xs2, "**none**");
4228 
4229        __dbg_msg("CBVC: value %s (old %s)\n",
4230         __regab_tostr(__xs, xsp->ap, xsp->bp, xsp->xslen, BHEX, FALSE), __xs2);
4231        __pop_xstk();
4232       }
4233      --- */
4234 
4235      oval = nval = 3;
4236      /* if no chg record, then array or reg entire wire so know changed */
4237      /* one dce for each different inst and location of _tf call */
4238      if (dcep->prevval.wp != NULL)
4239       {
4240        if (!np->n_isavec)
4241         {
4242          /* 05/20/00 - SJM - following LRM vi vpi stren report st chg */
4243          /* user passed non stren val request to vpi_ cb call back */
4244          if (!np->n_stren || dcep->dce_nomonstren)
4245           {
4246            /* SJM 06/29/04 - simplified - always use stren version for scal */
4247            if (!scal_stfilter_dce_chg(np, dcep, &oval, &nval, TRUE))
4248             break;
4249           }
4250          else
4251           {
4252            /* need strength changes too */
4253             if (!vccb_scal_standval_filter(np, dcep, &oval, &nval, TRUE))
4254              break;
4255           }
4256         }
4257        else
4258         {
4259          if (!np->n_stren)
4260           { if (!filter_dce_chg(np, dcep, &oval, &nval, TRUE)) break; }
4261          else
4262           {
4263            /* 05/20/00 - SJM - following LRM vi vpi stren report st chg */
4264            /* user passed non stren val request to vpi_ cb call back */
4265            if (dcep->dce_nomonstren)
4266             {
4267              if (!stfilter_dce_chg(np, dcep, &oval, &nval, TRUE)) break;
4268             }
4269            else
4270             {
4271              /* need strength changes too */
4272              if (!vccb_vec_standval_filter(np, dcep, &oval, &nval, TRUE))
4273               break;
4274             }
4275           }
4276         }
4277       }
4278      /* need one call back for every change */
4279 
4280      /* SJM 07/24/00 - must run with this call back turned off in case */
4281      /* call back c code does put value to reg because change propagation */
4282      /* for regs must be immediate */
4283      /* notice will never get here unless dce on */
4284      dcep->dce_off = TRUE;
4285      /* SJM 10/06/06 - must pass the dce since dce cbp has list of dces */
4286      __cbvc_callback(dcep, dcep->dceu.dce_cbp, dcep->dceu.dce_cbp->cb_hp);
4287 
4288      /* SJM 10/06/06 - dcep may be free in the user call back so cbvc */
4289      /* call back processing code handles turning back on if user did */
4290      /* not turn off in the cb routine - also loop must handle freed case */
4291      break;
4292 
4293     /* these are used only in vpi_ for force/release call backs */
4294     case DCE_CBF: case DCE_RNG_CBF: case DCE_CBR: case DCE_RNG_CBR:
4295      break;
4296     default: __case_terr(__FILE__, __LINE__);
4297    }
4298    dcep = dcep2;
4299   }
4300 }
4301 
4302 /*
4303  * evaluate, set edge new and old and filter for dce change - non xmr case
4304  */
4305 static int32 filter_edge_expr(register struct dce_expr_t *dcexp, word32 *oval,
4306  word32 *nval)
4307 {
4308  register word32 nav, nbv;
4309  register struct xstk_t *xsp;
4310  word32 av, bv;
4311 
4312  /* evaluate expr. to get current edge in ref. context */
4313  xsp = __eval_xpr(dcexp->edgxp);
4314  /* extract low bit in case wide */
4315  nav = xsp->ap[0] & 1L;
4316  nbv = xsp->bp[0] & 1L;
4317  /* SJM 08/07/00 - now done with pushed expr value */
4318  __pop_xstk();
4319 
4320  *nval = nav | (nbv << 1);
4321  ld_scalval_(&av, &bv, dcexp->bp);
4322  *oval = av | (bv << 1);
4323  /* if variable does not affect expr. value, no edge */
4324  if (*nval == *oval) return(FALSE);
4325  st_scalval_(dcexp->bp, nav, nbv);
4326  return(TRUE);
4327 }
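
/*
 * usage sketch: for an event control such as @(posedge (a & en)) the dce
 * expr is (a & en); the low bit of its value is reduced to the 2 bit code
 * nav | (nbv << 1), the previous code is kept per dce in dcexp->bp, and
 * the caller feeds the old/new pair to __epair_tab to check the requested
 * edge (e.g. old 0, new 1 matches EDGE01)
 */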
4328 
4329 /*
4330  * trigger an armed event control for current itree loc.
4331  * changes pending delay control event and thread resume event, and links in
4332  *
4333  * itree loc. must match and for xmr/col. is ref. itree loc.
4334  * know will not see if event delay control not active (armed)
4335  */
4336 static void trigger_evctrl(struct delctrl_t *dctp, register i_tev_ndx tevpi)
4337 {
4338  register struct tev_t *tevp;
4339 
4340  tevp = &(__tevtab[tevpi]);
4341  /* getting here means dctrl event triggered */
4342  /* DBG remove --- */
4343  if (__debug_flg && __st_tracing)
4344   {
4345    struct st_t *stp;
4346 
4347    if (tevp->tetyp == TE_NBPA)
4348     {
4349      stp = tevp->tu.tenbpa->nbastp;
4350      __tr_msg(
4351       "-- scheduling NB event control assign for now line %s (itree=%s)\n",
4352       __bld_lineloc(__xs, stp->stfnam_ind, stp->stlin_cnt),
4353       __inst_ptr->itip->isym->synam);
4354     }
4355    else
4356     {
4357      stp = tevp->tu.tethrd->thnxtstp;
4358      __tr_msg(
4359       "-- scheduling event control resume for now line %s (chg in thd=%s, itree=%s)\n",
4360       __bld_lineloc(__xs, stp->stfnam_ind, stp->stlin_cnt),
4361        tevp->tu.tethrd->th_itp->itip->isym->synam,
4362       __inst_ptr->itip->isym->synam);
4363     }
4364   }
4365  /* --- */
4366 
4367  /* must schedule wakeup since no way to interrupt current context */
4368  /* which may not be procedural threads */
4369  tevp->etime = __simtime;
4370  /* armed event and now resume event already associated with thread */
4371  /* restart thread already set - must add to front for interactive */
4372  if (tevp->tetyp != TE_NBPA)
4373   {
4374    __add_ev_to_front(tevpi);
4375    dctp->dceschd_tevs[__inum] = -1;
4376    /* in case disable, indicate suspended on ev thrd no suspend to disable */
4377    tevp->tu.tethrd->th_dctp = NULL;
4378   }
4379  /* else add to #0 for non blocking assign */
4380  else
4381   {
4382    /* LOOKATME - is this right */
4383    /* here ok to have list of events (non inertial) */
4384    /* know at least 1 event, remove head schedule for now after trigger */
4385    dctp->dceschd_tevs[__inum] = tevp->tenxti;
4386    if (__p0_te_hdri == -1) __p0_te_hdri = __p0_te_endi = tevpi;
4387    else { __tevtab[__p0_te_endi].tenxti = tevpi; __p0_te_endi = tevpi; }
4388 
4389    /* works because no new tevs that could cause realloc called */
4390    __tevtab[tevpi].tenxti = -1;
4391    /* this now looks like normal delay control nb */
4392    tevp->tu.tenbpa->nbdctp = NULL;
4393   }
4394 }
4395 
4396 /*
4397  * stren filter non monit dce for real change - return F if not changed
4398  *
4399  * needed because may assign to range but monitored bit in range did not change
4400  * also if edge operator (not change) this sets the oval and nval
4401  * dce put on target
4402  *
4403  * oneinst for cases where must load right inst of wire but only one inst
4404  * for dce
4405  *
4406  * this is for change operator not %v so value not strength change used
4407  */
4408 static int32 stfilter_dce_chg(register struct net_t *np,
4409  register struct dcevnt_t *dcep, word32 *oval, word32 *nval, int32 oneinst)
4410 {
4411  register int32 bi;
4412  register byte *dcesbp, *nsbp;
4413  int32 dcewid, i1, i2;
4414  byte dcev, nv;
4415 
4416  /* SJM 05/08/03 - stren can't be expr since eval of expr removes stren */
4417  /* DBG remove -- */
4418  if (dcep->dce_expr != NULL) __misc_terr(__FILE__, __LINE__);
4419  /* --- */
4420 
4421  dcewid = __get_dcewid(dcep, np);
4422  /* get dce value ptr (for iact per instance so from 0th) */
4423  if (oneinst) dcesbp = dcep->prevval.bp;
4424  else dcesbp = &(dcep->prevval.bp[__inum*dcewid]);
4425 
4426  __get_cor_range(dcep->dci1, dcep->dci2, &i1, &i2);
4427  /* point to wire value */
4428  get_stwire_addr_(nsbp, np);
4429  /* since LRM allows vector - this automatically accesses low bit */
4430  if (i1 != -1) nsbp = &(nsbp[i1]);
4431 
4432  /* only need to set values for edge dce - if complicated, expr form is needed */
4433  if (dcep->dce_edge)
4434   {
4435    if ((dcev = (dcesbp[0] & 3)) == (nv = (nsbp[0] & 3))) return(FALSE);
4436    *oval = dcev;
4437    *nval = nv;
4438    /* update the prevval for next wire change */
4439    dcesbp[0] = nsbp[0];
4440    return(TRUE);
4441   }
4442 
4443  /* all change operators here (%v handled elsewhere) are value only */
4444  for (bi = 0; bi < dcewid; bi++)
4445   { if ((dcesbp[bi] & 3) != (nsbp[bi] & 3)) goto not_same_val; }
4446  return(FALSE);
4447 
4448 not_same_val:
4449  /* old value comes from internally stored prev. val, new is value of wire */
4450  /* copy from nbsp to dcesbp */
4451  memcpy(dcesbp, nsbp, dcewid);
4452  return(TRUE);
4453 }
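
/*
 * illustrative sketch (not part of the build, helper name hypothetical):
 * the value only compare used above on strength bytes - the low 2 bits of
 * each byte are taken as the logic value and the upper bits as strength,
 * so masking with 3 makes strength only differences invisible.
 *
 *  static int32 val_only_differs(byte *oldbp, byte *newbp, int32 nbytes)
 *  {
 *   int32 i;
 *
 *   for (i = 0; i < nbytes; i++)
 *    { if ((oldbp[i] & 3) != (newbp[i] & 3)) return(TRUE); }
 *   return(FALSE);
 *  }
 */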
4454 
4455 /*
4456  * vector stren and value filter for vpi_ stren only val chg call back
4457  * return F if not changed
4458  *
4459  * needed because may assign to range but monitored bit in range did not change
4460  * also if edge operator (not change) this sets the oval and nval
4461  * dce put on target
4462  *
4463  * 06/06/00 - SJM - new routine to filter vpi vec stren only chg
4464  */
4465 static int32 vccb_vec_standval_filter(register struct net_t *np,
4466  register struct dcevnt_t *dcep, word32 *oval, word32 *nval, int32 oneinst)
4467 {
4468  register byte *dcesbp, *nsbp;
4469  int32 dcewid, i1, i2;
4470  byte dcev, nv;
4471 
4472  /* SJM 05/08/03 - stren can't be expr since eval of expr removes stren */
4473  /* DBG remove -- */
4474  if (dcep->dce_expr != NULL) __misc_terr(__FILE__, __LINE__);
4475  /* --- */
4476 
4477  dcewid = __get_dcewid(dcep, np);
4478  /* get dce value ptr (for iact per instance so from 0th) */
4479  if (oneinst) dcesbp = dcep->prevval.bp;
4480  else dcesbp = &(dcep->prevval.bp[__inum*dcewid]);
4481 
4482  __get_cor_range(dcep->dci1, dcep->dci2, &i1, &i2);
4483  /* point to wire value */
4484  get_stwire_addr_(nsbp, np);
4485  /* since LRM allows vector - this automatically accesses low bit */
4486  if (i1 != -1) nsbp = &(nsbp[i1]);
4487 
4488  /* only need to set values for edge dce - if complicated, expr form is needed */
4489  /* for edges only low bit */
4490  /* LOOKATME - think edges can't happen here */
4491  if (dcep->dce_edge)
4492   {
4493    if ((dcev = dcesbp[0]) == (nv = nsbp[0])) return(FALSE);
4494    *oval = dcev & 3;
4495    *nval = nv & 3;
4496    /* update the prevval for next wire change */
4497    dcesbp[0] = nsbp[0];
4498    return(TRUE);
4499   }
4500 
4501  /* call back happens if only strength changes */
4502  if (memcmp((char *) nsbp, (char *) dcesbp, dcewid) == 0) return(FALSE);
4503  memcpy(dcesbp, nsbp, dcewid);
4504  return(TRUE);
4505 }
4506 
4507 /*
4508  * vpi_ scalar val chg stren and value filter (report stren only chges)
4509  * return F if not changed
4510  *
4511  * 06/06/00 - SJM - new routine to filter vpi scalar stren only chg
4512  */
4513 static int32 vccb_scal_standval_filter(register struct net_t *np,
4514  register struct dcevnt_t *dcep, word32 *oval, word32 *nval, int32 oneinst)
4515 {
4516  register byte *dcesbp, *nsbp;
4517  byte dcev, nv;
4518 
4519  /* SJM 05/08/03 - stren can't be expr since eval of expr removes stren */
4520  /* DBG remove -- */
4521  if (dcep->dce_expr != NULL) __misc_terr(__FILE__, __LINE__);
4522  /* --- */
4523 
4524  /* get dce value ptr (for iact per instance so from 0th) */
4525  if (oneinst) dcesbp = dcep->prevval.bp;
4526  else dcesbp = &(dcep->prevval.bp[__inum]);
4527 
4528  /* point to wire value */
4529  get_stwire_addr_(nsbp, np);
4530  /* if values same, no edge or no change */
4531  if ((dcev = dcesbp[0]) == (nv = nsbp[0])) return(FALSE);
4532  dcesbp[0] = nsbp[0];
4533 
4534  /* only need to set values for edge dce - if complicated, expr form is needed */
4535  /* but need value without strength for edge */
4536  if (dcep->dce_edge) { *oval = dcev & 3; *nval = nv & 3; }
4537 
4538  return(TRUE);
4539 }
4540 
4541 /*
4542  * filter non monit dce for actual change - return F if not changed
4543  * called from decl itree context for XMR
4544  *
4545  * needed because may assign to range but monitored bit in range did not change
4546  * also if edge operator (not change) sets the oval and nval
4547  * know wire changed so "new" prevval is wire value
4548  */
4549 static int32 filter_dce_chg(register struct net_t *np,
4550  register struct dcevnt_t *dcep, word32 *oval, word32 *nval, int32 oneinst)
4551 {
4552  int32 dcewid, i1, i2, rv;
4553  register struct xstk_t *dcexsp, *nxsp;
4554 
4555  dcewid = __get_dcewid(dcep, np);
4556  /* load dce current value */
4557  push_xstk_(dcexsp, dcewid);
4558  /* if only one inst prev. val. is from 0th inst but wire from right inst */
4559  if (oneinst)
4560   {
4561    /* need some model so use current itree's although need inst 1 for eval */
4562    /* first move from inst about to be pushed on top of */
4563    __push_wrkitstk(__inst_mod, 0);
4564    __ld_perinst_val(dcexsp->ap, dcexsp->bp, dcep->prevval, dcewid);
4565    __pop_wrkitstk();
4566   }
4567  else __ld_perinst_val(dcexsp->ap, dcexsp->bp, dcep->prevval, dcewid);
4568 
4569  /* load wire value */
4570  push_xstk_(nxsp, dcewid);
4571  __get_cor_range(dcep->dci1, dcep->dci2, &i1, &i2);
4572  __ld_wire_sect(nxsp->ap, nxsp->bp, np, i1, i2);
4573  if (cmp_vval_(dcexsp->ap, nxsp->ap, dcewid) == 0 &&
4574   (cmp_vval_(dcexsp->bp, nxsp->bp, dcewid) == 0)) { rv = FALSE; goto done; }
4575 
4576  /* only need to set values for edge dce - if complicated, expr form is needed */
4577  if (dcep->dce_edge)
4578   {
4579    /* old value comes from internally stored prevval, new is value of wire */
4580    *oval = (dcexsp->ap[0] & 1L) | ((dcexsp->bp[0] << 1) & 2L);
4581    *nval = (nxsp->ap[0] & 1L) | ((nxsp->bp[0] << 1) & 2L);
4582   }
4583  /* if one instance store into that 0th inst (only) loc. */
4584  if (oneinst)
4585   {
4586    /* need itree loc. - use current mod - save before pushing on top */
4587    __push_wrkitstk(__inst_mod, 0);
4588    __st_perinst_val(dcep->prevval, dcewid, nxsp->ap, nxsp->bp);
4589    __pop_wrkitstk();
4590   }
4591  else __st_perinst_val(dcep->prevval, dcewid, nxsp->ap, nxsp->bp);
4592  rv = TRUE;
4593 done:
4594  __pop_xstk();
4595  __pop_xstk();
4596  return(rv);
4597 }
4598 
4599 /*
4600  * scalar stren filter non monit dce for real change
4601  * return F if not changed
4602  *
4603  * changes are always value only - monit %v strength handled in monit
4604  * as special case
4605  */
4606 static int32 scal_stfilter_dce_chg(register struct net_t *np,
4607  register struct dcevnt_t *dcep, word32 *oval, word32 *nval, int32 oneinst)
4608 {
4609  register byte *dcesbp, *nsbp;
4610  byte dcev, nv;
4611 
4612  /* get dce value ptr (for iact per instance so from 0th) */
4613  if (oneinst) dcesbp = dcep->prevval.bp;
4614  else dcesbp = &(dcep->prevval.bp[__inum]);
4615 
4616  /* point to wire value */
4617  get_stwire_addr_(nsbp, np);
4618  /* if values same, no edge or no change */
4619  if ((dcev = (dcesbp[0] & 3)) == (nv = (nsbp[0] & 3))) return(FALSE);
4620  dcesbp[0] = nsbp[0];
4621 
4622  /* only need to set values for edge dce - if complicated, expr form is needed */
4623  if (dcep->dce_edge) { *oval = dcev; *nval = nv; }
4624 
4625  /* LOOKATME - could fix %v strength only change on value here */
4626  /* if knew which dce was not %v and dcewid is number of bytes since stren */
4627 
4628  return(TRUE);
4629 }
4630 
4631 /*
4632  * TIMING CHECK SIMULATION ROUTINES
4633  */
4634 
4635 /*
4636  * process a time-of-change net pin record
4637  * action depends on npp subtype
4638  * these are always 1 bit only (bsel, scalar or can be low bit of vector)
4639  */
4640 extern void __process_npp_timofchg(struct net_t *np,
4641  register struct net_pin_t *npp)
4642 {
4643  register int32 ii;
4644  register struct tchg_t *tchgp;
4645  int32 i1;
4646  word32 new_eval, old_eval, av, bv;
4647  word64 chgtim, reftim;
4648  struct tchk_t *tcp;
4649  struct chktchg_t *chktcp;
4650  struct itree_t *downitp;
4651  struct npaux_t *npauxp;
4652  struct spcpth_t *newpthp;
4653 
4654  /* notice because load bit of scalar works for entire wire, get bit 0 */
4655  if ((npauxp = npp->npaux) == NULL) i1 = 0; else i1 = npauxp->nbi1;
4656  /* all but in module need this correction */
4657  downitp = NULL;
4658  if (npp->npproctyp != NP_PROC_INMOD)
4659   {
4660    /* know np is wire npp is on and one under itree stack is wire inst */
4661    /* current itree loc. here is loc. of ref. */
4662    downitp = __inst_ptr;
4663    __pop_itstk();
4664   }
4665  __ld_bit(&av, &bv, np, i1);
4666  new_eval = av | (bv << 1);
4667  if (downitp != NULL) __push_itstk(downitp);
4668 
4669  /* know inst ptr does not change in here */
4670  ii = __inum;
4671  switch (npp->chgsubtyp) {
4672   case NPCHG_TCSTART:
4673    /* notice reference event always recorded */
4674    /* $period does not have reference event net pin change element */
4675    tchgp = npp->elnpp.etchgp;
4676    tcp = tchgp->chgu.chgtcp;
4677    if (!filter_bitchange(new_eval, tchgp->oldval, tcp->startedge,
4678     tcp->startcondx)) break;
4679    tchgp->lastchg[ii] = __simtime;
4680    if (__debug_flg && __ev_tracing)
4681     {
4682      char s1[RECLEN], s2[RECLEN];
4683 
4684      if (npp->npproctyp != NP_PROC_INMOD) bld_xmrsrc_ref(s1, np);
4685      else sprintf(s1, "%s.%s", __msg2_blditree(__xs, __inst_ptr),
4686       np->nsym->synam);
4687 
4688      __tr_msg("## wire %s recording %s (line %s) reference event at %s\n", s1,
4689       __to_tcnam(__xs, tcp->tchktyp),
4690       __bld_lineloc(s2, tcp->tcsym->syfnam_ind, tcp->tcsym->sylin_cnt),
4691       __to_timstr(__xs2, &__simtime));
4692 
4693       bld_srcfilter_ref(s2, FALSE, tcp->startedge, tcp->startcondx);
4694       if (strcmp(s2, "") != 0) __tr_msg("   %s\n", s2);
4695     }
4696    break;
4697   case NPCHG_TCCHK:
4698    chktcp = npp->elnpp.echktchgp;
4699    tchgp = chktcp->startchgp;
4700    tcp = tchgp->chgu.chgtcp;
4701    /* this returns F if condition or edge does not match */
4702    if (!filter_bitchange(new_eval, chktcp->chkoldval, tcp->chkedge,
4703     tcp->chkcondx)) break;
4704 
4705    reftim = tchgp->lastchg[ii];
4706    /* filter out initialize changes - need real change for timing check */
4707    if (reftim == 0ULL)
4708     {
4709      /* first during run change for period recorded but no violation */
4710      if (tcp->tchktyp == TCHK_PERIOD) tchgp->lastchg[ii] = __simtime;
4711      break;
4712     }
4713    /* also if already on list for now, do not add again */
4714    chgtim = chktcp->chklastchg[ii];
4715    if (chgtim != __simtime)
4716     {
4717      add_tchk_chged(chktcp);
4718      /* possibly better to only record for "record before check" case */
4719      chktcp->chklastchg[ii] = __simtime;
4720 
4721      if (__debug_flg && __ev_tracing)
4722       {
4723        char s1[RECLEN], s2[RECLEN];
4724 
4725        if (npp->npproctyp != NP_PROC_INMOD) bld_xmrsrc_ref(s1, np);
4726        else sprintf(s1, "%s.%s", __msg2_blditree(__xs, __inst_ptr),
4727         np->nsym->synam);
4728 
4729        __tr_msg("## wire %s recording %s (line %s) data event at %s\n", s1,
4730         __to_tcnam(__xs, tcp->tchktyp),
4731         __bld_lineloc(s2, tcp->tcsym->syfnam_ind, tcp->tcsym->sylin_cnt),
4732         __to_timstr(__xs2, &__simtime));
4733 
4734         bld_srcfilter_ref(s2, FALSE, tcp->chkedge, tcp->chkcondx);
4735         if (strcmp(s2, "") != 0) __tr_msg("   %s\n", s2);
4736       }
4737      /* if repeated edge during same time - use 1st of this time as ref. */
4738     }
4739    /* SJM 10/10/04 - was wrongly setting ref event change time when repeated */
4740    /* check event changes during same time */
4741    break;
4742   case NPCHG_PTHSRC:
4743    tchgp = npp->elnpp.etchgp;
4744    newpthp = tchgp->chgu.chgpthp;
4745    /* special case code if no path edge or cond - also always record ifnone */
4746    /* because must be simple path */
4747    if (newpthp->pth_ifnone || (newpthp->pthedge == NOEDGE
4748     && newpthp->pthcondx == NULL))
4749     {
4750      ld_scalval_(&av, &bv, tchgp->oldval);
4751      old_eval = (av | (bv << 1));
4752      if (new_eval == old_eval) break;
4753      /* always save new value to old because value changed */
4754      st2_scalval_(tchgp->oldval, new_eval);
4755     }
4756    else
4757     {
4758      if (!filter_bitchange(new_eval, tchgp->oldval, newpthp->pthedge,
4759       newpthp->pthcondx)) break;
4760     }
4761    tchgp->lastchg[ii] = __simtime;
4762    if ((__debug_flg && __ev_tracing) || __pth_tracing)
4763     {
4764      char s1[RECLEN], s2[RECLEN];
4765 
4766      if (npp->npproctyp != NP_PROC_INMOD) bld_xmrsrc_ref(s1, np);
4767      else sprintf(s1, "%s.%s", __msg2_blditree(__xs, __inst_ptr),
4768       np->nsym->synam);
4769 
4770      /* notice cannot identify by delay since do not know old/new value */
4771      __tr_msg("## wire %s recording path (line %s) source change at %s\n", s1,
4772       __bld_lineloc(s2, newpthp->pthsym->syfnam_ind,
4773       newpthp->pthsym->sylin_cnt), __to_timstr(__xs, &__simtime));
4774 
4775      bld_srcfilter_ref(s2, newpthp->pth_ifnone, newpthp->pthedge,
4776       newpthp->pthcondx);
4777      if (strcmp(s2, "") != 0) __tr_msg("   %s\n", s2);
4778     }
4779    break;
4780   default: __case_terr(__FILE__, __LINE__);
4781  }
4782 }
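
/*
 * worked example (numbers hypothetical): whichever terminal a timing check
 * treats as its start/reference event only records tchgp->lastchg[inst] =
 * now when it changes (NPCHG_TCSTART above); a later matching change on the
 * check terminal (NPCHG_TCCHK) is queued with add_tchk_chged, and the
 * violation test - e.g. (now - lastchg) < limit for a 5 unit hold limit -
 * is run once at the end of the time slot.
 */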
4783 
4784 /*
4785  * build xmr source net instance reference
4786  */
4787 static void bld_xmrsrc_ref(char *s, struct net_t *np)
4788 {
4789  sprintf(s, "%s.%s (xmr from %s)",
4790   __msg2_blditree(__xs, __itstk[__itspi - 1]), np->nsym->synam,
4791   __msg2_blditree(__xs2, __inst_ptr));
4792 }
4793 
4794 /*
4795  * build path source filter (ifnone, or edge and/or condition)
4796  */
4797 static void bld_srcfilter_ref(char *s, word32 pthifnone, word32 cedge,
4798  struct expr_t *cxp)
4799 {
4800  int32 has_edge;
4801  struct xstk_t *xsp;
4802  char s1[RECLEN], s2[RECLEN], s3[RECLEN];
4803 
4804  has_edge = FALSE;
4805  strcpy(s, "");
4806  if (pthifnone) strcpy(s, "CONDITION: ifnone");
4807  else
4808   {
4809    if (cedge != NOEDGE)
4810     {
4811      sprintf(s1, "EDGE: %s", __to_edgenam(s2, cedge));
4812      strcpy(s, s1);
4813      has_edge = TRUE;
4814     }
4815    if (cxp != NULL)
4816     {
4817      xsp = __eval_xpr(cxp);
4818      sprintf(s1, "CONDITION: %s TRUE value %s", __msgexpr_tostr(s2, cxp),
4819       __regab_tostr(s3, xsp->ap, xsp->bp, xsp->xslen, BHEX, FALSE));
4820      /* SJM 08/30/99 - for edge trace was not popping stack */
4821      __pop_xstk();
4822      if (has_edge) strcat(s, ", and ");
4823      strcat(s, s1);
4824     }
4825   }
4826 }
4827 
4828 /*
4829  * return T if bit changed (must pass all filters to count as changed)
4830  * because new edge value already computed (in up), need down itree here
4831  */
4832 static int32 filter_bitchange(register word32 new_eval, register byte *oldbp,
4833  register word32 signat, struct expr_t *condx)
4834 {
4835  register word32 old_eval;
4836  word32 av, bv;
4837  int32 epair;
4838  struct xstk_t *xsp;
4839 
4840  ld_scalval_(&av, &bv, oldbp);
4841  old_eval = (av | (bv << 1));
4842 
4843  /* first filter: this bit did not change */
4844  if (new_eval == old_eval) return(FALSE);
4845  /* always save new value to old */
4846  st2_scalval_(oldbp, new_eval);
4847 
4848  /* second filter if has edge - only change if matching edge */
4849  if (signat != 0)
4850   {
4851    /* build edge 4 bit index */
4852    epair = __epair_tab[new_eval | (old_eval << 2)];
4853    /* if any edge table bit is 1, then found edge */
4854    if ((signat & epair) == 0) return(FALSE);
4855   }
4856  /* third filter &&& cond or sdpd or edge sensitive path */
4857  if (condx != NULL)
4858   {
4859    /* only eliminate if evaluates to 0 - x/z condition match */
4860    xsp = __eval2_xpr(condx);
4861    /* timing check condition expressions must be 1 bit - use low */
4862    /* fastest to just always mask */
4863    /* LRM requires anything but explicit false (0) is T */
4864    /* for === operators never x/z (only 1 or 0) possible so always works */
4865    /* === illegal in SDPDs so never a problem */
4866    /* for nondeterministic x or z is always T on paths or tchks */
4867 
4868    if ((xsp->ap[0] & 1L) == 0L && (xsp->bp[0] & 1L) == 0L)
4869     { __pop_xstk(); return(FALSE); }
4870    __pop_xstk();
4871   }
4872  return(TRUE);
4873 }
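
/*
 * illustrative sketch (not part of the build, helper name hypothetical):
 * the edge pair lookup used above.  The 4 bit index packs the old and new
 * 2 bit codes as (old_eval << 2) | new_eval, and the table entry is anded
 * against the edge signature built from the source edge spec.
 *
 *  static int32 edge_matches(word32 old_eval, word32 new_eval,
 *   word32 signat, word32 *epair_tab)
 *  {
 *   word32 epair;
 *
 *   epair = epair_tab[new_eval | (old_eval << 2)];
 *   return((signat & epair) != 0);
 *  }
 */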
4874 
4875 /*
4876  * add a timing check to end of now data change routines
4877  * for processing at end of time slot (required by semantics)
4878  */
4879 static void add_tchk_chged(struct chktchg_t *chkchgp)
4880 {
4881  struct tc_pendlst_t *tcpendp;
4882 
4883  /* get a list element from somewhere */
4884  if (__tcpendfreelst == NULL)
4885   tcpendp = (struct tc_pendlst_t *) __my_malloc(sizeof(struct tc_pendlst_t));
4886  else
4887   {
4888    tcpendp = __tcpendfreelst;
4889    __tcpendfreelst = __tcpendfreelst->tc_plnxt;
4890   }
4891  tcpendp->tc_chkchgp = chkchgp;
4892  tcpendp->tc_itp = __inst_ptr;
4893  tcpendp->tc_plnxt = NULL;
4894 
4895  /* link on end since need batch movement of all to free list */
4896  if (__tcpendlst_end != NULL)
4897   { __tcpendlst_end->tc_plnxt = tcpendp; __tcpendlst_end = tcpendp; }
4898  else
4899   {
4900    __tcpendlst_hdr = __tcpendlst_end = tcpendp;
4901    __slotend_action |= SE_TCHK_VIOLATION;
4902   }
4903 }
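
/*
 * illustrative sketch (generic pattern, struct and names hypothetical):
 * the free list recycling used above - pop a recycled element if one
 * exists, otherwise malloc a new one, then append to the pending list tail
 * so end of slot processing sees changes in recording order.
 *
 *  struct el_t { struct el_t *nxt; };
 *
 *  static struct el_t *get_el(struct el_t **freelst)
 *  {
 *   struct el_t *ep;
 *
 *   if (*freelst == NULL)
 *    ep = (struct el_t *) __my_malloc(sizeof(struct el_t));
 *   else { ep = *freelst; *freelst = ep->nxt; }
 *   ep->nxt = NULL;
 *   return(ep);
 *  }
 */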
4904 
4905 /*
4906  * routine to process all timing check violations at end of time slot
4907  * LOOKATME - this is maybe wrong because of recording of data event order?
4908  */
4909 static void process_all_tchk_violations(void)
4910 {
4911  register struct tc_pendlst_t *tcpendp;
4912  word64 diff, lim1, lim2, reftim;
4913  struct tchk_t *tcp, *tcp2;
4914  struct chktchg_t *chktcp;
4915  struct tchg_t *chgp;
4916  char s1[RECLEN], s2[RECLEN];
4917 
4918  for (tcpendp = __tcpendlst_hdr; tcpendp != NULL; tcpendp = tcpendp->tc_plnxt)
4919   {
4920    __push_itstk(tcpendp->tc_itp);
4921    chktcp = tcpendp->tc_chkchgp;
4922    chgp = chktcp->startchgp;
4923    tcp = chgp->chgu.chgtcp;
4924    reftim = chgp->lastchg[__inum];
4925    diff = __simtime - reftim;
4926    /* notice no edge globals set but error before here if not simple delay */
4927    if (!tcp->tc_supofsuphld && !tcp->tc_recofrecrem)
4928     __get_del(&lim1, tcp->tclim_du, tcp->tc_delrep);
4929    lim2 = 0ULL;
4930 
4931    switch ((byte) tcp->tchktyp) {
4932     case TCHK_SETUP:
4933      if (tcp->tc_supofsuphld)
4934       {
4935        /* added setup of setup hold needs limit from 1st lim of setuphold */
4936        /* because must be able to change both delays during sim */
4937        tcp2 = (struct tchk_t *) tcp->tclim_du.pdels;
4938        __get_del(&lim1, tcp2->tclim_du, tcp2->tc_delrep);
4939       }
4940      /* for setup if simultaneous change no violation */
4941      /* setup of setuphold also here with reversed for setup conns/lim1 */
4942      if (diff >= lim1 || diff == 0ULL) break;
4943 
4944 emit_msg:
4945      bld_tchk_srcdump(__xs, tcp, &reftim, &__simtime, &lim1, &lim2);
4946      __gfwarn(566, tcp->tcsym->syfnam_ind, tcp->tcsym->sylin_cnt,
4947       "timing violation in %s (diff. %s)\n %s",
4948       __msg2_blditree(s1, __inst_ptr), __to_timstr(s2, &diff), __xs);
4949      /* toggle notify reg if present */
4950      if (tcp->ntfy_np != NULL) process_notify(tcp->ntfy_np);
4951      if (__have_vpi_actions) __vpi_tchkerr_trycall(tcp, __inst_ptr);
4952      break;
4953     case TCHK_SETUPHOLD:
4954      /* this is hold part of setup hold */
4955      __get_del(&lim2, tcp->tclim2_du, tcp->tc_delrep2);
4956      /* 2nd limit is hold part */
4957      lim1 = lim2;
4958      /* AIV 09/15/04 - difference of 0 must not cause check - see hold */
4959      if (diff < lim1 && diff != 0ULL) goto emit_msg;
4960      break;
4961     case TCHK_HOLD:
4962      /* AIV 09/15/04 - difference of 0 must not cause check */
4963      if (diff < lim1 && diff != 0ULL) goto emit_msg;
4964      break;
4965     case TCHK_WIDTH:
4966      /* opposite edge data event less than limit after 1st edge ref., err */
4967      /* if time less than threshold, ignore very narrow pulses */
4968      /* if no threshold, will be set to 0, if same as threshold still err */
4969      if (tcp->tc_haslim2) __get_del(&lim2, tcp->tclim2_du, tcp->tc_delrep2);
4970      else lim2 = 0ULL;
4971 
4972      /* ignore pulse width (ok) if less than lim2 (threshold) */
4973      /* ? think LRM says if occur simultaneously no change (threshold 0) */
4974      if (diff < lim1 && diff >= lim2) goto emit_msg;
4975      break;
4976     case TCHK_PERIOD:
4977      /* same edge data event less than limit after 1st edge ref. */
4978      /* period error if clock period too narrow, no threshold */
4979      /* no separate reference event */
4980      /* must set reference event to now even if no violation */
4981      chgp->lastchg[__inum] = __simtime;
4982 
4983      /* notice if edge repeated in time slot, it is timing violation */
4984      if (diff < lim1) goto emit_msg;
4985      break;
4986     case TCHK_SKEW:
4987      /* if data event (2nd) more than limit after ref. 1st => err */
4988      /* skew error if data event too long after reference event */
4989      /* i.e. too much skew (lateness) of arriving signal */
4990      /* SJM 04/13/04 - if backward should be opposite of PERIOD above */
4991      if (diff > lim1) goto emit_msg;
4992      break;
4993     case TCHK_RECREM:
4994      /* this is removal part of recrem */
4995      __get_del(&lim2, tcp->tclim2_du, tcp->tc_delrep2);
4996      /* 2nd limit is hold part */
4997      lim1 = lim2;
4998      if (diff < lim1 && diff != 0ULL) goto emit_msg;
4999      break;
5000     case TCHK_RECOVERY:
5001      /* SJM 01/16/04 - terminals reversed for rec part of recrem */
5002      if (tcp->tc_recofrecrem)
5003       {
5004        /* recovery part of recrem needs limit from 1st lim of recrem */
5005        /* because must be able to change both delays during sim */
5006        tcp2 = (struct tchk_t *) tcp->tclim_du.pdels;
5007        __get_del(&lim1, tcp2->tclim_du, tcp2->tc_delrep);
5008       }
5009 
5010      /* if data event (2nd clock) occurs too soon after 1st (clear edge) */
5011      /* recovery like hold but error if reference event not edge */
5012      /* recovery err if clock happens too soon after clear or preset edge */
5013      if (diff < lim1 && diff != 0ULL) goto emit_msg;
5014      break;
5015     case TCHK_REMOVAL:
5016      /* if data event (2nd clock) does not occur soon enough after 1st */
5017      /* (clear edge) - removal like setup but error if reference event not */
5018      /* edge - removal err if clock happens too soon after clear or preset */
5019      /* edge */
5020      /* AIV 07/09/04 - removal test was reversed - was > but needs to be < */
5021      if (diff < lim1 && diff != 0ULL) goto emit_msg;
5022      break;
5023     default: __case_terr(__FILE__, __LINE__);
5024    }
5025    __pop_itstk();
5026   }
5027  /* must move all processed to front of free list */
5028  __tcpendlst_end->tc_plnxt = __tcpendfreelst;
5029  __tcpendfreelst = __tcpendlst_hdr;
5030  __tcpendlst_hdr = __tcpendlst_end = NULL;
5031 }
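
/*
 * worked example (numbers hypothetical): for a hold style check with a
 * limit of 3, a data event 2 units after the recorded reference time gives
 * diff = 2, and since 0 < 2 < 3 a violation is emitted; diff == 0
 * (simultaneous) or diff >= 3 passes the check.
 */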
5032 
5033 /* LOOKATME - to match OVISIM x goes to 1 not 0 */
5034 static word32 ntfy_toggle_tab[] = {1, 0, 2, 1};
5035 
5036 /*
5037  * process notify
5038  */
5039 static void process_notify(struct net_t *np)
5040 {
5041  struct xstk_t *xsp;
5042  word32 val;
5043 
5044  push_xstk_(xsp, np->nwid);
5045  __ld_wire_val(xsp->ap, xsp->bp, np);
5046  /* DBG remove */
5047  if (xsp->xslen != 1) __misc_terr(__FILE__, __LINE__);
5048  /* --- */
5049  val = xsp->ap[0] | (xsp->bp[0] << 1);
5050  val = ntfy_toggle_tab[val];
5051  xsp->ap[0] = val & 1L;
5052  xsp->bp[0] = (val >> 1) & 1L;
5053  __chg_st_val(np, xsp->ap, xsp->bp);
5054  __pop_xstk();
5055 }
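
/*
 * illustrative note: with the 2 bit encoding val = a | (b << 1) used above
 * (0, 1, z = 2, x = 3 assuming the usual a/b interpretation),
 * ntfy_toggle_tab maps 0->1, 1->0, z->z and x->1, i.e. the "x goes to 1"
 * behavior called out above the table.
 */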
5056 
5057 /*
5058  * build a timing check source dump string with constants
5059  *
5060  * notice for setuphold (hold part) lim1 passed in already holds the correct lim2 value
5061  */
5062 static char *bld_tchk_srcdump(char *s, struct tchk_t *tcp, word64 *tim1,
5063  word64 *tim2, word64 *lim1, word64 *lim2)
5064 {
5065  int32 nd_rpar;
5066  char s1[RECLEN];
5067 
5068  __cur_sofs = 0;
5069  /* indicate whether setup or hold */
5070  if (tcp->tchktyp == TCHK_SETUPHOLD) __adds("hold(of setuphold)");
5071  else if (tcp->tc_supofsuphld) __adds("setup(of setuphold)");
5072  else if (tcp->tchktyp == TCHK_RECREM) __adds("removal(of recrem)");
5073  else if (tcp->tc_recofrecrem) __adds("recovery(of recrem)");
5074  else __adds(__to_tcnam(s1, tcp->tchktyp));
5075 
5076  __adds("(");
5077  if (tcp->startedge != NOEDGE || tcp->startcondx != NULL)
5078   { addch_('('); nd_rpar = TRUE; }
5079  else nd_rpar = FALSE;
5080  if (tcp->startedge != NOEDGE)
5081   { __adds(__to_edgenam(s1, tcp->startedge)); addch_(' '); }
5082  __adds(__msgexpr_tostr(s1, tcp->startxp));
5083  if (tcp->startcondx != NULL)
5084   { __adds(" &&& "); __adds(__msgexpr_tostr(s1, tcp->startcondx));  }
5085  if (nd_rpar) addch_(')');
5086  addch_(':');
5087  __adds(__to_timstr(s1, tim1));
5088 
5089  __adds(", ");
5090  if (tcp->chkedge != NOEDGE || tcp->chkcondx != NULL)
5091   { addch_('('); nd_rpar = TRUE; }
5092  else nd_rpar = FALSE;
5093  if (tcp->chkedge != NOEDGE)
5094   { __adds(__to_edgenam(s1, tcp->chkedge)); addch_(' '); }
5095  __adds(__msgexpr_tostr(s1, tcp->chkxp));
5096  if (tcp->chkcondx != NULL)
5097   { __adds(" &&& "); __adds(__msgexpr_tostr(s1, tcp->chkcondx));  }
5098  if (nd_rpar) addch_(')');
5099  addch_(':');
5100  __adds(__to_timstr(s1, tim2));
5101 
5102  __adds(", ");
5103  __adds(__to_timstr(s1, lim1));
5104 
5105  if ((tcp->tchktyp == TCHK_WIDTH || tcp->tchktyp == TCHK_PERIOD)
5106   && *lim2 != 0ULL)
5107   { __adds(", "); __adds(__to_timstr(s1, lim2)); }
5108  __adds(");");
5109  __trunc_exprline(MSGTRUNCLEN, FALSE);
5110  strcpy(s, __exprline);
5111  __cur_sofs = 0;
5112  return(s);
5113 }
5114 
5115 /*
5116  * ROUTINES TO INITIALIZE BEFORE START OF SIMULATION
5117  * TRICKY BEGINNING OF TIME 0 PROPAGATION HERE
5118  */
5119 
5120 /*
5121  * initialization just before simulation start
5122  * need a dummy thread for functions on rhs of contas
5123  *
5124  * notice can execute statements from called conta rhs functions in here
5125  */
5126 extern void __init_sim(void)
5127 {
5128  char *sav_fnam;
5129 
5130  /* this is never called for resets, so initialize to no resets */
5131  __reset_count = 0;
5132  /* just set this to some value - task exec. always sets again */
5133  __reset_value = 0;
5134 
5135  init_stime();
5136  sav_fnam = __in_fils[0];
5137  __in_fils[0] = __pv_stralloc("**initialize none**");
5138 
5139  /* do not emit new message at time 0 since initialize time */
5140  __last_trtime = 0ULL;
5141  __last_evtrtime = 0ULL;
5142 
5143  __suspended_thd = NULL;
5144  __suspended_itp = NULL;
5145  __cur_thd = NULL;
5146  /* current inst. stack needs nil on bottom for debugging and must be empty */
5147  /* DBG remove -- */
5148  if (__itspi != -1) __misc_terr(__FILE__, __LINE__);
5149  if (__inst_ptr != NULL) __misc_terr(__FILE__, __LINE__);
5150  /* --- */
5151 
5152  /* must leave instance stack exactly as is - cannot initialize */
5153  init_wires();
5154  __pv_stlevel = 0;
5155 
5156  /* initialize dumpvars state */
5157  __dv_calltime = 0ULL;
5158  __dv_seen = FALSE;
5159  __dv_state = DVST_NOTSETUP;
5160  __dv_dumplimit_size = 0;
5161  __dv_chgnethdr = NULL;
5162  __dv_netfreelst = NULL;
5163  __dv_hdr = __dv_end = NULL;
5164  __dv_isall_form = FALSE;
5165 
5166  __in_fils[0] = sav_fnam;
5167  /* debugger source files go through last library file */
5168  __last_srcf = __last_lbf;
5169  /* putting any $input files on end since last_inf only needed for $input */
5170  /* from now on */
5171  /* resetting does not affect this */
5172  __last_inf = __last_lbf;
5173 
5174  /* last step is to setup interactive environment */
5175  /* needed since interactive setup stuff can be in source */
5176  __init_interactive();
5177  if (__slotend_action != 0) __misc_terr(__FILE__, __LINE__);
5178  __slotend_action = 0;
5179  __run_state = SS_SIM;
5180 }
5181 
5182 /*
5183  * initial simulation time and variables
5184  */
5185 static void init_stime(void)
5186 {
5187  register int32 i;
5188  struct telhdr_t *telp;
5189 
5190  /* initialize the simulation realloced event table */
5191  /* because of fibonacci growth, start with small value */
5192 
5193  /* notice for now fixed size timing wheel */
5194  /* initialize timing wheel headers to no events but need place holder */
5195  /* allocate timing wheel - allow variable sizing - should adjust from ts */
5196  __twhsize = TWHINITSIZE;
5197 
5198  /* need to allocate extra fence */
5199  __twheel = (struct telhdr_t **)
5200   __my_malloc((__twhsize + 1) *sizeof(struct telhdr_t *));
5201 
5202  /* include extra fence in loop */
5203  for (i = 0; i <= __twhsize; i++)
5204   {
5205    __twheel[i] = telp = (struct telhdr_t *)
5206     __my_malloc(sizeof(struct telhdr_t));
5207    telp->te_hdri = telp->te_endi = -1;
5208    telp->num_events = 0;
5209   }
5210  /* this is fence */
5211  __twheel[__twhsize]->num_events = -1;
5212  /* just need some good value here */
5213  __twheel[__twhsize]->te_hdri = -1;
5214 
5215  __simtime = 0ULL;
5216  __cur_te_hdri = __cur_te_endi = -1;
5217  __p0_te_hdri = __p0_te_endi = -1;
5218  /* SJM 07/05/05 - also initialize non block current time after pnd0 queue */
5219  __nb_te_hdri = __nb_te_endi = -1;
5220 
5221  __tedpfreelst = NULL;
5222  __teputvfreelst = NULL;
5223  /* init overflow q */
5224  __topi = 0;
5225  __btqroot = NULL;
5226 
5227  /* whetime is end of wheel time */
5228  /* works because __twhsize never bigger than 2**31 */
5229  __whetime = (word64) (__twhsize - 1);
5230  __num_twhevents = 0;
5231  __num_ovflqevents = 0;
5232  __num_proc_tevents = __num_cancel_tevents = 0;
5233  __inertial_cancels = __newval_rescheds = 0;
5234  __proc_thrd_tevents = 0;
5235  __num_netchges = 0;
5236  __num_switch_vtxs_processed = 0;
5237  __immed_assigns = 0;
5238  __strobe_hdr = __strobe_end = __strb_freelst = NULL;
5239  __monit_active = TRUE;
5240  __monit_dcehdr = NULL;
5241  __fmon_hdr = __fmon_end = NULL;
5242  __fmonse_hdr = __fmonse_end = __fmse_freelst = NULL;
5243  __nchg_futend = __nchg_futhdr = __nchgfreelst = NULL;
5244  /* SJM 08/16/03 - now need to start with lhs changed off */
5245  __lhs_changed = FALSE;
5246  __tcpendlst_end = __tcpendlst_hdr = __tcpendfreelst = NULL;
5247  __dltevfreelst = NULL;
5248  __cur_thd = NULL;
5249  /* tf one way pending event free list */
5250  __ltevfreelst = NULL;
5251  __wrkevtab = NULL;
5252  __last_wevti = -1;
5253  __size_wrkevtab = 0;
5254 }
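
/*
 * illustrative sketch (not part of the build, helper name hypothetical):
 * the slot selection the timing wheel insert below relies on - events
 * within __twhsize slots of now land in the wheel at a wrapped index,
 * anything farther out goes to the overflow q.
 *
 *  static int32 wheel_slot(word64 now, word64 schtim, int32 cur_twi,
 *   int32 twhsize)
 *  {
 *   word64 delta;
 *   int32 slot;
 *
 *   delta = schtim - now;
 *   if (delta >= (word64) twhsize) return(-1);
 *   slot = cur_twi + (int32) delta;
 *   if (slot >= twhsize) slot -= twhsize;
 *   return(slot);
 *  }
 *
 * returning -1 here stands for "use the overflow q".
 */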
5255 
5256 /*
5257  * after debugger :rerun command - do initialization
5258  */
5259 extern void __reinit_sim(void)
5260 {
5261  char *sav_fnam;
5262 
5263  /* another reset - needed first in case init code uses $reset_count */
5264  __reset_count++;
5265 
5266  /* for catching problems with incorrectly reset dces */
5267  /* DBG remove ---
5268  chk_schd_dces();
5269  --- */
5270 
5271  reinit_stime();
5272 
5273  sav_fnam = __in_fils[0];
5274  __in_fils[0] = __pv_stralloc("**initialize none**");
5275 
5276  /* no time 0 messages */
5277  __last_trtime = 0ULL;
5278  __last_evtrtime = 0ULL;
5279 
5280  /* current inst. stack needs nil on bottom for debugging */
5281  __cur_thd = NULL;
5282  __suspended_thd = NULL;
5283  __suspended_itp = NULL;
5284  /* must empty stack since may have been called from running code */
5285  __itspi = -1;
5286  __itstk[0] = NULL;
5287  __inst_ptr = NULL;
5288  __inst_mod = NULL;
5289 
5290  /* must leave instance stack exactly as is - cannot initialize */
5291  init_wires();
5292  __pv_stlevel = 0;
5293 
5294  /* reinitialize dumpvars state */
5295  __dv_calltime = 0ULL;
5296  __dv_seen = FALSE;
5297  __dv_state = DVST_NOTSETUP;
5298  __dv_dumplimit_size = 0;
5299  __dv_chgnethdr = NULL;
5300  __dv_hdr = __dv_end = NULL;
5301  __dv_isall_form = FALSE;
5302 
5303  /* free and maybe close open command file - for reset will use start cmd_s */
5304  if (__cmd_s != NULL)
5305   {
5306    __my_fclose(__cmd_s);
5307    __cmd_s = NULL;
5308    __my_free(__cmd_fnam, strlen(__cmd_fnam) + 1);
5309    __cmd_fnam = NULL;
5310   }
5311 
5312  __in_fils[0] = sav_fnam;
5313 
5314  /* interactive environment must be left as is */
5315  __slotend_action = 0;
5316 
5317  /* things initialized in pv that need to be re-initialized */
5318  __next_dvnum = 0;
5319  /* reset interactive run state */
5320  __pending_enter_iact = FALSE;
5321  __iact_reason = IAER_UNKN;
5322  /* this will cause any -i file to be read on first iact entry */
5323  __ia_entered = FALSE;
5324  __cur_sofs = 0;
5325  __xspi = __itspi = __fcspi = -1;
5326  __inst_ptr = NULL;
5327  /* reinit $scope must start at first top level module */
5328  __scope_ptr = __it_roots[0];
5329  __scope_tskp = NULL;
5330  __run_state = SS_SIM;
5331 }
5332 
5333 /*
5334  * reinitial simulation time and variables
5335  */
5336 static void reinit_stime(void)
5337 {
5338  __simtime = 0ULL;
5339  __cur_te_hdri = __cur_te_endi = -1;
5340  __p0_te_hdri = __p0_te_endi = -1;
5341  /* SJM 07/05/05 - also initialize non block current time after pnd0 queue */
5342  __nb_te_hdri = __nb_te_endi = -1;
5343 
5344  /* works because __twhsize never bigger than 2**31 */
5345  __whetime = (word64) (__twhsize - 1);
5346  __num_twhevents = 0;
5347  __num_ovflqevents = 0;
5348  __num_proc_tevents = __num_cancel_tevents = 0;
5349  __inertial_cancels = __newval_rescheds = 0;
5350  __proc_thrd_tevents = 0;
5351  __num_netchges = 0;
5352  __immed_assigns = 0;
5353  __strobe_hdr = __strobe_end = NULL;
5354  __monit_active = TRUE;
5355  __monit_dcehdr = NULL;
5356  __fmon_hdr = __fmon_end = NULL;
5357  /* here leave the free list to reuse storage from there */
5358  __fmonse_hdr = __fmonse_end = NULL;
5359 
5360  /* SJM 08/16/03 - now need to start with lhs changed off */
5361  __lhs_changed = FALSE;
5362 
5363  /* notice must leave free lists - will begin by allocating from there */
5364  __nchg_futend = __nchg_futhdr = NULL;
5365  __tcpendlst_end = __tcpendlst_hdr = NULL;
5366  __cur_thd = NULL;
5367 
5368  /* must leave tevtab timing queue - freed entries added to free list */
5369  /* num used stays same */
5370 }
5371 
5372 /*
5373  * initialize by scheduling a wire change for every wire driver
5374  * needs empty timing wheel header for delay 0 scheduling
5375  *
5376  * this needs to be run with all wire delays disabled
5377  * no PLI interaction here and run in SS_RESET or SS_LOAD run state
5378  */
5379 static void init_wires(void)
5380 {
5381  int32 ii;
5382 
5383  __wire_init = TRUE;
5384  /* go through list of 1 inst. corresponding to each top level module */
5385  /* ignore all delays in propagating from lhs's to rhs's */
5386  __nchg_futend = __nchg_futhdr = NULL;
5387  __initalw_thrd_hdr = NULL;
5388 
5389  /* SJM - 05/24/00 - must not process var changes until 0 normal #0 pt. */
5390  /* if (__nchg_futhdr != NULL) process_all_netchgs(); */
5391 
5392  for (ii = 0; ii < __numtopm; ii++)
5393   {
5394    init_itinsts(__it_roots[ii]);
5395    /* even though top modules can be linked by xmrs, doing it here can at */
5396    /* worst cause a few extra events to be processed from xmrs */
5397    /* SJM - 05/24/00 - must not process var changes until 0 normal #0 pt. */
5398    /* if (__nchg_futhdr != NULL) process_all_netchgs(); */
5399   }
5400 
5401  /* SJM 04/11/01 - initializing tran channels after drivers propagated */
5402  /* hard drivers as possible have changed */
5403  __init_all_trchans();
5404 
5405  __wire_init = FALSE;
5406  if (__ev_tracing)
5407   __tr_msg("\n>>>> wire initialization complete <<<<<\n");
5408 }
5409 
5410 /*
5411  * initialize all wires and threads in one itree instance
5412  * know that when storage for all wires allocated, also initialized
5413  *
5414  * algorithm is to evaluate every cont. assign (including cross module ports)
5415  * and gate/udp and schedule wire change at some point
5416  * this adds all no delay changes to netchg list and all delays are scheduled
5417  * with pnd0's going on pnd0 list
5418  *
5419  * also allocate and schedules all behavioral initial/always threads
5420  * notice behavior force/assign can only occur during time 0 or later
5421  */
5422 static void init_itinsts(struct itree_t *up_itp)
5423 {
5424  register int32 gi, pbi;
5425  int32 cai, ii;
5426  struct conta_t *cap;
5427  struct mod_t *mdp;
5428  struct itree_t *itp;
5429 
5430  if (__debug_flg)
5431   {
5432    __dbg_msg("== initializing wires in %s\n", __msg2_blditree(__xs, up_itp));
5433   }
5434  /* on entry know unprocessed net change list empty */
5435  mdp = up_itp->itip->imsym->el.emdp;
5436  __push_itstk(up_itp);
5437  /* schedule all behavioral threads for this instance */
5438  init_sched_thd(mdp);
5439 
5440  /* evaluate and schedule all gates in instance */
5441  for (gi = 0; gi < mdp->mgnum; gi++) gate_initeval(&(mdp->mgates[gi]));
5442 
5443  /* and all contas */
5444  for (cap = mdp->mcas, cai = 0; cai < mdp->mcanum; cai++, cap++)
5445   {
5446    /* SJM 09/28/02 - need to initialize the PB separated contas */
5447    if (cap->ca_pb_sim)
5448     {
5449      /* SJM 08/08/03 - for per bit sim form, need 2nd arg master conta */
5450      for (pbi = 0; pbi < cap->lhsx->szu.xclen; pbi++)
5451       { conta_initeval(&(cap->pbcau.pbcaps[pbi]), cap); }
5452     }
5453    else conta_initeval(cap, cap);
5454   }
5455  __pop_itstk();
5456 
5457  for (ii = 0; ii < mdp->minum; ii++)
5458   {
5459    itp = &(up_itp->in_its[ii]);
5460    /* force downward cross port continuous assign for inputs and inouts */
5461    __init_instdownport_contas(up_itp, itp);
5462 
5463    /* process depth first down one instance */
5464    init_itinsts(itp);
5465 
5466    /* force upward from down (after its processed) output/inout contas */
5467    __init_instupport_contas(itp);
5468   }
5469 }
5470 
5471 /*
5472  * build thread and schedule time 0 event for each initial always block
5473  * just goes into time 0 current time event list as if time move from -1
5474  *
5475  * if initial/always statement is named block will have unnamed block
5476  * put around it by here
5477  *
5478  * this requires cur. itp to be set to current place in itree
5479  */
5480 static void init_sched_thd(struct mod_t *mdp)
5481 {
5482  register struct ialst_t *ialp;
5483  struct thread_t *thp;
5484  i_tev_ndx tevpi;
5485  struct telhdr_t *tw0;
5486  struct st_t *stp, *stp2;
5487 
5488  /* each element in ia sts list is a possibly added unnamed begin block */
5489  /* each separate initial/always must be its own thread */
5490  /* because one blocking does not block others */
5491  for (ialp = mdp->ialst; ialp != NULL; ialp = ialp->ialnxt)
5492   {
5493    /* build the initial/always thread */
5494    /* build the event and allocate assoc. thread */
5495    alloc_tev_(tevpi, TE_THRD, __inst_ptr, __tim_zero);
5496    stp = ialp->iastp;
5497 
5498    thp = __alloc_thrd();
5499    thp->th_ialw = TRUE;
5500    __cur_thd = thp;
5501    thp->thenbl_sfnam_ind = ialp->ia_first_ifi;
5502    thp->thenbl_slin_cnt = ialp->ia_first_lini;
5503    /* caller must alloc any event type specific fields, cannot assume NULL */
5504    __tevtab[tevpi].tu.tethrd = thp;
5505    thp->thnxtstp = stp;
5506    thp->thpar = NULL;
5507    thp->th_itp = __inst_ptr;
5508    /* link on sequential list for rerun freeing */
5509    if (__initalw_thrd_hdr == NULL) __initalw_thrd_hdr = thp;
5510    else
5511     {
5512      thp->thright = __initalw_thrd_hdr;
5513      __initalw_thrd_hdr->thleft = thp;
5514      __initalw_thrd_hdr = thp;
5515     }
5516 
5517    /* this just causes all initial and always 1st statements to happen */
5518    /* at time 0 - know thnxtstp is just list of statements */
5519    if (__ev_tracing)
5520     {
5521      stp2 = __tevtab[tevpi].tu.tethrd->thnxtstp;
5522      if (stp2 == NULL)
5523       {
5524        __tr_msg(
5525         "-- adding initial machine code thread for init/always at %s\n",
5526         __bld_lineloc(__xs, ialp->ia_first_ifi, ialp->ia_first_lini));
5527       }
5528      else
5529       {
5530        __tr_msg("-- adding initial procedural start at statement %s\n",
5531         __bld_lineloc(__xs, stp2->stfnam_ind, stp2->stlin_cnt));
5532       }
5533     }
5534 
5535    tw0 = __twheel[0];
5536    if (tw0->te_hdri == -1) tw0->te_hdri = tw0->te_endi = tevpi;
5537    else { __tevtab[tw0->te_endi].tenxti = tevpi; tw0->te_endi = tevpi; }
5538    tw0->num_events += 1;
5539    __num_twhevents++;
5540    __cur_thd = NULL;
5541   }
5542 }
5543 
5544 /*
5545  * initialize gate by evaluating all inputs, changing wire if needed,
5546  * and if wire changed, propagate changes
5547  * called once for every gate in itree
5548  */
5549 static void gate_initeval(struct gate_t *gp)
5550 {
5551  int32 i, gid;
5552 
5553  /* evaluate gate - even if no change assign (this stores state) */
5554  /* if input value same, nothing to do */
5555  switch ((byte) gp->g_class) {
5556   case GC_LOGIC: init_logic_gate(gp); break;
5557   case GC_UDP: init_udp(gp); break;
5558   case GC_BUFIF: init_bufif_gate(gp); break;
5559   case GC_MOS:
5560    chg_mos_instate(gp, 1);
5561    chg_mos_instate(gp, 2);
5562 
5563    gid = gp->gmsym->el.eprimp->gateid;
5564    /* note here input change routine and eval separate */
5565    /* eval always evals even if new and old input are the same */
5566    if (gid == G_NMOS) __eval_nmos_gate(gp->gstate.wp[__inum]);
5567    else if (gid == G_RNMOS) __eval_rnmos_gate(gp->gstate.wp[__inum]);
5568    else if (gid == G_PMOS) __eval_pmos_gate(gp->gstate.wp[__inum]);
5569    else if (gid == G_RPMOS) __eval_rpmos_gate(gp->gstate.wp[__inum]);
5570    else __case_terr(__FILE__, __LINE__);
5571    break;
5572   case GC_CMOS:
5573    chg_mos_instate(gp, 1);
5574    chg_mos_instate(gp, 2);
5575    chg_mos_instate(gp, 3);
5576    /* note here input change routine and eval separate */
5577    /* eval always evals even if new and old input are the same */
5578    __eval_cmos_gate(gp);
5579    break;
5580   case GC_PULL:
5581    /* each port is pull wire */
5582    /* this is needed since probably all drivers tristate at time 0 */
5583    /* but pull must start at time 0 */
5584    /* notice no output here so starts at 0 not 1 (normally 0 output) */
5585    for (i = 0; i < (int32) gp->gpnum; i++)
5586     __mdr_assign_or_sched(gp->gpins[i]);
5587    if (__debug_flg)
5588     {
5589      __dbg_msg("-- all connections of pull %s evaluated for initialization\n",
5590      to_evtronam(__xs, gp->gsym->synam, __inst_ptr,
5591       (struct task_t *) NULL));
5592     }
5593    return;
5594   case GC_TRANIF: init_tranif_gate(gp); return;
5595   /* nothing to do for trans */
5596   case GC_TRAN: return;
5597   default: __case_terr(__FILE__, __LINE__);
5598  }
5599 
5600  if (__debug_flg && __ev_tracing)
5601   {
5602    char s1[RECLEN];
5603 
5604    if (gp->g_class == GC_UDP) strcpy(s1, "udp"); else strcpy(s1, "gate");
5605    /* notice during wire init all wire delays off */
5606    __tr_msg("-- %s %s %s assign initialized to state:\n",
5607     gp->gmsym->synam, s1, to_evtronam(__xs, gp->gsym->synam, __inst_ptr,
5608      (struct task_t *) NULL));
5609    __tr_msg("   %s\n", __gstate_tostr(__xs, gp, FALSE));
5610   }
5611  /* must always immediately assign to wire */
5612  change_gate_outwire(gp);
5613 }
5614 
5615 /*
5616  * initialize udps
5617  * 1) evaluate all udp input exprs in case constant (update wide signature)
5618  * 2) if has initial value, set new gate value to initial and do not eval
5619  * 3) if no initial value, force evaluate to get new gate value
5620  * for sequential use combinatorial table
5621  *
5622  * for wide this updates signature to x that is then adjusted by
5623  * storing gate output that always happens during init.
5624  *
5625  * notice that for gpins, 0 is output and first input is 1
5626  * but values in udp state word32 are input 0 in low 2 bits, and high (maybe
5627  * output if sequential) in high 2 bits
5628  */
5629 static void init_udp(struct gate_t *gp)
5630 {
5631  register int32 i;
5632  int32 nins;
5633  hword *hwp;
5634  word32 wide_ival, new_inputval, *wp;
5635  int32 out_chg;
5636  struct xstk_t *xsp;
5637  extern word32 __to_noztab[];
5638  extern word32 __to_uvaltab[];
5639 
5640  __cur_udp = gp->gmsym->el.eudpp;
5641  /* for level, this includes state */
5642  nins = __cur_udp->numins;
5643  if (!__cur_udp->u_wide)
5644   {
5645    /* here can ignore old state (if present) since just overwritten */
5646    hwp = &(gp->gstate.hwp[__inum]);
5647    /* eval and store all inputs in case constant expr. */
5648    for (i = 0; i < nins; i++)
5649     {
5650      xsp = __eval_xpr(gp->gpins[i + 1]);
5651      new_inputval = __to_noztab[(xsp->ap[0] & 1L) | ((xsp->bp[0] & 1L) << 1)];
5652      __pop_xstk();
5653      /* know z will always cause new input value */
5654      /* change the input */
5655      hwp[0] &= ~(3L << (2*i));
5656      hwp[0] |= ((hword) new_inputval << (2*i));
5657      /* -- RELEASE remove ---
5658      if (__debug_flg)
5659       __dbg_msg("-- udp init after %dth input hwp=%lx\n", i, hwp[0]);
5660      -- */
5661     }
5662    /* -- RELEASE remove --
5663    if (__debug_flg)
5664     __dbg_msg("-- narrow before init eval: hwp=%lx\n", hwp[0]);
5665    -- */
5666   }
5667  else
5668   {
5669    /* in wide case, need 2nd running value index word32 */
5670    wp = &(gp->gstate.wp[2*__inum]);
5671    for (i = 0; i < nins; i++)
5672     {
5673      /* remove signature contribution from initialized value */
5674      wide_ival = __to_uvaltab[((wp[0] >> (2*i)) & 3L)];
5675      wp[1] -= wide_ival*__pow3tab[i];
5676 
5677      xsp = __eval_xpr(gp->gpins[i + 1]);
5678      /* think evaluate can be wide thing that must be truncated */
5679      new_inputval = __to_noztab[(xsp->ap[0] & 1L) | ((xsp->bp[0] & 1L) << 1)];
5680      __pop_xstk();
5681      wp[0] &= ~(3L << (2*i));
5682      wp[0] |= (new_inputval << (2*i));
5683 
5684      /* add in new input signature value */
5685      wide_ival = __to_uvaltab[new_inputval];
5686      wp[1] += wide_ival*__pow3tab[i];
5687     }
5688    /* -- RELEASE remove ---
5689    if (__debug_flg)
5690      __dbg_msg("-- wide before init eval: w0=%lx, w1=%lu\n", wp[0], wp[1]);
5691    -- */
5692   }
5693  /* know combinatorial will never have initial value */
5694  if (__cur_udp->ival == NO_VAL)
5695   {
5696    /* change old input 0 to force evaluation - new replace set wrong */
5697    if (!__cur_udp->u_wide)
5698     {
5699      hwp = &(gp->gstate.hwp[__inum]);
5700      new_inputval = (word32) (hwp[0] & 3L);
5701      if (new_inputval == 0) new_inputval = 3; else new_inputval = 0;
5702      hwp[0] &= ~(3L);
5703      hwp[0] |= (hword) new_inputval;
5704     }
5705    else
5706     {
5707      wp = &(gp->gstate.wp[2*__inum]);
5708      new_inputval = wp[0] & 3L;
5709      /* subtract out old signature contribution */
5710      wide_ival = __to_uvaltab[new_inputval];
5711      wp[1] -= wide_ival*__pow3tab[0];
5712 
5713      if (new_inputval == 0) new_inputval = 2; else new_inputval = 0;
5714      wp[0] &= ~(3L);
5715      wp[0] |= new_inputval;
5716 
5717      /* add in new input signature value */
5718      wide_ival = __to_uvaltab[new_inputval];
5719      wp[1] += wide_ival*__pow3tab[0];
5720     }
5721    /* this sets new gate value */
5722    __eval_udp(gp, 1, &out_chg, FALSE);
5723   }
5724  else __new_gateval = __cur_udp->ival;
5725  /* caller will store or schedule store into output connection */
5726 }
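
/*
 * illustrative sketch (helper name hypothetical, not part of the build):
 * the per input 2 bit packing updated above - input i lives in bits 2*i
 * and 2*i+1 of the udp state word, so replacing its code is mask then or.
 *
 *  static word32 repl_udp_in(word32 state, int32 i, word32 newval2)
 *  {
 *   state &= ~(3L << (2*i));
 *   state |= ((newval2 & 3L) << (2*i));
 *   return(state);
 *  }
 *
 * for the wide form the running base 3 signature in wp[1] is kept in step
 * by subtracting the old input's contribution (old value times
 * __pow3tab[i]) and adding the new one, as the loop above does.
 */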
5727 
5728 /*
5729  * initialize logic gate by evaluating all inputs then forcing eval of
5730  * logic gate - not for buf and mos types gates
5731  */
5732 static void init_logic_gate(struct gate_t *gp)
5733 {
5734  register int32 i;
5735  int32 srep, nins;
5736  int32 out_chg;
5737  struct xstk_t *xsp;
5738 
5739  if (gp->gpnum > 16) srep = SR_VEC; else srep = SR_PVEC;
5740 
5741  /* tricky part - must make sure input 0 does not eval the same */
5742  /* simply invert b bit of input 0 result */
5743  nins = gp->gpnum - 1;
5744  for (i = 0; i < nins; i++)
5745   {
5746    xsp = __eval_xpr(gp->gpins[i + 1]);
5747    xsp->ap[0] &= 1L;
5748    xsp->bp[0] &= 1L;
5749    if (i == 0) xsp->bp[0] = (~xsp->bp[0]) & 1L;
5750    gate_st_bit(gp->gstate, (int32) gp->gpnum, i, srep, xsp->ap[0], xsp->bp[0]);
5751    __pop_xstk();
5752   }
5753  /* know reevaluating 1st input will result in new value so will eval. */
5754  __eval_logic_gate(gp, 1, &out_chg);
5755  /* wire assign in caller */
5756 }
5757 
5758 /*
5759  * initialize a bufif style gate
5760  * evaluate both inputs and change gate state for data to opposite
5761  *
5762  * storage: low 2 bits data in, next 2 control in - next 8 out strength
5763  * stored as half word
5764  */
5765 static void init_bufif_gate(struct gate_t *gp)
5766 {
5767  int32 out_chg;
5768  hword *hwp;
5769  struct xstk_t *xsp;
5770 
5771  hwp = &(gp->gstate.hwp[__inum]);
5772  xsp = __eval_xpr(gp->gpins[1]);
5773  xsp->ap[0] &= 1L;
5774  xsp->bp[0] &= 1L;
5775  /* invert to force eval */
5776  xsp->bp[0] = (~xsp->bp[0]) & 1L;
5777  hwp[0] &= ~3L;
5778  hwp[0] |= ((hword) (xsp->ap[0] | (xsp->bp[0] << 1)));
5779  __pop_xstk();
5780 
5781  xsp = __eval_xpr(gp->gpins[2]);
5782  xsp->ap[0] &= 1L;
5783  xsp->bp[0] &= 1L;
5784  hwp[0] &= ~(3L << 2);
5785  hwp[0] |= ((hword) ((xsp->ap[0] | (xsp->bp[0] << 1)) << 2));
5786  __pop_xstk();
5787  /* eval. 1st input in gpins - index 1 */
5788  __eval_bufif_gate(gp, 1, &out_chg);
5789 }
5790 
5791 /*
5792  * initialize the state of a tranif gate by evaluating control input
5793  * here must evaluate input and store into state
5794  * for tran, do not need any initialization
5795  */
5796 static void init_tranif_gate(struct gate_t *gp)
5797 {
5798  register word32 cval;
5799  int32 conducting, gateid, bi, wi;
5800  struct xstk_t *xsp;
5801 
5802  /* first initialize conducting state */
5803  xsp = __eval_xpr(gp->gpins[2]);
5804  conducting = (xsp->ap[0] & 1L) | ((xsp->bp[0] & 1L) << 1);
5805  if (conducting == 2) conducting = 3;
5806  __pop_xstk();
5807  gateid = gp->gmsym->el.eprimp->gateid;
5808  /* for if0s, 0 turns on (1), 1 off (0) */
5809  if (gateid == G_TRANIF0 || gateid == G_RTRANIF0)
5810   {
5811    if (conducting == 0) conducting = 1;
5812    else if (conducting == 1) conducting = 0;
5813   }
5814 
5815  /* immediate change to conducting state during initialization */
5816  bi = get_bofs_(2*__inum);
5817  wi = get_wofs_(2*__inum);
5818  cval = gp->gstate.wp[wi];
5819  cval &= ~(3L << bi);
5820  cval |= (conducting << bi);
5821  gp->gstate.wp[wi] = cval;
5822 
5823  if (__debug_flg && __ev_tracing)
5824   {
5825    __tr_msg("-- %s %s 3rd input evaluated - initial conducting: %s\n",
5826     gp->gmsym->synam, to_evtronam(__xs, gp->gsym->synam, __inst_ptr,
5827     (struct task_t *) NULL), (conducting == 1) ? "*ON*"
5828      : ((conducting == 0) ? "*OFF*" : "*UNKNOWN*"));
5829   }
5830 }
5831 
5832 /*
5833  * evaluate continuous assign to initialize lhs
5834  * notice ignore conta delay here but use wire delay
5835  *
5836  * SJM 09/28/02 - for per bit rhs concat contas, caller passed decomposed PB
5837  */
5838 static void conta_initeval(struct conta_t *cap, struct conta_t *mast_cap)
5839 {
5840  int32 lhslen, orhslen;
5841  byte *sbp;
5842  struct xstk_t *xsp, *xsp2;
5843  struct expr_t *xp, *lhsxp;
5844  struct sy_t *syp;
5845 
5846  /* do not need to set lhs here before schedule changed */
5847  xp = cap->rhsx;
5848  lhsxp = cap->lhsx;
5849  /* if rhs normal function must call it in case of constant args */
5850  /* but $getpattern must just assign right width x's since index probably */
5851  /* out of range at this point */
5852  if (xp->optyp == FCALL)
5853   {
5854    syp = xp->lu.x->lu.sy;
5855    /* know getpat conta form never has rhsval wp or driver wp */
5856    /* rule is that $getpattern with unknown index is x's */
5857    if (syp->sytyp == SYM_SF && syp->el.esyftbp->syfnum == STN_GETPATTERN)
5858     {
5859      lhslen = lhsxp->szu.xclen;
5860      push_xstk_(xsp, lhslen);
5861      /* IN - getpattern with unknown index (like at init) return x value */
5862      one_allbits_(xsp->ap, lhslen);
5863      one_allbits_(xsp->bp, lhslen);
5864 
5865      /* know all getpat lhs wires fi == 1 and no wire delay and no stren */
5866      __exec_ca_concat(lhsxp, xsp->ap, xsp->bp, FALSE);
5867      __pop_xstk();
5868      return;
5869     }
5870   }
5871 
5872  lhslen = lhsxp->szu.xclen;
5873  xsp = __eval_xpr(xp);
5874  if (xsp->xslen != lhslen)
5875   {
5876    orhslen = xsp->xslen;
5877 
5878    /* SJM 09/29/03 - change to handle sign extension and separate types */
5879    if (xsp->xslen > lhslen) __narrow_sizchg(xsp, lhslen);
5880    else if (xsp->xslen < lhslen)
5881     {
5882      if (xp->has_sign) __sgn_xtnd_widen(xsp, lhslen);
5883      else __sizchg_widen(xsp, lhslen);
5884     }
5885    /* during initialization widen to x not 0 */
5886 
5887    /* SJM 05/19/04 - remove see below must widen rhs expr using 0's */
5888    /* --
5889    if (orhslen < xsp->xslen)
5890     {
5891      if (__wire_init) __fix_widened_toxs(xsp, orhslen);
5892     }
5893    -- */
5894   }
5895 
5896  /* fi == 1 and no delay contas have no driver state stored */
5897  if (cap->ca_drv_wp.wp != NULL)
5898   {
5899    /* SJM - 02/18/03 - remove - since above does same change so never exec */
5900    /* --- REMOVED
5901    if (xsp->xslen != lhslen)
5902     {
5903      orhslen = xsp->xslen;
5904 
5905      -* SJM 09/29/03 - change to handle sign extension and separate types *-
5906      if (xsp->xslen > lhslen) __narrow_sizchg(xsp, lhslen);
5907      else if (xsp->xslen < lhslen)
5908       {
5909        if (xp->has_sign) __sgn_xtnd_widen(xsp, lhslen);
5910        else __sizchg_widen(xsp, lhslen);
5911       }
5912 
5913      __fix_widened_toxs(xsp, orhslen);
5914     }
5915    -- */
5916    __st_perinst_val(cap->ca_drv_wp, lhslen, xsp->ap, xsp->bp);
5917   }
5918  if (__debug_flg && __ev_tracing)
5919   {
5920    char s1[RECLEN];
5921 
5922    /* notice even for a delay wire, delays are off during wire initialization */
5923    strcpy(s1, "assigned");
5924    __tr_msg("-- %s %s initial value %s\n", s1,
5925     __to_evtrcanam(__xs, mast_cap, __inst_ptr),
5926     __regab_tostr(__xs2, xsp->ap, xsp->bp, xsp->xslen, BHEX, FALSE));
5927   }
5928 
5929  /* notice multi-fi case assume new driving value already assigned */
5930  /* this will add any conta driving strength if needed from ca drv wp */
5931  if (lhsxp->x_multfi) __mdr_assign_or_sched(lhsxp);
5932  else
5933   {
5934    /* notice wire delays including specify paths off during wire init */
5935    if (lhsxp->x_stren)
5936     {
5937      push_xstk_(xsp2, 4*lhslen);
5938      sbp = (byte *) xsp2->ap;
5939      __st_standval(sbp, xsp, cap->ca_stval);
5940      if (lhsxp->optyp == LCB) __stren_exec_ca_concat(lhsxp, sbp, FALSE);
5941      /* SJM 03/30/99 - was storing value without strength added */
5942      else __exec_conta_assign(lhsxp, xsp2->ap, xsp2->bp, FALSE);
5943      __pop_xstk();
5944     }
5945    else
5946     {
5947      if (lhsxp->optyp == LCB)
5948       __exec_ca_concat(lhsxp, xsp->ap, xsp->bp, FALSE);
5949      else __exec_conta_assign(lhsxp, xsp->ap, xsp->bp, FALSE);
5950     }
5951   }
5952  __pop_xstk();
5953 }
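
/* Annotation (not part of the original code): the size change step above
 * reduces to three cases - shown with assumed example widths:
 *   rhs 16 bits, lhs 8 bits              : __narrow_sizchg keeps the low 8 bits
 *   rhs 4 bits, lhs 8 bits, signed rhs   : __sgn_xtnd_widen repeats the sign bit
 *   rhs 4 bits, lhs 8 bits, unsigned rhs : __sizchg_widen zero fills
 * (the earlier widen-to-x-during-init step was removed, see comment above)
 */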
5954 
5955 /*
5956  * MEDIUM LEVEL SCHEDULING MECHANISM ROUTINES
5957  */
5958 
5959 /*
5960  * insert event in timing wheel or overflow q (know event after now)
5961  */
5962 extern void __insert_event(register i_tev_ndx tevpi)
5963 {
5964  register int32 schtwi;
5965  register struct tev_t *tevp;
5966  register struct telhdr_t *telp;
5967  word64 schtim, t;
5968 
5969  tevp = &(__tevtab[tevpi]);
5970  schtim = tevp->etime;
5971  /* because of wrap around, events up to __twhsize - 1 ahead fit in the wheel */
5972  /* notice borrow possible here - but index always fits in 1 word32 */
5973  t = schtim - __simtime;
5974  /* schtwi here is number of timing wheel slots to scheduled event */
5975  schtwi = (int32) (t & WORDMASK_ULL);
5976 
5977  /* if event would fit in the wheel were it based at slot 0, it goes in */
5978  /* but the slot index may need to wrap around */
5979  if (schtwi < __twhsize && t < 0x7fffffffULL)
5980   {
5981    /* wrap around table if needed */
5982    if ((schtwi += __cur_twi) >= __twhsize) schtwi %= __twhsize;
5983    /* ??? DBG add ---
5984    if (__debug_flg && __ev_tracing)
5985     {
5986      word64 t2;
5987      char s1[RECLEN], s2[RECLEN];
5988 
5989      t = (word64) (__twhsize - 1);
5990      t2 = __whetime - t;
5991      __tr_msg(
5992       ".. adding %s event to timing wheel based at %s for time %s (schtwi=%d, cur_twi=%d)\n",
5993       __to_tetyp(__xs, tevp->tetyp), __to_timstr(s1, &t2),
5994       __to_timstr(s2, &schtim), schtwi, __cur_twi);
5995     }
5996    --- */
5997 
5998    telp = __twheel[schtwi];
5999     /* know tevp next field is nil */
6000    if (telp->te_hdri == -1) telp->te_hdri = telp->te_endi = tevpi;
6001    else
6002     {
6003      if (tevp->vpi_onfront)
6004       { tevp->tenxti = telp->te_hdri; telp->te_hdri = tevpi; }
6005      else
6006       { __tevtab[telp->te_endi].tenxti = tevpi; telp->te_endi = tevpi; }
6007     }
6008    telp->num_events += 1;
6009    __num_twhevents++;
6010   }
6011  else add_ovfetim(schtim, tevpi, tevp);
6012 }
6013 
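/* Annotation - worked example of the slot arithmetic above using an assumed
 * wheel size of 4096 and __cur_twi == 10: an event 100 ticks after __simtime
 * gives t = 100, schtwi = 100 + 10 = 110 (no wrap needed), so it lands in
 * wheel slot 110; an event 5000 ticks ahead fails the schtwi < __twhsize
 * test and is handed to add_ovfetim for the overflow queue instead.
 */
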
6014 /*
6015  * routine to allocate event - non macro for debugging
6016  * the zeroing memset initializes all flags to off
6017  */
6018 /* DBG ??? remove --- */
6019 extern i_tev_ndx __alloc_tev(int32 etyp, struct itree_t *itp, word64 absetime)
6020 {
6021   register struct tev_t *tevp__;
6022   register i_tev_ndx tevpi;
6023 
6024   if (__tefreelsti != -1)
6025    { tevpi = __tefreelsti; __tefreelsti = __tevtab[__tefreelsti].tenxti; }
6026   else
6027    {
6028     if (++__numused_tevtab >= __size_tevtab) __grow_tevtab();
6029     tevpi = __numused_tevtab;
6030    }
6031   tevp__ = &(__tevtab[tevpi]);
6032   /* LOOKATME - maybe zeroing to init bit fields unportable */
6033   memset(tevp__, 0, sizeof(struct tev_t));
6034   tevp__->tetyp = etyp;
6035   tevp__->teitp = itp;
6036   tevp__->etime = absetime;
6037   tevp__->tenxti = -1;
6038   return(tevpi);
6039 }
6040 /*  --- */
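
/* Annotation - a minimal sketch (assumed generic names, not simulator code)
 * of the index based free list pattern __alloc_tev uses; __free_1tev at the
 * end of this file is the matching push:
 * ---
 * static int32 alloc_slot(void)
 * {
 *  int32 i;
 *
 *  if (freehd != -1) { i = freehd; freehd = tab[freehd].nxt; return(i); }
 *  if (++numused >= size) grow_tab();
 *  return(numused);
 * }
 *
 * static void free_slot(int32 i)
 * {
 *  tab[i].nxt = freehd; freehd = i;
 * }
 * ---
 */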
6041 
6042 /*
6043  * grow the tev table by reallocating
6044  *
6045  * BEWARE - because growth reallocates the table, tevp pointers can only
6046  * be used as temporaries between calls to the alloc tev macro
6047  *
6048  * notice this grows the table when there is no free entry and the next
6049  * slot to use is past the end; after the grow the caller bumps num used
6050  */
6051 extern void __grow_tevtab(void)
6052 {
6053  int32 osize, nsize;
6054 
6055  osize = __size_tevtab*sizeof(struct tev_t);
6056  __size_tevtab += (__size_tevtab/2);
6057  nsize = __size_tevtab*sizeof(struct tev_t);
6058  __tevtab = (struct tev_t *) __my_realloc((char *) __tevtab, osize, nsize);
6059 
6060  /* DBG remove --- UNDO */
6061  if (__debug_flg)
6062   __dbg_msg("+++ fixed event table grew from %d bytes to %d\n", osize, nsize);
6063  /* --- */
6064 }
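
/* Annotation - example of the growth step above with an assumed starting
 * size of 1000 entries: the realloc grows the table to 1000 + 1000/2 = 1500
 * entries, so repeated growth is geometric (factor 1.5) and total copying
 * stays linear in the final table size.
 */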
6065 
6066 /*
6067  * normally unused routine for debugging
6068  */
6069 static void chk_tev_list(register i_tev_ndx tevpi)
6070 {
6071  struct tev_t *tevp;
6072 
6073  for (; tevpi != -1; tevpi = __tevtab[tevpi].tenxti)
6074   {
6075    tevp = &(__tevtab[tevpi]);
6076    if (tevp->tetyp < 1 || tevp->tetyp > 14) __misc_terr(__FILE__, __LINE__);
6077   }
6078 }
6079 
6080 /*
6081  * normally unused routine for checking pending scheduled dce events
6082  * UNUSED
6083  */
6084 /* ---
6085 static void chk_schd_dces(void)
6086 {
6087  register int32 ni;
6088  register struct mod_t *mdp;
6089  register struct task_t *tskp;
6090  struct net_t *np;
6091 
6092  for (mdp = __modhdr; mdp != NULL; mdp = mdp->mnxt)
6093   {
6094    for (ni = 0, np = &(mdp->mnets[0]); ni < mdp->mnnum; ni++, np++)
6095     {
6096      if (np->dcelst == NULL) continue;
6097      chk_1nschd_dce(np, mdp);
6098     }
6099    for (tskp = mdp->mtasks; tskp != NULL; tskp = tskp->tsknxt)
6100     {
6101      for (ni = 0, np = &(tskp->tsk_regs[0]); ni < tskp->trnum; ni++, np++)
6102       {
6103        if (np->dcelst == NULL) continue;
6104        chk_1nschd_dce(np, mdp);
6105       }
6106     }
6107   }
6108 }
6109 -- */
6110 
6111 /*
6112  * check one static scheduled dce
6113  * UNUSED
6114  */
6115 /* ---
6116 static void chk_1nschd_dce(struct net_t *np, struct mod_t *mdp)
6117 {
6118  register struct dcevnt_t *dcep;
6119  register int32 ii;
6120  i_tev_ndx tevpi;
6121  struct delctrl_t *dctp;
6122 
6123  for (dcep = np->dcelst; dcep != NULL; dcep = dcep->dcenxt)
6124   {
6125    if (dcep->dce_typ != DCE_RNG_INST && dcep->dce_typ != DCE_INST)
6126      continue;
6127    dctp = dcep->st_dctrl;
6128    if (dctp->dceschd_tevs == NULL)
6129     __cvsim_msg("*** net %s dcep no dceschd_tevs\n", np->nsym->synam);
6130    for (ii = 0; ii < mdp->flatinum; ii++)
6131     {
6132      if ((tevpi = dctp->dceschd_tevs[__inum]) != -1)
6133       __cvsim_msg("*** net %s inst num. %s dceschd_tevs index %d set\n",
6134        np->nsym->synam, ii, tevpi);
6135      if (__tevtab[tevpi].tetyp < 1 || __tevtab[tevpi].tetyp > 14)
6136       __misc_terr(__FILE__, __LINE__);
6137     }
6138   }
6139 }
6140 --- */
6141 
6142 /*
6143  * ROUTINES TO IMPLEMENT TIME FLOW
6144  */
6145 
6146 static int32 move_to_time0(void)
6147 {
6148  register struct telhdr_t *twp;
6149 
6150  __simtime++;
6151  twp = __twheel[++__cur_twi];
6152  __cur_te_hdri = twp->te_hdri;
6153  __cur_te_endi = twp->te_endi;
6154  return(TRUE);
6155 }
6156 
6157 /*
6158  * move to next time slot
6159  * return FALSE if no events pending (i.e. all done)
6160  * know both normal and pound 0 event lists empty from last time
6161  *
6162  * when done, events to process at the current time are ready to be processed
6163  * and __cur_te_hdri and __cur_te_endi point to the now event queue
6164  */
6165 static int32 move_time(void)
6166 {
6167  register struct telhdr_t *twp;
6168  word64 tmp;
6169 
6170  /* -- DBG remove
6171  if (__btqroot != NULL) dmp_btree(__btqroot);
6172  dmp_twheel();
6173  --- */
6174 
6175  __simtime++;
6176  /* normal case, find event in timing wheel */
6177  /* DBG remove ---
6178  if (__num_twhevents < 0) __misc_terr(__FILE__, __LINE__);
6179  --- */
6180  if (__num_twhevents == 0) goto move_gap;
6181 again:
6182  /* --- DBG remove chk_event_consist(); */
6183 
6184  twp = __twheel[++__cur_twi];
6185  /* hit sentinel */
6186  if (twp->num_events == -1) goto at_twend;
6187  if (twp->te_hdri == -1) { __simtime++; goto again; }
6188  __cur_te_hdri = twp->te_hdri;
6189  __cur_te_endi = twp->te_endi;
6190  goto got_event;
6191 
6192  /* move all events whose time is < new sim time + twh size to time wheel */
6193  /* copy header of timing q to timing wheel */
6194  /* if no events in overflow q nothing to do here */
6195  /* this reduces number of overflow events and increases __twheel events */
6196 at_twend:
6197  tmp = (word64) (__twhsize - 1);
6198  __whetime = __simtime + tmp;
6199  __cur_twi = -1;
6200  /* events still in timing wheel, if ovflow empty, must advance wheel */
6201  if (__btqroot != NULL) ovflow_into_wheel();
6202  /* DBG remove -- */
6203  if (__debug_flg && __ev_tracing)
6204   {
6205    word64 t;
6206 
6207    tmp = (word64) __twhsize;
6208    t = __whetime - tmp + 1;
6209    /* whe time is end of timing wheel time */
6210    __tr_msg(".. timing wheel base moved to %s\n", __to_timstr(__xs, &t));
6211   }
6212  /* --- */
6213  /* know timing wheel not empty or will not get to at twend */
6214  goto again;
6215 
6216  /* handle gap in timing wheel - know wheel currently empty */
6217 move_gap:
6218  if (__btqroot == NULL) return(FALSE);
6219  __simtime = __btqroot->btltim;
6220  tmp = (word64) (__twhsize - 1);
6221  __whetime = __simtime + tmp;
6222  /* DBG remove */
6223  if (__whetime <= __simtime)
6224   {
6225    __pv_terr(338,
6226     "scheduled event causes 64 bit time overflow - contact vendor");
6227   }
6228  /* -- */
6229  ovflow_into_wheel();
6230  /* DBG remove -- */
6231  if (__debug_flg && __ev_tracing)
6232   {
6233    __tr_msg(".. event gap - jumping to %s\n", __to_timstr(__xs, &__simtime));
6234   }
6235  /* -- */
6236  /* know at least one event in timing wheel */
6237  __cur_twi = -1;
6238  goto again;
6239 got_event:
6240  /* getting to here means have event - better always happen */
6241  /* --- DBG remove --
6242  if (__debug_flg && __ev_tracing)
6243   {
6244    __tr_msg(
6245     ".. dumping current pending event list for time %s (%ld events):\n",
6246     __to_timstr(__xs, &__simtime), twp->num_events);
6247    dmp_events(twp->te_hdri);
6248    __dmp_event_tab();
6249   }
6250  -- */
6251  return(TRUE);
6252 }
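
/* Annotation - move_time above advances simulation time in one of three
 * ways:
 *  1) a non empty slot is found in the wheel - got_event, return TRUE
 *  2) the end sentinel is hit - rebase the wheel window at the new time,
 *     pull in-range overflow events in via ovflow_into_wheel, scan again
 *  3) the wheel is empty - if the overflow q is also empty simulation is
 *     done (return FALSE), else jump __simtime to the earliest overflow
 *     time and refill the wheel from the overflow tree
 */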
6253 
6254 /*
6255  * check the timing wheel to make sure the number of events is consistent
6256  */
6257 static void chk_event_consist(void)
6258 {
6259  register int32 twi;
6260  int32 num_whevents;
6261 
6262  for (num_whevents = 0, twi = 0;; twi++)
6263   {
6264    /* use sentinel for end */
6265    if (__twheel[twi]->num_events == -1) break;
6266    num_whevents += __twheel[twi]->num_events;
6267   }
6268 
6269  /* DBG remove --- */
6270  if (__num_twhevents != num_whevents) __misc_terr(__FILE__, __LINE__);
6271  if (__btqroot == NULL)
6272   { if (__num_ovflqevents != 0) __misc_terr(__FILE__, __LINE__); }
6273  else { if (__num_ovflqevents == 0) __misc_terr(__FILE__, __LINE__); }
6274  /* --- */
6275 }
6276 
6277 /*
6278  * adding event tevp to overflow tree at time etim
6279  * will never see #0 form here and always add exactly 1 event
6280  */
6281 static void add_ovfetim(word64 etim, i_tev_ndx tevpi, struct tev_t *tevp)
6282 {
6283  struct bt_t *btp, *splthdr, *new_splthdr;
6284  struct telhdr_t *telpp;
6285 
6286  __num_ovflqevents++;
6287  /* ---
6288  if (__debug_flg && __ev_tracing)
6289   {
6290    word64 tmp, t;
6291    char s1[RECLEN];
6292 
6293    tmp = (word64) __twhsize;
6294    t = __whetime - tmp + 1;
6295    __tr_msg(
6296     ".. adding event after timing wheel based at %s for time %s (ovfl. num. %d)\n",
6297     __to_timstr(s1, &t), __to_timstr(__xs, &(tevp->etime)), __num_ovflqevents);
6298   }
6299  --- */
6300  /* empty tree */
6301  if (__btqroot == NULL)
6302   {
6303    __btqroot = alloc_btnod(BTNORM);
6304    __btqroot->btnfill = 1;
6305    btp = alloc_btnod(BTFRNGE);
6306    __max_level = 1;
6307    __btqroot->ofsu.btofs = btp;
6308    __btqroot->btltim = etim;
6309 
6310    btp->ofsu.telp = telpp = (struct telhdr_t *)
6311     __my_malloc(sizeof(struct telhdr_t));
6312    telpp->te_hdri = telpp->te_endi = tevpi;
6313    telpp->num_events = 1;
6314    btp->btltim = etim;
6315    btp->btnxt = NULL;
6316    btp->btnfill = 1;
6317    return;
6318   }
6319 
6320  /* search down tree to find fringe node that new time gets found or */
6321  /* insert in */
6322  splthdr = find_fringe(etim);
6323 
6324  /* insert somewhere in fringe - know goes within range or on ends */
6325  /* if found and did not need to insert, done */
6326  if ((new_splthdr = insert_fringe(splthdr, etim, tevpi)) == NULL)
6327   return;
6328 
6329  /* if inserted at front must update 1 up from fringe path */
6330  if (splthdr != new_splthdr)
6331   {
6332    btp = __btndhdrstk[__topi];
6333    btp->ofsu.btofs = new_splthdr;
6334   }
6335 
6336  /* if it did not grow to be too wide, done */
6337  if (new_splthdr->btnfill < BTREEMAXWID) return;
6338  /* this uses path set in find_fringe to split and grow tree upwards */
6339  splitinsert_nonfringe();
6340 }
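
/* Annotation - shape of the overflow queue built above: internal (BTNORM)
 * nodes form sibling lists of up to BTREEMAXWID bt_t records, each keyed by
 * the minimum time (btltim) of the subtree it points to; fringe (BTFRNGE)
 * nodes point to one telhdr_t event list holding every event for one exact
 * time.  A rough picture for events at times 10, 25 and 40:
 *
 *   root(10) -> [norm 10]--------------[norm 40]
 *                   |                      |
 *               [fringe 10]-[fringe 25]  [fringe 40]
 *                   |           |            |
 *               events@10   events@25    events@40
 */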
6341 
6342 /*
6343  * allocate a btree node of type btyp
6344  */
6345 static struct bt_t *alloc_btnod(int32 btyp)
6346 {
6347  struct bt_t *btp;
6348 
6349  btp = (struct bt_t *) __my_malloc(sizeof(struct bt_t));
6350  btp->bttyp = btyp;
6351  btp->btnfill = 0;
6352  btp->btltim = 0ULL;
6353  btp->ofsu.btofs = NULL;
6354  btp->btnxt = NULL;
6355  return(btp);
6356 }
6357 
6358 /*
6359  * find fringe multiple element node that etim goes in or divides or will be
6360  * found in
6361  * return header node of fringe node as list and set __topi to 1 less than
6362  * fringe - stack does not contain fringe node
6363  *
6364  * notice just stops when hits fringe - does not stack fringe node because
6365  * it may be on front of a list that is a multikey node, or after the end
6366  */
6367 static struct bt_t *find_fringe(word64 etim)
6368 {
6369  register struct bt_t *btp;
6370  struct bt_t *hdrbtp, *last_btp;
6371 
6372  /* stack special size 1 root node */
6373  __btndhdrstk[0] = __btndstk[0] = __btqroot;
6374  hdrbtp = __btqroot->ofsu.btofs;
6375  /* must handle case of fringe immediately under root */
6376  __topi = 0;
6377  if (hdrbtp->bttyp == BTFRNGE) return(hdrbtp);
6378 
6379  for (__topi = 0;;)
6380   {
6381    /* stack 1 down header of linked node */
6382    __btndhdrstk[++__topi] = last_btp = hdrbtp;
6383    for (btp = hdrbtp->btnxt; btp != NULL; btp = btp->btnxt)
6384     {
6385      /* true here means path selects last btp node */
6386      if (etim < btp->btltim) break;
6387      last_btp = btp;
6388     }
6389    __btndstk[__topi] = last_btp;
6390    hdrbtp = last_btp->ofsu.btofs;
6391    /* notice top of stack is one above fringe node */
6392    if (hdrbtp->bttyp == BTFRNGE) break;
6393   }
6394  return(hdrbtp);
6395 }
6396 
6397 /*
6398  * insert a fringe node into a fringe node list
6399  * this is simple linear linked list insert
6400  *
6401  * if needs to allocate new node puts event into new telhdr node
6402  * else adds to end of right list
6403  * return new fringe header node ptr if added, else NULL if found
6404  * even if found still adds time event to found time event header
6405  *
6406  * notice fringe node (with pointer to telhdr) never stacked on path list
6407  */
6408 static struct bt_t *insert_fringe(struct bt_t *frnghdr, word64 etim,
6409  i_tev_ndx tevpi)
6410 {
6411  register int32 i;
6412  register struct bt_t *btp;
6413  int32 goes_onend;
6414  struct bt_t *last_btp, *btpnew;
6415  struct telhdr_t *telp;
6416 
6417  goes_onend = FALSE;
6418  for (last_btp = NULL, btp = frnghdr; btp != NULL; btp = btp->btnxt)
6419   {
6420    if (etim > btp->btltim) { last_btp = btp; continue; }
6421 
6422    /* found place */
6423    if (etim == btp->btltim)
6424     {
6425      telp = btp->ofsu.telp;
6426      if (telp->te_hdri == -1) telp->te_hdri = telp->te_endi = tevpi;
6427      else
6428       {
6429        if (__tevtab[tevpi].vpi_onfront)
6430         { __tevtab[tevpi].tenxti = telp->te_hdri; telp->te_hdri = tevpi; }
6431        else { __tevtab[telp->te_endi].tenxti = tevpi; telp->te_endi = tevpi; }
6432       }
6433      telp->num_events += 1;
6434      return(NULL);
6435     }
6436 
6437 do_add:
6438    /* allocate new fringe node */
6439    telp = (struct telhdr_t *) __my_malloc(sizeof(struct telhdr_t));
6440    telp->te_hdri = telp->te_endi = tevpi;
6441    telp->num_events = 1;
6442 
6443    btpnew = alloc_btnod(BTFRNGE);
6444    btpnew->btltim = etim;
6445    btpnew->ofsu.telp = telp;
6446 
6447    /* goes past end - insert after last_btp */
6448    if (goes_onend)
6449     { last_btp->btnxt = btpnew; btpnew->btnxt = NULL; }
6450    else
6451     {
6452      /* insert before btp */
6453      if (last_btp == NULL)
6454       {
6455        btpnew->btnxt = btp;
6456        btpnew->btnfill = frnghdr->btnfill;
6457        frnghdr->btnfill = 0;
6458        frnghdr = btpnew;
6459 
6460        /* this is tricky case since btpnew time less than all header nodes */
6461        /* in tree - fix using header node path */
6462        for (i = __topi; i >= 0; i--) __btndhdrstk[i]->btltim = etim;
6463       }
6464      else { btpnew->btnxt = btp; last_btp->btnxt = btpnew; }
6465     }
6466    frnghdr->btnfill = frnghdr->btnfill + 1;
6467    return(frnghdr);
6468   }
6469  goes_onend = TRUE;
6470  goto do_add;
6471 }
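
/* Annotation - the loop above is a plain ordered linked list insert with
 * three outcomes: the exact time already exists (the event is appended to
 * that telhdr_t list and NULL is returned), the new fringe node is inserted
 * before an existing one (possibly becoming the new list header, which
 * forces the btltim fixup along the stacked header path), or the time is
 * past every entry and the goes_onend path appends it after last_btp.
 */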
6472 
6473 /*
6474  * insert fringe node that gets split into non fringe upward parent node
6475  * keep propagating wide nodes up to root
6476  * know must split fringe node or will not get here
6477  *
6478  * know __topi stack of path that got to this fringe node
6479  * but top is one up from fringe node
6480  */
6481 static void splitinsert_nonfringe(void)
6482 {
6483  register int32 i;
6484  register struct bt_t *btp;
6485  int32 stki;
6486  struct bt_t *parbtp, *parhdr, *splt1, *splt2, *last_btp, *splt2par;
6487 
6488  /* notice fringe node not stacked, top of stack is parent of fringe */
6489  splt1 = __btndstk[__topi]->ofsu.btofs;
6490  last_btp = NULL;
6491  for (stki = __topi;;)
6492   {
6493    /* split too wide node into 2 - max must be divisible by 2 */
6494    /* know at least 1 node here */
6495    for (btp = splt1, i = 0; i < BTREEMAXWID/2; i++)
6496     { last_btp = btp; btp = btp->btnxt; }
6497    last_btp->btnxt = NULL;
6498    splt2 = btp;
6499    splt1->btnfill = BTREEMAXWID/2;
6500    splt2->btnfill = BTREEMAXWID/2;
6501 
6502    /* construct non fringe node to link splt2 node list onto */
6503    splt2par = alloc_btnod(BTNORM);
6504    splt2par->btltim = splt2->btltim;
6505    splt2par->ofsu.btofs = splt2;
6506 
6507    /* parent nodes of path used to descend to fringe */
6508    parbtp = __btndstk[stki];
6509    parhdr = __btndhdrstk[stki];
6510    /* up 1 level is special root node - must increase tree height */
6511    if (stki == 0)
6512     {
6513      /* allocate new added level parbtp (same as header for root) */
6514      parbtp = alloc_btnod(BTNORM);
6515      parbtp->btltim = splt1->btltim;
6516      parbtp->ofsu.btofs = splt1;
6517      parbtp->btnfill = 2;
6518      parbtp->btnxt = splt2par;
6519      parhdr->ofsu.btofs = parbtp;
6520      splt2par->btnxt = NULL;
6521      /* this is only way max. tree level can increase */
6522      __max_level++;
6523      return;
6524     }
6525    /* on sun += does not work for bit fields */
6526    parhdr->btnfill = parhdr->btnfill + 1;
6527 
6528    splt2par->btnxt = parbtp->btnxt;
6529    parbtp->btnxt = splt2par;
6530    if (parhdr->btnfill < BTREEMAXWID) break;
6531    stki--;
6532    splt1 = __btndstk[stki]->ofsu.btofs;
6533   }
6534 }
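
/* Annotation - worked example assuming BTREEMAXWID == 8: when a sibling
 * list reaches 8 nodes it is cut after the 4th, the right half gets a new
 * BTNORM parent keyed by its first (minimum) time, and that parent is
 * linked in after the original parent node.  If that makes the parent list
 * itself reach 8, the same split repeats one level up; splitting the list
 * directly under the root is the only place __max_level grows.
 */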
6535 
6536 /*
6537  * LOW LEVEL OVERFLOW QUEUE TO TIMING WHEEL ROUTINES
6538  */
6539 
6540 /*
6541  * depth first move of nodes to timing wheel
6542  */
6543 static void ovflow_into_wheel(void)
6544 {
6545  int32 stki;
6546  struct bt_t *btphdr;
6547 
6548  /* if leftmost time in overflow tree past wheel end time, nothing to do */
6549  if (__btqroot->btltim > __whetime) return;
6550 
6551  __btndhdrstk[0] = __btndstk[0] = __btqroot;
6552  btphdr = __btqroot->ofsu.btofs;
6553  for (__topi = 0;;)
6554   {
6555    __btndhdrstk[++__topi] = btphdr;
6556    /* DBG remove ---
6557    if (__debug_flg && __ev_tracing)
6558     {
6559      word64 t, tmp;
6560 
6561      tmp = (word64) __twhsize;
6562      t = (__whetime - tmp) + 1;
6563      __tr_msg(
6564       ".. time queue move to wheel based at %s descending to level %d\n",
6565       __to_timstr(__xs, &t), __topi);
6566     }
6567    -- */
6568    /* case 1 - descended down to fringe node */
6569    if (btphdr->bttyp == BTFRNGE)
6570     {
6571      divide_fringe_node(btphdr);
6572      break;
6573     }
6574    divide_internal_node(btphdr);
6575    /* move down one level from new divide node */
6576    btphdr = __btndstk[__topi]->ofsu.btofs;
6577   }
6578  /* must set min times of list hdr nodes */
6579  /* since do not know new and left times until hit fringe */
6580  if (__btqroot == NULL) return;
6581 
6582  if (__btqroot != NULL)
6583   {
6584    for (stki = __topi; stki > 0; stki--)
6585     __btndstk[stki - 1]->btltim = __btndstk[stki]->btltim;
6586   }
6587  /* finally remove any size one nodes at top */
6588  for (;;)
6589   {
6590    btphdr = __btqroot->ofsu.btofs;
6591    if (btphdr->bttyp == BTFRNGE || btphdr->btnfill != 1) return;
6592    if (__debug_flg && __ev_tracing)
6593     {
6594      __tr_msg(".. removing redundant size 1 node under root\n");
6595     }
6596    __btqroot->ofsu.btofs = btphdr->ofsu.btofs;
6597    __my_free((char *) btphdr, sizeof(struct bt_t));
6598    btphdr = NULL;
6599   }
6600 }
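
/* Annotation - the descent above peels off, at each level, the leading
 * siblings whose subtrees now fall inside the new wheel window
 * (divide_internal_node / divide_fringe_node), moves their events onto the
 * wheel with mv_to_wheel, and keeps the remainder as the new left edge of
 * the overflow tree; the final loop strips any redundant size 1 internal
 * nodes left directly under the root.
 */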
6601 
6602 /*
6603  * divide a fringe node
6604  */
6605 static void divide_fringe_node(struct bt_t *btphdr)
6606 {
6607  register struct bt_t *btp, *btp2;
6608  int32 cnum;
6609  struct bt_t *btptmp;
6610 
6611  /* fringe node low time cannot be larger than wheel end */
6612  if (btphdr->btltim > __whetime) __misc_terr(__FILE__, __LINE__);
6613 
6614  /* case 1 (illegal): all of fringe node remains in tree */
6615  cnum = btphdr->btnfill - 1;
6616  for (btp = btphdr->btnxt; btp != NULL; btp = btp->btnxt)
6617   {
6618    if (btp->btltim > __whetime)
6619     {
6620      /* case 2: from 2nd to nth is first node of new tree after removal */
6621      /* remove all nodes up to btp */
6622      for (btp2 = btphdr; btp2 != btp;)
6623       {
6624        btptmp = btp2->btnxt;
6625        mv_to_wheel(btp2->btltim, btp2->ofsu.telp);
6626        /* SJM 03/07/01 - always fringe, must free telp too */
6627        __my_free((char *) btp2->ofsu.telp, sizeof(struct telhdr_t));
6628        __my_free((char *) btp2, sizeof(struct bt_t));
6629        btp2 = btptmp;
6630       }
6631      /* fixup tree */
6632      __btndstk[__topi] = __btndhdrstk[__topi] = btp;
6633      btp->btnfill = cnum;
6634      __btndhdrstk[__topi - 1]->ofsu.btofs = btp;
6635      __btndhdrstk[__topi - 1]->btltim = btp->btltim;
6636      return;
6637     }
6638    cnum--;
6639   }
6640  /* case 3 - all fringe nodes go in timing wheel */
6641  /* remove all nodes */
6642  for (btp = btphdr; btp != NULL;)
6643   {
6644    btptmp = btp->btnxt;
6645    mv_to_wheel(btp->btltim, btp->ofsu.telp);
6646    /* AIV 05/21/04 - miss one here, must free telp too */
6647    __my_free((char *) btp->ofsu.telp, sizeof(struct telhdr_t));
6648    __my_free((char *) btp, sizeof(struct bt_t));
6649    btp = btptmp;
6650   }
6651  /* all removed - know 1 up is now empty, propagate empties up */
6652  remove_empty_upwards();
6653 }
6654 
6655 /*
6656  * divide internal node
6657  */
6658 static void divide_internal_node(struct bt_t *btphdr)
6659 {
6660  register struct bt_t *btp;
6661  int32 cnum;
6662  struct bt_t *last_btp, *btp2, *btptmp;
6663 
6664  /* case 1 (impossible) - all of tree past timing wheel */
6665  /* internal node low time cannot be larger than wheel end */
6666  if (btphdr->btltim > __whetime) __misc_terr(__FILE__, __LINE__);
6667 
6668  cnum = btphdr->btnfill - 1;
6669  last_btp = btphdr;
6670  for (btp = btphdr->btnxt; btp != NULL; btp = btp->btnxt)
6671   {
6672    if (btp->btltim > __whetime)
6673     {
6674 got_divide:
6675      /* case 2: from 2nd to nth is first node of new tree after removal */
6676      /* remove all subtrees up to last_btp (divide node) */
6677      for (btp2 = btphdr; btp2 != last_btp;)
6678       {
6679        btptmp = btp2->btnxt;
6680        mv_subtree_towheel(btp2->ofsu.btofs);
6681        __my_free((char *) btp2, sizeof(struct bt_t));
6682        btp2 = btptmp;
6683       }
6684      /* fixup tree - last_btp is divide node */
6685      last_btp->btnfill = cnum + 1;
6686      __btndstk[__topi] = last_btp;
6687      __btndhdrstk[__topi] = last_btp;
6688      __btndstk[__topi - 1]->ofsu.btofs = last_btp;
6689      __btndstk[__topi - 1]->btltim = last_btp->btltim;
6690      return;
6691     }
6692    cnum--;
6693    last_btp = btp;
6694   }
6695  /* case 3 - divide node is last node in tree */
6696  goto got_divide;
6697 }
6698 
6699 /*
6700  * when leaf empty (size == 0) must remove upward
6701  */
6702 static void remove_empty_upwards(void)
6703 {
6704  register struct bt_t *btp;
6705  struct bt_t *last_btp, *rembtp;
6706  int32 stki, stki2;
6707 
6708  /* case 1, if only root is above now empty fringe - remove all of tree */
6709  if (__topi == 1)
6710   {
6711 empty_tree:
6712    __my_free((char *) __btqroot, sizeof(struct bt_t));
6713 
6714    /* SJM 05/26/04 - notice root node is special case - no telp multi */
6715    /* element node, instead just a pointer to a btp */
6716    __btqroot = NULL;
6717    return;
6718   }
6719  /* case 2, need to remove at least fringe */
6720  /* first - chain upwards of size 1 nodes that get removed */
6721  for (stki = __topi - 1; stki > 0; stki--)
6722   {
6723    /* this can never be fringe since fringe not in btndstk */
6724    if (__btndhdrstk[stki]->btnfill != 1) goto got_nonremove;
6725    __my_free((char *) __btndstk[stki], sizeof(struct bt_t));
6726    __btndstk[stki] = NULL;
6727   }
6728  goto empty_tree;
6729 
6730 got_nonremove:
6731  /* know that node at level stki stays */
6732  /* step 1: link out node */
6733  /* step 1a: find predecessor of linked out node if exists */
6734  rembtp = __btndstk[stki];
6735  /* case 2a: header node is removed */
6736  if (rembtp == __btndhdrstk[stki])
6737   {
6738    /* know node following rembtp exists */
6739    __btndhdrstk[stki - 1]->ofsu.btofs = rembtp->btnxt;
6740    __btndstk[stki] = rembtp->btnxt;
6741    __btndstk[stki]->btnfill = __btndhdrstk[stki]->btnfill - 1;
6742    __btndhdrstk[stki] = __btndstk[stki];
6743   }
6744  /* case 2b: non header removed */
6745  else
6746   {
6747    /* find predecessor of removed node since the removed node is on node stack */
6748    last_btp = NULL;
6749    for (btp = __btndhdrstk[stki]; btp != rembtp; btp = btp->btnxt)
6750     last_btp = btp;
6751    __btndhdrstk[stki]->btnfill -= 1;
6752 
6753    /* case 2b1: last node removed */
6754    if (rembtp->btnxt == NULL)
6755     {
6756      /* last_btp is one before removed node */
6757      __btndstk[stki] = last_btp;
6758 
6759      /* SJM 08/02/01 - add if to keep lint happy */
6760      if (last_btp != NULL) last_btp->btnxt = NULL;
6761     }
6762    /* case 2b2: internal node removed */
6763    else
6764     {
6765      __btndstk[stki] = rembtp->btnxt;
6766      /* SJM 08/02/01 - add if to keep lint happy */
6767      if (last_btp != NULL) last_btp->btnxt = rembtp->btnxt;
6768     }
6769   }
6770  /* notice __btndstk that points to rembtp - has new value (moved down) */
6771  __my_free((char *) rembtp, sizeof(struct bt_t));
6772  /* finally, work upwards to fringe updating __btndstk */
6773  /* notice even if only fringe removed, this will change ndstk for fringe */
6774  for (stki2 = stki + 1; stki2 <= __topi; stki2++)
6775   __btndstk[stki2] = __btndstk[stki2 - 1]->ofsu.btofs;
6776 }
6777 
6778 /*
6779  * move an entire subtree to timing wheel and free subtree
6780  * not called for fringe nodes
6781  */
6782 static void mv_subtree_towheel(struct bt_t *btphdr)
6783 {
6784  register struct bt_t *btp;
6785  struct bt_t *btp2;
6786 
6787  for (btp = btphdr; btp != NULL;)
6788   {
6789    btp2 = btp->btnxt;
6790 
6791    if (btp->bttyp == BTFRNGE)
6792     {
6793      mv_to_wheel(btp->btltim, btp->ofsu.telp);
6794      /* SJM 03/07/01 - only fringe node has telp that needs to be freed */
6795      __my_free((char *) btp->ofsu.telp, sizeof(struct telhdr_t));
6796     }
6797    else mv_subtree_towheel(btp->ofsu.btofs);
6798 
6799    /* always free the node */
6800    __my_free((char *) btp, sizeof(struct bt_t));
6801    btp = btp2;
6802   }
6803 }
6804 
6805 /*
6806  * move an event header te list to the timing wheel
6807  */
6808 static void mv_to_wheel(word64 etim, struct telhdr_t *telp)
6809 {
6810  int32 twslot;
6811  word64 tmp;
6812  struct telhdr_t *twlp;
6813 
6814  /* removing fringe node that should stay at time */
6815  if (etim > __whetime) __misc_terr(__FILE__, __LINE__);
6816  /* ---
6817  if (__debug_flg && __ev_tracing)
6818   {
6819    __tr_msg(".. moving time queue to wheel based at time %s\n",
6820     __to_timstr(__xs, &etim));
6821   }
6822  --- */
6823 
6824  /* add overflow q list to the correct wheel slot */
6825  /* because of wrapping, later scheduled events may already be on the element */
6826  /* sim time + 1 is 0th timing wheel position */
6827  tmp = etim - __simtime;
6828  twslot = (int32) (tmp & WORDMASK_ULL);
6829 
6830  /* notice, there will always be at least one entry on overflow q */
6831  /* list or will not get here - cancelled events just marked */
6832  /* add to end if timing wheel slot already has events */
6833  /* DBG remove  --- */
6834  if (twslot < 0 || twslot > __twhsize) __misc_terr(__FILE__, __LINE__);
6835  /* --- */
6836  twlp = __twheel[twslot];
6837  /* DBG remove  --- */
6838  if (twlp == NULL) __misc_terr(__FILE__, __LINE__);
6839  /* --- */
6840 
6841  /* twlp points to current wheel events - telp to list to move on end */
6842  if (twlp->te_hdri == -1)
6843   {
6844    twlp->te_hdri = telp->te_hdri;
6845    twlp->te_endi = telp->te_endi;
6846    twlp->num_events = telp->num_events;
6847   }
6848  else
6849   {
6850    /* splice onto end and set new end - leave front as is */
6851    __tevtab[twlp->te_endi].tenxti = telp->te_hdri;
6852    twlp->te_endi = telp->te_endi;
6853    twlp->num_events += telp->num_events;
6854   }
6855  __num_twhevents += telp->num_events;
6856  __num_ovflqevents -= telp->num_events;
6857  if (__num_ovflqevents < 0) __misc_terr(__FILE__, __LINE__);
6858 }
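
/* Annotation - the splice above is the usual trick for index linked lists
 * kept with head and end indices; a minimal sketch with assumed generic
 * names:
 * ---
 * if (dst->hd == -1) { dst->hd = src->hd; dst->end = src->end; }
 * else { tab[dst->end].nxt = src->hd; dst->end = src->end; }
 * dst->cnt += src->cnt;
 * ---
 */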
6859 
6860 /*
6861  * find btree node after or same as tim
6862  *
6863  * LOOKATME - why is this not called
6864  */
6865 static struct telhdr_t *tfind_btnode_after(struct bt_t *btphdr, word64 tim)
6866 {
6867  register struct bt_t *btp;
6868  struct telhdr_t *telp;
6869 
6870  if (btphdr->bttyp == BTFRNGE)
6871   {
6872    for (btp = btphdr; btp != NULL; btp = btp->btnxt)
6873     { if (btp->btltim >= tim) return(btp->ofsu.telp); }
6874    return(NULL);
6875   }
6876  for (btp = btphdr; btp != NULL; btp = btp->btnxt)
6877   {
6878    if ((telp = tfind_btnode_after(btp->ofsu.btofs, tim)) != NULL)
6879     return(telp);
6880   }
6881  return(NULL);
6882 }
6883 
6884 /*
6885  * Q DEBUGGING ROUTINES
6886  */
6887 
6888 /*
6889  * dump all events in timing wheel
6890  * only called if debug flag on
6891  */
6892 static void dmp_twheel(void)
6893 {
6894  register int32 twi;
6895  int32 e_num, totenum;
6896 
6897  __dbg_msg("<< timing wheel that ends at %s\n", __to_timstr(__xs, &__whetime));
6898  for (twi = 0, totenum = 0;; twi++)
6899   {
6900    /* use sentinel for end */
6901    if (__twheel[twi]->num_events == -1) break;
6902    e_num = dmp_events(__twheel[twi]->te_hdri);
6903    totenum += e_num;
6904    /* DBG remove ---
6905    __dbg_msg("--index %d %d counted events and %d stored--\n", twi,
6906     e_num, __twheel[twi]->num_events);
6907    ---*/
6908   }
6909  __dbg_msg("<< total counted wheel events %d, overflow %d, wheel stored %d\n",
6910   totenum, __num_ovflqevents, __num_twhevents);
6911 }
6912 
6913 /*
6914  * dump event list
6915  */
6916 static int32 dmp_events(register i_tev_ndx tevpi)
6917 {
6918  int32 e_num;
6919  /* char s1[20], s2[RECLEN]; */
6920 
6921  for (e_num = 0; tevpi != -1; tevpi = __tevtab[tevpi].tenxti, e_num++)
6922   {
6923    /* --
6924    struct tev_t *tevp;
6925 
6926    tevp = &(__tevtab[tevpi]);
6927    __dbg_msg("^^%s event index %d in inst. %s at %s cancel=%d\n",
6928     __to_tetyp(s1, tevp->tetyp), tevpi, __msg2_blditree(s2, tevp->teitp),
6929     __to_timstr(__xs, &__simtime), tevp->te_cancel);
6930    -- */
6931   }
6932  return(e_num);
6933 }
6934 
6935 /*
6936  * dump event table and free list
6937  */
6938 extern void __dmp_event_tab(void)
6939 {
6940  register int32 ei;
6941  struct tev_t *tevp;
6942  char s1[RECLEN];
6943 
6944  /* dump all allocated events */
6945  __dbg_msg("*** DUMPING EVENT TABLE *** (high used %d)\n",
6946   __numused_tevtab);
6947  for (ei = 0; ei <= __numused_tevtab; ei++)
6948   {
6949    tevp = &(__tevtab[ei]);
6950    __dbg_msg("^^%s (%d) event index %d next %d\n",
6951     __to_tetyp(s1, tevp->tetyp), tevp->tetyp, ei, tevp->tenxti);
6952   }
6953  if (__tefreelsti != -1)
6954   {
6955    __dbg_msg("*** DUMPING FREE LIST ***\n");
6956    for (ei = __tefreelsti; ei != -1; ei = __tevtab[ei].tenxti)
6957     {
6958      tevp = &(__tevtab[ei]);
6959      __dbg_msg("^^%s (%d) free event index %d next %d\n",
6960       __to_tetyp(s1, tevp->tetyp), tevp->tetyp, ei, tevp->tenxti);
6961     }
6962   }
6963 }
6964 
6965 /*
6966  * dump a levelized tree
6967  */
6968 static void dmp_btree(struct bt_t *btphdr)
6969 {
6970  register int32 i;
6971 
6972  if (__btqroot == NULL)
6973   {
6974    if (__debug_flg && __ev_tracing)
6975     __dbg_msg("--empty tree--\n");
6976    return;
6977   }
6978  for (i = 0; i <= __max_level; i++)
6979   {
6980    __nd_level = i;
6981    dmp2_btree(btphdr, 0);
6982   }
6983 }
6984 
6985 /*
6986  * dump a btree to standard output depth first using large linked b nodes
6987  */
6988 static void dmp2_btree(struct bt_t *btphdr, int32 level)
6989 {
6990  register struct bt_t *btp;
6991 
6992  if (level > __nd_level) return;
6993  dmp_btnode(btphdr, level);
6994  if (btphdr->bttyp == BTFRNGE) return;
6995  for (btp = btphdr; btp != NULL; btp = btp->btnxt)
6996   dmp2_btree(btp->ofsu.btofs, level + 1);
6997 }
6998 
6999 /*
7000  * dump a btree node
7001  * for now assume fits on one line
7002  */
7003 static void dmp_btnode(struct bt_t *btp, int32 level)
7004 {
7005  struct bt_t *btp1;
7006  int32 first_time;
7007  char s1[RECLEN];
7008 
7009  if (__nd_level != level) return;
7010  if (btp->bttyp == BTFRNGE) strcpy(s1, "fringe");
7011  else strcpy(s1, "internal");
7012 
7013  __outlinpos = 0;
7014  __dbg_msg("level %d %s node size %u:", level, s1, btp->btnfill);
7015  first_time = TRUE;
7016  for (btp1 = btp; btp1 != NULL; btp1 = btp1->btnxt)
7017   {
7018    if (btp1->bttyp == BTFRNGE)
7019     {
7020      if (first_time) first_time = FALSE; else __dbg_msg(", ");
7021      __dbg_msg("time %s(events %d)", __to_timstr(s1, &(btp1->btltim)),
7022       btp1->ofsu.telp->num_events);
7023     }
7024    else
7025     {
7026      if (first_time) first_time = FALSE; else __dbg_msg(", ");
7027      __dbg_msg("time %s", __to_timstr(s1, &(btp1->btltim)));
7028     }
7029   }
7030  __dbg_msg("\n");
7031  __outlinpos = 0;
7032 }
7033 
7034 /*
7035  * free subtree of overflow event queue btree
7036  * know always passed leftmost of a multiple node list
7037  * and btphdr never nil, caller must check for empty tree
7038  */
7039 extern void __free_btree(struct bt_t *btphdr)
7040 {
7041  register struct bt_t *btp, *btp2;
7042 
7043  /* at bottom of tree this node and all right siblings are fringe nodes */
7044  if (btphdr->bttyp == BTFRNGE)
7045   {
7046     for (btp = btphdr; btp != NULL;)
7047      {
7048       btp2 = btp->btnxt;
7049       /* because freeing telp record, know contents freed */
7050       /* events freed by just marking all of tev tab unused */
7051       __free_telhdr_tevs(btp->ofsu.telp);
7052       __my_free((char *) btp->ofsu.telp, sizeof(struct telhdr_t));
7053       __my_free((char *) btp, sizeof(struct bt_t));
7054      btp = btp2;
7055     }
7056    return;
7057   }
7058  /* if one node non fringe, all non fringe */
7059  for (btp = btphdr; btp != NULL;)
7060   {
7061    btp2 = btp->btnxt;
7062    __free_btree(btp->ofsu.btofs);
7063    __my_free((char *) btp, sizeof(struct bt_t));
7064    btp = btp2;
7065   }
7066 }
7067 
7068 
7069 /*
7070  * free list of tevs - either btree node or timing wheel list
7071  *
7072  * normally an entire period's events go on the free list; this is for
7073  * reset only - when an event is processed its guts (if any) are freed so
7074  * it can just be linked on the free list, but here guts must be freed too
7075  */
7076 extern void __free_telhdr_tevs(register struct telhdr_t *telp)
7077 {
7078  register i_tev_ndx tevpi, tevp2i;
7079 
7080  for (tevpi = telp->te_hdri; tevpi != -1;)
7081   {
7082    tevp2i = __tevtab[tevpi].tenxti;
7083    __free_1tev(tevpi);
7084    tevpi = tevp2i;
7085   }
7086  /* this is needed for timing wheel since telp not freed */
7087  telp->te_hdri = telp->te_endi = -1;
7088  telp->num_events = 0;
7089 }
7090 
7091 /*
7092  * free 1 event - may need to free auxiliary since will never be processed
7093  * freeing just puts on front of ev free list
7094  */
7095 extern void __free_1tev(i_tev_ndx tevpi)
7096 {
7097  int32 wlen;
7098  word32 *wp;
7099  struct tev_t *tevp;
7100  struct tenbpa_t *tenbp;
7101  struct tedputp_t *tedp;
7102  struct teputv_t *tepvp;
7103 
7104  tevp = &(__tevtab[tevpi]);
7105  switch ((byte) tevp->tetyp) {
7106   case TE_WIRE: case TE_BIDPATH: case TE_MIPD_NCHG:
7107    if (tevp->tu.tenp != NULL)
7108     __my_free((char *) tevp->tu.tenp, sizeof(struct tenp_t));
7109    break;
7110   case TE_NBPA:
7111    /* for non #0 original freed here but no tenbpa - moved to new */
7112    if ((tenbp = tevp->tu.tenbpa) == NULL) break;
7113 
7114    wp = tenbp->nbawp;
7115    wlen = wlen_(tenbp->nbastp->st.spra.lhsx->szu.xclen);
7116    __my_free((char *) wp, 2*wlen*WRDBYTES);
7117    /* if needed to copy lhs expr., now free */
7118    if (tenbp->nblhsxp != NULL) __free_xtree(tenbp->nblhsxp);
7119    __my_free((char *) tevp->tu.tenbpa, sizeof(struct tenbpa_t));
7120    break;
7121   case TE_TFPUTPDEL:
7122    if ((tedp = tevp->tu.tedputp) == NULL) break;
7123    tevp->tu.tedputp = NULL;
7124    tedp->tedtfrp =  (struct tfrec_t *) __tedpfreelst;
7125    __tedpfreelst = tedp;
7126    break;
7127   case TE_VPIPUTVDEL: case TE_VPIDRVDEL:
7128    if ((tepvp = tevp->tu.teputvp) == NULL) break;
7129    tevp->tu.teputvp = NULL;
7130    tepvp->np =  (struct net_t *) __teputvfreelst;
7131    __teputvfreelst = tepvp;
7132    break;
7133   /* for these either no auxiliary rec., or must stay, or free with handle */
7134   case TE_THRD: case TE_G: case TE_CA: case TE_TFSETDEL: case TE_SYNC:
7135   case TE_VPICBDEL:
7136    break;
7137   default: __case_terr(__FILE__, __LINE__);
7138  }
7139 
7140  /* ** DBG remove --
7141  memset(tevp, 0, sizeof(struct tev_t));
7142  __dbg_msg("--- free tev at %x\n", tevp);
7143  --- */
7144  __tevtab[tevpi].tenxti = __tefreelsti;
7145  __tefreelsti = tevpi;
7146 }
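
/* Annotation - __free_1tev releases any per event auxiliary record and then
 * pushes the event index onto the front of the __tefreelsti list, the same
 * LIFO list __alloc_tev pops from, so tev records are recycled rather than
 * returned to the allocator.
 */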
7147 
7148