1 /* Copyright (c) 1991-2007 Pragmatic C Software Corp. */
2
3 /*
4 This program is free software; you can redistribute it and/or modify it
5 under the terms of the GNU General Public License as published by the
6 Free Software Foundation; either version 2 of the License, or (at your
7 option) any later version.
8
9 This program is distributed in the hope that it will be useful, but
10 WITHOUT ANY WARRANTY; without even the implied warranty of
11 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 General Public License for more details.
13
14 You should have received a copy of the GNU General Public License along
15 with this program; if not, write to the Free Software Foundation, Inc.,
16 59 Temple Place, Suite 330, Boston, MA, 02111-1307.
17
18 We are selling our new Verilog compiler that compiles to X86 Linux
19 assembly language. It is at least two times faster for accurate gate
20 level designs and much faster for procedural designs. The new
21 commercial compiled Verilog product is called CVC. For more information
22 on CVC visit our website at www.pragmatic-c.com/cvc.htm or contact
23 Andrew at avanvick@pragmatic-c.com
24
25 */
26
27
28 /*
29 * Verilog simulation preparation routines
30 */
31
32 #include <stdio.h>
33 #include <stdlib.h>
34 #include <string.h>
35
36 #ifdef __DBMALLOC__
37 #include "../malloc.h"
38 #endif
39
40 #include "v.h"
41 #include "cvmacros.h"
42
43 /* local prototypes */
44 /* --- SJM REMOVED static void setchk_trchan_wire(struct net_t *); */
45 static void setchk_1w_fifo(struct net_t *);
46 static int32 cnt_scalar_fo(struct net_t *);
47 static int32 has_npp_isform(register struct net_pin_t *);
48 static void chkset_vec_fifo(struct net_t *, int32 *, int32 *, int32, int32,
49 int32);
50 static int32 has_rng_npp(struct net_t *);
51 static struct itree_t *cnvt_to_itp(struct mod_t *, int32);
52 static struct itree_t *cnvt_todown_itp(struct itree_t *, struct mod_t *,
53 int32);
54 static void chk_trifctrl_insame_chan(struct gate_t *, struct expr_t *);
55 static void chk_samechan_trifctrl_simple(struct mod_t *, struct gate_t *,
56 int32, struct expr_t *, struct expr_t *);
57 static int32 net_in_expr(struct expr_t *, struct net_t *);
58 static void prep_tf_rwexprs(void);
59 static int32 lhs_has_figt1(struct expr_t *);
60 static void getpat_lhs_figt1(struct mod_t *, struct expr_t *,
61 struct conta_t *);
62 static void chk_decl_siderep(struct expr_t *, struct expr_t *, char *,
63 int32, word32, int32);
64 static int32 find_var_in_xpr(struct expr_t *, struct net_t *, int32 *);
65 static void add_portbit_map(struct tenp_t *, struct expr_t *, int32);
66 static void cmp_nchgbtabsize(void);
67 static void set_1net_srep(struct net_t *);
68 static void cmp_tabsizes(void);
69 static void cmpadd_1var_storsiz(struct net_t *);
70 static void alloc_var(struct net_t *);
71 static void alloc_real_var(struct net_t *, int32);
72 static void alloc_scal_var(struct net_t *, int32);
73 static int32 all_drvrs_bidirect(struct net_t *);
74 static void alloc_sscal_var(struct net_t *, int32);
75 static void alloc_svec_var(struct net_t *, int32);
76 static void reinit_1wirereg(struct net_t *, struct mod_t *);
77 static void alloc_dce_prevval(struct dcevnt_t *, struct mod_t *);
78 static void init_dce_exprval(struct dcevnt_t *);
79 static struct mod_t *dcep_ref_mod(struct dcevnt_t *);
80 static void init_dce_prevval(struct dcevnt_t *, struct mod_t *);
81 static void prep_stskcalls(struct st_t *);
82 static struct st_t *add_loopend_goto(struct st_t *, struct st_t *);
83 static void push_prpstmt(struct st_t *);
84 static void pop_prpstmt(void);
85 static void prep_case(struct st_t *);
86 static void prep_dctrl(struct st_t *);
87 static void cnv_cmpdctl_todu(struct st_t *, struct delctrl_t *);
88 static void prep_event_dctrl(struct delctrl_t *);
89 static void bld_ev_dces(struct expr_t *, struct delctrl_t *);
90 static void bld_evxpr_dces(struct expr_t *, struct delctrl_t *, int32);
91 static void linkon_dce(struct net_t *, int32, int32, struct delctrl_t *,
92 int32, struct gref_t *);
93 static void init_iact_dce(struct dcevnt_t *, struct delctrl_t *,
94 struct gref_t *);
95 static void xmr_linkon_dce(struct net_t *, int32, int32, struct delctrl_t *,
96 int32, struct gref_t *);
97 static struct dcevnt_t *linkon2_dce(struct net_t *, int32, int32,
98 struct delctrl_t *, int32, int32, struct mod_t *, struct mod_t *);
99 static void prep_func_dsable(struct st_t *);
100 static void bld_init_qcaf_dce_lstlst(struct st_t *);
101 static void prep_qc_assign(struct st_t *, int32);
102 static void prep_qc_deassign(struct st_t *);
103 static struct dceauxlst_t *prep_noncat_qc_assign(struct st_t *,
104 struct expr_t *);
105 static struct dceauxlst_t *prep_noncat_qc_regforce(struct st_t *,
106 struct expr_t *);
107 static void init_qcval(struct qcval_t *);
108 static void prep_qc_wireforce(struct st_t *);
109 static void prep_qc_wirerelease(struct st_t *);
110 static void prep_noncat_qc_wireforce(struct st_t *, struct expr_t *,
111 struct dceauxlstlst_t *);
112 static void prep_noncat_qc_wirerelease(struct expr_t *);
113 static void bld_qcaf_dces(struct expr_t *, struct qcval_t *);
114 static void linkon_qcaf_dce(struct net_t *, int32, int32, struct gref_t *,
115 struct qcval_t *);
116 static void process_upwards_grp(struct gref_t *);
117 static void chk_downrel_inst_sels(struct gref_t *);
118 static void prep_tchks(void);
119 static struct tchg_t *bld_start_tchk_npp(struct tchk_t *, struct net_t *,
120 int32);
121 static struct chktchg_t *bld_check_tchk_npp(struct net_t *, int32);
122 static byte *bld_npp_oldval(struct net_t *, struct mod_t *);
123 static void reinit_npp_oldval(byte *, struct net_t *, struct mod_t *);
124 static void prep_pths(void);
125 static int32 chk_pthels(struct spcpth_t *);
126 static int32 bldchk_pb_pthdsts(struct spcpth_t *);
127 static struct tchg_t *try_add_npp_dpthsrc(struct spcpth_t *, struct net_t *,
128 int32);
129 static struct net_pin_t *find_1timchg_psnpp(struct net_t *, int32, int32);
130 static int32 bldchk_1bit_pthdst(struct spcpth_t *, struct net_t *, int32,
131 struct net_t *, int32, int32, struct tchg_t *);
132 static void get_pthbitwidths(struct spcpth_t *, int32 *, int32 *);
133 static char *bld_bitref(char *, struct net_t *, int32);
134 static int32 chk_biti_pthdst_driver(struct spcpth_t *, struct net_t *, int32);
135 static void emit_pthdst_bit_informs(struct mod_t *);
136 static void free_dctrl(struct delctrl_t *, int32);
137 static void free_csitemlst(register struct csitem_t *);
138
139 /* extern prototypes defined elsewhere */
140 extern void __setchk_all_fifo(void);
141 extern void __bld_pb_fifo(struct net_t *, int32 *, int32 *, int32 *, int32);
142 extern void __prep_exprs_and_ports(void);
143 extern void __alloc_tfdrv_wp(struct tfarg_t *, struct expr_t *,
144 struct mod_t *);
145 extern void __init_tfdrv(struct tfarg_t *, struct expr_t *, struct mod_t *);
146 extern void __prep_contas(void);
147 extern void __allocinit_perival(union pck_u *, int32, int32, int32);
148 extern void __allocinit_stperival(union pck_u *, int32, struct net_t *,
149 int32);
150 extern void __alloc_nchgaction_storage(void);
151 extern void __alloc_sim_storage(void);
152 extern void __allocinit_arr_var(struct net_t *, int32, int32);
153 extern void __init_vec_var(register word32 *, int32, int32, int32, word32,
154 word32);
155 extern int32 __get_initval(struct net_t *, int32 *);
156 extern void __allocinit_vec_var(struct net_t *, int32, int32);
157 extern void __reinitialize_vars(struct mod_t *);
158 extern void __initialize_dces(struct mod_t *);
159 extern void __prep_stmts(void);
160 extern struct st_t *__prep_lstofsts(struct st_t *, int32, int32);
161 extern void __push_nbstk(struct st_t *);
162 extern void __pop_nbstk(void);
163 extern void __dce_turn_chg_store_on(struct mod_t *, struct dcevnt_t *, int32);
164 extern struct dcevnt_t *__alloc_dcevnt(struct net_t *);
165 extern int32 __is_upward_dsable_syp(struct sy_t *, struct symtab_t *,
166 int32 *);
167 extern void __prep_xmrs(void);
168 extern void __fill_grp_targu_fld(struct gref_t *);
169 extern void __prep_specify(void);
170 extern void __xtract_wirng(struct expr_t *, struct net_t **, int32 *,
171 int32 *);
172 extern void __free_1stmt(struct st_t *);
173 extern void __free_xprlst(struct exprlst_t *);
174
175 extern void __my_free(char *, int32);
176 extern char *__msg2_blditree(char *, struct itree_t *);
177 extern char *__to_wtnam(char *, struct net_t *);
178 extern char *__to_tsktyp(char *, word32);
179 extern char *__my_malloc(int32);
180 extern struct st_t *__alloc2_stmt(int32, int32, int32);
181 extern void __getwir_range(struct net_t *, int32 *, int32 *);
182 extern void __set_gchg_func(struct gate_t *);
183 extern int32 __isleaf(struct expr_t *);
184 extern struct net_t *__find_tran_conn_np(struct expr_t *);
185 extern int32 __get_pcku_chars(int32, int32);
186 extern void __grow_xstk(void);
187 extern void __chg_xstk_width(struct xstk_t *, int32);
188 extern void __st_perinst_val(union pck_u, int32, register word32 *,
189 register word32 *);
190 extern char *__to_idnam(struct expr_t *);
191 extern int32 __get_arrwide(struct net_t *);
192 extern void __reinit_regwir_putvrec(struct net_t *, int32);
193 extern void __reinit_netdrvr_putvrec(struct net_t *, struct mod_t *);
194 extern void __init_1net_dces(struct net_t *, struct mod_t *);
195 extern void __alloc_1instdce_prevval(struct dcevnt_t *);
196 extern void __init_1instdce_prevval(struct dcevnt_t *);
197 extern int32 __get_dcewid(struct dcevnt_t *, struct net_t *);
198 extern void __ld_wire_sect(word32 *, word32 *, struct net_t *, register int32,
199 register int32);
200 extern struct xstk_t *__eval2_xpr(register struct expr_t *);
201 extern char *__to_sttyp(char *, word32);
202 extern char *__bld_lineloc(char *, word32, int32);
203 extern void __add_dctldel_pnp(struct st_t *);
204 extern void __prep_delay(struct gate_t *, struct paramlst_t *, int32, int32,
205 char *, int32, struct sy_t *, int32);
206 extern void __xmrpush_refgrp_to_targ(struct gref_t *);
207 extern int32 __ip_indsrch(char *);
208 extern struct itree_t *__find_unrt_targitp(struct gref_t *,
209 register struct itree_t *, int32);
210 extern void __add_tchkdel_pnp(struct tchk_t *, int32);
211 extern void __conn_npin(struct net_t *, int32, int32, int32, int32,
212 struct gref_t *, int32, char *);
213 extern void __add_pathdel_pnp(struct spcpth_t *);
214 extern char *__to_deltypnam(char *, word32);
215 extern void __free_xtree(struct expr_t *);
216 extern void __free_del(union del_u, word32, int32);
217 extern int32 __chk_0del(word32, union del_u, struct mod_t *);
218 extern void __push_wrkitstk(struct mod_t *, int32);
219 extern void __pop_wrkitstk(void);
220 extern void __dmp_exprtab(struct mod_t *, int32);
221 extern void __dmp_msttab(struct mod_t *, int32);
222 extern void __dmp_stmt(FILE *, struct st_t *, int32);
223 extern char *__regab_tostr(char *, word32 *, word32 *, int32, int32, int32);
224 extern char *__xregab_tostr(char *, word32 *, word32 *, int32,
225 struct expr_t *);
226 extern char *__to_opname(word32);
227 extern struct expr_t *__sim_copy_expr(struct expr_t *);
228 extern struct expr_t *__copy_expr(struct expr_t *);
229 extern struct mod_t *__get_mast_mdp(struct mod_t *);
230 extern struct net_t *__tranx_to_netbit(register struct expr_t *, int32,
231 int32 *, struct itree_t *oside_itp);
232 extern struct mipd_t *__get_mipd_from_port(struct mod_pin_t *, int32);
233 extern struct tenp_t *__bld_portbit_netbit_map(struct mod_pin_t *);
234 extern struct net_pin_t *__alloc_npin(int32, int32, int32);
235 extern void __alloc_qcval(struct net_t *);
236 extern void __get_qc_wirrng(struct expr_t *, struct net_t **, int32 *,
237 int32 *, struct itree_t **);
238 extern void __prep_insrc_monit(struct st_t *, int32);
239 extern int32 __cnt_dcelstels(register struct dcevnt_t *);
240 extern void __dcelst_off(struct dceauxlst_t *);
241
242 extern void __gfwarn(int32, word32, int32, char *, ...);
243 extern void __sgfwarn(int32, char *, ...);
244 extern void __gfinform(int32, word32, int32, char *, ...);
245 extern void __gferr(int32, word32, int32, char *, ...);
246 extern void __sgferr(int32, char *, ...);
247 extern void __dbg_msg(char *, ...);
248 extern void __arg_terr(char *, int32);
249 extern void __case_terr(char *, int32);
250 extern void __misc_terr(char *, int32);
251 extern void __misc_sgfterr(char *, int32);
252 extern void __misc_gfterr(char *, int32, word32, int32);
253 extern void __sgfterr(int32, char *, ...);
254 extern void __my_fprintf(FILE *, char *, ...);
255 extern void __free_stlst(register struct st_t *);
256 static void cmp_xform_delay(int32, union del_u);
257 static void cmp_xform_ports(void);
258 static void cmp_xform_ialst(void);
259 static struct st_t *cmp_xform_lstofsts(register struct st_t *);
260 static struct st_t *cmp_xform1_stmt(register struct st_t *, struct st_t *);
261 static void xform_tf_syst_enable(struct st_t *);
262 static struct expr_t *mv1_expr_totab(struct expr_t *);
263 static void xform_tf_sysf_call(struct expr_t *);
264 static void cmp_xform_csitemlst(register struct csitem_t *);
265 static void cxf_fixup_loopend_goto(struct st_t *, struct st_t *);
266 static void cxf_fixup_lstofsts_gotos(struct st_t *, int32);
267 static void cxf_fixup_case_gotos(struct st_t *);
268 static void cxf_fixup_func_dsabl_gotos(struct st_t *);
269 static void cmp_xform_inst_conns(void);
270 static void cmp_xform_gates(void);
271 static void cmp_xform_contas(void);
272 static void cmp_xform_tasks(void);
273 static void cmp_xform_specify(void);
274 static char *bld_opname(char *, struct expr_t *);
275
276 extern word32 __masktab[];
277
278 int32 __prep_numsts;
279
280 /*
281 * FAN OUT AND PORT COLLAPSING ROUTINES
282 */
283
284 /*
285 * set and check all wire fi and fo
286 * here must ignore any added for unc. bid. wires
287 * cannot check fifo for wires in tran channels
288 *
289 * LOOKATME - maybe allocate different formats depending on fi/fo
290 * to keep lists short
291 *
292 * SJM - 06/25/00 - difference from 2018c since change chg state algorithm
293 */
__setchk_all_fifo(void)294 extern void __setchk_all_fifo(void)
295 {
296 register int32 ni;
297 register struct net_t *np;
298 struct mod_t *mdp;
299
300 for (mdp = __modhdr; mdp != NULL; mdp = mdp->mnxt)
301 {
302 if (mdp->mnnum == 0) continue;
303
304 __push_wrkitstk(mdp, 0);
305 for (ni = 0, np = &(__inst_mod->mnets[0]); ni < __inst_mod->mnnum;
306 ni++, np++)
307 {
308 /* --- SJM 11/20/01 - removed since useless and slow
309 if (np->ntraux != NULL) { setchk_trchan_wire(np); continue; }
310 --- */
311 if (np->ntyp >= NONWIRE_ST) continue;
312 setchk_1w_fifo(np);
313 }
314 __pop_wrkitstk();
315 }
316 }
317
318 /*
319 * check for tran channels with some bits in and some not
320 * for non tran channel wires (no edges) remove vibp
321 * notice all tran connected nets have fi>1 set
322 */
323 /* --- SJM 11/20/01 - removed since warning useless and this takes too
324 long to make freeing rare small amount of memory worth it
325
326 static void setchk_trchan_wire(struct net_t *np)
327 {
328 register int32 bi, ii;
329 struct traux_t *trap;
330 struct itree_t *itp;
331 struct vbinfo_t *vbip;
332 char s1[RECLEN], s2[RECLEN], s3[RECLEN];
333
334 trap = np->ntraux;
335 for (bi = 0; bi < np->nwid; bi++)
336 {
337 for (ii = 0; ii < __inst_mod->flatinum; ii++)
338 {
339 vbip = trap->vbitchans[ii*np->nwid + bi];
340 if (vbip != NULL)
341 {
342 if (vbip->vivxp->vedges != NULL) continue;
343 __my_free((char *) vbip, sizeof(struct vbinfo_t));
344 trap->vbitchans[ii*np->nwid + bi] = NULL;
345 }
346 -* SJM 01/26/99 - new connect modules not in tran channels *-
347 if ((itp = cnvt_to_itp(__inst_mod, ii)) == NULL) continue;
348
349 sprintf(s1, "in %s(%s):", __inst_mod->msym->synam,
350 __msg2_blditree(__xs, itp));
351 if (np->n_isavec) sprintf(s3, "%s[%d]", np->nsym->synam, bi);
352 else strcpy(s3, np->nsym->synam);
353 __gfinform(450, np->nsym->syfnam_ind, np->nsym->sylin_cnt,
354 "%s %s %s not in any transistor channel but other bits are",
355 s1, __to_wtnam(s2, np), s3);
356 }
357 }
358 }
359 --- */
360
361 /*
362 * set and check for 1 wire
363 * but no warns or informs
364 *
365 * sets n_multfi bit, but not set for tran channel's which need fi>1 eval
366 * because instead x_multfi is set, n_multfi used for other checking
367 */
static void setchk_1w_fifo(struct net_t *np)
{
 register int32 ii;
 int32 scalfo, nd_fi_chk;
 int32 *pbfi, *pbfo, *pbtcfo;
 char s1[RECLEN];

 /* no matter what tri0/tri1 and supply0/supply1 must be multi-fi */
 /* NOTE: deliberately no default - all other net types left unchanged */
 switch ((byte) np->ntyp) {
 case N_TRI0: case N_TRI1: case N_TRIREG: case N_SUPPLY0: case N_SUPPLY1:
  np->n_multfi = TRUE;
 }

 /* assume fan-in checking needed unless net type makes it meaningless */
 nd_fi_chk = TRUE;
 /* special case 1: supply net - drivers can never override pwr/gnd, so
    any driver is only worth a warning and fan-in checking is skipped */
 if (np->ntyp == N_SUPPLY0 || np->ntyp == N_SUPPLY1)
  {
   if (np->ndrvs != NULL)
    {
     __gfwarn(607, np->nsym->syfnam_ind, np->nsym->sylin_cnt,
      "in %s: %s %s has driver(s) that have no effect",
      __inst_mod->msym->synam, __to_wtnam(s1, np), np->nsym->synam);
    }
   /* pwr/gnd should not have drivers */
   nd_fi_chk = FALSE;
  }
 /* special case 2: scalar wire - one fi count and one fo count suffice */
 if (!np->n_isavec)
  {
   if (nd_fi_chk)
    {
     if (np->ndrvs == NULL)
      {
       /* inform only - a driverless wire is legal but suspicious */
       if (np->nsym->sy_impldecl)
        __gfinform(420, np->nsym->syfnam_ind, np->nsym->sylin_cnt,
         "in %s: implicitly declared scalar wire %s has no drivers",
         __inst_mod->msym->synam, np->nsym->synam);
       else __gfinform(427, np->nsym->syfnam_ind, np->nsym->sylin_cnt,
        "in %s: scalar wire %s has no drivers", __inst_mod->msym->synam,
        np->nsym->synam);
      }
     /* two or more entries on driver list means multi fan-in */
     else if (np->ndrvs->npnxt != NULL) np->n_multfi = TRUE;
    }
   /* scalar case multi fo set */
   scalfo = cnt_scalar_fo(np);
   if (scalfo == 0)
    {
     if (np->nsym->sy_impldecl)
      __gfinform(429, np->nsym->syfnam_ind, np->nsym->sylin_cnt,
       "in %s: implicitly declared scalar wire %s drives no declarative fan-out",
       __inst_mod->msym->synam, np->nsym->synam);
     else __gfinform(426, np->nsym->syfnam_ind, np->nsym->sylin_cnt,
      "in %s: scalar wire %s drives no declarative fan-out",
      __inst_mod->msym->synam, np->nsym->synam);
    }
   return;
  }
 /* general case: vector wire - need per bit fi/fo/tchg-fo count tables */
 pbfi = (int32 *) __my_malloc(sizeof(int32)*np->nwid);
 pbfo = (int32 *) __my_malloc(sizeof(int32)*np->nwid);
 pbtcfo = (int32 *) __my_malloc(sizeof(int32)*np->nwid);
 if (!has_npp_isform(np->ndrvs))
  {
   /* no IS (per-instance select) drivers: one pass covers all instances */
   /* notice not using t chg counts for now */
   __bld_pb_fifo(np, pbfi, pbfo, pbtcfo, 0);
   chkset_vec_fifo(np, pbfi, pbfo, 0, FALSE, nd_fi_chk);
  }
 else
  {
   /* tricky has IS form case - must do for every inst */
   /* if any inst. has fi > 1, then wire must be all multi fan in */
   for (ii = 0; ii < __inst_mod->flatinum; ii++)
    chkset_vec_fifo(np, pbfi, pbfo, ii, TRUE, nd_fi_chk);
  }
 /* notice task/func/lb variables always regs - never fi > 1 */
 /* done free the tables */
 __my_free((char *) pbfi, sizeof(int32)*np->nwid);
 __my_free((char *) pbfo, sizeof(int32)*np->nwid);
 __my_free((char *) pbtcfo, sizeof(int32)*np->nwid);
}
449
450 /*
451 * return number of non TCHG fan out for scalar
452 * MIPD never seen here - only added by PLI or SDF after elaboration
453 */
cnt_scalar_fo(struct net_t * np)454 static int32 cnt_scalar_fo(struct net_t *np)
455 {
456 register struct net_pin_t *npp;
457 int32 nfi;
458
459 for (nfi = 0, npp = np->nlds; npp != NULL; npp = npp->npnxt)
460 {
461 if (npp->npntyp != NP_TCHG) nfi++;
462 }
463 return(nfi);
464 }
465
466 /*
467 * return T if has IS (-2) form net pin list entry
468 */
has_npp_isform(register struct net_pin_t * npp)469 static int32 has_npp_isform(register struct net_pin_t *npp)
470 {
471 struct npaux_t *npauxp;
472
473 for (; npp != NULL; npp = npp->npnxt)
474 { if ((npauxp = npp->npaux) != NULL && npauxp->nbi1 == -2) return(TRUE); }
475 return(FALSE);
476 }
477
478 /*
479 * build per bit fi and fo tables - caller must pass wide enough tables
480 */
/*
 * fill per-bit fan-in (pbfi), fan-out (pbfo) and timing-check fan-out
 * (pbtcfo) count tables for vector net np; caller must pass tables at
 * least np->nwid entries wide; ii is the flat instance number used to
 * resolve IS (per-instance, nbi1 == -2) bit selects
 *
 * npaux encoding: NULL or nbi1 == -1 means the whole wire, nbi1 == -2
 * means IS form (bit index per instance in __contab), otherwise
 * [nbi1:nbi2.i] is a constant bit range (nbi1 is the high bit)
 */
extern void __bld_pb_fifo(struct net_t *np, int32 *pbfi, int32 *pbfo,
 int32 *pbtcfo, int32 ii)
{
 register struct net_pin_t *npp;
 register int32 bi;
 register struct npaux_t *npauxp;
 word32 *wp;

 memset(pbfi, 0, sizeof(int32)*np->nwid);
 memset(pbfo, 0, sizeof(int32)*np->nwid);
 memset(pbtcfo, 0, sizeof(int32)*np->nwid);

 /* pass 1: drivers contribute to per bit fan-in */
 for (npp = np->ndrvs; npp != NULL; npp = npp->npnxt)
  {
   if ((npauxp = npp->npaux) == NULL || npauxp->nbi1 == -1)
    {
     /* full wire driver - every bit gains one fan-in */
     for (bi = 0; bi < np->nwid; bi++) (pbfi[bi])++;
     continue;
    }
   if (npauxp->nbi1 == -2)
    {
     /* SJM 10/12/04 - because contab realloced, must be ndx base of IS */
     wp = &(__contab[npauxp->nbi2.xvi]);
     /* DBG remove - here should never has x in index */
     if (wp[2*ii + 1] != 0L) __arg_terr(__FILE__, __LINE__);
     bi = (int32) wp[2*ii];
     (pbfi[bi])++;
     continue;
    }
   /* constant part/bit select range [nbi1:nbi2.i] */
   for (bi = npauxp->nbi1; bi >= npauxp->nbi2.i; bi--) (pbfi[bi])++;
  }
 /* pass 2: loads contribute to fan-out (NP_TCHG kept in separate table) */
 for (npp = np->nlds; npp != NULL; npp = npp->npnxt)
  {
   if ((npauxp = npp->npaux) == NULL || npauxp->nbi1 == -1)
    {
     for (bi = 0; bi < np->nwid; bi++)
      {
       /* MIPD never seen here - only added by PLI or SDF after elaboration */
       if (npp->npntyp == NP_TCHG) (pbtcfo[bi])++; else (pbfo[bi])++;
      }
     continue;
    }
   if (npauxp->nbi1 == -2)
    {
     /* SJM 10/12/04 - because contab realloced, must be ndx base of IS */
     wp = &(__contab[npauxp->nbi2.xvi]);

     /* DBG remove - here should never has x in index */
     if (wp[2*ii + 1] != 0L) __arg_terr(__FILE__, __LINE__);
     bi = (int32) wp[2*ii];
     if (npp->npntyp == NP_TCHG) (pbtcfo[bi])++; else (pbfo[bi])++;
     continue;
    }
   for (bi = npauxp->nbi1; bi >= npauxp->nbi2.i; bi--)
    { if (npp->npntyp == NP_TCHG) (pbtcfo[bi])++; else (pbfo[bi])++; }
  }
}
538
539 /*
540 * check fan-in and fan-out and set multiple driver net bit for vector
541 * know vector or will not be called
542 * could call lds and drivers reorganization routine from in here
543 */
/*
 * check fan-in and fan-out and set multiple driver net bit for vector np
 * know vector or will not be called
 *
 * pbfi/pbfo are the per-bit fan-in/fan-out count tables built by
 * __bld_pb_fifo; ii is the flat instance number (only meaningful when
 * isform is T, i.e. the net has per-instance IS selects); nd_fi_chk F
 * suppresses all fan-in informs (supply nets)
 *
 * emits informs only - none of these conditions are errors
 *
 * FIX: corrected "declartive" typo in the per-bit no-fan-out inform
 */
static void chkset_vec_fifo(struct net_t *np, int32 *pbfi, int32 *pbfo,
 int32 ii, int32 isform, int32 nd_fi_chk)
{
 register int32 bi;
 int32 r1, r2;
 int32 someno_fanin, someno_fanout, allno_fanin, allno_fanout;
 struct itree_t *itp;
 char s1[IDLEN], s2[RECLEN], s3[RECLEN];

 /* DBG remove - non scalared vectors never have ranged npps or impl decl */
 if (!np->vec_scalared && (has_rng_npp(np) || np->nsym->sy_impldecl))
  __misc_terr(__FILE__, __LINE__);

 someno_fanin = someno_fanout = FALSE;
 allno_fanin = allno_fanout = TRUE;
 /* scan the per bit tables - classify fan-in/fan-out coverage */
 for (bi = 0; bi < np->nwid; bi++)
  {
   if (pbfi[bi] == 0) someno_fanin = TRUE;
   /* notice for is form, any > 1 will set for entire wire */
   else { allno_fanin = FALSE; if (pbfi[bi] >= 2) np->n_multfi = TRUE; }
   if (pbfo[bi] == 0) someno_fanout = TRUE; else allno_fanout = FALSE;
  }
 /* every bit has both fan-in and fan-out - nothing to report */
 if (!allno_fanin && !someno_fanin && !allno_fanout && !someno_fanout)
  return;
 /* build the location prefix; for IS form identify the instance since
    the fi/fo pattern can differ per instance */
 if (isform)
  {
   if ((itp = cnvt_to_itp(__inst_mod, ii)) != NULL)
    {
     sprintf(s1, "in %s(%s):", __inst_mod->msym->synam, __msg2_blditree(__xs,
      itp));
    }
   else sprintf(s1, "in %s(CONNECT?):", __inst_mod->msym->synam);
  }
 else sprintf(s1, "in %s:", __inst_mod->msym->synam);
 if (np->vec_scalared) strcpy(s2, ""); else strcpy(s2, " vectored");
 __getwir_range(np, &r1, &r2);
 /* whole-wire informs first */
 if (allno_fanin && nd_fi_chk)
  {
   __gfinform(418, np->nsym->syfnam_ind, np->nsym->sylin_cnt,
    "%s %s%s [%d:%d] %s has no drivers (no bit has fan-in)", s1,
    __to_wtnam(s3, np), s2, r1, r2, np->nsym->synam);
  }
 if (allno_fanout)
  {
   __gfinform(428, np->nsym->syfnam_ind, np->nsym->sylin_cnt,
    "%s %s%s [%d:%d] %s drives nothing (no bit has fan-out)", s1,
    __to_wtnam(s3, np), s2, r1, r2, np->nsym->synam);
  }
 /* next emit bit by bit informs - only when some but not all bits lack */
 if (nd_fi_chk)
  {
   if (someno_fanin && !allno_fanin)
    {
     for (bi = 0; bi < np->nwid; bi++)
      {
       if (pbfi[bi] == 0)
        {
         __gfinform(418, np->nsym->syfnam_ind, np->nsym->sylin_cnt,
          "%s %s%s [%d:%d] %s bit %d has no declarative drivers (no fan-in)",
          s1, __to_wtnam(s3, np), s2, r1, r2, np->nsym->synam, bi);
        }
      }
    }
  }
 if (someno_fanout && !allno_fanout)
  {
   for (bi = 0; bi < np->nwid; bi++)
    {
     if (pbfo[bi] == 0)
      {
       __gfinform(428, np->nsym->syfnam_ind, np->nsym->sylin_cnt,
        "%s %s%s [%d:%d] %s bit %d drives nothing (no declarative fan-out)",
        s1, __to_wtnam(s3, np), s2, r1, r2, np->nsym->synam, bi);
      }
    }
  }
}
624
625 /*
626 * return T if any net pin is non -1 form
627 */
has_rng_npp(struct net_t * np)628 static int32 has_rng_npp(struct net_t *np)
629 {
630 register struct net_pin_t *npp;
631 struct npaux_t *npauxp;
632
633 for (npp = np->nlds; npp != NULL; npp = npp->npnxt)
634 {
635 if ((npauxp = npp->npaux) != NULL && npauxp->nbi1 != -1) return(TRUE);
636 }
637 return(FALSE);
638 }
639
640 /*
641 * convert a module and an itinum to the corresponding itp location
642 * this searches
643 */
/*
 * convert a module and flat instance number to its itree location by
 * searching down from every top level module's itree root
 *
 * a (module, instance number) pair that maps to no itree location is an
 * internal inconsistency - reported via __arg_terr
 */
static struct itree_t *cnvt_to_itp(struct mod_t *mdp, int32 itino)
{
 register int32 ti;
 struct itree_t *hitp;

 for (ti = 0; ti < __numtopm; ti++)
  {
   hitp = cnvt_todown_itp(__it_roots[ti], mdp, itino);
   if (hitp != NULL) return(hitp);
  }
 __arg_terr(__FILE__, __LINE__);
 return(NULL);
}
657
658 /*
659 * dump a down level of a tree
660 */
/*
 * recursively search the itree subtree rooted at itp for the instance
 * of module mdp whose flat instance number is itino; return NULL when
 * the instance is not in this subtree
 */
static struct itree_t *cnvt_todown_itp(struct itree_t *itp,
 struct mod_t *mdp, int32 itino)
{
 register int32 ci;
 int32 ninsts;
 struct itree_t *subitp;

 /* does this node itself match? */
 if (itp->itip->imsym->el.emdp == mdp && itp->itinum == itino) return(itp);

 /* descend into every child instance of this node's module */
 ninsts = itp->itip->imsym->el.emdp->minum;
 for (ci = 0; ci < ninsts; ci++)
  {
   subitp = cnvt_todown_itp(&(itp->in_its[ci]), mdp, itino);
   if (subitp != NULL) return(subitp);
  }
 return(NULL);
}
677
678 /*
679 * ROUTINE TO PREPARE EXPRESSIONS
680 */
681
682 /*
683 * check all expr. things that cannot be checked until most of prep done
684 * 1) set expr. >1 fi bit
685 * check for port and inst psel direction mismatch (warning)
686 * inout ports multi-fi and set here
687 */
extern void __prep_exprs_and_ports(void)
{
 register int32 pi, ii, gi, cai;
 register struct mod_pin_t *mpp;
 int32 pnum, derrtyp;
 struct inst_t *ip;
 struct mod_t *mdp, *imdp;
 struct gate_t *gp;
 struct conta_t *cap;
 struct expr_t *xp;
 struct conta_t *pbcap;
 char s1[RECLEN];

 for (mdp = __modhdr; mdp != NULL; mdp = mdp->mnxt)
  {
   /* port lhs expr. that drive fi > 1 must be set for each inst. in mod */
   for (ii = 0; ii < mdp->minum; ii++)
    {
     ip = &(mdp->minsts[ii]);
     imdp = ip->imsym->el.emdp;
     if ((pnum = imdp->mpnum) == 0) continue;

     for (pi = 0; pi < pnum; pi++)
      {
       mpp = &(imdp->mpins[pi]);
       /* any up iconn connection to inout is fi>1 here */
       xp = ip->ipins[pi];

       /* input port never lhs */
       if (mpp->mptyp == IO_IN) continue;

       /* will never see inouts, if in tran chan. no drivers */
       if (lhs_has_figt1(xp)) xp->x_multfi = TRUE;
      }
    }
   /* gate outputs may drive fi > 1 and be not strength */
   for (gi = 0; gi < mdp->mgnum; gi++)
    {
     gp = &(mdp->mgates[gi]);

     switch ((byte) gp->g_class) {
     case GC_LOGIC: case GC_UDP: case GC_BUFIF: case GC_MOS: case GC_CMOS:
      /* output (pin 0) drives fi > 1 - mark its lhs expression */
      if (lhs_has_figt1(gp->gpins[0])) gp->gpins[0]->x_multfi = TRUE;
      for (pi = 1; pi < (int32) gp->gpnum; pi++)
       {
        /* build input location string for diagnostics */
        if (gp->g_class != GC_UDP)
         sprintf(s1, "%s gate input %d", gp->gmsym->synam, pi + 1);
        else sprintf(s1, "udp \"%s\" input %d", gp->gmsym->synam, pi + 1);
        xp = gp->gpins[pi];

        /* check for lhs and rhs same wire, delay type determines error */
        derrtyp = __chk_0del(gp->g_delrep, gp->g_du, mdp);
        chk_decl_siderep(gp->gpins[0], xp, s1, derrtyp,
         gp->gsym->syfnam_ind, gp->gsym->sylin_cnt);
       }
      /* set the input change eval routine for the gate */
      __set_gchg_func(gp);
      break;
     /* no processing for trans - in separate tran switch channel */
     /* tran. channel expressions never seen */
     /* hard (channel input) driver expressions will be marked as fi>1 */
     case GC_TRAN:
      break;
     case GC_TRANIF:
      chk_trifctrl_insame_chan(gp, gp->gpins[2]);
      /* if tranif ctrl expr non leaf, warn if in same channel as term */
      if (!__isleaf(gp->gpins[2]))
       {
        chk_samechan_trifctrl_simple(mdp, gp, 0, gp->gpins[0], gp->gpins[2]);
        chk_samechan_trifctrl_simple(mdp, gp, 1, gp->gpins[1], gp->gpins[2]);
       }
      break;
     /* pull really source on wire not lhs */
     case GC_PULL: break;
     default: __case_terr(__FILE__, __LINE__);
     }
    }

   /* continuous assign lhs exprs - mark multi fan-in, and also mark
      each per-bit conta copy when the conta is split for per-bit sim */
   for (cap = &(mdp->mcas[0]), cai = 0; cai < mdp->mcanum; cai++, cap++)
    {
     if (lhs_has_figt1(cap->lhsx))
      {
       cap->lhsx->x_multfi = TRUE;
       if (cap->ca_pb_sim)
        {
         for (pi = 0; pi < cap->lhsx->szu.xclen; pi++)
          {
           pbcap = &(cap->pbcau.pbcaps[pi]);
           pbcap->lhsx->x_multfi = TRUE;
          }
        }
      }
    }

   /* module in or inout ports can be non strength but drive fi > 1 */
   pnum = mdp->mpnum;
   for (pi = 0; pi < pnum; pi++)
    {
     mpp = &(mdp->mpins[pi]);
     /* output port internal expr never driven from inside here */
     if (mpp->mptyp == IO_OUT) continue;
     xp = mpp->mpref;
     /* inout ports always treated as multi fan-in */
     if (mpp->mptyp == IO_BID) { xp->x_multfi = TRUE; continue; }
     if (lhs_has_figt1(xp)) xp->x_multfi = TRUE;
    }
  }
 prep_tf_rwexprs();
}
795
796 /*
797 * check tranif 3rd ctrl input in same channel
798 * LOOKATME - possible for xmr to cause this to be ok
799 */
/*
 * check tranif 3rd ctrl input in same channel
 * LOOKATME - possible for xmr to cause this to be ok
 */
static void chk_trifctrl_insame_chan(struct gate_t *gp, struct expr_t *ndp)
{
 struct net_t *np0, *np1, *np2;
 int32 pi;

 if (__isleaf(ndp))
  {
   /* only net references matter - constants etc. cannot be in a channel */
   if (ndp->optyp == ID || ndp->optyp == GLBREF)
    {
     np2 = ndp->lu.sy->el.enp;
     /* ctrl net not in any tran channel - no oscillation possible */
     if (np2->ntraux == NULL) return;
     np0 = __find_tran_conn_np(gp->gpins[0]);
     np1 = __find_tran_conn_np(gp->gpins[1]);
     /* first bidirect terminal (port 0) same net as ctrl input? */
     pi = -1;
     if (np2 == np0) pi = 0;
     /* supply nets cannot change so they cannot oscillate - skip them */
     if (pi != -1 && (np2->ntyp != N_SUPPLY0 && np2->ntyp != N_SUPPLY1))
      {
       __gfinform(3011, gp->gsym->syfnam_ind, gp->gsym->sylin_cnt,
        "%s %s non supply net %s (port %d) appears in tran channel and control input - possible infinite loop oscillation",
        gp->gmsym->synam, gp->gsym->synam, np2->nsym->synam, pi);
      }
     /* second bidirect terminal (port 1) same net as ctrl input? */
     pi = -1;
     if (np2 == np1) pi = 1;
     if (pi != -1 && (np2->ntyp != N_SUPPLY0 && np2->ntyp != N_SUPPLY1))
      {
       __gfinform(3011, gp->gsym->syfnam_ind, gp->gsym->sylin_cnt,
        "%s %s non supply net %s (port %d) appears in tran channel and control input - possible infinite loop oscillation",
        gp->gmsym->synam, gp->gsym->synam, np2->nsym->synam, pi);
      }
    }
   return;
  }
 /* non leaf ctrl expr - recursively check every operand */
 if (ndp->lu.x != NULL) chk_trifctrl_insame_chan(gp, ndp->lu.x);
 if (ndp->ru.x != NULL) chk_trifctrl_insame_chan(gp, ndp->ru.x);
}
835
836 /*
837 * check to see if tranif enable in same channel as the bidirect terminals
838 * and expr non simple (if simple will use tranif node vertex value)
839 * if complex will not update the enable during tran switch channel relax
840 */
chk_samechan_trifctrl_simple(struct mod_t * mdp,struct gate_t * gp,int32 pi,struct expr_t * termxp,struct expr_t * ctrlxp)841 static void chk_samechan_trifctrl_simple(struct mod_t *mdp, struct gate_t *gp,
842 int32 pi, struct expr_t *termxp, struct expr_t *ctrlxp)
843 {
844 register int32 ii;
845 int32 bi, bi2, chanid0, chanid2, inum2;
846 struct net_t *np0, *np2;
847 struct vbinfo_t *vbip;
848 struct gref_t *grp;
849
850 np0 = __find_tran_conn_np(termxp);
851 /* if terminal net is not in enable expr no problem possible */
852 if (!net_in_expr(ctrlxp, np0)) return;
853
854 /* DBG remove */
855 if (np0->ntraux == NULL) __misc_terr(__FILE__, __LINE__);
856 /* --- */
857 __push_itstk(mdp->moditps[0]);
858 /* SJM 08/02/01 - only called for tranif so other side itp same */
859 /* get terminal net/bit */
860 np0 = __tranx_to_netbit(termxp, 0, &bi, __inst_ptr);
861 /* DBG remove */
862 if (np0->ntraux == NULL) __misc_terr(__FILE__, __LINE__);
863 /* --- */
864
865 bi2 = (bi == -1) ? 0 : bi;
866 vbip = np0->ntraux->vbitchans[np0->nwid*__inum + bi2];
867 /* SJM 08/07/01 - bit may not be in channel */
868 if (vbip == NULL) goto done;
869 chanid0 = vbip->chan_id;
870
871 if (ctrlxp->optyp == LSB)
872 {
873 /* BEWARE - this assumes all constant folded */
874 if (termxp->ru.x->optyp != NUMBER)
875 {
876 __gfwarn(3112, gp->gsym->syfnam_ind, gp->gsym->sylin_cnt,
877 "%s %s third enable input net %s probably in same switch channel as terminal %d but bit select index non constant - updated node value not used when solving channel",
878 gp->gmsym->synam, gp->gsym->synam, np0->nsym->synam, pi);
879 }
880 goto done;
881 }
882 if (termxp->optyp == PARTSEL)
883 {
884 chanid2 = vbip->chan_id;
885 np2 = ctrlxp->lu.x->lu.sy->el.enp;
886 if (ctrlxp->lu.x->optyp == GLBREF)
887 {
888 grp = ctrlxp->lu.x->ru.grp;
889 for (ii = 0; ii < grp->targmdp->flatinum; ii++)
890 {
891 /* part select here uses low bit */
892
893 inum2 = grp->targmdp->moditps[ii]->itinum;
894 vbip = np0->ntraux->vbitchans[np0->nwid*inum2];
895 /* SJM 08/07/01 - low bit may not be in channel */
896 if (vbip == NULL) goto done;
897 chanid2 = vbip->chan_id;
898 if (chanid0 == chanid2)
899 {
900 __gfwarn(3115, gp->gsym->syfnam_ind, gp->gsym->sylin_cnt,
901 "%s %s third enable input heirarchical reference part select of net %s in same switch channel as terminal %d - updated node value not used when solving channel",
902 gp->gmsym->synam, gp->gsym->synam, np0->nsym->synam, pi);
903 goto done;
904 }
905 }
906 goto done;
907 }
908 /* non xmr part select case */
909 bi = __contab[ctrlxp->ru.x->ru.xvi];
910 /* DBG remove */
911 if (np2->ntraux == NULL) __misc_terr(__FILE__, __LINE__);
912 /* --- */
913 vbip = np2->ntraux->vbitchans[np0->nwid*__inum + bi];
914 /* SJM 08/07/01 - low bit may not be in channel */
915 if (vbip == NULL) goto done;
916 chanid2 = vbip->chan_id;
917 if (chanid0 == chanid2)
918 {
919 __gfwarn(3116, gp->gsym->syfnam_ind, gp->gsym->sylin_cnt,
920 "%s %s third enable input part select of net %s in same switch channel as terminal %d - updated node value not used when solving channel",
921 gp->gmsym->synam, gp->gsym->synam, np0->nsym->synam, pi);
922 }
923 goto done;
924 }
925 /* complex expression - can't tell if really in */
926 __gfwarn(3118, gp->gsym->syfnam_ind, gp->gsym->sylin_cnt,
927 "%s %s third enable input complex expression contains net %s possibly in same switch channel as terminal %d - updated node value not used when solving channel",
928 gp->gmsym->synam, gp->gsym->synam, np0->nsym->synam, pi);
929
930 done:
931 __pop_itstk();
932 }
933
934 /*
935 * return T if net in expr
936 */
net_in_expr(struct expr_t * ndp,struct net_t * np)937 static int32 net_in_expr(struct expr_t *ndp, struct net_t *np)
938 {
939 struct net_t *np1;
940
941 if (__isleaf(ndp))
942 {
943 if (ndp->optyp == ID || ndp->optyp == GLBREF)
944 {
945 /* T even if different instances */
946 np1 = ndp->lu.sy->el.enp;
947 if (np == np1) return(TRUE);
948 }
949 return(FALSE);
950 }
951 if (ndp->lu.x != NULL)
952 { if (net_in_expr(ndp->lu.x, np)) return(TRUE); }
953 if (ndp->ru.x != NULL)
954 { if (net_in_expr(ndp->ru.x, np)) return(TRUE); }
955 return(FALSE);
956 }
957
958 /*
959 * prepare tf rw expressions
960 * set multfi bits for tf_ rw expressions involving wires
961 * concat never tf_ rw
962 */
prep_tf_rwexprs(void)963 static void prep_tf_rwexprs(void)
964 {
965 register int32 pi;
966 register struct tfrec_t *tfrp;
967 register struct tfarg_t *tfap;
968 struct expr_t *xp;
969
970 for (tfrp = __tfrec_hdr; tfrp != NULL; tfrp = tfrp->tfrnxt)
971 {
972 for (pi = 1; pi < tfrp->tfanump1; pi++)
973 {
974 tfap = &(tfrp->tfargs[pi]);
975 xp = tfap->arg.axp;
976 if (!xp->tf_isrw) continue;
977 /* only wires have multiple fan in */
978 if (tfap->anp->ntyp >= NONWIRE_ST) continue;
979
980 /* no context module or inst neede in here */
981 if (lhs_has_figt1(xp))
982 { xp->x_multfi = TRUE; __alloc_tfdrv_wp(tfap, xp, tfrp->tf_inmdp); }
983 }
984 }
985 }
986
987 /*
988 * allocate the tfdrv wp
989 * notice this can never be array
990 */
__alloc_tfdrv_wp(struct tfarg_t * tfap,struct expr_t * xp,struct mod_t * mdp)991 extern void __alloc_tfdrv_wp(struct tfarg_t *tfap, struct expr_t *xp,
992 struct mod_t *mdp)
993 {
994 int32 totchars;
995
996 if (xp->x_stren)
997 tfap->tfdrv_wp.bp = (byte *) __my_malloc(xp->szu.xclen*mdp->flatinum);
998 else
999 {
1000 totchars = __get_pcku_chars(xp->szu.xclen, mdp->flatinum);
1001 tfap->tfdrv_wp.wp = (word32 *) __my_malloc(totchars);
1002 }
1003 __init_tfdrv(tfap, xp, mdp);
1004 }
1005
/*
 * initialize tf arg value to z
 * may be strength
 *
 * tfap's tfdrv_wp storage must already be allocated (by __alloc_tfdrv_wp)
 * mdp is the module containing the tf call - one value stored per instance
 */
extern void __init_tfdrv(struct tfarg_t *tfap, struct expr_t *xp,
 struct mod_t *mdp)
{
 register int32 i;
 byte *sbp;
 struct xstk_t *xsp;

 if (xp->x_stren)
  {
   /* strength case - every per bit per instance byte set to hiz */
   sbp = (byte *) tfap->tfdrv_wp.bp;
   set_byteval_(sbp, mdp->flatinum*xp->szu.xclen, ST_HIZ);
  }
 else
  {
   /* build one all z value (a part 0s, b part 1s) on the expr stack */
   push_xstk_(xsp, xp->szu.xclen);
   zero_allbits_(xsp->ap, xp->szu.xclen);
   one_allbits_(xsp->bp, xp->szu.xclen);
   /* this does not access mod con tab */

   __push_wrkitstk(mdp, 0);
   /* store the same z value once per flattened instance - per inst */
   /* store routine reads current __inum/itinum */
   for (i = 0; i < mdp->flatinum; i++)
    {
     __inst_ptr->itinum = i;
     __inum = i;
     /* no need to access mod con table here */
     __st_perinst_val(tfap->tfdrv_wp, xp->szu.xclen, xsp->ap, xsp->bp);
    }
   __pop_xstk();
   __pop_wrkitstk();
  }
}
1041
1042 /*
1043 * return T if lhs has at least 1 fi > 1 net
1044 * this is needed because expr. bit also set for any strength but for
1045 * fi == 1 strength do not need
1046 * any wire in a tran/inout channel must be fi>1
1047 */
lhs_has_figt1(struct expr_t * lhsx)1048 static int32 lhs_has_figt1(struct expr_t *lhsx)
1049 {
1050 struct expr_t *xp;
1051 struct net_t *np;
1052
1053 switch ((byte) lhsx->optyp ) {
1054 case OPEMPTY: break;
1055 case ID:
1056 case GLBREF:
1057 np = lhsx->lu.sy->el.enp;
1058 chk_net_bit:
1059 /* any wire in tran channel is fi>1 */
1060 if (np->ntraux != NULL) return(TRUE);
1061 if (np->n_multfi) return(TRUE);
1062 break;
1063 case LSB:
1064 case PARTSEL:
1065 np = lhsx->lu.x->lu.sy->el.enp;
1066 goto chk_net_bit;
1067 case LCB:
1068 /* know for lhs at most 1 level of concatenate */
1069 for (xp = lhsx->ru.x; xp != NULL; xp = xp->ru.x)
1070 { if (lhs_has_figt1(xp->lu.x)) return(TRUE); }
1071 break;
1072 default: __case_terr(__FILE__, __LINE__);
1073 }
1074 return(FALSE);
1075 }
1076
/*
 * additional prep and checking of wide continuous assigns after fi known
 *
 * rules for delay and fi combinations of drivers and rhs save expr.:
 *  fi==1, delay =0 => driver access by loading net, do not need rhs val wp
 *                     because no sched. value to re-eval
 *                     (eval and store)
 *  fi==1, delay >0 => driver access by loading net, has rhs val wp
 *                     because need to re-eval expr. after delay for assign
 *                     and so expr. no changes can be killed off early
 *  fi>1, delay =0  => driver access just by loading rhs, may need to save
 *                     rhs val save
 *  fi>1, delay >0  => need to save driver wp and need rhs val wp for sched.
 *
 * notice delays prepared before here
 */
extern void __prep_contas(void)
{
 register int32 i, bi;
 register struct conta_t *cap, *pbcap;
 int32 cai, insts, derrtyp;
 struct mod_t *mdp;
 char s1[RECLEN];

 /* walk every conta of every module type - storage is per module type */
 for (mdp = __modhdr; mdp != NULL; mdp = mdp->mnxt)
  {
   for (cap = &(mdp->mcas[0]), cai = 0; cai < mdp->mcanum; cai++, cap++)
    {
     insts = mdp->flatinum;

     /* first check and eliminate getpat conta form */
     /* has neither rhsval wp or driver wp and no delay or error */
     /* SJM 09/28/02 - get pat lhs never PB decomposed rhs concat form */
     if (cap->lhsx->getpatlhs)
      { getpat_lhs_figt1(mdp, cap->lhsx, cap); continue; }

     /* need to eval rhs because of delay bit */
     /* know 6 and 12 not possible here but is 4x possible here ? */
     switch ((byte) cap->ca_delrep) {
      case DT_4V: case DT_IS4V: case DT_IS4V1: case DT_IS4V2: case DT_4X:
       cap->ca_4vdel = TRUE;
       break;
      default: cap->ca_4vdel = FALSE;
     }

     if (!cap->ca_pb_sim)
      {
       /* need to save rhs driver for any fi > 1 case either because driver */
       /* different than rhs (delay > 0) or to avoid possible re-evaluation */
       /* of rhs func. call also need to save if has delay for accurate */
       /* inertial delay algorithm */
       if (cap->lhsx->x_multfi || cap->ca_delrep != DT_NONE)
        __allocinit_perival(&cap->ca_drv_wp, insts, cap->lhsx->szu.xclen,
         TRUE);

       /* if conta has delay, need scheduled event array */
       /* fi >1 but no delay does not need schedule event table */
       if (cap->ca_delrep != DT_NONE)
        {
         __allocinit_perival(&(cap->schd_drv_wp), insts,
          cap->lhsx->szu.xclen, TRUE);
         /* per instance pending event index - -1 marks none scheduled */
         cap->caschd_tevs = (i_tev_ndx *)
          __my_malloc(insts*sizeof(i_tev_ndx));
         for (i = 0; i < insts; i++) cap->caschd_tevs[i] = -1;
        }
       /* need to check for variable on both sides, even if 0 delay */
       derrtyp = __chk_0del(cap->ca_delrep, cap->ca_du, mdp);
       chk_decl_siderep(cap->lhsx, cap->rhsx, "continuous assign", derrtyp,
        cap->casym->syfnam_ind, cap->casym->sylin_cnt);
      }
     else
      {
       /* per bit decomposed conta - same prep for each 1 bit sub conta */
       derrtyp = __chk_0del(cap->ca_delrep, cap->ca_du, mdp);
       for (bi = 0; bi < cap->lhsx->szu.xclen; bi++)
        {
         pbcap = &(cap->pbcau.pbcaps[bi]);
         /* DBG remove -- */
         if (pbcap->lhsx->szu.xclen != 1) __misc_terr(__FILE__, __LINE__);
         /* --- */
         /* if conta lhs expr fi>1, then all bit must be and need drv wp */
         if (cap->lhsx->x_multfi || cap->ca_delrep != DT_NONE)
          __allocinit_perival(&(pbcap->ca_drv_wp), insts, 1, TRUE);
         if (cap->ca_delrep != DT_NONE)
          {
           __allocinit_perival(&(pbcap->schd_drv_wp), insts, 1, TRUE);
           pbcap->caschd_tevs = (i_tev_ndx *)
            __my_malloc(insts*sizeof(i_tev_ndx));
           for (i = 0; i < insts; i++) pbcap->caschd_tevs[i] = -1;
          }
         /* need to check for variable on both sides, even if 0 delay */
         sprintf(s1, "per bit %d continuous assign", bi);
         chk_decl_siderep(pbcap->lhsx, pbcap->rhsx, s1, derrtyp,
          cap->casym->syfnam_ind, cap->casym->sylin_cnt);
        }
      }
    }
  }
}
1175
1176 /*
1177 * initialize and maybe allocate the continuous assign sched. and drive tabs
1178 *
1179 * this is always used for mod port input and output half drivers using
1180 * dummy conta for union place holder
1181 *
1182 * initial value is z since needed for inout port in and out drivers
1183 * think also better for conta but for conta's always overwritten?
1184 * this is only for non strength case - stren must be initialized
1185 * to strength wire type init value
1186 */
__allocinit_perival(union pck_u * nvap,int32 insts,int32 blen,int32 nd_alloc)1187 extern void __allocinit_perival(union pck_u *nvap, int32 insts, int32 blen,
1188 int32 nd_alloc)
1189 {
1190 int32 totchars, wlen;
1191
1192 if (blen == 1)
1193 {
1194 totchars = insts;
1195 if (nd_alloc) nvap->bp = (byte *) __my_malloc(totchars);
1196 set_byteval_(nvap->bp, insts, 2);
1197 }
1198 else
1199 {
1200 wlen = wlen_(blen);
1201 totchars = 2*insts*wlen*WRDBYTES;
1202 if (nd_alloc) nvap->wp = (word32 *) __my_malloc(totchars);
1203 /* initialize to z - do not know driver - sim init will set */
1204 __init_vec_var(nvap->wp, insts, wlen, blen, 0L, 0xffffffffL);
1205 }
1206 }
1207
1208 /*
1209 * allocate (if flag T) and initialize strength perinst value
1210 * this is for tran channels
1211 */
__allocinit_stperival(union pck_u * nvap,int32 insts,struct net_t * np,int32 nd_alloc)1212 extern void __allocinit_stperival(union pck_u *nvap, int32 insts,
1213 struct net_t *np, int32 nd_alloc)
1214 {
1215 int32 totbits, stval;
1216 byte sval;
1217
1218 totbits = insts*np->nwid;
1219 if (nd_alloc) nvap->bp = (byte *) __my_malloc(totbits);
1220 /* for trireg in tran channel, hard driver must be z - it will set value */
1221 if (np->ntraux != NULL && np->ntyp == N_TRIREG) stval = ST_HIZ;
1222 else __get_initval(np, &stval);
1223 sval = (byte) stval;
1224 set_byteval_(nvap->bp, totbits, sval);
1225 }
1226
1227 /*
1228 * check a lhs expr. and emit error for every wire that has fi > 1
1229 * message for getpat only
1230 * LOOKATME - should bit select of scalared wire be legal for getpattern
1231 */
getpat_lhs_figt1(struct mod_t * mdp,struct expr_t * lhsx,struct conta_t * cap)1232 static void getpat_lhs_figt1(struct mod_t *mdp, struct expr_t *lhsx,
1233 struct conta_t *cap)
1234 {
1235 struct net_t *np;
1236
1237 switch ((byte) lhsx->optyp) {
1238 case OPEMPTY: break;
1239 case ID:
1240 case GLBREF:
1241 np = lhsx->lu.sy->el.enp;
1242 if (np->n_multfi)
1243 {
1244 /* wire for getpat will be scalar or will not get this far */
1245 __gferr(858, cap->casym->syfnam_ind, cap->casym->sylin_cnt,
1246 "$getpattern lvalue wire %s more than one driver illegal - no way to removebus contention",
1247 __to_idnam(lhsx));
1248 }
1249 if (np->ntraux != NULL)
1250 {
1251 __gferr(858, cap->casym->syfnam_ind, cap->casym->sylin_cnt,
1252 "$getpattern lvalue wire %s inout port or tran connection illegal",
1253 __to_idnam(lhsx));
1254 }
1255 if (np->nrngrep == NX_DWIR)
1256 {
1257 __gferr(938, cap->casym->syfnam_ind, cap->casym->sylin_cnt,
1258 "$getpattern lvalue wire %s delay or path destination illegal",
1259 __to_idnam(lhsx));
1260 }
1261 break;
1262 case LCB:
1263 /* know for lhs at most 1 level of concatenate */
1264 {
1265 struct expr_t *xp2;
1266 for (xp2 = lhsx->ru.x; xp2 != NULL; xp2 = xp2->ru.x)
1267 getpat_lhs_figt1(mdp, xp2->lu.x, cap);
1268 }
1269 break;
1270 default: __case_terr(__FILE__, __LINE__);
1271 }
1272 }
1273
/*
 * check continous assign or gate for same variable on both sides
 * if delay then inform, if no delay warn - probable inf. loop
 *
 * possible for 2 same wire globals to be on both sides but not caught here
 *
 * deltyp is the __chk_0del delay classification for the object
 */
static void chk_decl_siderep(struct expr_t *lhsx, struct expr_t *rhsx,
 char *objnam, int32 deltyp, word32 fnind, int32 lcnt)
{
 int32 nd_inform, wire_issel;
 struct expr_t *ndp;
 struct net_t *np;
 char s1[RECLEN];

 /* expect rhs to be wider */
 /* nd_inform stays T for rhs select forms - they only get an inform */
 nd_inform = TRUE;
 switch (rhsx->optyp) {
  case ID:
   nd_inform = FALSE;
   np = rhsx->lu.sy->el.enp;
cmp_wire:
   /* nothing to report unless the rhs wire also appears in lhs expr */
   /* wire_issel set T if lhs occurrence is a bit/part select */
   if (!find_var_in_xpr(lhsx, np, &wire_issel)) break;

   /* classify delay for the message text - the no-delay forms keep */
   /* nd_inform F so a full wire repeat gets the stronger warn */
   if (deltyp == DBAD_NONE) { strcpy(s1, "no delay"); nd_inform = FALSE; }
   else if (deltyp == DBAD_EXPR || deltyp == DBAD_MAYBE0)
    { strcpy(s1, "possible 0 delay"); nd_inform = FALSE; }
   else if (deltyp == DBAD_0)
    { strcpy(s1, "all 0 delay"); nd_inform = FALSE; }
   else strcpy(s1, "delay");

   /* real delay, rhs select, or lhs select: inform only - else warn */
   if (nd_inform || wire_issel)
    __gfinform(444, fnind, lcnt,
     "wire %s repeated on both sides of %s - has %s", np->nsym->synam,
     objnam, s1);
   else __gfwarn(624, fnind, lcnt,
    "wire %s repeated on both sides of %s - has %s", np->nsym->synam, objnam,
    s1);
   break;
  case LSB: case PARTSEL:
   /* select - compare the underlying wire */
   np = rhsx->lu.x->lu.sy->el.enp;
   goto cmp_wire;
  case LCB:
   /* concatenate - check each component against lhs separately */
   for (ndp = rhsx->ru.x; ndp != NULL; ndp = ndp->ru.x)
    chk_decl_siderep(lhsx, ndp->lu.x, objnam, deltyp, fnind, lcnt);
   break;
  case FCALL:
   /* function call - check each argument expr against lhs */
   for (ndp = rhsx->ru.x; ndp != NULL; ndp = ndp->ru.x)
    chk_decl_siderep(lhsx, ndp->lu.x, objnam, deltyp, fnind, lcnt);
 }
}
1324
1325 /*
1326 * find a variable in an expr.
1327 * if same variable but global xmr, not a match
1328 */
find_var_in_xpr(struct expr_t * xp,struct net_t * np,int32 * wire_sel)1329 static int32 find_var_in_xpr(struct expr_t *xp, struct net_t *np,
1330 int32 *wire_sel)
1331 {
1332 register struct expr_t *ndp;
1333 struct net_t *npx;
1334
1335 if (np->ntyp >= NONWIRE_ST) return(FALSE);
1336
1337 *wire_sel = TRUE;
1338 switch (xp->optyp) {
1339 case ID:
1340 *wire_sel = FALSE;
1341 npx = xp->lu.sy->el.enp;
1342 comp_net:
1343 return(np == npx);
1344 case LSB: case PARTSEL:
1345 npx = xp->lu.x->lu.sy->el.enp;
1346 goto comp_net;
1347 case LCB:
1348 for (ndp = xp->ru.x; ndp != NULL; ndp = ndp->ru.x)
1349 { if (find_var_in_xpr(ndp->lu.x, np, wire_sel)) return(TRUE); }
1350 break;
1351 case FCALL:
1352 for (ndp = xp->ru.x; ndp != NULL; ndp = ndp->ru.x)
1353 { if (find_var_in_xpr(ndp->lu.x, np, wire_sel)) return(TRUE); }
1354 }
1355 return(FALSE);
1356 }
1357
1358 /*
1359 * ROUTINES TO DYNAMICALLY SET UP MIPDS
1360 */
1361
/*
 * allocate, initialize, and link in NP MIPD load delay npp for a net
 *
 * reinit does not turn off SDF annotated delays - if task called
 * with replace form works, increment adds
 *
 * allocates one mipd_t per bit of np, all initially marked "no mipd"
 */
extern void __add_alloc_mipd_npp(struct net_t *np, struct mod_t *mdp)
{
 register int32 bi;
 int32 ii;
 struct net_pin_t *npp;
 struct mipd_t *mipdp;

 /* DBG remove --- */
 /* front of load list must not already be a mipd npp */
 if (np->nlds != NULL && np->nlds->npntyp == NP_MIPD_NCHG)
  __misc_terr(__FILE__, __LINE__);
 /* --- */

 /* globals pass the net/obj number context to the npin allocator */
 __cur_npnp = np;
 __cur_npnum = 0;
 /* always for entire net - table nil if no MIPD on bit for any inst */
 /* this also inserts on front of list */
 npp = __alloc_npin(NP_MIPD_NCHG, -1, -1);

 if (np->nlds == NULL)
  {
   /* DBG remove -- */
   if (__optimized_sim && !__sdf_from_cmdarg)
    {
     __misc_terr(__FILE__, __LINE__);
    }
   /* -- */

   /* LOOKATME - think since input port will always have load */
   /* add the one new mipd net pin - know np never a reg becaus in/inout */
   np->nlds = npp;

   /* SJM 07/25/01 - was not setting all needed bits right */
   /* need to set the various bits to indicate has load so net changes */
   /* put on nchg list */
   np->nchg_has_lds = TRUE;

   /* when add mipd load, must turn off all chged */
   /* even if dce list was not empty, if match itp dces some action bits */
   /* will be wrongly off */
   /* SJM 01/06/03 - is is possible to only turn on current inst? */
   for (ii = 0; ii < mdp->flatinum; ii++)
    { np->nchgaction[ii] &= ~(NCHG_ALL_CHGED); }
  }
 else { npp->npnxt = np->nlds; np->nlds = npp; }

 /* SJM 07/26/01 - alloc was wrong size - needs to be one per bit */
 npp->elnpp.emipdbits = (struct mipd_t *)
  __my_malloc(np->nwid*sizeof(struct mipd_t));

 /* need basic setup especially turning on no mipd bit for each */
 for (bi = 0; bi < np->nwid; bi++)
  {
   mipdp = &(npp->elnpp.emipdbits[bi]);
   /* BEWARE - this is crucial T bit indicating no path for this bit */
   mipdp->no_mipd = TRUE;
   mipdp->pth_mipd = FALSE;
   mipdp->impthtab = NULL;
   /* rest of fields set if path ends on bit */
  }
 /* SJM 02/06/03 - may have npps but not dces so must turn this on */
 /* SJM 06/23/04 - ### ??? LOOKATME - is this needed without regen? */
 /* since nchg nd chgstore on, know nchg action right */
 if (np->ntyp >= NONWIRE_ST) np->nchg_has_dces = TRUE;
}
1432
1433 /*
1434 * initialize a (PORT form path - first step in annotating either path delay
1435 * value is the (PORT form destination for one bit
1436 */
__setup_mipd(struct mipd_t * mipdp,struct net_t * np,int32 ninsts)1437 extern void __setup_mipd(struct mipd_t *mipdp, struct net_t *np, int32 ninsts)
1438 {
1439 register int32 ii;
1440 int32 stval;
1441 byte bv;
1442
1443 mipdp->no_mipd = FALSE;
1444 mipdp->pb_mipd_delrep = DT_1V;
1445 mipdp->pb_mipd_du.d1v = (word64 *) __my_malloc(sizeof(word64));
1446 /* SJM 07/22/01 - nee to start value at 0 so unset stay as 0 */
1447 mipdp->pb_mipd_du.d1v[0] = 0ULL;
1448
1449 mipdp->oldvals = (byte *) __my_malloc(ninsts);
1450
1451 if (!np->n_stren) bv = (byte) __get_initval(np, &stval);
1452 else { __get_initval(np, &stval); bv = (byte) stval; }
1453 for (ii = 0; ii < ninsts; ii++) mipdp->oldvals[ii] = bv;
1454
1455 mipdp->mipdschd_tevs = (i_tev_ndx *) __my_malloc(ninsts*sizeof(i_tev_ndx));
1456 for (ii = 0; ii < ninsts; ii++) mipdp->mipdschd_tevs[ii] = -1;
1457 if (mipdp->pth_mipd)
1458 {
1459 mipdp->impthtab = (struct impth_t **)
1460 __my_malloc(ninsts*sizeof(struct impth_t *));
1461 for (ii = 0; ii < ninsts; ii++) mipdp->impthtab[ii] = NULL;
1462 }
1463 }
1464
/*
 * re-init all mipds for a port - only called if port has mipds
 * this just reset old value
 *
 * resets per instance old values, cancels event slots, and zeroes
 * intermodule path last change times - no allocation done here
 */
extern void __reinit_mipd(struct mod_pin_t *mpp, struct mod_t *mdp)
{
 register int32 ndx, bi, ii;
 register struct impth_t *impthp;
 int32 stval;
 byte bv;
 struct tenp_t *prtnetmap;
 struct mipd_t *mipdp;
 struct net_t *np;

 prtnetmap = __bld_portbit_netbit_map(mpp);
 /* for every port bit - just reinit connected bits */
 /* other ports will reinit other bits if used */
 for (ndx = 0; ndx < mpp->mpwide; ndx++)
  {
   np = prtnetmap[ndx].tenu.np;
   /* notice ndx is port bit index but bi is connecting net bit index */
   bi = prtnetmap[ndx].nbi;
   /* DBG remove -- */
   /* mipd npp is always at front of the net's load list */
   if (np->nlds == NULL || np->nlds->npntyp != NP_MIPD_NCHG)
    __misc_terr(__FILE__, __LINE__);
   /* -- */
   mipdp = &(np->nlds->elnpp.emipdbits[bi]);

   /* reset old value to the net's init value - for strength nets the */
   /* out param holds the value, else the return value does */
   if (!np->n_stren) bv = (byte) __get_initval(np, &stval);
   else { __get_initval(np, &stval); bv = (byte) stval; }
   for (ii = 0; ii < mdp->flatinum; ii++) mipdp->oldvals[ii] = bv;

   /* cancel any scheduled mipd events - -1 marks empty slot */
   for (ii = 0; ii < mdp->flatinum; ii++) mipdp->mipdschd_tevs[ii] = -1;

   if (mipdp->pth_mipd)
    {
     /* only need to re-init change times */
     for (ii = 0; ii < mdp->flatinum; ii++)
      {
       impthp = mipdp->impthtab[ii];
       for (; impthp != NULL; impthp = impthp->impthnxt)
        { impthp->lastchg = 0ULL; }
      }
    }
  }
 __my_free((char *) prtnetmap, mpp->mpwide*sizeof(struct tenp_t));
}
1512
1513 /*
1514 * access mipd from port and port index
1515 *
1516 * for scalar ndx passed as 0 not -1 here
1517 * only called if port has mipd
1518 */
__get_mipd_from_port(struct mod_pin_t * mpp,int32 ndx)1519 extern struct mipd_t *__get_mipd_from_port(struct mod_pin_t *mpp, int32 ndx)
1520 {
1521 int32 bi;
1522 struct mipd_t *mipdp;
1523 struct tenp_t *prtnetmap;
1524 struct net_t *np;
1525
1526 prtnetmap = __bld_portbit_netbit_map(mpp);
1527 np = prtnetmap[ndx].tenu.np;
1528 bi = prtnetmap[ndx].nbi;
1529 mipdp = &(np->nlds->elnpp.emipdbits[bi]);
1530 __my_free((char *) prtnetmap, mpp->mpwide*sizeof(struct tenp_t));
1531 return(mipdp);
1532 }
1533
1534 /*
1535 * for a lhs port, malloc and build tenp map from port bit to net bit
1536 * know port always lhs here
1537 */
__bld_portbit_netbit_map(struct mod_pin_t * mpp)1538 extern struct tenp_t *__bld_portbit_netbit_map(struct mod_pin_t *mpp)
1539 {
1540 register struct expr_t *catxp;
1541 int32 pi;
1542 struct tenp_t *prtnetmap;
1543
1544 prtnetmap = (struct tenp_t *) __my_malloc(mpp->mpwide*sizeof(struct tenp_t));
1545
1546 /* concatenate - add each component */
1547 if (mpp->mpref->optyp == LCB)
1548 {
1549 pi = mpp->mpwide - 1;
1550 for (catxp = mpp->mpref->ru.x; catxp != NULL; catxp = catxp->ru.x)
1551 {
1552 pi -= catxp->lu.x->szu.xclen;
1553 add_portbit_map(prtnetmap, catxp->lu.x, pi);
1554 }
1555 return(prtnetmap);
1556 }
1557 /* add simple port to map */
1558 add_portbit_map(prtnetmap, mpp->mpref, 0);
1559 return(prtnetmap);
1560 }
1561
/*
 * for a lhs port, add non concat lhs port to port to bit map
 * notice nbi index is 0 for scalar
 *
 * base_pi is the low port bit index this component starts at
 */
static void add_portbit_map(struct tenp_t *prtnetmap, struct expr_t *xp,
 int32 base_pi)
{
 register int32 pi, bi;
 int32 psi1, psi2;
 word32 *wp;
 struct net_t *np;
 struct expr_t *ndx;

 switch ((byte) xp->optyp) {
  case ID:
   np = xp->lu.sy->el.enp;
   if (!np->n_isavec)
    {
     /* scalar - single map entry with net bit index 0 */
     prtnetmap[base_pi].tenu.np = np;
     prtnetmap[base_pi].nbi = 0;
    }
   else
    {
     /* full vector - net bit i maps to port bit base_pi + i */
     for (pi = base_pi; pi < base_pi + xp->szu.xclen; pi++)
      { prtnetmap[pi].tenu.np = np; prtnetmap[pi].nbi = pi - base_pi; }
    }
   break;
  case OPEMPTY:
   /* unconnected port component - nil net entries */
   for (pi = base_pi; pi < base_pi + xp->szu.xclen; pi++)
    {
     /* LOOKATME - index illegal -1 here since think won't occur */
     prtnetmap[pi].tenu.np = NULL;
     prtnetmap[pi].nbi = -1;
    }
   break;
  case PARTSEL:
   np = xp->lu.x->lu.sy->el.enp;
   ndx = xp->ru.x;
   /* lu.x is high index constant, ru.x is low index constant */
   wp = &(__contab[ndx->lu.x->ru.xvi]);
   psi1 = (int32) wp[0];
   wp = &(__contab[ndx->ru.x->ru.xvi]);
   psi2 = (int32) wp[0];
   /* NOTE(review): psi1 (high index) is computed but unused - mapping is */
   /* driven by low bit psi2 plus the expr width - confirm intended */
   /* part select always constant */
   for (pi = base_pi, bi = psi2; pi < base_pi + xp->szu.xclen; pi++, bi++)
    {
     prtnetmap[pi].tenu.np = np;
     prtnetmap[pi].nbi = bi;
    }
   break;
  case LSB:
   /* LOOKATME - think IS bit selects always split before here */
   /* DBG remove */
   if (xp->ru.x->optyp != NUMBER) __misc_terr(__FILE__, __LINE__);
   /* --- */
   np = xp->lu.x->lu.sy->el.enp;
   ndx = xp->ru.x;
   wp = &(__contab[ndx->ru.xvi]);
   psi1 = (int32) wp[0];
   /* single bit select - one map entry at the selected net bit */
   prtnetmap[base_pi].tenu.np = np;
   prtnetmap[base_pi].nbi = psi1;
   break;
  /* since know only 1 level, removed before here */
  case LCB: __case_terr(__FILE__, __LINE__);
  /* xmr can't connect to port */
  default: __case_terr(__FILE__, __LINE__);
 }
}
1629
1630 /*
1631 * ROUTINES TO ALLOCATE NCHG ACTION STORAGE
1632 */
1633
1634 /*
1635 * allocate nchg byte table and set nchgaction net ptrs
1636 * even for cver-cc compiler, nchg storage malloc since access through net
1637 *
1638 * 08/22/02 - always need nchg store for dmpvars
1639 * FIXME - should align ptrs at least on 4 byte boundaries
1640 */
__alloc_nchgaction_storage(void)1641 extern void __alloc_nchgaction_storage(void)
1642 {
1643 register int32 ni;
1644 register struct net_t *np;
1645 struct mod_t *mdp;
1646 struct task_t *tskp;
1647
1648 __nchgbtabbsiz = 0;
1649 cmp_nchgbtabsize();
1650 if (__nchgbtabbsiz > 0)
1651 {
1652 __nchgbtab = (byte *) __my_malloc(__nchgbtabbsiz);
1653 }
1654 __nchgbtabbi = 0;
1655 for (mdp = __modhdr; mdp != NULL; mdp = mdp->mnxt)
1656 {
1657 if (mdp->mnnum != 0)
1658 {
1659 for (ni = 0, np = &(mdp->mnets[0]); ni < mdp->mnnum; ni++, np++)
1660 {
1661 /* always need the changed byte array even for event */
1662 np->nchgaction = (byte *) &(__nchgbtab[__nchgbtabbi]);
1663 /* set to zero for now - initialize after lds/dces added */
1664 memset(np->nchgaction, 0, mdp->flatinum);
1665 __nchgbtabbi += mdp->flatinum;
1666 }
1667 }
1668 for (tskp = mdp->mtasks; tskp != NULL; tskp = tskp->tsknxt)
1669 {
1670 if (tskp->trnum == 0) continue;
1671
1672 for (ni = 0, np = &(tskp->tsk_regs[0]); ni < tskp->trnum; ni++, np++)
1673 {
1674 /* always need the changed byte array even for event */
1675 np->nchgaction = (byte *) &(__nchgbtab[__nchgbtabbi]);
1676 /* set to zero for now - initialize after dces/lds built */
1677 memset(np->nchgaction, 0, mdp->flatinum);
1678 __nchgbtabbi += mdp->flatinum;
1679 }
1680 }
1681 }
1682 }
1683
1684 /*
1685 * compute size of needed nchg byte table in bytes
1686 * also sets srep since always called
1687 */
cmp_nchgbtabsize(void)1688 static void cmp_nchgbtabsize(void)
1689 {
1690 register int32 ni;
1691 register struct net_t *np;
1692 struct mod_t *mdp;
1693 struct task_t *tskp;
1694
1695 for (mdp = __modhdr; mdp != NULL; mdp = mdp->mnxt)
1696 {
1697 if (mdp->mnnum != 0)
1698 {
1699 for (ni = 0, np = &(mdp->mnets[0]); ni < mdp->mnnum; ni++, np++)
1700 {
1701 /* storage for byte per inst. all changed table */
1702 /* change state bytes go into scalar (byte) storage table */
1703 __nchgbtabbsiz += mdp->flatinum;
1704
1705 set_1net_srep(np);
1706 }
1707 }
1708 for (tskp = mdp->mtasks; tskp != NULL; tskp = tskp->tsknxt)
1709 {
1710 if (tskp->trnum == 0) continue;
1711 for (ni = 0, np = &(tskp->tsk_regs[0]); ni < tskp->trnum; ni++, np++)
1712 {
1713 /* always need the changed byte array even for event */
1714 /* change state bytes go into scalar (byte) storage table */
1715 __nchgbtabbsiz += mdp->flatinum;
1716
1717 set_1net_srep(np);
1718 }
1719 }
1720 }
1721 }
1722
1723 /*
1724 * set the storage rep type for one net
1725 *
1726 * SJM 05/04/05 - since not calling alloc storage routine for cver-cc,
1727 * must set sreps in separate routine called from nchg routines that
1728 * are always called
1729 */
set_1net_srep(struct net_t * np)1730 static void set_1net_srep(struct net_t *np)
1731 {
1732 if (np->ntyp == N_EVENT) return;
1733
1734 if (np->n_isarr) { np->srep = SR_ARRAY; return; }
1735 if (np->ntyp == N_REAL) { np->srep = SR_VEC; return; }
1736 if (!np->n_isavec)
1737 {
1738 if (np->n_stren) np->srep = SR_SSCAL; else np->srep = SR_SCAL;
1739 return;
1740 }
1741 if (!np->n_stren) np->srep = SR_VEC; else np->srep = SR_SVEC;
1742 }
1743
/*
 * initialize nchg action byte table for all modules
 *
 * always assume dumpvars off also after reset (re-initialize)
 * this must be called after dces reinited
 *
 * per instance bytes get NCHG_DMPVNOTCHGED plus possibly NCHG_ALL_CHGED,
 * and the per net has-lds/has-dces/nd-chgstore fields are derived here
 */
extern void __set_nchgaction_bits(void)
{
 register int32 ii, ni;
 register struct net_t *np;
 struct mod_t *mdp;
 struct task_t *tskp;

 for (mdp = __modhdr; mdp != NULL; mdp = mdp->mnxt)
  {
   for (ni = 0, np = &(mdp->mnets[0]); ni < mdp->mnnum; ni++, np++)
    {
     for (ii = 0; ii < mdp->flatinum; ii++)
      {
       /* start with all dumpvars off - now independent of var chg */
       /* all all other fields 0 off */
       np->nchgaction[ii] = NCHG_DMPVNOTCHGED;

       if (np->ntyp >= NONWIRE_ST)
        {
         /* SJM - 07/01/00 - for regs, if no lds, all var insts stay */
         /* all chged and never record */
         if (np->nlds == NULL) np->nchgaction[ii] |= NCHG_ALL_CHGED;
        }
       else
        {
         /* SJM 07/24/00 - for wires, if has dces not all changed */
         if (np->nlds == NULL && (np->dcelst == NULL
          || __cnt_dcelstels(np->dcelst) == 0))
          np->nchgaction[ii] |= NCHG_ALL_CHGED;
        }
      }
     /* SJM - 07/01/00 - set various per variable bits in 0th element */
     /* SJM - 03/15/01 - change to fields in net record */
     if (np->nlds != NULL) np->nchg_has_lds = TRUE;

     /* SJM 07/24/00 - only nchg has dces on for regs immediate prop/wakeup */
     if (np->ntyp >= NONWIRE_ST && np->dcelst != NULL)
      np->nchg_has_dces = TRUE;

     /* SJM REMOVEME */
     /* ---
     if (np->nlds == NULL && np->dcelst == NULL && np->dmpv_in_src)
      __misc_terr(__FILE__, __LINE__);
     -- */

     /* if any lds, dces or dmpvs, need chg store */
     if (np->nlds != NULL || np->dcelst != NULL || np->dmpv_in_src)
      np->nchg_nd_chgstore = TRUE;
    }

   /* task/function local regs get the same treatment - always reg form */
   for (tskp = mdp->mtasks; tskp != NULL; tskp = tskp->tsknxt)
    {
     for (ni = 0, np = &(tskp->tsk_regs[0]); ni < tskp->trnum; ni++, np++)
      {
       for (ii = 0; ii < mdp->flatinum; ii++)
        {
         /* start with all dumpvars off - now independent of var chg */
         /* all all other fields 0 off */
         np->nchgaction[ii] = NCHG_DMPVNOTCHGED;

         /* SJM - 07/01/00 - if no lds, all var insts stay all chged */
         /* now all chged only for loads */
         /* SJM 07/24/00 - here since know reg, all chged if no lds */
         if (np->nlds == NULL) np->nchgaction[ii] |= NCHG_ALL_CHGED;
        }
       /* SJM - 07/01/00 - set various per var bits in 0th element */
       /* SJM 03/15/01 - change to fields in net record */
       if (np->nlds != NULL) np->nchg_has_lds = TRUE;

       /* SJM 07/24/00 - only nchg has dces on for regs but task vars regs */
       if (np->dcelst != NULL) np->nchg_has_dces = TRUE;

       /* if any lds, dces or dmpvs, need chg store */
       if (np->nlds != NULL || np->dcelst != NULL || np->dmpv_in_src)
        np->nchg_nd_chgstore = TRUE;
      }
    }
  }
}
1829
1830 /*
1831 * set computed optimtab bits for all vars in entire design
1832 */
__set_optimtab_bits(void)1833 extern void __set_optimtab_bits(void)
1834 {
1835 register int32 ni;
1836 register struct net_t *np;
1837 struct mod_t *mdp;
1838 struct task_t *tskp;
1839
1840 for (mdp = __modhdr; mdp != NULL; mdp = mdp->mnxt)
1841 {
1842 for (ni = 0, np = &(mdp->mnets[0]); ni < mdp->mnnum; ni++, np++)
1843 {
1844 /* assume need chg store */
1845 if (__dv_allform_insrc || mdp->mod_dvars_in_src)
1846 {
1847 np->dmpv_in_src = TRUE;
1848 np->nchg_nd_chgstore = TRUE;
1849 }
1850 }
1851
1852 for (tskp = mdp->mtasks; tskp != NULL; tskp = tskp->tsknxt)
1853 {
1854 for (ni = 0, np = &(tskp->tsk_regs[0]); ni < tskp->trnum; ni++, np++)
1855 {
1856 if (__dv_allform_insrc || mdp->mod_dvars_in_src)
1857 {
1858 np->dmpv_in_src = TRUE;
1859 np->nchg_nd_chgstore = TRUE;
1860 }
1861 }
1862 }
1863 }
1864 }
1865
1866 /*
1867 * ROUTINES TO ALLOCATE VARIABLE STORAGE
1868 */
1869
1870 /*
1871 * allocate storage for all simulation variables
1872 */
extern void __alloc_sim_storage(void)
{
 register int32 ni;
 register struct net_t *np;
 struct mod_t *mdp;
 struct task_t *tskp;

 /* always calculate var storage size - but only emit for interpreter */
 __wtabwsiz = 0;
 __btabbsiz = 0;
 /* accumulates needed sizes into __btabbsiz/__wtabwsiz (arrays excluded) */
 cmp_tabsizes();

 /* for cver-cc, gen .comm lable in bss section */
 /* LOOKATME - is 0 storage size possible - think yes */
 if (__btabbsiz > 0)
  {
   __btab = (byte *) __my_malloc(__btabbsiz);
  }
 if (__wtabwsiz > 0)
  {
   __wtab = (word32 *) __my_malloc(__wtabwsiz*sizeof(word32));
  }
 /* running carve indices into the design wide byte and word tables */
 __wtabwi = 0;
 __btabbi = 0;
 for (mdp = __modhdr; mdp != NULL; mdp = mdp->mnxt)
  {
   __push_wrkitstk(mdp, 0);

   if (__inst_mod->mnnum != 0)
    {
     for (ni = 0, np = &(__inst_mod->mnets[0]); ni < __inst_mod->mnnum;
      ni++, np++)
      {
       /* no allocated storage here for parameters - in different list */
       /* also none for events (if dce will be allocated when used) */
       if (np->ntyp == N_EVENT) continue;

       /* for now always zeroing variables - when x/z */
       /* could free later if no fan-in and no fan-out */
       alloc_var(np);
      }
    }
   for (tskp = __inst_mod->mtasks; tskp != NULL; tskp = tskp->tsknxt)
    {
     if (tskp->trnum == 0) continue;
     for (ni = 0, np = &(tskp->tsk_regs[0]); ni < tskp->trnum; ni++, np++)
      {
       if (np->ntyp == N_EVENT) continue;

       /* for now always zeroing variables - when x/z */
       alloc_var(np);
      }
    }
   __pop_wrkitstk();
  }
}
1929
1930 /*
1931 * compute size of needed startage tables
1932 *
1933 * PORTABILITY FIXME - maybe need 8 byte alignment for ptr too?
1934 * SJM 05/02/05 - now setting net srep here
1935 */
cmp_tabsizes(void)1936 static void cmp_tabsizes(void)
1937 {
1938 register int32 ni;
1939 register struct net_t *np;
1940 struct mod_t *mdp;
1941 struct task_t *tskp;
1942
1943 for (mdp = __modhdr; mdp != NULL; mdp = mdp->mnxt)
1944 {
1945 __push_wrkitstk(mdp, 0);
1946
1947 if (__inst_mod->mnnum != 0)
1948 {
1949 for (ni = 0, np = &(__inst_mod->mnets[0]); ni < __inst_mod->mnnum;
1950 ni++, np++)
1951 {
1952 /* no allocated storage here for parameters - in different list */
1953
1954 /* also none for events (if dce will be allocated when used) */
1955 if (np->ntyp == N_EVENT) continue;
1956
1957 /* because arrays may be large, must really alloc - so not counted */
1958 if (np->n_isarr) continue;
1959
1960 cmpadd_1var_storsiz(np);
1961 }
1962 }
1963 for (tskp = __inst_mod->mtasks; tskp != NULL; tskp = tskp->tsknxt)
1964 {
1965 if (tskp->trnum == 0) continue;
1966 for (ni = 0, np = &(tskp->tsk_regs[0]); ni < tskp->trnum; ni++, np++)
1967 {
1968 if (np->ntyp == N_EVENT) continue;
1969
1970 /* no allocated storage here for parameters - in different list */
1971 /* SJM 05/02/05 - was previously counting task arrays as part of */
1972 /* the tab storage but still mallocing for interpreter */
1973 if (np->n_isarr) continue;
1974
1975 /* for now always zeroing variables - when x/z */
1976 cmpadd_1var_storsiz(np);
1977 }
1978 }
1979 __pop_wrkitstk();
1980 }
1981 }
1982
1983 /*
1984 * computer size for one variable
1985 *
1986 * never called for memories
1987 */
cmpadd_1var_storsiz(struct net_t * np)1988 static void cmpadd_1var_storsiz(struct net_t *np)
1989 {
1990 register int32 insts;
1991
1992 /* allocate array of srep structs for each inst. */
1993 insts = __inst_mod->flatinum;
1994
1995 /* 05/03/05 - reals now just put in wtab (take 2 words) */
1996 if (np->ntyp == N_REAL) { __wtabwsiz += 2*insts; return; }
1997
1998 /* compute needed size in bits */
1999 /* non vectors */
2000 if (!np->n_isavec)
2001 {
2002 /* SJM 10/16/99 - now scalars always 1 byte even non strength */
2003 __btabbsiz += insts;
2004 }
2005 else
2006 {
2007 if (!np->n_stren)
2008 {
2009 /* hard non strength packed vector case - bits later converted to wrds */
2010 /* SJM 12/16/99 - now packed vector packs from 2 to 16 bits into 1 word */
2011 /* SJM 07/15/00 - now for vars only bits packed into bytes */
2012 __wtabwsiz += 2*wlen_(np->nwid)*insts;
2013 }
2014 else
2015 {
2016 /* strength vector 1 byte per bit case */
2017 __btabbsiz += insts*np->nwid;
2018 }
2019 }
2020 }
2021
2022 /*
2023 * allocate storage for a variable
2024 * know at this point storage form is compile (ct) union member
2025 * allocation here is module specific - all inst here indistinguishable
2026 * but at this point types determine initial values
2027 */
alloc_var(struct net_t * np)2028 static void alloc_var(struct net_t *np)
2029 {
2030 int32 insts;
2031
2032 /* allocate array of srep structs for each inst. */
2033 insts = __inst_mod->flatinum;
2034
2035 /* need to handle REAL as special case - has special representation */
2036 /* and now real can be array */
2037 if (np->ntyp == N_REAL) { alloc_real_var(np, insts); return; }
2038
2039 if (np->n_isarr) __allocinit_arr_var(np, insts, TRUE);
2040 else if (!np->n_isavec)
2041 {
2042 if (!np->n_stren) alloc_scal_var(np, insts);
2043 else alloc_sscal_var(np, insts);
2044 }
2045 else
2046 {
2047 if (!np->n_stren) __allocinit_vec_var(np, insts, TRUE);
2048 else alloc_svec_var(np, insts);
2049 }
2050 }
2051
2052 /*
2053 * initialize a real variable
2054 */
static void alloc_real_var(struct net_t *np, int32 insts)
{
 register int32 i;
 int32 arrw, totchars;
 double *dp;

 /* case 1: new real array - must be malloced */
 /* must malloc arrays because they can be large */
 if (np->n_isarr)
  {
   arrw = __get_arrwide(np);
   /* NOTE(review): this allocates 2*WRDBYTES*wlen_(REALBITS) bytes per */
   /* cell per inst although the init loop below only writes arrw*insts */
   /* doubles - presumably sized to match the a/b part layout of other */
   /* vars - confirm before changing */
   totchars = arrw*(2*WRDBYTES*insts*wlen_(REALBITS));
   np->nva.wp = (word32 *) __my_malloc(totchars);
   /* reals arrays contiguous a/b 8 bytes with no x/z */
   dp = np->nva.dp;
   for (i = 0; i < arrw*insts; i++)
    {
     *dp++ = 0.0;
    }
   __arrvmem_use += totchars;
   return;
  }

 /* case 2 non array - carve 2 words per inst out of design word table */
 np->nva.wp = (word32 *) &(__wtab[__wtabwi]);
 dp = np->nva.dp;
 for (i = 0; i < insts; i++) *dp++ = 0.0;
 __wtabwi += 2*insts;
}
2084
2085 /*
2086 * allocate all instances for a array var
2087 *
2088 * notice for now to access must call get packbits - store somewhere
2089 * never need to access nva through stu strength union because array cannot
2090 * have strength
2091 *
2092 * initialization easy since know arrays are registers that are always
2093 * initialized to x's
2094 * notice this routine is somewhat dependent on 32 bit words
2095 */
extern void __allocinit_arr_var(struct net_t *np, int32 insts,
 int32 nd_alloc)
{
 register int32 i;
 int32 arrw, wlen, totchars, elwlen, totcells;
 word32 *rap, mask;

 totchars = 0;
 /* arrw is number of cells in memory */
 arrw = __get_arrwide(np);
 /* case 1, each cell is a scalar */
 if (!np->n_isavec)
  {
   /* 2 bits (a and b part) per cell per inst, packed into words */
   wlen = wlen_(2*arrw*insts);
   if (nd_alloc)
    {
     totchars = WRDBYTES*wlen;
     np->nva.wp = (word32 *) __my_malloc(totchars);
     __arrvmem_use += totchars;
    }
   /* notice packed densly, index by cell array of 2 bit cells */
   /* all ones is the x initial value for both a and b parts */
   for (i = 0; i < wlen; i++) np->nva.wp[i] = ALL1W;
   /* must mask off unused bits in last word32 */
   np->nva.wp[wlen - 1] &= __masktab[ubits_(2*arrw*insts)];
   goto done;
  }

 /* case 2: each cell cannot be packed */
 if (np->nwid > WBITS/2)
  {
   if (nd_alloc)
    {
     wlen = arrw*wlen_(np->nwid);
     totchars = 2*WRDBYTES*insts*wlen;
     np->nva.wp = (word32 *) __my_malloc(totchars);
     __arrvmem_use += totchars;
    }

   /* array is linear array of arrw*insts elements */
   /* each element has 2 elwlen x (1w) regions */
   elwlen = wlen_(np->nwid);
   rap = np->nva.wp;
   totcells = arrw*insts;
   /* set a then b part of each cell to all ones (x), advancing by */
   /* elwlen words each time; loop exits after last cell's b part */
   for (i = 0;;)
    {
     one_allbits_(rap, np->nwid);
     rap = &(rap[elwlen]);
     one_allbits_(rap, np->nwid);
     if (++i >= totcells) break;
     rap = &(rap[elwlen]);
    }
   goto done;
  }
 /* case 3a: packs into byte */
 if (np->nwid <= 4)
  {
   /* each cell has 1 byte */
   if (nd_alloc)
    {
     totchars = arrw*insts;
     np->nva.bp = (byte *) __my_malloc(totchars);
     __arrvmem_use += totchars;
    }
   /* pack into 2 contiguous low bit side sections of byte */
   mask = __masktab[2*np->nwid];
   for (i = 0; i < arrw*insts; i++) np->nva.bp[i] = (byte) mask;
   goto done;
  }
 /* case 3b: packs into half word32 */
 if (np->nwid <= 8)
  {
   if (nd_alloc)
    {
     totchars = 2*arrw*insts;
     np->nva.hwp = (hword *) __my_malloc(totchars);
     __arrvmem_use += totchars;
    }
   mask = __masktab[2*np->nwid];
   for (i = 0; i < arrw*insts; i++) np->nva.hwp[i] = (hword) mask;
   goto done;
  }
 /* case 3c: packs in word32 (width in range (8, WBITS/2]) */
 if (nd_alloc)
  {
   totchars = 4*arrw*insts;
   np->nva.wp = (word32 *) __my_malloc(totchars);
   __arrvmem_use += totchars;
  }
 mask = __masktab[2*np->nwid];
 for (i = 0; i < arrw*insts; i++) np->nva.wp[i] = mask;

done:
 if (__debug_flg && nd_alloc)
  {
   __dbg_msg(
    "==> array %s: %d insts of %d, %d bit per inst. cells uses %d bytes\n",
    np->nsym->synam, insts, arrw, np->nwid, totchars);
  }
}
2195
2196 /*
2197 * allocate all instances for a scalar var
2198 * storage here is 2 contiguous bits for a and b value of scalar
2199 * needed because eliminates need for unavailable total insts value
2200 *
2201 * notice important optimization here 1 bit per non strenth scalar,
2202 * but selection now takes array access, shift, and and
2203 */
/*
 * allocate all instances for a non strength scalar var
 * one byte per instance holding the 2 contiguous a/b value bits
 */
static void alloc_scal_var(struct net_t *np, int32 insts)
{
 int32 initv, stval;

 /* carve this scalar's per inst bytes out of the design wide table */
 np->nva.bp = &(__btab[__btabbi]);
 __btabbi += insts;

 /* set every instance byte to the type determined initial value */
 initv = __get_initval(np, &stval);
 if (initv != 0) set_byteval_(np->nva.bp, insts, initv);
 else memset(np->nva.bp, 0, insts);
}
2217
2218 /*
2219 * determine the initial value and strength for any wire type
2220 * stval is entire 8 bit value - ival returned is low 2 bits only
2221 * strength format is (st0 (7-5), st1 (4-2), val (1-0)
2222 */
extern int32 __get_initval(struct net_t *np, int32 *stval)
{
 int32 ival, sval;

 /* ival is the 2 bit a/b code (0/1 logic, 2 presumably z, 3 x) and */
 /* sval the full 8 bit strength byte: st0 <7:5>, st1 <4:2>, val <1:0> */
 switch ((byte) np->ntyp) {
  case N_WIRE: case N_TRI: case N_TRIAND: case N_WA: case N_TRIOR: case N_WO:
   /* normal wires are z (hiz(0),hiz(0), z) */
   /* SJM 02/16/07 - initialize to x if net has drivers otherwise z */

   /* SJM 03/16/07 - if wire is an inout special case - may have npp bid */
   /* mod port drvrs here, but since inout are in tran channels removed */
   /* later - if all drivers are bid ports, really has no drivers */
   if (np->ndrvs == NULL || all_drvrs_bidirect(np))
    {
     ival = 2;
     sval = ST_HIZ;
    }
   else
    {
     ival = 3;
     sval = 0xdb;
    }
   break;
  /* these are normal wires, that have pull0 or pull driver added */
  case N_TRI0: ival = 0; sval = ST_PULL0; break; /* <5:5>=0 Pu0 10110100 */
  case N_TRI1: ival = 1; sval = ST_PULL1; break; /* <5:5>=1 pu1 10110101 */
  case N_TRIREG:
   /* even if delay do not schedule decay to z at initialize */
   ival = 3;
   /* strength byte encodes the trireg capacitor size with x value */
   if (np->n_capsiz == CAP_LARGE) sval = 0x93; /* <4:4>=x LaX 10010011 */
   else if (np->n_capsiz == CAP_SMALL) sval = 0x27; /* <1:1>=x SmX 00100111 */
   else sval = 0x4b; /* <2:2>=x MeX 01001011 */
   break;
  /* <7:7>=1 Su1 ll111101 */
  case N_SUPPLY1: ival = 1; sval = ST_SUPPLY1; break;
  /* <7:7>=0 Su0 11111100 */
  case N_SUPPLY0: ival = 0; sval = ST_SUPPLY0; break;
  /* register initialized to 0 */
  case N_REG: case N_INT: case N_TIME: case N_EVENT:
   ival = 3; sval = 0xdb; /* strength meaningless but <6:6>=X Stx 11011011 */
   break;
  default: __case_terr(__FILE__, __LINE__); return(0);
 }
 *stval = sval;
 return(ival);
}
2269
2270 /*
2271 * return T if all drivers are bid mdprt drivers because they are all
2272 * removed later when inout are converted to switch channels
2273 */
all_drvrs_bidirect(struct net_t * np)2274 static int32 all_drvrs_bidirect(struct net_t *np)
2275 {
2276 register struct net_pin_t *npp;
2277
2278 for (npp = np->ndrvs; npp != NULL; npp = npp->npnxt)
2279 {
2280 if (npp->npntyp != NP_BIDMDPRT && npp->npntyp != NP_BIDICONN)
2281 return(FALSE);
2282 }
2283 return(TRUE);
2284 }
2285
2286 /*
2287 * allocate all instances for a strength scalar var
2288 * 1 byte per value (low 2 bits value), next 3 bits 1 stren, high 3 0 stren
2289 */
/*
 * allocate all instances for a strength scalar var
 * one byte per instance: st0 in high 3 bits, st1 next 3, value low 2
 */
static void alloc_sscal_var(struct net_t *np, int32 insts)
{
 byte *secp;
 int32 stval;

 /* type determined full strength byte initial value */
 __get_initval(np, &stval);

 /* carve per inst byte section out of the design wide byte table */
 secp = &(__btab[__btabbi]);
 __btabbi += insts;

 /* fill every instance with the strength byte and record the section */
 set_byteval_(secp, insts, (byte) stval);
 np->nva.bp = secp;
}
2306
2307 /*
2308 * allocate all instances for a vector variable
2309 */
/*
 * allocate (if nd_alloc) and initialize all instances of a non strength
 * vector variable - a words then b words per instance in the word table
 */
extern void __allocinit_vec_var(struct net_t *np, int32 insts,
 int32 nd_alloc)
{
 int32 initv, stval, wlen, nbytes;
 word32 amask, bmask;

 initv = __get_initval(np, &stval);
 /* SJM 07/15/00 - no longer pack <16 bit vecs - still pack scalar in byte */
 wlen = wlen_(np->nwid);
 nbytes = 2*WRDBYTES*insts*wlen;
 if (nd_alloc)
  {
   /* carve the a/b word groups for all insts out of design word table */
   np->nva.wp = &(__wtab[__wtabwi]);
   __wtabwi += 2*insts*wlen;
  }

 /* all zero initial value needs no per word masking */
 if (initv == 0)
  {
   memset(np->nva.wp, 0, nbytes);
   return;
  }
 /* low bit of the 2 bit init code fills the a part, high bit the b part */
 amask = ((initv & 1) != 0) ? 0xffffffffL : 0L;
 bmask = ((initv & 2) != 0) ? 0xffffffffL : 0L;
 /* initialize net storage */
 __init_vec_var(np->nva.wp, insts, wlen, np->nwid, amask, bmask);
}
2337
/*
 * initialize a non strength vector (non packed) to z or to the packed
 * maska/maskb pattern the caller built from the net's initial value code
 */
extern void __init_vec_var(register word32 *wp, int32 insts, int32 wlen,
 int32 vecw, word32 maska, word32 maskb)
{
 register int32 ii, wi;
 int32 hibits;
 word32 *vwp;

 /* narrow case: each instance is one a word then one b word */
 if (vecw <= WBITS)
  {
   /* mask so bits above the vector width stay zero in both parts */
   maska &= __masktab[vecw];
   maskb &= __masktab[vecw];
   for (ii = 0; ii < insts; ii++)
    {
     *wp++ = maska;
     *wp++ = maskb;
    }
   return;
  }

 /* wide case: each instance has wlen a part words then wlen b part words */
 hibits = ubits_(vecw);
 vwp = wp;
 for (ii = 0; ii < insts; ii++)
  {
   /* a part - fill then zero unused high bits of last word */
   for (wi = 0; wi < wlen; wi++) vwp[wi] = maska;
   vwp[wlen - 1] &= __masktab[hibits];

   /* b part - same treatment */
   for (wi = 0; wi < wlen; wi++) vwp[wlen + wi] = maskb;
   vwp[2*wlen - 1] &= __masktab[hibits];

   /* step to next instance's value region */
   vwp = &(vwp[2*wlen]);
  }
}
2378
2379 /*
2380 * allocate all instances for a strength vector variable
2381 * using byte vector here so total if no. of bits in vec times no. of insts
2382 */
/*
 * allocate all instances for a strength vector variable
 * one strength byte per bit, so total is bit width times instance count
 */
static void alloc_svec_var(struct net_t *np, int32 insts)
{
 byte *secp;
 int32 stval, nbytes;

 /* carve the per bit per inst byte section from design byte table */
 nbytes = insts*np->nwid;
 secp = &(__btab[__btabbi]);
 __btabbi += nbytes;

 /* fill every strength byte with the type determined initial value */
 __get_initval(np, &stval);
 set_byteval_((char *) secp, nbytes, (byte) stval);
 np->nva.bp = secp;
}
2398
2399 /*
2400 * routine to re-initialize variables for 1 module
2401 *
2402 * this just reinits wire/reg value - after dces reinited sets chg action bits
2403 */
extern void __reinitialize_vars(struct mod_t *mdp)
{
 register int32 ni;
 register struct net_t *np;
 register struct task_t *tskp;

 if (mdp->mnnum == 0) goto do_tasks;
 for (ni = 0, np = &(mdp->mnets[0]); ni < mdp->mnnum; ni++, np++)
  {
   /* if any instances any bits in tran channel, need to re-init */
   /* all hard drivers */
   if (np->ntraux != NULL)
    {
     /* FALSE means re-init existing storage, no new allocation */
     if (np->n_stren)
      __allocinit_stperival(&np->ntraux->trnva, mdp->flatinum, np, FALSE);
     else
      __allocinit_perival(&np->ntraux->trnva, mdp->flatinum, np->nwid, FALSE);
    }
   /* need all changed even for event */
   if (np->ntyp == N_EVENT) continue;
   reinit_1wirereg(np, mdp);
  }

do_tasks:
 /* task/function local regs re-initialized the same way */
 for (tskp = mdp->mtasks; tskp != NULL; tskp = tskp->tsknxt)
  {
   if (tskp->trnum == 0) continue;
   for (ni = 0, np = &(tskp->tsk_regs[0]); ni < tskp->trnum; ni++, np++)
    {
     if (np->ntyp == N_EVENT) continue;
     reinit_1wirereg(np, mdp);
    }
  }
}
2438
2439 /*
2440 * reinitialize 1 wire or reg
2441 */
static void reinit_1wirereg(struct net_t *np, struct mod_t *mdp)
{
 register int32 i;
 register struct qcval_t *qcvalp;
 int32 stval, arrw, ival;
 byte *sbp, sval;
 double d1;
 i_tev_ndx *itevpp;

 d1 = 0.0;
 /* initialize per bit per inst scheduled event tab if needed */
 if (np->nrngrep == NX_DWIR)
  {
   itevpp = np->nu.rngdwir->wschd_pbtevs;
   /* -1 is the no pending event index value */
   for (i = 0; i < mdp->flatinum*np->nwid; i++) itevpp[i] = -1;
  }
 if (np->frc_assgn_allocated)
  {
   if (np->ntyp >= NONWIRE_ST)
    {
     qcvalp = np->nu2.qcval;
     /* reset all force/assigns to off and turn off any dces for active */
     /* regs have 2 qcval records (force and assign) per instance */
     /* NOTE(review): qcvalp is re-pointed from i at the top of every */
     /* iteration so the qcvalp++ step is redundant - harmless */
     for (i = 0; i < 2*mdp->flatinum; i++, qcvalp++)
      {
       qcvalp = &(np->nu2.qcval[i]);
       if (qcvalp->qc_active)
        {
         /* turn off dces then empty fields by re-initializing record */
         if (qcvalp->qcdcep != NULL) __dcelst_off(qcvalp->qcdcep);
         init_qcval(qcvalp);
        }
       else if (qcvalp->qc_overridden)
        {
         /* here fields filled so need to re-init but dces off */
         init_qcval(qcvalp);
        }
      }
    }
   else
    {
     /* reset all force/assigns on to off and reinit - no assign of wire */
     /* forces per bit */
     /* NOTE(review): as above, the body's re-point makes the qcvalp++ */
     /* redundant; qcvalp is always assigned before any use here */
     for (i = 0; i < np->nwid*mdp->flatinum; i++, qcvalp++)
      {
       qcvalp = &(np->nu2.qcval[i]);
       if (qcvalp->qc_active)
        {
         if (qcvalp->qcdcep != NULL) __dcelst_off(qcvalp->qcdcep);
         init_qcval(qcvalp);
        }
      }
    }
  }
 /* reinitialize any pending vpi_put_value records */
 /* leave the record but change driver to nil and cancel any scheduled tevs */
 if (np->ntyp < NONWIRE_ST)
  {
   /* wires may have both vpi driver and reg/wire put value records */
   if (np->vpi_ndrvs != NULL) __reinit_netdrvr_putvrec(np, mdp);
   if (np->regwir_putv_tedlst != NULL)
    __reinit_regwir_putvrec(np, mdp->flatinum);
  }
 else
  {
   if (np->regwir_putv_tedlst != NULL)
    __reinit_regwir_putvrec(np, mdp->flatinum);
  }

 /* reals: zero every 8 byte value - array and non array forms */
 if (np->ntyp == N_REAL)
  {
   if (np->n_isarr)
    {
     arrw = __get_arrwide(np);
     /* LOOKATME - assumes real fits in 8 bytes and WBITS is 32 */
     for (i = 0; i < arrw*mdp->flatinum; i++)
      {
       memcpy(&(np->nva.wp[2*i]), &d1, sizeof(double));
      }
     return;
    }
   for (i = 0; i < mdp->flatinum; i++)
    {
     memcpy(&(np->nva.wp[2*i]), &d1, sizeof(double));
    }
   return;
  }
 /* not for real arrays - FALSE means re-init only, no allocation */
 if (np->n_isarr) { __allocinit_arr_var(np, mdp->flatinum, FALSE); return; }
 if (!np->n_isavec)
  {
   if (!np->n_stren)
    {
     /* non strength scalar: refill per inst bytes with initial value */
     ival = __get_initval(np, &stval);
     if (ival == 0) memset(np->nva.bp, 0, mdp->flatinum);
     else set_byteval_(np->nva.bp, mdp->flatinum, ival);
     return;
    }
   /* strength scalar: refill per inst strength bytes */
   __get_initval(np, &stval);
   sbp = np->nva.bp;
   sval = (byte) stval;
   set_byteval_(sbp, mdp->flatinum, sval);
   return;
  }
 if (!np->n_stren) { __allocinit_vec_var(np, mdp->flatinum, FALSE); return; }
 /* strength vector: one strength byte per bit per inst */
 __get_initval(np, &stval);
 sval = (byte) stval;
 set_byteval_(np->nva.bp, mdp->flatinum*np->nwid, sval);
}
2549
2550 /*
2551 * initialize all dces (and tchk old vals) in design
2552 */
extern void __initialize_dsgn_dces(void)
{
 register struct mod_t *mdp;

 /* visit every module with its itree context pushed so per module */
 /* dce init sees the right __inst_mod */
 for (mdp = __modhdr; mdp != NULL; mdp = mdp->mnxt)
  {
   __push_wrkitstk(mdp, 0);
   __initialize_dces(__inst_mod);
   __pop_wrkitstk();
  }
}
2564
2565 /*
2566 * routine to re-initialize dces (and tchk npp old vals) for 1 module
2567 *
2568 * do not need to (re)initialize npps except path and timing check which have
2569 * internal state - normal npp's need processing on any kind of change
2570 */
extern void __initialize_dces(struct mod_t *mdp)
{
 register int32 i, ni;
 register struct net_t *np;
 register struct net_pin_t *npp;
 int32 insts;
 word64 tim0;
 struct tchg_t *tchgp;
 struct chktchg_t *chktcp;
 struct task_t *tskp;

 insts = mdp->flatinum;
 for (ni = 0, np = &(mdp->mnets[0]); ni < mdp->mnnum; ni++, np++)
  {
   if (np->ntyp == N_EVENT) goto skip_spec;

   /* reinitialize timing check state values - only accessible thru npps */
   /* if no specify section do not need to go through npp loads list */
   if (mdp->mspfy != NULL)
    {
     tim0 = 0ULL;
     for (npp = np->nlds; npp != NULL; npp = npp->npnxt)
      {
       if (npp->npntyp != NP_TCHG) continue;

       /* reset old value and last change time per change record form */
       switch ((byte) npp->chgsubtyp) {
        case NPCHG_TCSTART: case NPCHG_PTHSRC:
         tchgp = npp->elnpp.etchgp;
         reinit_npp_oldval(tchgp->oldval, np, mdp);
         for (i = 0; i < insts; i++) tchgp->lastchg[i] = tim0;
         break;
        case NPCHG_TCCHK:
         chktcp = npp->elnpp.echktchgp;
         reinit_npp_oldval(chktcp->chkoldval, np, mdp);
         for (i = 0; i < insts; i++) chktcp->chklastchg[i] = tim0;
         break;
        default: __case_terr(__FILE__, __LINE__);
       }
      }
    }
skip_spec:
   __init_1net_dces(np, mdp);
  }
 /* task/function local regs have no tchg npps - just init their dces */
 for (tskp = mdp->mtasks; tskp != NULL; tskp = tskp->tsknxt)
  {
   if (tskp->trnum == 0) continue;
   for (ni = 0, np = &(tskp->tsk_regs[0]); ni < tskp->trnum; ni++, np++)
    {
     /* LOOKATME - think need to do this for events too */
     __init_1net_dces(np, mdp);
    }
  }
}
2624
2625 /*
2626 * initialize one net's dces (called faster cver-cc compile and $reset)
2627 *
2628 * for XMR mdp is the define (target) itree loc since dce on define var
2629 * FIXME (OR FINDOUT) - why is algorithm to remove PLI 1.0 vcls
2630 * but leave PLI 2.0 val chg cbs
2631 */
extern void __init_1net_dces(struct net_t *np, struct mod_t *mdp)
{
 int32 insts;
 struct dcevnt_t *dcep;
 struct mod_t *ref_mdp;

 /* insts retained for the commented out schd tev reinit loop below */
 insts = mdp->flatinum;
 /* must go through setting all dce schedule per inst tables to nil */
 for (dcep = np->dcelst; dcep != NULL;)
  {
   switch (dcep->dce_typ) {
    case DCE_RNG_INST: case DCE_INST:
     /* set per inst. schedule table to nil but leave this type of dce */
     /* not per bit since filter applied to range */
     /* also for dce forms accessed from ref. not target itree loc. */

     /* DBG remove --- */
     if (dcep->st_dctrl->dceschd_tevs == NULL)
      __misc_terr(__FILE__, __LINE__);
     /* --- */

     /* SJM 10/07/06 - NOTICE that dce previous values are indexed by */
     /* declare in (target) instance number and have that size but */
     /* because dce scheduled tevs are accessed while arming from ref */
     /* point, the dce schd tev table is the size of and index by ref mod */

     /* SJM 10/07/06 - since init called from declared in (targ) itree loc */
     /* need to use the defined in mdp number of insts for XMRs */
     /* AIV 03/03/07 - made a routine to get the ref mod */
     ref_mdp = dcep_ref_mod(dcep);

     /* SJM 10/07/06 - for XMR dces (@(i1.reg) say), the schd tev table */
     /* has the size of the referenced in (in mdp) module and is accessed */
     /* by the referencing (used in) inum - XMR dce prevals are accessed */
     /* and have the size of the declare in (target) module */
     /* AIV 03/03/07 - these are init when alloced for the intep */
     /* they are init when linked in for the compiler as well */
     /*
     for (i = 0; i < ref_mdp->flatinum; i++)
      {
       dcep->st_dctrl->dceschd_tevs[i] = -1;
      }
     */

     /* AIV 01/04/07 - init dce expr was using the wrong inst for */
     /* dce with more than one inst and was also skipping init for the */
     /* dce_expr for the one instance case */
     /* set dce previous values to initial wire value */
     if (dcep->dce_expr != NULL) init_dce_exprval(dcep);
     else
      {
       if (dcep->prevval.wp != NULL)
        {
         /* 05/18/03 - for XMR there is one for each decl in inst */
         init_dce_prevval(dcep, ref_mdp);
        }
      }
     break;
    case DCE_RNG_MONIT: case DCE_MONIT:
     /* DBG remove -- */
     if (!dcep->dce_1inst) __misc_terr(__FILE__, __LINE__);
     /* --- */

     /* SJM 12/30/02 - since monits can't be removed - only turned off */
     /* on reset must turn off and initialize to start value */
     /* previous fix was wrong */
     dcep->dce_off = TRUE;
     /* initialize old value as if this was first time - can never be XMR */
     __push_itstk(dcep->dce_matchitp);
     __init_1instdce_prevval(dcep);
     __pop_itstk();
     break;
    case DCE_RNG_QCAF: case DCE_QCAF:
     /* always 1 qca dce load per statement exec */
     /* never a previous value since better to repeat assign of forced */
     /* DBG remove --- */
     if (dcep->prevval.bp != NULL) __misc_terr(__FILE__, __LINE__);
     /* --- */
     /* 11/22/02 AIV - no longer freeing QCAF dces - turned off instead */
     /* also no previous value to re-initialize */
     dcep->dce_off = TRUE;
     break;
    case DCE_RNG_PVC: case DCE_PVC:
     /* DBG remove -- */
     if (!dcep->dce_1inst) __misc_terr(__FILE__, __LINE__);
     /* --- */

     /* SJM 01/02/03 - for the dce - always inst specific - just reinit in */
     /* case has previous value */
     __push_itstk(dcep->dce_matchitp);
     __init_1instdce_prevval(dcep);
     __pop_itstk();
     break;
    case DCE_CBVC: case DCE_RNG_CBVC:
    case DCE_CBF: case DCE_RNG_CBF: case DCE_CBR: case DCE_RNG_CBR:
     /* DBG remove -- */
     if (!dcep->dce_1inst) __misc_terr(__FILE__, __LINE__);
     /* --- */

     /* this handles re-init of PLI 2.0 dce that must be left on */
     /* know will always exist */
     /* LOOKATME - why left on? or why not consistent with PLI 1 */
     __push_itstk(dcep->dce_matchitp);
     __alloc_1instdce_prevval(dcep);
     __pop_itstk();
     break;
    /* notice iact never seen here since only enabled from iact stmt */
    default: __case_terr(__FILE__, __LINE__);
   }
   dcep = dcep->dcenxt;
  }
}
2744
2745 /*
2746 * allocate normal multiple instance case dce prevval
2747 * need value for every instance (itree loc. not used)
2748 *
2749 * 05/07/03 - now separate alloc and initialize
2750 */
alloc_dce_prevval(struct dcevnt_t * dcep,struct mod_t * mdp)2751 static void alloc_dce_prevval(struct dcevnt_t *dcep, struct mod_t *mdp)
2752 {
2753 int32 dcewid, totchars;
2754 struct net_t *np;
2755
2756 np = dcep->dce_np;
2757 /* PLI change values always need previous value because >1 change during */
2758 /* one time slot possible */
2759 if (dcep->dce_typ < ST_ND_PREVVAL)
2760 {
2761 /* no previous value for arrays or non edge entire wire regs */
2762 if (np->n_isarr || (np->ntyp >= NONWIRE_ST && dcep->dci1 == -1
2763 && !dcep->dce_edge))
2764 return;
2765 }
2766
2767
2768 /* build old value for wire range change detection */
2769 dcewid = __get_dcewid(dcep, np);
2770 if (np->n_stren)
2771 {
2772 /* notice this can never be array */
2773 dcep->prevval.bp = (byte *) __my_malloc(dcewid*mdp->flatinum);
2774 }
2775 else
2776 {
2777 totchars = __get_pcku_chars(dcewid, mdp->flatinum);
2778 dcep->prevval.wp = (word32 *) __my_malloc(totchars);
2779 }
2780 }
2781
2782 /*
2783 * initialize by setting to current value of wire the dce preval
2784 * need value for every instance (itree loc. not used)
2785 *
2786 * SJM 05/07/03 - now separate alloc and initialize
2787 * only called if dce prev val non nil
2788 */
static void init_dce_prevval(struct dcevnt_t *dcep, struct mod_t *decl_mdp)
{
 register int32 ii;
 int32 i1;
 word32 *wp;
 byte *sbp, *sbp2, *sbp3;
 int32 dcewid;
 struct net_t *np;
 struct xstk_t *xsp;

 np = dcep->dce_np;
 /* build old value for wire range change detection */
 dcewid = __get_dcewid(dcep, np);
 if (np->n_stren)
  {
   /* strength form: copy strength bytes directly - can never be array */
   sbp = dcep->prevval.bp;
   if (dcep->dci1 == -2)
    {
     /* IS (per inst differing) bit index form */
     /* SJM 10/12/04 - because contab realloced, must be ndx base of IS */
     wp = &(__contab[dcep->dci2.xvi]);
     for (ii = 0; ii < decl_mdp->flatinum; ii++)
      {
       /* know i1 not -1 since must be 1 bit */
       i1 = (int32) wp[2*ii];
       sbp2 = &(np->nva.bp[np->nwid*ii + i1]);
       sbp3 = &(sbp[ii*dcewid]);
       memcpy(sbp3, sbp2, dcewid);
      }
    }
   else
    {
     /* here set every instance */
     for (ii = 0; ii < decl_mdp->flatinum; ii++)
      {
       /* notice start addr. must be low (2nd) index */
       i1 = (dcep->dci1 == -1) ? 0 : dcep->dci2.i;
       sbp2 = &(np->nva.bp[np->nwid*ii + i1]);
       sbp3 = &(sbp[ii*dcewid]);
       memcpy(sbp3, sbp2, dcewid);
      }
    }
   return;
  }
 /* non strength form: load wire section through the value stack */
 push_xstk_(xsp, dcewid);
 /* dummy itree loc needed so can change inum */
 __push_wrkitstk(decl_mdp, 0);
 /* this is impossible for monit form */
 if (dcep->dci1 == -2)
  {
   /* SJM 10/12/04 - because contab realloced, must be ndx base of IS */
   wp = &(__contab[dcep->dci2.xvi]);
   /* know this is all inst. since monit/iact cannot be -2 form */
   for (ii = 0; ii < decl_mdp->flatinum; ii++)
    {
     /* no need to access mod con table here */
     /* set both forms of current inst number before loading the wire */
     __inst_ptr->itinum = ii;
     __inum = ii;
     i1 = (int32) wp[2*ii];
     __ld_wire_sect(xsp->ap, xsp->bp, np, i1, i1);
     /* minus 2 form only possible for bit select */
     st_scalval_(dcep->prevval.bp, xsp->ap[0], xsp->bp[0]);
    }
  }
 else
  {
   for (ii = 0; ii < decl_mdp->flatinum; ii++)
    {
     /* no need to access mod con table here */
     __inst_ptr->itinum = ii;
     __inum = ii;
     /* notice dci1 will be -1 for entire wire and this handles */
     __ld_wire_sect(xsp->ap, xsp->bp, np, dcep->dci1, dcep->dci2.i);
     __st_perinst_val(dcep->prevval, dcewid, xsp->ap, xsp->bp);
    }
  }
 __pop_wrkitstk();
 __pop_xstk();
}
2868
2869 /*
2870 * initialize dce expr old (expr. not variable) value
2871 *
2872 * only called if dce has expr. and this is master (1st) for XMR
2873 *
2874 * since expr evaluated in ref XMR loc, module context is referenced module
2875 * or XMR case which is set by caller
2876 *
2877 * SJM 05/06/03 - need to eval and save in ref mod loc XMR dce case
2878 * SJM 05/04/05 - notice this
2879 */
init_dce_exprval(struct dcevnt_t * dcep)2880 static void init_dce_exprval(struct dcevnt_t *dcep)
2881 {
2882 register int32 ii;
2883 struct xstk_t *xsp;
2884 struct mod_t *ref_mdp;
2885 struct gref_t *grp;
2886
2887 if (dcep->dce_xmrtyp != XNP_LOC)
2888 {
2889 if (dcep->dce_xmrtyp == XNP_RTXMR)
2890 {
2891 ref_mdp = dcep->dce_refitp->itip->imsym->el.emdp;
2892 }
2893 else
2894 {
2895 grp = dcep->dceu.dcegrp;
2896 ref_mdp = grp->gin_mdp;
2897 }
2898 }
2899 else ref_mdp = __inst_mod;
2900
2901 /* edges always 1 bit (maybe low of vector) */
2902
2903 /* LOOKATME - can expr. be evaluated here? probably since can load wire */
2904 /* need to eval. from initialized wires */
2905 /* SJM 05/06/03 - must eval expr in ref loc itree context not define */
2906 for (ii = 0; ii < ref_mdp->flatinum; ii++)
2907 {
2908 __push_itstk(ref_mdp->moditps[ii]);
2909 xsp = __eval_xpr(dcep->dce_expr->edgxp);
2910 st_scalval_(dcep->dce_expr->bp,
2911 (xsp->ap[0] & 1L), (xsp->bp[0] & 1L));
2912 __pop_xstk();
2913 __pop_itstk();
2914 }
2915 }
2916
2917 /*
2918 * return the reference dcep mod
2919 */
dcep_ref_mod(struct dcevnt_t * dcep)2920 static struct mod_t *dcep_ref_mod(struct dcevnt_t *dcep)
2921 {
2922 struct mod_t *ref_mdp;
2923 struct itree_t *itp;
2924
2925 if (dcep->dce_xmrtyp == XNP_UPXMR || dcep->dce_xmrtyp == XNP_DOWNXMR)
2926 {
2927 ref_mdp = dcep->dceu.dcegrp->targmdp;
2928 }
2929 /* AIV 03/01/07 - rooted must be linked with its containing mod */
2930 else if (dcep->dce_xmrtyp == XNP_RTXMR)
2931 {
2932 /* AIV 03/03/07 - should be match itp not ref */
2933 itp = dcep->dce_matchitp;
2934 ref_mdp = itp->itip->imsym->el.emdp;
2935 }
2936 else ref_mdp = __inst_mod;
2937 return(ref_mdp);
2938 }
2939
2940 /*
2941 * allocate one inst form dce
2942 *
2943 * since called before dce filled, can only allocate - can't initialize
2944 * variant for one inst forms - monit and XMR
2945 * this also figures out if previous value needed for 1i case
2946 */
__alloc_1instdce_prevval(struct dcevnt_t * dcep)2947 extern void __alloc_1instdce_prevval(struct dcevnt_t *dcep)
2948 {
2949 int32 dcewid, totchars;
2950 struct net_t *np;
2951 struct mod_t *ref_mdp;
2952
2953 /* SJM 05/08/03 - dce expr can never be 1 inst - always var and never XMR */
2954 /* DBG remove -- */
2955 if (dcep->dce_expr != NULL) __misc_terr(__FILE__, __LINE__);
2956 /* --- */
2957
2958 np = dcep->dce_np;
2959
2960 /* always need prevval for PLI, multiple change at same time possible */
2961 if (dcep->dce_typ < ST_ND_PREVVAL)
2962 {
2963 /* no previous value for arrays or non edge entire wire regs */
2964 /* but needed for all others and always build if nd prev val T */
2965 if (np->n_isarr || (np->ntyp >= NONWIRE_ST && dcep->dci1 == -1
2966 && !dcep->dce_edge)) return;
2967 }
2968
2969 dcewid = __get_dcewid(dcep, np);
2970 if (np->n_stren) dcep->prevval.bp = (byte *) __my_malloc(dcewid);
2971 else
2972 {
2973 ref_mdp = dcep_ref_mod(dcep);
2974 totchars = __get_pcku_chars(dcewid, ref_mdp->flatinum);
2975 dcep->prevval.wp = (word32 *) __my_malloc(totchars);
2976 }
2977 }
2978
2979 /*
2980 * initialize and set to current value into dce preval
2981 * variant for one inst forms - monit and XMR
2982 * SJM 05/07/03 - now only can be called after all of dce filled
2983 *
2984 * this routine must be passed declare (target) itree context on top of
2985 * inst stack and ref loc 1 under
2986 */
extern void __init_1instdce_prevval(struct dcevnt_t *dcep)
{
 byte *sbp, *sbp2;
 int32 dcewid;
 struct net_t *np;
 struct xstk_t *xsp;

 np = dcep->dce_np;
 /* DBG remove -- */
 if (dcep->dce_expr != NULL) __misc_terr(__FILE__, __LINE__);
 /* --- */

 /* nothing to do if alloc routine decided no prev value was needed */
 if (dcep->prevval.bp == NULL) return;

 dcewid = __get_dcewid(dcep, np);
 /* SJM 05/08/03 - eval in match context but store as 0 since only 1 inst */
 __push_itstk(dcep->dce_matchitp);
 if (np->n_stren)
  {
   /* notice this can never be array */
   sbp = dcep->prevval.bp;
   /* must load for initialize from right inst. */
   get_stwire_addr_(sbp2, np);
   /* for select (dci1 != -1) advance base address to low index */
   if (dcep->dci1 != -1) sbp2 = &(sbp2[dcep->dci2.i]);
   memcpy(sbp, sbp2, dcewid);
  }
 else
  {
   push_xstk_(xsp, dcewid);
   /* must load value from correct (match_itp instance) on it stack */
   __ld_wire_sect(xsp->ap, xsp->bp, np, dcep->dci1, dcep->dci2.i);

   /* need dummy place for itree inst num since know only 1 inst. */
   /* this is match loc but only 1 inst - so any would work */
   __push_wrkitstk(__inst_mod, 0);
   __st_perinst_val(dcep->prevval, dcewid, xsp->ap, xsp->bp);
   __pop_wrkitstk();
   __pop_xstk();
  }
 __pop_itstk();
}
3028
3029 /*
3030 * STATEMENT PREPARATION ROUTINES
3031 */
3032
3033 /*
3034 * for every always, add surrounding forever statement
3035 *
3036 * all variable storage must have been allocated by here
3037 * all id (and glbid) expression nodes made to point to variable
3038 */
extern void __prep_stmts(void)
{
 register int32 i;
 register struct ialst_t *ialp;
 register struct task_t *tskp;
 struct mod_t *mdp;
 int32 sav_declobj;

 /* bottom of if/case/delay control continuation stack must be null */
 /* for cases where continuation is really NULL - no goto */
 __prpsti = 0;
 __nbsti = -1;
 __prpstk[0] = NULL;
 /* prepare every module - work itree context pushed per module */
 for (mdp = __modhdr; mdp != NULL; mdp = mdp->mnxt)
  {
   __push_wrkitstk(mdp, 0);

   __prep_numsts = 0;
   __processing_func = FALSE;
   /* first prepare the initial/always block statement lists */
   for (ialp = __inst_mod->ialst; ialp != NULL; ialp = ialp->ialnxt)
    {
     /* for always, must be first - tails of last must point to 1st stmt */
     /* here may be list and will be put on end */
     if (ialp->iatyp == ALWAYS)
      add_loopend_goto(ialp->iastp, ialp->iastp);
     ialp->iastp = __prep_lstofsts(ialp->iastp, TRUE, FALSE);
     /* DBG remove --- */
     if (__prpsti != 0) __misc_terr(__FILE__, __LINE__);
     /* --- */
    }
   /* notice, there is one set of task variables per instance */
   /* but a task in one itree inst. can be enabled multiple times */
   /* tthrds for tasks (not functions) is per inst. list of active thrds */
   /* so if disable can disable all below */
   for (tskp = __inst_mod->mtasks; tskp != NULL; tskp = tskp->tsknxt)
    {
     /* never need task threads for function */
     if (tskp->tsktyp == FUNCTION) continue;
     tskp->tthrds = (struct tskthrd_t **)
      __my_malloc(__inst_mod->flatinum*sizeof(struct tskthrd_t *));
     for (i = 0; i < __inst_mod->flatinum; i++) tskp->tthrds[i] = NULL;
    }

   /* prepare task/function bodies with declare object set to TASK */
   sav_declobj = __cur_declobj;
   __cur_declobj = TASK;
   for (tskp = __inst_mod->mtasks; tskp != NULL; tskp = tskp->tsknxt)
    {
     /* named blocks handled as statement where they occur */
     if (tskp->tsktyp == FUNCTION) __processing_func = TRUE;
     else if (tskp->tsktyp == TASK) __processing_func = FALSE;
     else continue;

     tskp->tskst = __prep_lstofsts(tskp->tskst, FALSE, FALSE);
     /* no branch continue here because must schedule/disable thread */
     /* need inform if function never enable */
     if (!tskp->t_used)
      {
       char s1[RECLEN], s2[RECLEN];

       if (__processing_func) strcpy(s2, "called");
       else strcpy(s2, "enabled");
       __gfinform(439, tskp->tsksyp->syfnam_ind, tskp->tsksyp->sylin_cnt,
        "%s %s never %s", __to_tsktyp(s1, tskp->tsktyp), tskp->tsksyp->synam,
        s2);
      }
     /* DBG remove --- */
     if (__prpsti != 0) __misc_terr(__FILE__, __LINE__);
     /* --- */
    }
   __processing_func = FALSE;
   __cur_declobj = sav_declobj;

   /* DBG remove --
   {
    extern void __dmp_mod(FILE *, struct mod_t *mdp);

    if (__debug_flg) __dmp_mod(stdout, mdp);
   }
   --- */
   /* DBG remove ---
   if (__prep_numsts != __inst_mod->mstnum) __misc_terr(__FILE__, __LINE__);
   --- */

   __pop_wrkitstk();
  }
}
3125
3126 /*
3127 * routine to prepare (optimize) list of statements for simulation
3128 * returns front (may be new statement for for)
3129 *
3130 * know begin ends already turned into statements list wherever possible
3131 * know if containing statement is loop, goto added at end before here
3132 */
extern struct st_t *__prep_lstofsts(struct st_t *hdrstp, int32 nd_endgoto,
 int32 is_dctrl_chain)
{
 register struct st_t *stp;
 register int32 ii;
 int32 tei, fji;
 struct for_t *forp;
 struct st_t *astp, *astp2, *last_stp, *fjstp;
 struct delctrl_t *dctp;

 for (stp = hdrstp, last_stp = NULL; stp != NULL; stp = stp->stnxt)
  {
   /* record current file/line for error messages during prep */
   __sfnam_ind = stp->stfnam_ind;
   __slin_cnt = stp->stlin_cnt;
   /* DBG remove ---
   if (__debug_flg)
    {
     __dbg_msg("%04d: AT %s %s - STMT PREP (%s)\n", stp->stalloc_ndx,
      __bld_lineloc(__xs, stp->stfnam_ind, stp->stlin_cnt),
      __inst_mod->msym->synam, __to_sttyp(__xs2, stp->stmttyp));
    }
   --- */
   /* --- ALTERNATE DBG remove ---
    __dbg_msg("AT %s %s - STMT PREP %04d (%s)\n",
     __bld_lineloc(__xs, stp->stfnam_ind, stp->stlin_cnt),
     __inst_mod->msym->synam, __prep_numsts - 1,
     __to_sttyp(__xs2, stp->stmttyp));
   }
   --- */

   switch ((byte) stp->stmttyp) {
    /* simple assigns need no prep work */
    case S_PROCA: case S_FORASSGN: case S_RHSDEPROCA: case S_NBPROCA: break;
    case S_IF:
     /* then/else arms end with goto to continuation (statement after if) */
     if (stp->stnxt != NULL) push_prpstmt(stp->stnxt);
     stp->st.sif.thenst = __prep_lstofsts(stp->st.sif.thenst, TRUE, FALSE);
     if (stp->st.sif.elsest != NULL)
      stp->st.sif.elsest = __prep_lstofsts(stp->st.sif.elsest, TRUE, FALSE);
     if (stp->stnxt != NULL) pop_prpstmt();
     break;
    case S_CASE:
     prep_case(stp);
     break;
    case S_FOR:
     /* must link so old for assign is replaced by for inc. that points */
     /* to for body (added) - key is that for inc next is not used */
     /* first add goto to for statement itself at end of body */
     /* notice initial assign already precedes for */
     forp = stp->st.sfor;
     astp2 = add_loopend_goto(forp->forbody, forp->forbody);
     /* change to point to for itself */
     astp2->stnxt->st.sgoto = stp;
     astp2->stnxt->st.sgoto->lpend_goto_dest = TRUE;

     /* insert inc stmt before goto */
     forp->forinc->stnxt = astp2->stnxt;
     astp2->stnxt = forp->forinc;
     /* inc will be fixed up in body of loop */
     forp->forbody = __prep_lstofsts(forp->forbody, FALSE, FALSE);
     break;
    case S_FOREVER:
    case S_WHILE:
     /* scheme here if evaluation of while means if non NULL xpr see if T */
     /* if not do not exec statement else exec - loops to same statement */
     /* something like while (x) begin : y ... end becomes: */
     /* L: while () begin ... end; goto L; */
     add_loopend_goto(stp->st.swh.lpst, stp);
     stp->st.swh.lpst = __prep_lstofsts(stp->st.swh.lpst, FALSE, FALSE);
     break;
    case S_REPEAT:
     /* first allocate special repeat setup statement and link on front */
     astp = __alloc2_stmt(S_REPSETUP, stp->stfnam_ind, stp->stlin_cnt);
     /* fill guts of new statement with repeat guts - astp2 pnts to nxt st */
     *astp = *stp;

     stp->stmttyp = S_REPSETUP;
     stp->rl_stmttyp = stp->stmttyp;
     stp->st_unbhead = FALSE;
     stp->st.scausx = NULL;

     /* then exchange stp and astp pointers (since prev nxt is stp) */
     astp2 = astp;
     astp = stp;
     stp = astp2;
     astp->stnxt = stp;

     /* stp now points to rep setup */
     /* DBG remove --
     if (__debug_flg)
      {
       __dbg_msg("AT %s %s - STMT PREP (%s)\n",
        __bld_lineloc(__xs, astp->stfnam_ind, astp->stlin_cnt),
        __inst_mod->msym->synam, __to_sttyp(__xs2, astp->stmttyp));
      }
     --- */
     /* DBG remove --
     __prep_numsts++;
     if (__debug_flg)
      {
       __dbg_msg("%04d: AT %s %s - STMT PREP (%s)\n", astp->stalloc_ndx,
        __bld_lineloc(__xs, astp->stfnam_ind, astp->stlin_cnt),
        __inst_mod->msym->synam, __to_sttyp(__xs2, astp->stmttyp));
      }
     --- */
     /* ALTERNATE DBG remove ---
     __prep_numsts++;
     if (__debug_flg)
      {
       __dbg_msg("AT %s %s - STMT PREP %04d (%s)\n",
        __bld_lineloc(__xs, astp->stfnam_ind, astp->stlin_cnt),
        __inst_mod->msym->synam, __prep_numsts - 1,
        __to_sttyp(__xs2, astp->stmttyp));
      }
     --- */
     /* allocate per inst. count storage */
     /* add loop back to repeat header */
     add_loopend_goto(stp->st.srpt.repst, stp);
     /* 32 bit word32 width built in here */
     stp->st.srpt.reptemp = (word32 *)
      __my_malloc(WRDBYTES*__inst_mod->flatinum);
     memset(stp->st.srpt.reptemp, 0, WRDBYTES*__inst_mod->flatinum);
     /* end must loop back to actual repeat not setup */
     stp->st.srpt.repst = __prep_lstofsts(stp->st.srpt.repst, FALSE, FALSE);
     break;
    case S_WAIT:
     /* build and adding dc events is just change expr. case here */
     dctp = stp->st.swait.wait_dctp;
     /* must turn on iact bit so linkon dce adds to free list when done */
     if (__iact_state) dctp->dc_iact = TRUE;
     /* wait is simple expression - EV OR illegal - edge illegal state dep. */
     bld_evxpr_dces(stp->st.swait.lpx, dctp, FALSE);
     /* wait @ event triggers on loop exp and executes wait to evaluate */
     /* the loop expression */
     dctp->actionst = stp;
     /* fill the delay/event scheduled action rec that is needed by wait */
     /* for arming and triggering */
     dctp->dctyp = DC_WAITEVENT;
     /* need to alloc and init scheduled tevs table */
     dctp->dceschd_tevs = (i_tev_ndx *)
      __my_malloc(__inst_mod->flatinum*sizeof(i_tev_ndx));
     for (tei = 0; tei < __inst_mod->flatinum; tei++)
      dctp->dceschd_tevs[tei] = -1;

     /* prepare the statements */
     /* wait needs end link to next statement after wait not beginning */
     if (stp->stnxt != NULL) push_prpstmt(stp->stnxt);
     /* last statement (normally only 1, will have goto to continuation */
     stp->st.swait.lpst = __prep_lstofsts(stp->st.swait.lpst, TRUE, FALSE);
     if (stp->stnxt != NULL) pop_prpstmt();
     break;
    case S_DELCTRL:
     dctp = stp->st.sdc;
     /* 10/28/00 - if repeat form, insert repeat dc setup in front */
     if (dctp->repcntx != NULL)
      {
       /* first allocate special repeat setup statement and link on front */
       /* this is needed so can insert after to get lists nexts right */
       /* but still have setup first */
       astp = __alloc2_stmt(S_REPDCSETUP, stp->stfnam_ind, stp->stlin_cnt);
       /* fill guts of new statement with delctrl statment guts */
       /* astp2 points to next stmt */
       *astp = *stp;

       stp->stmttyp = S_REPDCSETUP;
       stp->rl_stmttyp = stp->stmttyp;
       stp->st_unbhead = FALSE;
       stp->st.scausx = NULL;

       /* then exchange stp and astp pointers (since prev nxt is stp) */
       /* so stp will be at original (after inserted setup) so next works */
       /* right in loop */
       astp2 = astp;
       astp = stp;
       stp = astp2;
       astp->stnxt = stp;
       /* DBG remove */
       if (stp->st.sdc != dctp) __misc_terr(__FILE__, __LINE__);
       /* --- */
       /* finally, alloc and initialize to 0 per inst repeat down counter */
       /* SJM 04/02/01 - inter ectl rep counter now word32 */
       dctp->dce_repcnts = (word32 *)
        __my_malloc(sizeof(word32)*__inst_mod->flatinum);
       memset(dctp->dce_repcnts, 0, sizeof(word32)*__inst_mod->flatinum);
      }
     /* 10/28/00 SJM - always still prepare dctrl as usual */
     prep_dctrl(stp);
     break;
    case S_NAMBLK:
     /* for named block, no continuation - must be subthread except in func */
     __push_nbstk(stp);
     if (__processing_func)
      {
       if (stp->stnxt != NULL) push_prpstmt(stp->stnxt);
       stp->st.snbtsk->tskst = __prep_lstofsts(stp->st.snbtsk->tskst,
        TRUE, FALSE);
       if (stp->stnxt != NULL) pop_prpstmt();
      }
     else
      {
       push_prpstmt((struct st_t *) NULL);
       stp->st.snbtsk->tskst = __prep_lstofsts(stp->st.snbtsk->tskst, FALSE,
        FALSE);
       pop_prpstmt();
      }
     __pop_nbstk();
     break;
    case S_UNBLK:
     if (stp->stnxt != NULL) push_prpstmt(stp->stnxt);
     /* need continuation for simple block */
     stp->st.sbsts = __prep_lstofsts(stp->st.sbsts, TRUE, FALSE);
     if (stp->stnxt != NULL) pop_prpstmt();
     break;
    case S_UNFJ:
     push_prpstmt((struct st_t *) NULL);
     /* continuation inside these must be NULL, not stacked val */
     for (fji = 0;; fji++)
      {
       if ((fjstp = stp->st.fj.fjstps[fji]) == NULL) break;
       stp->st.fj.fjstps[fji] = __prep_lstofsts(fjstp, FALSE, FALSE);
      }
     pop_prpstmt();
     break;
    case S_QCONTA:
     /* first build the one for each lhs cat element list of per inst */
     /* qcaf dce lists and initialize - then during build just fill */
     bld_init_qcaf_dce_lstlst(stp);

     /* separate prepare because different qcval record for each inst */
     /* qcaf dces per inst because only one active on a reg at a time */
     for (ii = 0; ii < __inst_mod->flatinum; ii++)
      {
       __push_itstk(__inst_mod->moditps[ii]);

       if (stp->st.sqca->qcatyp == ASSIGN) prep_qc_assign(stp, FALSE);
       else
        {
         /* force of reg, is like assign except overrides assign */
         if (stp->st.sqca->regform) prep_qc_assign(stp, TRUE);
         else prep_qc_wireforce(stp);
        }
       __pop_itstk();
      }
     break;
    case S_QCONTDEA:
     for (ii = 0; ii < __inst_mod->flatinum; ii++)
      {
       __push_itstk(__inst_mod->moditps[ii]);

       if (stp->st.sqcdea.qcdatyp == DEASSIGN) prep_qc_deassign(stp);
       else
        {
         /* SJM 06/21/02 - since deassign/release just allocs qcvals */
         /* no diff between release/deassign */
         if (stp->st.sqcdea.regform) prep_qc_deassign(stp);
         else prep_qc_wirerelease(stp);
        }
       __pop_itstk();
      }
     break;
    case S_DSABLE:
     /* inside function disables are gotos to next statement in up block */
     if (__processing_func) prep_func_dsable(stp);

     /* since for any other name block or task cannot optimize since can */
     /* be disabled from interactive command */
     break;
    case S_TSKCALL:
     /* identify and build dces only for monit/fmonit here */
     prep_stskcalls(stp);
     break;
   }
   last_stp = stp;
  }
 /* now at end of list */
 /* fix up by working up stack to point where statement has next */
 if (nd_endgoto && __prpstk[__prpsti] != NULL)
  {
   /* DBG remove ---*/
   if (last_stp == NULL) __misc_terr(__FILE__, __LINE__);
   /* --- */
   /* need to save cur location so allocate get right stmt loc */
   astp = __alloc2_stmt(S_GOTO, last_stp->stfnam_ind, last_stp->stlin_cnt);
   astp->st.sgoto = __prpstk[__prpsti];

   if (is_dctrl_chain) astp->dctrl_goto = TRUE;
   else astp->lstend_goto = TRUE;

   /* in this rare case will not have line number - can it happen */
   /* LOOKATME - can this happen */
   if (last_stp == NULL) hdrstp = astp;
   else
    {
     last_stp->stnxt = astp;
    }
   /* DBG remove --
   __prep_numsts++;
   if (__debug_flg)
    {
     __dbg_msg("AT %s %s - STMT PREP %04d (s)\n",
      __bld_lineloc(__xs, astp->stfnam_ind, astp->stlin_cnt),
      __inst_mod->msym->synam, __prep_numsts - 1,
      __to_sttyp(__xs2, astp->stmttyp));
    }
   --- */
   /* ALTERNATE DBG remove --
   __prep_numsts++;
   if (__debug_flg)
    {
     __dbg_msg("AT %s %s - STMT PREP %04d (s)\n",
      __bld_lineloc(__xs, astp->stfnam_ind, astp->stlin_cnt),
      __inst_mod->msym->synam, __prep_numsts - 1,
      __to_sttyp(__xs2, astp->stmttyp));
    }
   -- */
  }
 return(hdrstp);
}
3449
3450 /*
3451 * prepare sys task enable - only for monit/fmonit and builds dces for those
3452 *
3453 * SJM 06/21/02 - new algorithm builds monit/fmonit dces during prep
3454 */
prep_stskcalls(struct st_t * stp)3455 static void prep_stskcalls(struct st_t *stp)
3456 {
3457 struct expr_t *tkxp;
3458 struct tskcall_t *tkcp;
3459 struct sy_t *syp;
3460 struct systsk_t *stbp;
3461
3462 tkcp = &(stp->st.stkc);
3463 tkxp = tkcp->tsksyx;
3464
3465 /* nothing to do for non system task calls */
3466 if (tkxp->optyp == ID && *(tkxp->lu.sy->synam) == '$')
3467 {
3468 syp = tkxp->lu.sy;
3469 stbp = syp->el.esytbp;
3470
3471 switch (stbp->stsknum) {
3472 /* system task args do not have type or width - take what is there */
3473 case STN_MONITOR: case STN_MONITORB: case STN_MONITORH: case STN_MONITORO:
3474 __prep_insrc_monit(stp, FALSE);
3475 break;
3476 /* tasks that take a multichannel descriptor followed by anything */
3477 case STN_FMONITOR: case STN_FMONITORB: case STN_FMONITORH:
3478 case STN_FMONITORO:
3479 __prep_insrc_monit(stp, TRUE);
3480 break;
3481 default: break;
3482 }
3483 }
3484 }
3485
3486 /*
3487 * for list of statements that is loop body, add goto that links
3488 * back to front of loop
3489 * returns previous last statement - error to be called withh begstp nul
3490 */
add_loopend_goto(struct st_t * begstp,struct st_t * targstp)3491 static struct st_t *add_loopend_goto(struct st_t *begstp,
3492 struct st_t *targstp)
3493 {
3494 register struct st_t *stp;
3495 struct st_t *last_stp, *gtstp;
3496
3497 /* DBG remove --- */
3498 if (begstp == NULL) __arg_terr(__FILE__, __LINE__);
3499 /* --- */
3500
3501 /* find last statement in loop - know has at least one */
3502 for (stp = begstp, last_stp = NULL; stp != NULL; stp = stp->stnxt)
3503 last_stp = stp;
3504 if (last_stp == NULL) __arg_terr(__FILE__, __LINE__);
3505 gtstp = __alloc2_stmt(S_GOTO, last_stp->stfnam_ind, last_stp->stlin_cnt);
3506 gtstp->lpend_goto = TRUE;
3507 gtstp->st.sgoto = targstp;
3508 targstp->lpend_goto_dest = TRUE;
3509
3510 gtstp->stfnam_ind = last_stp->stfnam_ind;
3511 gtstp->stlin_cnt = last_stp->stlin_cnt;
3512 last_stp->stnxt = gtstp;
3513 if (__debug_flg)
3514 {
3515 char s1[RECLEN], s2[RECLEN], s3[RECLEN];
3516
3517 __dbg_msg("++ loop: adding goto after %s at %s back to stmt %s at %s\n",
3518 __to_sttyp(s1, last_stp->stmttyp), __bld_lineloc(__xs,
3519 last_stp->stfnam_ind, last_stp->stlin_cnt), __to_sttyp(s2,
3520 begstp->stmttyp), __bld_lineloc(s3, targstp->stfnam_ind,
3521 targstp->stlin_cnt));
3522 }
3523 /* --- */
3524 return(last_stp);
3525 }
3526
3527 /*
3528 * push a nested preparation statement
3529 * this is for control flow so many not pushed
3530 */
push_prpstmt(struct st_t * stp)3531 static void push_prpstmt(struct st_t *stp)
3532 {
3533 if (++__prpsti >= MAXPRPSTNEST)
3534 __sgfterr(317, "statements nested too deeply (%d)", MAXPRPSTNEST);
3535 __prpstk[__prpsti] = stp;
3536 /* DBG remove --
3537 if (__debug_flg)
3538 {
3539 if (stp != NULL)
3540 {
3541 __dbg_msg(".. push nested stmt stack to %d at %s\n", __prpsti,
3542 __bld_lineloc(__xs, stp->stfnam_ind, stp->stlin_cnt));
3543 }
3544 else
3545 {
3546 __dbg_msg(".. push NULL nested stmt stack to %d\n", __prpsti);
3547 }
3548 }
3549 --- */
3550 }
3551
3552 /*
3553 * pop a nested preparation statement
3554 */
pop_prpstmt(void)3555 static void pop_prpstmt(void)
3556 {
3557 /* should never undeflow */
3558 if (__prpsti < 0) __misc_sgfterr(__FILE__, __LINE__);
3559 __prpsti--;
3560 /* DBG remove --
3561 if (__debug_flg)
3562 {
3563 struct st_t *stp;
3564
3565 stp = __prpstk[__prpsti];
3566 if (stp != NULL)
3567 {
3568 __dbg_msg(".. pop nested stmt stack to %d at %s\n", __prpsti,
3569 __bld_lineloc(__xs, stp->stfnam_ind, stp->stlin_cnt));
3570 }
3571 else
3572 {
3573 __dbg_msg(".. pop NULL nested stmt stack to %d\n", __prpsti);
3574 }
3575 }
3576 --- */
3577 }
3578
3579 /*
3580 * push a nested named block statement - this is for disable processing
3581 * and only used during preparation
3582 */
__push_nbstk(struct st_t * stp)3583 extern void __push_nbstk(struct st_t *stp)
3584 {
3585 if (++__nbsti >= MAXPRPSTNEST)
3586 __sgfterr(318, "named blocks nested too deeply (%d)", MAXPRPSTNEST);
3587 __nbstk[__nbsti] = stp;
3588 }
3589
3590 /*
3591 * pop a nested named block statement
3592 */
__pop_nbstk(void)3593 extern void __pop_nbstk(void)
3594 {
3595 /* named blocks during prep. also should not undeflow */
3596 if (__nbsti < 0) __misc_sgfterr(__FILE__, __LINE__);
3597 __nbsti--;
3598 }
3599
3600 /*
3601 * prepare case statement for simulation
3602 */
prep_case(struct st_t * stp)3603 static void prep_case(struct st_t *stp)
3604 {
3605 register struct csitem_t *csip;
3606 struct csitem_t *dflt_csip;
3607
3608 if (stp->stnxt != NULL) push_prpstmt(stp->stnxt);
3609
3610 dflt_csip = stp->st.scs.csitems;
3611 /* this will move up stack to add goto after ending stp */
3612 for (csip = dflt_csip->csinxt; csip != NULL; csip = csip->csinxt)
3613 csip->csist = __prep_lstofsts(csip->csist, TRUE, FALSE);
3614
3615 /* this will move up stack to connect ending stnxt to next exec. place */
3616 if (dflt_csip->csist != NULL)
3617 dflt_csip->csist = __prep_lstofsts(dflt_csip->csist, TRUE, FALSE);
3618 if (stp->stnxt != NULL) pop_prpstmt();
3619 }
3620
3621 /*
3622 * prepare a declare control
3623 * tricky because #<> #<> #<> etc [stmt] legal
3624 */
static void prep_dctrl(struct st_t *stp)
{
 register struct st_t *stp2;
 struct delctrl_t *dctp;
 struct st_t *last_stp;

 dctp = stp->st.sdc;
 /* mark interactive delay controls so dces are freed when done */
 if (__iact_state) dctp->dc_iact = TRUE;
 cnv_cmpdctl_todu(stp, dctp);
 /* if no statement just prepare expr. - stnxt correct */
 if (dctp->actionst == NULL) return;

 /* for #[d1] #[d2] #[d3] ... <stmt> chain, add goto to end only */
 last_stp = NULL;
 for (stp2 = dctp->actionst;; stp2 = stp2->st.sdc->actionst)
  {
   /* keep going until delay control has no action statement or */
   /* a non delay control action statement */
   /* case "#10 begin #20 ..." - is not delay control chain */
   if (stp2 == NULL || stp2->stmttyp != S_DELCTRL || stp2->st_unbhead)
    break;
   /* convert each delay control in the chain as it is passed */
   dctp = stp2->st.sdc;
   cnv_cmpdctl_todu(stp2, dctp);

   /* DBG remove --
   if (__debug_flg)
    {
     __dbg_msg("AT %s %s - STMT PREP %04d (%s)\n",
      __bld_lineloc(__xs, stp2->stfnam_ind, stp2->stlin_cnt),
      __inst_mod->msym->synam, __to_sttyp(__xs2, stp2->stmttyp));
    }
   --- */

   last_stp = stp2;
  }
 if (stp2 == NULL)
  {
   __sgfwarn(562, "INTERNAL - delay control chain does not end with stmt.");
   /* unrecognized delay control chain */
   if (last_stp == NULL) __misc_terr(__FILE__, __LINE__);
   /* fall back to the last delay control seen in the chain */
   stp2 = last_stp;
  }
 /* finally, just add goto from stp2 to original statement next */
 if (stp->stnxt != NULL) push_prpstmt(stp->stnxt);
 stp2 = __prep_lstofsts(stp2, TRUE, TRUE);
 if (stp->stnxt != NULL) pop_prpstmt();
}
3672
3673 /*
3674 * convert a delay control CMP LST form to run tim delay
3675 */
static void cnv_cmpdctl_todu(struct st_t *stp, struct delctrl_t *dctp)
{
 register int32 tei;
 /* gwrk is a scratch gate rec so gate delay prep routine can be reused */
 struct gate_t gwrk;
 /* tmpsym is a scratch symbol carrying cur file/line for error messages */
 struct sy_t tmpsym;

 /* notice for delay controls schd event tevs field not used */
 if (dctp->dctyp == DC_DELAY || dctp->dctyp == DC_RHSDELAY)
  {
   __add_dctldel_pnp(stp);
   tmpsym.syfnam_ind = (word32) __sfnam_ind;
   tmpsym.sylin_cnt = __slin_cnt;
   __prep_delay(&gwrk, dctp->dc_du.pdels, FALSE, FALSE,
    "procedural delay control", FALSE, &tmpsym, FALSE);
   /* prep delay sets this flag when it saw a negative delay value */
   if (__nd_neg_del_warn)
    {
     __sgferr(974, "delay control negative delay illegal (0 used)");
     __nd_neg_del_warn = FALSE;
    }
   /* copy the prepared delay rep/union back into the delay control */
   dctp->dc_delrep = gwrk.g_delrep;
   dctp->dc_du = gwrk.g_du;
  }
 else
  {
   /* event control form - build the dce lists from the event expr */
   prep_event_dctrl(dctp);
   /* need to alloc and init scheduled tevs table */
   dctp->dceschd_tevs = (i_tev_ndx *)
    __my_malloc(__inst_mod->flatinum*sizeof(i_tev_ndx));
   for (tei = 0; tei < __inst_mod->flatinum; tei++)
    dctp->dceschd_tevs[tei] = -1L;
  }
}
3708
3709 /*
3710 * for every value in expr. add the dc list to appropriate wire
3711 */
prep_event_dctrl(struct delctrl_t * dctp)3712 static void prep_event_dctrl(struct delctrl_t *dctp)
3713 {
3714 register struct expr_t *xp;
3715 struct expr_t *evx;
3716 struct paramlst_t *pmp;
3717
3718 /* must change delay representation to DT_1X */
3719 pmp = dctp->dc_du.pdels;
3720 evx = pmp->plxndp;
3721 /* first free param list form */
3722 __my_free((char *) pmp, sizeof(struct paramlst_t));
3723 dctp->dc_du.d1x = evx;
3724 dctp->dc_delrep = DT_1X;
3725
3726 /* SJM 06/28/05 - for degenerate form no dces to build but must catch */
3727 /* nil event expr */
3728 if (evx == NULL)
3729 {
3730 __sgfwarn(3139,
3731 "implicit event control - no events in statement - will never trigger");
3732 return;
3733 }
3734
3735 if (evx->optyp != OPEVOR && evx->optyp != OPEVCOMMAOR)
3736 { bld_ev_dces(evx, dctp); return; }
3737 /* notice evor tree must associate left to right - i.e. evor chain */
3738 /* extends down left links */
3739 for (xp = evx;;)
3740 {
3741 bld_ev_dces(xp->ru.x, dctp);
3742 if (xp->lu.x->optyp != OPEVOR && xp->lu.x->optyp != OPEVCOMMAOR)
3743 {
3744 /* left is bottom of tree */
3745 bld_ev_dces(xp->lu.x, dctp);
3746 break;
3747 }
3748 xp = xp->lu.x;
3749 }
3750 }
3751
3752 /*
3753 * build the event control dcevnt list element(s) for one event control
3754 *
3755 * know xp is not evor - main expr of operand of evor or wait simple expr
3756 * this cannot assume any itree loc.
3757 *
3758 * for constant bit select, ID, or global do not need expr.
3759 * otherwise use normal variable in expr. change mechanism but before
3760 * triggering armed evaluate expression and see if correct edge
3761 * aux dce_expr record contains previous entire expr. value
3762 */
static void bld_ev_dces(struct expr_t *xp, struct delctrl_t *dctp)
{
 int32 biti, bitj, e_val;
 word32 *wp;
 struct net_t *np;
 struct expr_t *endp, *ndx, *idndp;
 struct gref_t *grp;

 __cur_dce_expr = NULL;
 /* know evor must be at top and associates right (right tree) */
 e_val = NOEDGE;
 if (xp->optyp == OPNEGEDGE || xp->optyp == OPPOSEDGE)
  {
   if (xp->optyp == OPNEGEDGE) e_val = E_NEGEDGE;
   else if (xp->optyp == OPPOSEDGE) e_val = E_POSEDGE;
   else __case_terr(__FILE__, __LINE__);

   endp = xp->lu.x;
   /* -1,-1 means entire wire (edge detect on low bit) */
   biti = bitj = -1;
   if (endp->optyp == LSB)
    {
     idndp = endp->lu.x;
     np = idndp->lu.sy->el.enp;
     /* array never scalared and if not scalared need dce expr eval */
     /* this will force expr eval for array index (not bsel) */
     if (!np->vec_scalared) goto expr_edge;

     ndx = endp->ru.x;
     if (ndx->optyp == NUMBER)
      {
       /* this can be reg constant out of range of x */
       /* becomes entire range */
       /* only use the constant index if 2nd contab word (x/z part) is 0 */
       wp = &(__contab[ndx->ru.xvi]);
       if (wp[1] == 0L) biti = bitj = (int32) wp[0];
      }
     else if (ndx->optyp == ISNUMBER)
      {
       /* per instance (IS) constant index - -2 form, contab ndx saved */
       __isform_bi_xvi = ndx->ru.xvi;
       biti = -2;
       bitj = 0;
      }
     else goto expr_edge;
    }
   else if (endp->optyp == ID || endp->optyp == GLBREF)
    {
     idndp = endp;
     np = idndp->lu.sy->el.enp;
     /* since using low bit only vectored wire do not need expr. */
     /* unindexed array illegal in rhs expr. */
    }
   else goto expr_edge;

   /* simple edge form - link one dce directly onto the net */
   grp = (idndp->optyp == GLBREF) ? idndp->ru.grp : NULL;
   linkon_dce(np, biti, bitj, dctp, e_val, grp);
   return;

expr_edge:
   /* general edge expr - aux dce expr rec holds prev entire expr value */
   __cur_dce_expr = (struct dce_expr_t *)
    __my_malloc(sizeof(struct dce_expr_t));
   __cur_dce_expr->edgxp = endp;
   __cur_dce_expr->bp = NULL;
   __cur_dce_expr->mast_dcep = NULL;
   /* this will set global dce expr for each allocated in bld routine */
   bld_evxpr_dces(xp, dctp, e_val);
   __cur_dce_expr = NULL;
   return;
  }

 /* LOOKATME - slight bug here - trigger on variable change */
 /* instead of expression change - rare (eval. to 1 bit logical mostly) */
 /* cases where there is a difference (sending question to P1364 committee */
 /* allocate for every variable including indices in expr. */

 bld_evxpr_dces(xp, dctp, FALSE);
}
3838
3839 /*
 * build and link on dce events for 1 normal (non evor) expression but can be
3841 * many dces since 1 per variable for non edge expr. that is handled here
3842 * need itree place since called while running
3843 */
static void bld_evxpr_dces(struct expr_t *xp, struct delctrl_t *dctp,
 int32 eval)
{
 struct net_t *np;
 int32 biti, bitj;
 word32 *wp;
 struct expr_t *idndp, *ndx;
 struct gref_t *grp;

 /* recursive walk: link one dce per variable reference in the expr */
 switch ((byte) xp->optyp) {
  case GLBREF:
   /* XMR variable: whole variable (-1,-1 range), grp gives target */
   idndp = xp;
   np = xp->lu.sy->el.enp;
   linkon_dce(np, -1, -1, dctp, eval, idndp->ru.grp);
   break;
  case ID:
   /* local variable: dce on the entire variable */
   idndp = xp;
   np = xp->lu.sy->el.enp;
   linkon_dce(np, -1, -1, dctp, eval, (struct gref_t *) NULL);
   break;
  case NUMBER: case ISNUMBER: case REALNUM: case ISREALNUM: case OPEMPTY:
   /* constants never change - no dce needed */
   break;
  case LSB:
   /* SJM - 07/02/00 - arrays also here if constant index makes range form */
   idndp = xp->lu.x;
   np = idndp->lu.sy->el.enp;
   ndx = xp->ru.x;
   biti = bitj = -1;
   if (ndx->optyp == NUMBER)
    {
     /* know if out of range or x/z - will be all x value */
     wp = &(__contab[ndx->ru.xvi]);
     /* usable constant select only when b (x/z) word all zero */
     if (wp[1] == 0L) biti = bitj = (int32) wp[0];
    }
   else if (ndx->optyp == ISNUMBER)
    {
     /* IS (per instance) index - biti -2 marks form, index in global */
     __isform_bi_xvi = ndx->ru.xvi;
     biti = -2;
     bitj = 0;
    }
   else
    {
     /* notice for monitor and dctrl event change, variable here is legal */
     /* and implies change for index and trigger on all bits of variable */
     bld_evxpr_dces(ndx, dctp, eval);
    }
   /* SJM - 07/03/00 - for arrays need index, i.e. value but not array */
   /* index scalared - works since no way to refer to both bit and index */
   if (biti != -1 && !np->n_isarr && !np->vec_scalared) biti = bitj = -1;
   grp = (idndp->optyp == GLBREF) ? idndp->ru.grp : NULL;
   linkon_dce(np, biti, bitj, dctp, eval, grp);
   break;
  case PARTSEL:
   idndp = xp->lu.x;
   np = idndp->lu.sy->el.enp;
   ndx = xp->ru.x;
   /* know part select never IS, will not get here if x/z or out of rng */
   biti = __contab[ndx->lu.x->ru.xvi];
   bitj = __contab[ndx->ru.x->ru.xvi];
   /* non scalared vector: must watch the whole variable */
   if (!np->vec_scalared) biti = bitj = -1;
   grp = (idndp->optyp == GLBREF) ? idndp->ru.grp : NULL;
   linkon_dce(np, biti, bitj, dctp, eval, grp);
   break;
  case FCALL:
   {
    register struct expr_t *fax;

    /* if any args of system or user functions chg, monitor triggers */
    /* notice $time function do not have arguments */
    for (fax = xp->ru.x; fax != NULL; fax = fax->ru.x)
     bld_evxpr_dces(fax->lu.x, dctp, eval);
   }
   break;
  case LCB:
   {
    register struct expr_t *fax;

    /* concatenate: recurse into every component */
    for (fax = xp->ru.x; fax != NULL; fax = fax->ru.x)
     bld_evxpr_dces(fax->lu.x, dctp, eval);
   }
   break;
  default:
   /* ordinary operator: recurse into both operand subtrees */
   if (xp->lu.x != NULL) bld_evxpr_dces(xp->lu.x, dctp, eval);
   if (xp->ru.x != NULL) bld_evxpr_dces(xp->ru.x, dctp, eval);
   break;
 }
}
3931
3932 /*
3933 * for dces: to move from expr. reference to target where var. stored
3934 * -- if xmr, xmr ref. to target by calling xmrpush refgrp to targ(grp)
3935 * [notice np is right but itree loc. wrong]
3936 * to move from target variable location back to expr. ref.
3937 * -- if xmr, xmr target to ref cal xmrpush targ to ref(xmrtyp, npu1)
3938 */
3939
3940 /*
3941 * link the delay event control element for wire np
3942 * this builds the dcelst on np that is never removed
3943 *
3944 * when called __inst_mod is module ref. in, (not declare in target)
3945 * and np is wire possibly xmr target but not itree context only mod here
3946 * if ref. expr is xmr, grp non nil
3947 */
/*
 * link a delay event control dce onto net np
 *
 * dispatches XMR references (grp non nil) to the XMR handler; otherwise
 * builds a simple in-module dce where the referencing module and the
 * declaring module are both __inst_mod (no itree context needed)
 */
static void linkon_dce(struct net_t *np, int32 biti, int32 bitj,
 struct delctrl_t *dctp, int32 e_val, struct gref_t *grp)
{
 struct dcevnt_t *new_dcep;

 /* case 1: any hierarchical (XMR) reference including XMR target */
 /* notice because of preprocessing never need 2 steps from target to ref */
 if (grp != NULL)
  {
   xmr_linkon_dce(np, biti, bitj, dctp, e_val, grp);
   return;
  }

 /* case 2: plain local reference within __inst_mod */
 new_dcep = linkon2_dce(np, biti, bitj, dctp, e_val, FALSE, __inst_mod,
  __inst_mod);
 new_dcep->dce_xmrtyp = XNP_LOC;

 /* interactively (debugger) added event controls need extra init */
 if (dctp->dc_iact) init_iact_dce(new_dcep, dctp, NULL);
}
3966
3967 /*
3968 * initialize interactive only dces
3969 *
3970 * expects inst mod to be set to module where net declared in
3971 * SJM 01/14/03 - LOOKATME - think there is reason need to pass grp
3972 *
3973 * SJM 05/04/05 - because putting var storage (np.nva) in .bss section
3974 * for cver-cc, this is only for interactive init using interpreter
3975 * after linking in .bss .so lib var values, now initializing by net in mod
3976 */
init_iact_dce(struct dcevnt_t * dcep,struct delctrl_t * dctp,struct gref_t * grp)3977 static void init_iact_dce(struct dcevnt_t *dcep, struct delctrl_t *dctp,
3978 struct gref_t *grp)
3979 {
3980 struct net_t *np;
3981 struct dceauxlst_t *dclp;
3982 struct mod_t *ref_mdp;
3983
3984 np = dcep->dce_np;
3985
3986 /* AIV 01/04/07 - init dce expr was using the wrong inst for */
3987 /* dce with more than one inst and was also skipping init for the */
3988 /* dce_expr for the one instance case */
3989 if (dcep->dce_expr != NULL) init_dce_exprval(dcep);
3990 else
3991 {
3992 if (dcep->prevval.wp != NULL)
3993 {
3994 ref_mdp = dcep_ref_mod(dcep);
3995 init_dce_prevval(dcep, ref_mdp);
3996 }
3997 }
3998
3999 if (dctp->dc_iact)
4000 {
4001 /* add to iact list for this statement - will be linked to hctrl */
4002 dclp = (struct dceauxlst_t *) __my_malloc(sizeof(struct dceauxlst_t));
4003 dclp->ldcep = dcep;
4004 dclp->dclnxt = __iact_dcehdr;
4005 __iact_dcehdr = dclp;
4006 /* SJM 05/03/03 - LOOKATME - think iact XMR event controls converted */
4007 /* to per inst. is that true? */
4008 dcep->iact_itp = __inst_ptr;
4009
4010 /* since no dce, no loads, and no dmpvars must always turn chg store on */
4011 if (!np->nchg_nd_chgstore)
4012 {
4013 /* this also turn regen of net's decl iops from dce if -O on */
4014 __dce_turn_chg_store_on(__inst_mod, dcep, TRUE);
4015 }
4016 /* SJM 04/14/04 - even if need chg store if dumpvars in future but that */
4017 /* can happen if need only has dce added from iact code and dumpvars */
4018 np->nchg_has_dces = TRUE;
4019 }
4020 }
4021
4022 /*
4023 * routine to handle all cases where dce expr. is xmr (global ref. expr)
4024 * ref. in module __inst_mod - these all always one instance forms
4025 * notice all rooted dce xmrs are 1inst forms also npps
4026 */
static void xmr_linkon_dce(struct net_t *np, int32 biti, int32 bitj,
 struct delctrl_t *dctp, int32 e_val, struct gref_t *grp)
{
 register int32 ii;
 struct dcevnt_t *dcep;
 struct itree_t *itp;
 struct mod_t *ref_mdp;

 /* handle xmr */
 if (!grp->is_rooted)
  {
   /* SJM 05/04/03 - for non rooted where need prev. val, must set */
   /* module instance context for any instance such as inst 0 */
   /* (many instance and do not need to know here) */
   dcep = linkon2_dce(np, biti, bitj, dctp, e_val, FALSE, grp->gin_mdp,
    grp->targmdp);

   /* record direction of the relative XMR so triggering can map insts */
   if (grp->upwards_rel)
    { dcep->dceu.dcegrp = grp; dcep->dce_xmrtyp = XNP_UPXMR; }
   /* downward case */
   else { dcep->dceu.dcegrp = grp; dcep->dce_xmrtyp = XNP_DOWNXMR; }
   /* SJM 05/08/03 - now initialize only after complete dce built */
   if (dctp->dc_iact) init_iact_dce(dcep, dctp, grp);
   return;
  }

 /* rooted xmr handled here - one 1inst dce per instance of ref module */
 for (ii = 0; ii < __inst_mod->flatinum; ii++)
  {
   /* itp is itree loc. of xmr reference */
   itp = __inst_mod->moditps[ii];
   /* reference itree loc. */
   __push_itstk(itp);
   ref_mdp = __inst_ptr->itip->imsym->el.emdp;
   /* xmr target (declared in) - pushes target loc on top of ref loc */
   __xmrpush_refgrp_to_targ(grp);
   /* SJM 05/06/03 - BEWARE - TOS must be def (targ) and TOS-1 must be ref */
   dcep = linkon2_dce(np, biti, bitj, dctp, e_val, TRUE, ref_mdp, __inst_mod);
   /* if one inst form needs xmr target (where wire decled) itree loc */
   dcep->dce_1inst = TRUE;
   /* match is target itree loc (current TOS before first pop) */
   dcep->dce_matchitp = __inst_ptr;
   __pop_itstk();
   /* after pop TOS is back to the reference itree loc */
   dcep->dce_refitp = __inst_ptr;
   /* SJM 05/07/03 - must set as rooted dce XMR */
   __pop_itstk();
   dcep->dce_xmrtyp = XNP_RTXMR;

   /* SJM 05/08/03 - now initialize only after complete dce built */
   if (dctp->dc_iact) init_iact_dce(dcep, dctp, grp);
  }
}
4079
4080 /*
4081 * actually link on event dce - other routines for monitor and dumpvars
4082 * created dce is returned
4083 *
4084 * assumes inst mod set to declared in module context
4085 *
4086 * if oninst, know the itree context of the one inst targed (declared in) set
4087 * this allocates any old value storage but does not initialize it
4088 *
4089 * SJM 05/07/03 - now since caller for XMR sets some dce fields must only
4090 * set fields here, can't call routines that use dce fields
4091 * this was cause of most of the XMR event control dce bugs
4092 */
static struct dcevnt_t *linkon2_dce(struct net_t *np, int32 biti, int32 bitj,
 struct delctrl_t *dctp, int32 e_val, int32 oneinst, struct mod_t *ref_mdp,
 struct mod_t *decl_mdp)
{
 struct dcevnt_t *dcep;
 struct dceauxlst_t *dclp;

 /* allocate, init, and fill the fields */
 dcep = __alloc_dcevnt(np);

 /* if unused for non complicated edge expression will be nil */
 if (__cur_dce_expr != NULL)
  {
   dcep->dce_expr = __cur_dce_expr;
   /* DBG remove -- */
   if (dcep->dce_1inst) __misc_terr(__FILE__, __LINE__);
   /* --- */

   /* notice for dce expr, old value only for each ref inst since */
   /* must eval expr in ref inst */
   /* first dce of the expr becomes master and owns the per ref inst */
   /* 1 byte previous value table */
   if (__cur_dce_expr->mast_dcep == NULL)
    {
     __cur_dce_expr->mast_dcep = dcep;
     dcep->dce_expr->bp = (byte *) __my_malloc(ref_mdp->flatinum);
    }
  }

 /* biti -1: whole variable; -2: IS (per inst) index; else bit range */
 if (biti == -1) dcep->dce_typ = DCE_INST;
 else
  {
   dcep->dce_typ = DCE_RNG_INST;
   dcep->dci1 = biti;
   if (biti == -2)
    {
     /* for one inst. form must access -2 form to actual index */
     /* because know itree context pushed for one inst - from now on not IS */
     if (oneinst)
      {
       /* SJM 10/12/04 - because contab is realloc must be index */
       dcep->dci1 = dcep->dci2.i = __contab[__isform_bi_xvi + 2*__inum];
      }
     /* since correct for direction here will be right bits */
     /* has normal user error of connecting opposite direction bus problem */
     else dcep->dci2.xvi = __isform_bi_xvi;
    }
   else dcep->dci2.i = bitj;
  }
 /* 07/01/00 - just added a dce - also need change store */
 /* 07/24/00 - has dces only on if reg for immediate propagate/wakeup */
 /* but recording bits set after here */
 if (!dctp->dc_iact)
  {
   if (np->ntyp >= NONWIRE_ST) np->nchg_has_dces = TRUE;
   np->nchg_nd_chgstore = TRUE;
  }

 /* link onto front of d ctrl list for np */
 dcep->dcenxt = np->dcelst;
 np->dcelst = dcep;
 dcep->st_dctrl = dctp;
 if (e_val != NOEDGE)
  {
   dcep->dce_edge = TRUE;
   dcep->dce_edgval = (word32) e_val;
  }
 /* if has edge needs per inst. old value table for last value */
 /* if oneinst know right itree loc. set */
 /* SJM 05/09/03 - if have 1 bit edge expr, previous value not needed */
 if (dcep->dce_expr == NULL)
  {
   if (oneinst)
    {
     /* notice this does need inst context */
     __alloc_1instdce_prevval(dcep);
    }
   else alloc_dce_prevval(dcep, decl_mdp);
  }

 /* DBG remove ---
 if (__debug_flg)
  {
   if (dctp->dc_iact) strcpy(__xs, " (interactive)");
   else strcpy(__xs, "");
   if (oneinst) strcat(__xs, " (xmr)");
   __dbg_msg(
    ".. ref. mod %s decl. %s adding net %s delay ctrl type %d%s ([%d:%d])\n",
    ref_mdp->msym->synam, decl_mod->msym->synam, np->nsym->synam,
    dcep->dce_typ, __xs, dcep->dci1, dcep->dci2.i);
  }
 --- */
 if (dctp->dc_iact)
  {
   /* add to iact list for this statement - will be linked to hctrl */
   dclp = (struct dceauxlst_t *) __my_malloc(sizeof(struct dceauxlst_t));
   dclp->ldcep = dcep;
   dclp->dclnxt = __iact_dcehdr;
   __iact_dcehdr = dclp;
   /* for after sim start iact dctrl add, know the iact itree loc set */
   dcep->iact_itp = __inst_ptr;
  }
 return(dcep);
}
4195
4196 /*
4197 * routine to turn chg store on when new dce added but previously chg store
4198 * off because no dces, and no loads, and not dumpvars
4199 *
4200 * SJM 02/08/03 - for -O this only regens any needed proc insns but
4201 * caller must regen the net with the new dces added - proc regen
4202 * is only for case where net was not compiled in proc code
4203 * case where
4204 */
extern void __dce_turn_chg_store_on(struct mod_t *in_mdp,
 struct dcevnt_t *dcep, int32 all_insts)
{
 register int32 ii;
 struct mod_t *mdp;
 struct net_t *np;

 /* determine the module the dce's net is declared in */
 if (dcep->dce_1inst)
  {
   mdp = dcep->dce_matchitp->itip->imsym->el.emdp;
   /* DBG remove -- */
   if (in_mdp != mdp) __misc_terr(__FILE__, __LINE__);
   /* --- */
  }
 else if (dcep->dce_xmrtyp != XNP_LOC) mdp = dcep->dceu.dcegrp->targmdp;
 else mdp = in_mdp;
 np = dcep->dce_np;

 /* SJM 01/06/03 - fix interpreter bug since need chg store on if */
 /* net had no dces, no nlds, and dumpvars was off */
 np->nchg_nd_chgstore = TRUE;

 /* SJM 04/14/04 - for iact added dce if dumpvars and unc. need this on */
 np->nchg_has_dces = TRUE;

 /* NOTE(review): computed mdp is only used by the DBG check above; the */
 /* loop below bounds on __inst_mod->flatinum - appears to assume */
 /* __inst_mod is the net's declaring module here - confirm for XMR dces */
 if (all_insts)
  {
   /* SJM 01/06/03 - LOOKATME - is is possible to only turn on this inst? */
   for (ii = 0; ii < __inst_mod->flatinum; ii++)
    { np->nchgaction[ii] &= ~(NCHG_ALL_CHGED); }
  }
 else np->nchgaction[__inst_ptr->itinum] &= ~(NCHG_ALL_CHGED);
}
4238
4239 /*
4240 * allocate a delay control event record
4241 *
4242 * this assumes non XMR and non 1inst - if not caller must set
4243 * SJM 05/07/03 - must set net at beginning since needed by following code
4244 */
__alloc_dcevnt(struct net_t * np)4245 extern struct dcevnt_t *__alloc_dcevnt(struct net_t *np)
4246 {
4247 struct dcevnt_t *dcep;
4248
4249 dcep = (struct dcevnt_t *) __my_malloc(sizeof(struct dcevnt_t));
4250 dcep->dce_np = NULL;
4251
4252 dcep->dce_typ = DCE_NONE;
4253 dcep->dce_np = np;
4254
4255 dcep->dce_xmrtyp = XNP_LOC;
4256 dcep->dce_1inst = FALSE;
4257 dcep->dce_tfunc = FALSE;
4258 /* SJM 07/22/00 - for re-entrant problem and vpi control off - never off */
4259 /* unless set by vpi (sim) control or call back entry */
4260 /* SJM 06/13/02 - now also interpreter always filters dce for on/off */
4261 /* because (f)monitor and qcaf now added during design load (prep) */
4262 /* for these builder must explicitlyturn off */
4263 dcep->dce_off = FALSE;
4264 dcep->is_fmon = FALSE;
4265 dcep->dce_nomonstren = TRUE;
4266 dcep->dci1 = -1;
4267 dcep->dci2.i = -1;
4268 dcep->prevval.wp = NULL;
4269 dcep->dce_edge = FALSE;
4270 dcep->dce_edgval = NOEDGE;
4271 dcep->st_dctrl = NULL;
4272 dcep->dceu.dcegrp = NULL;
4273 dcep->dceu2.dce_fmon = NULL;
4274 dcep->dce_matchitp = NULL;
4275 dcep->dce_refitp = NULL;
4276 dcep->iact_itp = NULL;
4277 dcep->dcenxt = NULL;
4278 dcep->dce_expr = NULL;
4279 return(dcep);
4280 }
4281
4282 /*
4283 * prepare a disable inside a function by setting to next statement to goto
4284 * inside function disable are like c continue and are just gotos
4285 */
prep_func_dsable(struct st_t * stp)4286 static void prep_func_dsable(struct st_t *stp)
4287 {
4288 register int32 i;
4289 struct expr_t *dsxp;
4290 struct sy_t *syp;
4291 struct task_t *dsatskp;
4292
4293 dsxp = stp->st.sdsable.dsablx;
4294 syp = dsxp->lu.sy;
4295 /* disable of func. indicated by nil next statmenet - use fcall stack */
4296 if (syp->sytyp == SYM_F)
4297 {
4298 stp->st.sdsable.func_nxtstp = NULL;
4299 return;
4300 }
4301
4302 /* must be disabling named block */
4303 if (syp->sytyp != SYM_LB || syp->el.etskp->tsktyp == FORK
4304 || dsxp->optyp == GLBREF) __misc_sgfterr(__FILE__, __LINE__);
4305 dsatskp = syp->el.etskp;
4306 /* know every named block when entered in function name block is stacked */
4307 for (i = __nbsti; i >= 0; i--)
4308 {
4309 if (__nbstk[i]->st.snbtsk == dsatskp)
4310 {
4311 /* this can be nil */
4312 stp->st.sdsable.func_nxtstp = __nbstk[i]->stnxt;
4313 return;
4314 }
4315 }
4316 /* know always enclosing, or will not get here - earlier error */
4317 __case_terr(__FILE__, __LINE__);
4318 }
4319
4320 /*
4321 * return T if disable targsyp above cursytp
4322 * i.e. is upward break type disable
4323 * if any named block on path sets nbonpath to T
4324 *
4325 * because of xmr disabling of named begin-end blocks need thread so cannot
4326 * use goto except inside function
4327 */
__is_upward_dsable_syp(struct sy_t * targsyp,struct symtab_t * cursytp,int32 * nbonpath)4328 extern int32 __is_upward_dsable_syp(struct sy_t *targsyp,
4329 struct symtab_t *cursytp, int32 *nbonpath)
4330 {
4331 register struct symtab_t *sytp;
4332 struct sy_t *syp;
4333
4334 *nbonpath = FALSE;
4335 if (targsyp->sytyp != SYM_LB) return(FALSE);
4336 /* notice top of upward chain will be module, but disable of module */
4337 /* illegal so will return match top of chain module */
4338 for (sytp = cursytp; sytp != NULL; sytp = sytp->sytpar)
4339 {
4340 syp = sytp->sypofsyt;
4341 if (targsyp == syp) return(TRUE);
4342 if (syp->sytyp == SYM_LB) *nbonpath = TRUE;
4343 }
4344 return(FALSE);
4345 }
4346
4347 /*
4348 * FORCE/ASSIGN/MONITOR PREPARATION ADD DCE ROUTINES
4349 */
4350
4351 /*
4352 * for qcaf stmts, build the per lhs cat element
4353 * for reg per cat component dce lists are per inst and for wire
4354 * they are per bit of cat expr per inst
4355 */
bld_init_qcaf_dce_lstlst(struct st_t * stp)4356 static void bld_init_qcaf_dce_lstlst(struct st_t *stp)
4357 {
4358 register int32 ii;
4359 int32 ibase;
4360 struct expr_t *lhsx;
4361 struct expr_t *catndp, *catlhsx;
4362 struct dceauxlstlst_t *dcllp, *end_dcllp;
4363
4364 lhsx = stp->st.sqca->qclhsx;
4365 if (lhsx->optyp != LCB)
4366 {
4367 /* one list of peri lists */
4368 dcllp = (struct dceauxlstlst_t *)
4369 __my_malloc(sizeof(struct dceauxlstlst_t));
4370 stp->st.sqca->rhs_qcdlstlst = dcllp;
4371 if (stp->st.sqca->regform)
4372 {
4373 /* for reg list field is peri table of dce lists - starting at empty */
4374 dcllp->dcelsttab = (struct dceauxlst_t **)
4375 __my_malloc(__inst_mod->flatinum*sizeof(struct dceauxlst_t *));
4376
4377 for (ii = 0; ii < __inst_mod->flatinum; ii++)
4378 dcllp->dcelsttab[ii] = NULL;
4379 }
4380 else
4381 {
4382 /* for wire list field is peri/bit tab of dce lists - starts at empty */
4383 ibase = __inst_mod->flatinum*lhsx->szu.xclen;
4384 dcllp->dcelsttab = (struct dceauxlst_t **)
4385 __my_malloc(ibase*sizeof(struct dceauxlst_t *));
4386 for (ii = 0; ii < ibase; ii++) dcllp->dcelsttab[ii] = NULL;
4387 }
4388 /* since non concat, only one element */
4389 dcllp->dcelstlstnxt = NULL;
4390 }
4391 else
4392 {
4393 end_dcllp = NULL;
4394 for (catndp = lhsx->ru.x; catndp != NULL; catndp = catndp->ru.x)
4395 {
4396 catlhsx = catndp->lu.x;
4397
4398 /* allocate list of peri lists element */
4399 dcllp = (struct dceauxlstlst_t *)
4400 __my_malloc(sizeof(struct dceauxlstlst_t));
4401 if (end_dcllp == NULL) stp->st.sqca->rhs_qcdlstlst = dcllp;
4402 else end_dcllp->dcelstlstnxt = dcllp;
4403 end_dcllp = dcllp;
4404
4405 if (stp->st.sqca->regform)
4406 {
4407 /* reg list field is peri table of dce lists - starting at empty */
4408 dcllp->dcelsttab = (struct dceauxlst_t **)
4409 __my_malloc(__inst_mod->flatinum*sizeof(struct dceauxlst_t *));
4410 for (ii = 0; ii < __inst_mod->flatinum; ii++)
4411 dcllp->dcelsttab[ii] = NULL;
4412 }
4413 else
4414 {
4415 ibase = __inst_mod->flatinum*catlhsx->szu.xclen;
4416 /* if not list end, next pass will set */
4417 dcllp->dcelsttab = (struct dceauxlst_t **)
4418 __my_malloc(ibase*sizeof(struct dceauxlst_t *));
4419 for (ii = 0; ii < ibase; ii++) dcllp->dcelsttab[ii] = NULL;
4420 }
4421 dcllp->dcelstlstnxt = NULL;
4422 }
4423 }
4424 }
4425
4426 /*
4427 * prepare a quasi-continuous assign or force of reg (same as qc assign)
4428 * this is for both reg force and reg assign
4429 *
4430 * SJM 06/14/02 - same as old exec qc assign for each inst but at prep time
4431 * to build and fill d.s
4432 */
/*
 * prepare a quasi-continuous assign or force of reg (same as qc assign)
 * this is for both reg force and reg assign
 *
 * SJM 06/14/02 - same as old exec qc assign for each inst but at prep time
 * to build and fill d.s
 */
static void prep_qc_assign(struct st_t *stp, int32 is_force)
{
 register struct expr_t *catndp;
 register struct dceauxlstlst_t *dcllp;
 struct expr_t *lhsx;
 struct dceauxlst_t *dcehdr;

 lhsx = stp->st.sqca->qclhsx;
 /* only possibilities are concat and ID */
 /* this builds and initializes the reg assign/frc qcval records */
 if (lhsx->optyp != LCB)
  {
   dcehdr = is_force ? prep_noncat_qc_regforce(stp, lhsx)
    : prep_noncat_qc_assign(stp, lhsx);
   /* only one list of lists element since not lhs concat */
   stp->st.sqca->rhs_qcdlstlst->dcelsttab[__inum] = dcehdr;
   return;
  }

 /* concatenate case - know each lhs component is an entire var */
 dcllp = stp->st.sqca->rhs_qcdlstlst;
 for (catndp = lhsx->ru.x; catndp != NULL;
  catndp = catndp->ru.x, dcllp = dcllp->dcelstlstnxt)
  {
   dcehdr = is_force ? prep_noncat_qc_regforce(stp, catndp->lu.x)
    : prep_noncat_qc_assign(stp, catndp->lu.x);
   dcllp->dcelsttab[__inum] = dcehdr;
  }
}
4465
4466 /*
4467 * prepare a quasi-continuous deassign (same as old exec but at per/inst now)
4468 * inverse of assign and reg only defined for regs
4469 */
prep_qc_deassign(struct st_t * stp)4470 static void prep_qc_deassign(struct st_t *stp)
4471 {
4472 register struct expr_t *catndp;
4473 int32 nd_itpop;
4474 struct expr_t *lhsx, *catlhsx;
4475 struct net_t *np;
4476 struct gref_t *grp;
4477
4478 /* SJM 07/19/02 - was wrongly accessing qconta not qcontdea record */
4479 lhsx = stp->st.sqcdea.qcdalhs;
4480 /* only possibilities are concat and ID */
4481 if (lhsx->optyp != LCB)
4482 {
4483 /* just need to alloc qcval records here - dce list built from assgn */
4484 np = lhsx->lu.sy->el.enp;
4485 if (np->nu2.qcval == NULL)
4486 {
4487 /* SJM 05/23/03 - must alloc in context of XMR */
4488 if (lhsx->optyp == GLBREF)
4489 { grp = lhsx->ru.grp; __xmrpush_refgrp_to_targ(grp); nd_itpop = TRUE; }
4490 else nd_itpop = FALSE;
4491
4492 __alloc_qcval(np);
4493
4494 if (nd_itpop) __pop_itstk();
4495 }
4496 }
4497 else
4498 {
4499 /* concatenate case know lhs full wire - tricky extractions of rhs */
4500 for (catndp = lhsx->ru.x; catndp != NULL; catndp = catndp->ru.x)
4501 {
4502 catlhsx = catndp->lu.x;
4503 np = catlhsx->lu.sy->el.enp;
4504 if (np->nu2.qcval == NULL)
4505 {
4506 /* SJM 05/23/03 - must alloc in context of XMR */
4507 if (lhsx->optyp == GLBREF)
4508 {
4509 grp = lhsx->ru.grp;
4510 __xmrpush_refgrp_to_targ(grp);
4511 nd_itpop = TRUE;
4512 }
4513 else nd_itpop = FALSE;
4514
4515 __alloc_qcval(np);
4516
4517 if (nd_itpop) __pop_itstk();
4518 }
4519 }
4520 }
4521 }
4522
4523 /*
4524 * prep quasi continuous assign for one expr in one inst.
4525 *
4526 * know lhs always entire register - no assign for wires - lhs can be xmr
 * this is called for every inst of the module that contains the stmt
4528 * LOOKATME - do not need stmt since can get from qcval already built
4529 */
prep_noncat_qc_assign(struct st_t * qcstp,struct expr_t * lhsx)4530 static struct dceauxlst_t *prep_noncat_qc_assign(struct st_t *qcstp,
4531 struct expr_t *lhsx)
4532 {
4533 int32 nd_itpop;
4534 struct net_t *np;
4535 struct qcval_t *assgn_qcp;
4536 struct gref_t *grp;
4537
4538 /* SJM 05/29/03 - must allocate and find qcval rec from lhs itree loc */
4539 nd_itpop = FALSE;
4540 if (lhsx->optyp == GLBREF)
4541 {
4542 grp = lhsx->ru.grp;
4543 __xmrpush_refgrp_to_targ(grp);
4544 nd_itpop = TRUE;
4545 }
4546 else if (lhsx->optyp != ID) __case_terr(__FILE__, __LINE__);
4547
4548 np = lhsx->lu.sy->el.enp;
4549 if (np->nu2.qcval == NULL) __alloc_qcval(np);
4550 assgn_qcp = &(np->nu2.qcval[2*__inum + 1]);
4551
4552 /* add qcaf from soruce location of statment */
4553 if (nd_itpop) __pop_itstk();
4554
4555 /* SJM 05/29/03 - must build the qcaf dce in reference not declare context */
4556 __qcaf_dcehdr = NULL;
4557 /* for constant rhs this can be nil */
4558 bld_qcaf_dces(qcstp->st.sqca->qcrhsx, assgn_qcp);
4559
4560 return(__qcaf_dcehdr);
4561 }
4562
4563 /*
4564 * prepare the quasi continuous force for reg variables - just alloc qcval rec
4565 *
4566 * know lhs always entire register
4567 * lhs here can be xmr
4568 * force of entire reg only overrides possible active reg assign
4569 */
prep_noncat_qc_regforce(struct st_t * qcastp,struct expr_t * lhsx)4570 static struct dceauxlst_t *prep_noncat_qc_regforce(struct st_t *qcastp,
4571 struct expr_t *lhsx)
4572 {
4573 int32 nd_itpop;
4574 struct net_t *np;
4575 struct qcval_t *frc_qcp;
4576 struct gref_t *grp;
4577
4578 nd_itpop = FALSE;
4579 /* 05/28/03 - must get fource qc record ptr from lhs decl itree loc */
4580 if (lhsx->optyp == GLBREF)
4581 {
4582 grp = lhsx->ru.grp;
4583 __xmrpush_refgrp_to_targ(grp);
4584 nd_itpop = TRUE;
4585 }
4586 else if (lhsx->optyp != ID) __case_terr(__FILE__, __LINE__);
4587
4588 np = lhsx->lu.sy->el.enp;
4589 /* DBG remove -- */
4590 if (!np->frc_assgn_allocated) __misc_terr(__FILE__, __LINE__);
4591 /* --- */
4592 if (np->nu2.qcval == NULL) __alloc_qcval(np);
4593 frc_qcp = &(np->nu2.qcval[2*__inum]);
4594
4595 /* 05/28/03 - but must bld qcaf dces in ref itree context */
4596 if (nd_itpop) __pop_itstk();
4597
4598 __qcaf_dcehdr = NULL;
4599 /* for constant rhs thsi can be nil */
4600 bld_qcaf_dces(qcastp->st.sqca->qcrhsx, frc_qcp);
4601
4602 return(__qcaf_dcehdr);
4603 }
4604
4605 /*
4606 * allocate a new qc assign value aux. record
4607 * itree location must be set before calling here since needs to know mod in
4608 *
4609 * SJM 12/21/02 - this is per inst of mod net declared in when called as XMR
4610 */
__alloc_qcval(struct net_t * np)4611 extern void __alloc_qcval(struct net_t *np)
4612 {
4613 register int32 i;
4614 register struct qcval_t *qcvalp;
4615
4616 /* AIV 03/09/05 - if force from vpi bit needs to be set */
4617 np->frc_assgn_allocated = TRUE;
4618 if (np->ntyp >= NONWIRE_ST)
4619 {
4620 /* here need 1 qcval per inst. but need 1 for assign and 1 for force */
4621 np->nu2.qcval = (struct qcval_t *)
4622 __my_malloc(2*__inst_mod->flatinum*sizeof(struct qcval_t));
4623
4624 qcvalp = np->nu2.qcval;
4625 for (i = 0; i < 2*__inst_mod->flatinum; i++, qcvalp++) init_qcval(qcvalp);
4626 }
4627 else
4628 {
4629 /* here need 1 per inst bit product */
4630 /* LOOKATME - could have 1 bit per inst. for vectored wires */
4631 np->nu2.qcval = (struct qcval_t *)
4632 __my_malloc(__inst_mod->flatinum*np->nwid*sizeof(struct qcval_t));
4633 qcvalp = np->nu2.qcval;
4634 for (i = 0; i < __inst_mod->flatinum*np->nwid; i++, qcvalp++)
4635 init_qcval(qcvalp);
4636 }
4637 }
4638
4639 /*
4640 * allocate a qcval record
4641 */
init_qcval(struct qcval_t * qcvalp)4642 static void init_qcval(struct qcval_t *qcvalp)
4643 {
4644 qcvalp->qc_active = FALSE;
4645 qcvalp->qc_overridden = FALSE;
4646 qcvalp->qcstp = NULL;
4647 qcvalp->qcrhsbi = -1;
4648 qcvalp->qclhsbi = -1;
4649 qcvalp->lhsitp = NULL;
4650 qcvalp->qcdcep = NULL;
4651 }
4652
4653 /*
4654 * QUASI CONTINUOUS WIRE FORCE/RELEASE PREP ROUTINES
4655 */
4656
4657 /*
4658 * prepare quasi-continuous force on a wire
4659 * possibilities here are wire, constant bit select, part select
4660 * also concat of above
4661 * wire must be scalared and everything decomposed to bits
4662 */
prep_qc_wireforce(struct st_t * stp)4663 static void prep_qc_wireforce(struct st_t *stp)
4664 {
4665 register struct expr_t *catndp;
4666 register struct dceauxlstlst_t *dcllp;
4667 struct expr_t *lhsx, *catlhsx;
4668
4669 /* first evaluate rhs */
4670 lhsx = stp->st.sqca->qclhsx;
4671 /* only possibilities are concat and ID */
4672 /* this builds and initializes the reg assign/frc qcval records */
4673 if (lhsx->optyp != LCB)
4674 {
4675 prep_noncat_qc_wireforce(stp, lhsx, stp->st.sqca->rhs_qcdlstlst);
4676 }
4677 else
4678 {
4679 /* concatenate case know lhs full wire - tricky extractions of rhs */
4680 dcllp = stp->st.sqca->rhs_qcdlstlst;
4681 for (catndp = lhsx->ru.x; catndp != NULL; catndp = catndp->ru.x,
4682 dcllp = dcllp->dcelstlstnxt)
4683 {
4684 catlhsx = catndp->lu.x;
4685 prep_noncat_qc_wireforce(stp, catlhsx, dcllp);
4686 }
4687 }
4688 }
4689
4690 /*
4691 * prepare a quasi-continuous release - only decomposes into lhs exprs
4692 * only scalared wires or selects or cats not regs
4693 * wire force/release is one level only
4694 * called in itree context of release stmt
4695 *
4696 * 06/24/02 - wire release just alloc qcvals for all nets so it must
4697 * decompose into cat elements to get net - for wire all bits need
4698 * qcval even if only some can be forced/released
4699 */
prep_qc_wirerelease(struct st_t * stp)4700 static void prep_qc_wirerelease(struct st_t *stp)
4701 {
4702 register struct expr_t *catndp;
4703 struct expr_t *lhsx, *catlhsx;
4704
4705 lhsx = stp->st.sqcdea.qcdalhs;
4706 /* only possibilities are concat and ID */
4707 if (lhsx->optyp != LCB) prep_noncat_qc_wirerelease(lhsx);
4708 else
4709 {
4710 /* concatenate case know lhs full wire - tricky extractions of rhs */
4711 for (catndp = lhsx->ru.x; catndp != NULL; catndp = catndp->ru.x)
4712 { catlhsx = catndp->lu.x; prep_noncat_qc_wirerelease(catlhsx); }
4713 }
4714 }
4715 /*
4716 * after possible concat unwinding, prepare wire force
4717 *
4718 * wire force is bit by bit unless vectored wire (when only entire wire)
4719 *
4720 * force which is for debugging overrides any wire delay assign
4721 * when wire change happens (wire event process) if force active, no assign
4722 * rhsbi is low bit of possible rhs section select (0 for not concat)
4723 * this is called with stmt itree loc even if lhs xmr and handled push/pop
4724 *
4725 * SJM 11/14/00 - tran channel (inout port) force now is separate routine
4726 * LOOKATME - think could simplify since for wire force always one bit
4727 *
4728 * sjm 12/21/02 - attached to statement so dcllp element must use stmt
4729 * itree inst loc even for XMR lhs expr
4730 */
static void prep_noncat_qc_wireforce(struct st_t *qcfstp, struct expr_t *lhsx,
 struct dceauxlstlst_t *dcllp)
{
 register int32 bi, xbi, ibase;
 int32 biti, bitj;
 struct qcval_t *frc_qcp;
 struct net_t *np;
 struct itree_t *itp;

 /* step 1: get the wire range */
 /* for psel or vector, range is biti down to bitj - for scalar 0,0 */
 /* this computes any xmr new itp but does not push it - nil if not XMR */
 __get_qc_wirrng(lhsx, &np, &biti, &bitj, &itp);

 /* for tran channel wire, no dces - forcing wire forces channel */
 /* LOOKATME - think no need also no need for qcvals */

 /* SJM 07/19/02 - for lhs xmr need target itree loc */
 /* rest needs possible lhs xmr itree context including qcval alloc */
 if (itp != NULL) __push_itstk(itp);

 /* allocate the qcval record for wire if not yet alloced */
 /* also needed for tran channel force */
 /* for XMR attached net per inst values are from pushed defined in mod */
 if (np->nu2.qcval == NULL) __alloc_qcval(np);

 /* 05/28/03 - need to get per bit frc qc record from lhs decl itree cntxt */
 /* base of this instance's slice of the per bit qcval table */
 ibase = __inum*np->nwid;
 if (itp != NULL) __pop_itstk();

 /* tran channel (inout port) wire force handled separately - no dces here */
 if (np->ntraux != NULL) return;

 /* for each forced bit build the rhs change dce list and record its head */
 /* in the stmt's dce table, indexed by inst number and lhs bit offset */
 for (bi = bitj, xbi = 0; bi <= biti; bi++, xbi++)
  {
   /* for normal add dces for every bit */
   frc_qcp = &(np->nu2.qcval[ibase + bi]);

   __qcaf_dcehdr = NULL;
   /* for constant rhs this can be nil */
   /* LOOKATME-could decompose lhs-rhs bit but rare and speed non-critical */
   bld_qcaf_dces(qcfstp->st.sqca->qcrhsx, frc_qcp);

   /* SJM 12/22/02 - per inst here is stmt when lhs XMR */
   dcllp->dcelsttab[__inum*lhsx->szu.xclen + xbi] = __qcaf_dcehdr;
  }
}
4777
4778 /*
4779 * after possible concat unwinding, prepare lhs expr release
4780 * just allocs qcval if needed in case seen before force
4781 */
prep_noncat_qc_wirerelease(struct expr_t * lhsx)4782 static void prep_noncat_qc_wirerelease(struct expr_t *lhsx)
4783 {
4784 int32 biti, bitj;
4785 struct net_t *np;
4786 struct itree_t *itp;
4787
4788 /* get the wire range - using common routine but only need net here */
4789 __get_qc_wirrng(lhsx, &np, &biti, &bitj, &itp);
4790
4791 /* SJM 07/19/02 - for lhs xmr need target itree loc */
4792 /* rest needs possible lhs xmr itree context including qcval alloc */
4793 if (itp != NULL) __push_itstk(itp);
4794
4795 /* allocate the qcval record for wire in case see release before force */
4796 if (np->nu2.qcval == NULL) __alloc_qcval(np);
4797
4798 if (itp != NULL) __pop_itstk();
4799 }
4800
4801 /*
4802 * ROUTINES TO SETUP QUASI-CONTINOUS ASSIGN STORE AND DCE LIST
4803 */
4804
4805 /*
4806 * build and link on special qc assign/force rhs simple dce
4807 * xp is rhs expr and called from itree loc. where exec qc assign
4808 */
static void bld_qcaf_dces(struct expr_t *xp, struct qcval_t *qcvalp)
{
 register word32 *wp;
 struct net_t *np;
 int32 biti, bitj;
 struct expr_t *idndp, *ndx;
 struct expr_t *fax;

 /* recursive walk of rhs expr - link a qc assign/force dce on every */
 /* wire/reg (or select of one) referenced so rhs changes re-trigger */
 switch ((byte) xp->optyp) {
  case GLBREF:
   idndp = xp;
   /* for global - do not need ref. point - just link on 1 (because only 1 */
   /* monit call from 1 inst.) target wire */
   biti = bitj = -1;
glb_dce:
   np = idndp->lu.sy->el.enp;
   linkon_qcaf_dce(np, biti, bitj, idndp->ru.grp, qcvalp);
   break;
  case ID:
   /* simple local variable - dce on the whole net (range -1,-1) */
   idndp = xp;
   np = xp->lu.sy->el.enp;
   linkon_qcaf_dce(np, -1, -1, (struct gref_t *) NULL, qcvalp);
   break;
  /* SJM 05/18/00 - must do nothing for reals */
  case NUMBER: case ISNUMBER: case REALNUM: case ISREALNUM: case OPEMPTY:
   return;
  case LSB:
   idndp = xp->lu.x;
   np = idndp->lu.sy->el.enp;
   ndx = xp->ru.x;
   /* for monits, any reg or non scalaraed wire must trigger on any chg */
   if (ndx->optyp == NUMBER)
    {
     /* constant index - select that one bit (x/z index => whole net) */
     wp = &(__contab[ndx->ru.xvi]);
     if (wp[1] != 0L) biti = -1; else biti = (int32) wp[0];
    }
   else if (ndx->optyp == ISNUMBER)
    {
     /* per instance constant - pick this instance's value pair */
     wp = &(__contab[ndx->ru.xvi]);
     wp = &(wp[2*__inum]);
     /* need length for IS number because can be wider - but get low */
     if (wp[1] != 0L) biti = -1; else biti = (int32) wp[0];
    }
   else
    {
     /* notice for monitor and dctrl event change, variable here is legal */
     /* and implies change for index and trigger on all bits of variable */
     bld_qcaf_dces(ndx, qcvalp);
     biti = -1;
    }
   bitj = biti;
   /* non scalared vector cannot track per bit - widen to whole net */
   if (biti != -1 && !np->vec_scalared) biti = bitj = -1;
   if (idndp->optyp == GLBREF) goto glb_dce;
   /* NOTE: biti passed for both bounds - bitj always equals biti here */
   linkon_qcaf_dce(np, biti, biti, (struct gref_t *) NULL, qcvalp);
   break;
  case PARTSEL:
   idndp = xp->lu.x;
   np = idndp->lu.sy->el.enp;
   ndx = xp->ru.x;
   /* know part select never IS */
   biti = __contab[ndx->lu.x->ru.xvi];
   bitj = __contab[ndx->ru.x->ru.xvi];
   if (!np->vec_scalared) biti = bitj = -1;
   if (idndp->optyp == GLBREF) goto glb_dce;
   linkon_qcaf_dce(np, biti, bitj, (struct gref_t *) NULL, qcvalp);
   break;
  case FCALL:
   /* if any arguments of system or user functions change, monitor triggers */
   /* notice $time function do not have arguments */
   for (fax = xp->ru.x; fax != NULL; fax = fax->ru.x)
    bld_qcaf_dces(fax->lu.x, qcvalp);
   break;
  case LCB:
   /* concatenate - recurse into every element */
   for (fax = xp->ru.x; fax != NULL; fax = fax->ru.x)
    bld_qcaf_dces(fax->lu.x, qcvalp);
   break;
  default:
   /* generic operator - recurse into both operands when present */
   if (xp->lu.x != NULL) bld_qcaf_dces(xp->lu.x, qcvalp);
   if (xp->ru.x != NULL) bld_qcaf_dces(xp->ru.x, qcvalp);
   break;
 }
}
4891
4892 /*
4893 * link on a special (simplified) qc assign/force dce rhs load
4894 * caller must decompose any scalared wire part selects into bit selects
4895 * before here
4896 * -2 IS form impossible since any one inst. IS form converted to constant
4897 * before here
4898 *
4899 * this must be called from source reference location of the qc stmt
4900 *
4901 * notice never a need for an old value since better to just re-eval assign
4902 * this goes on front but after any DMPV
4903 */
static void linkon_qcaf_dce(struct net_t *np, int32 biti, int32 bitj,
 struct gref_t *grp, struct qcval_t *qcvalp)
{
 int32 nd_itpop;
 struct itree_t *ref_itp;
 struct dcevnt_t *dcep;
 struct dceauxlst_t *dclp;

 /* remember ref. loc; for an xmr rhs move to the target instance */
 ref_itp = __inst_ptr;
 nd_itpop = FALSE;
 if (grp != NULL) { __xmrpush_refgrp_to_targ(grp); nd_itpop = TRUE; }
 /* allocate, init, and fill the fields */
 dcep = __alloc_dcevnt(np);
 /* -1 bounds mean whole net; otherwise a bit range qc assign/force dce */
 if (biti == -1) dcep->dce_typ = DCE_QCAF;
 else
  {
   dcep->dce_typ = DCE_RNG_QCAF;
   dcep->dci1 = biti;
   dcep->dci2.i = bitj;
  }
 dcep->dce_np = np;
 /* dce's start out on so must explicitly turn prep time built qc off here */
 dcep->dce_off = TRUE;

 /* link this on front */
 dcep->dcenxt = np->dcelst;
 np->dcelst = dcep;

 /* set ref. itree location - since dcep on target */
 dcep->dce_1inst = TRUE;
 dcep->dce_matchitp = __inst_ptr;
 dcep->dce_refitp = ref_itp;
 /* SJM 07/19/02 needed to make sure chg form iops get gened */
 np->nchg_nd_chgstore = TRUE;

 /* also set unused fmon field to qcval for bit or wire if reg */
 dcep->dceu2.dce_qcvalp = qcvalp;

 /* then link on undo/chg list headed by the __qcaf_dcehdr global */
 dclp = (struct dceauxlst_t *) __my_malloc(sizeof(struct dceauxlst_t));
 dclp->ldcep = dcep;
 dclp->dclnxt = __qcaf_dcehdr;
 __qcaf_dcehdr = dclp;

 /* SJM 06/23/04 ### ??? - without regen is this needed? */
 /* SJM 02/06/03 - may have npps but not dces so must turn this on */
 /* since nchg nd chgstore on, know nchg action right */
 if (np->ntyp >= NONWIRE_ST) np->nchg_has_dces = TRUE;

 /* restore ref. itree loc if an xmr push was done above */
 if (nd_itpop) __pop_itstk();
}
4955
4956 /*
4957 * XMR VARIABLE PREPARATION ROUTINES
4958 */
4959
4960 /*
4961 * prepare xmrs - allocate per inst. storage and set itree pointers
4962 *
4963 * no itree context here - must use explicit itree locs
4964 */
extern void __prep_xmrs(void)
{
 register int32 gri;
 register struct mod_t *mdp;
 register struct gref_t *grp;
 int32 ii;
 struct itree_t *itp;

 /* first process rooted and count upward rel. in static tree */
 __num_uprel_glbs = 0;
 /* walk every gref of every module in the static module list */
 for (mdp = __modhdr; mdp != NULL; mdp = mdp->mnxt)
  {
   for (gri = 0, grp = &(mdp->mgrtab[0]); gri < mdp->mgrnum; gri++, grp++)
    {
     /* DBG remove - if any gr_err should not get here */
     if (grp->gr_err) __misc_terr(__FILE__, __LINE__);
     if (grp->gr_gone) continue;

     /* if guessed wrong xmr from used in instance argument loc., undo */
     /* if rooted set root target itp */
     if (grp->is_rooted)
      {
       /* find itree root corresponding to root path starting module name */
       /* never constant inst array select */
       if ((ii = __ip_indsrch(grp->grcmps[0]->synam)) == -1)
        __misc_gfterr(__FILE__, __LINE__, grp->grfnam_ind, grp->grflin_cnt);
       itp = __it_roots[ii];

       /* multi component path - trace down from the root to the target */
       if (grp->last_gri != 0)
        grp->targu.targitp = __find_unrt_targitp(grp, itp, 1);
       /* one component rooted, so already have itp */
       else grp->targu.targitp = itp;

       /* must mark module as containing rooted gref */
       /* and first rooted gref - allocate flatinum style itree table */
       continue;
      }
     /* notice one uprel xmr has flatinum number in itree */
     if (grp->upwards_rel)
      {
       /* setting of inst. selects in upward rels, handled elsewhere */
       __num_uprel_glbs += mdp->flatinum;
       process_upwards_grp(grp);
       continue;
      }
     /* for downward relative with array of inst. selects must check ranges */
     if (grp->path_has_isel) chk_downrel_inst_sels(grp);
    }
  }
}
5015
5016 /*
5017 * fill grp targ field for rooted grp and uprel itps tabl for upward rel
5018 * called for interactive and delay setting run time xmr eval only
5019 */
__fill_grp_targu_fld(struct gref_t * grp)5020 extern void __fill_grp_targu_fld(struct gref_t *grp)
5021 {
5022 int32 ii;
5023 struct itree_t *itp2;
5024
5025 if (grp->gr_err || grp->gr_gone) return;
5026
5027 /* if rooted set root target itp */
5028 if (grp->is_rooted)
5029 {
5030 /* find itree root corresponding to root path staring module name */
5031 if ((ii = __ip_indsrch(grp->grcmps[0]->synam)) == -1)
5032 __misc_gfterr(__FILE__, __LINE__, grp->grfnam_ind, grp->grflin_cnt);
5033 itp2 = __it_roots[ii];
5034 /* may be rooted and in top module */
5035 if (grp->last_gri != 0)
5036 grp->targu.targitp = __find_unrt_targitp(grp, itp2, 1);
5037 else grp->targu.targitp = itp2;
5038 return;
5039 }
5040 /* since never more than 1 inst. of interactive scope, up from current */
5041 /* scope - this implies must always reparse interactive statements */
5042 /* SJM 09/15/00 - this fills the per ref. indexed targ itps table */
5043 if (grp->upwards_rel) process_upwards_grp(grp);
5044 }
5045
5046 /*
5047 * process upward relative gref by building downward reverse of upward
5048 * path to first above module of matching type
5049 *
5050 * rule is go upward until find matching 0th component type, then down
5051 * this is needed because for inst. upward distance may be different
5052 * because inst array pound param range types copied before defparam
5053 * splitting, first upward rel. first component symbol will be right split off
5054 */
static void process_upwards_grp(struct gref_t *grp)
{
 register int32 ii;
 struct mod_t *imdp, *up_mdp, *mast_imdp, *up_mast_mdp;
 struct itree_t *in_itp, *up_itp, *titp;
 struct inst_t *ip;
 struct sy_t *syp;
 struct itree_t **uprelitps;

 /* notice for upward relative, first component is module type not inst */
 syp = grp->grcmps[0];
 /* DBG remove - upward relative xmr head not module type */
 if (syp->sytyp != SYM_M)
  __misc_gfterr(__FILE__, __LINE__, grp->grfnam_ind, grp->grflin_cnt);
 /* -- */

 /* per instance target table - one target itp per flat instance of the */
 /* referencing module */
 uprelitps = (struct itree_t **)
  __my_malloc(grp->gin_mdp->flatinum*sizeof(struct itree_t *));
 grp->targu.uprel_itps = uprelitps;

 /* for upward distance is per inst. variable - up to find target mod */
 up_mdp = syp->el.emdp;
 up_mast_mdp = __get_mast_mdp(up_mdp);
 for (ii = 0; ii < grp->gin_mdp->flatinum; ii++)
  {
   in_itp = grp->gin_mdp->moditps[ii];

   /* climb toward the itree root until an instance of the matching */
   /* (split master normalized) module type is found */
   for (up_itp = in_itp;;)
    {
     if (up_itp == NULL)
      {
       /* NOTE(review): error return leaves remaining uprelitps slots */
       /* unfilled - presumably sim does not start after this error */
       __gferr(981, grp->grfnam_ind, grp->grflin_cnt,
        "unqualified use of upward relative task/func %s illegal - matched upward relative task/func undefined above instance %s",
        grp->gnam, __msg2_blditree(__xs, in_itp));
       return;
      }

     /* upward xmr reference no matching type above */
     ip = up_itp->itip;
     imdp = ip->imsym->el.emdp;
     mast_imdp = __get_mast_mdp(imdp);
     /* DBG remove --
     __dbg_msg("upwards rel: %s: first comp=%s, current mdp: %s(%s)=%d\n",
      grp->gnam, up_mast_mdp->msym->synam, mast_imdp->msym->synam,
      ip->isym->synam, up_itp->itinum);
     -- */
     if (mast_imdp == up_mast_mdp) break;
     up_itp = up_itp->up_it;
    }
   /* next trace down from upward module type match (using it inst) */
   /* 0th is module that determines upward distance */
   if (grp->last_gri != 0) titp = __find_unrt_targitp(grp, up_itp, 1);
   /* one component upward relative module name */
   else titp = up_itp;

   uprelitps[ii] = titp;
  }
}
5113
5114 /*
5115 * given possible split off module type get the mast type
5116 *
5117 * pound params may be 1 level split off and defparams one more
5118 * i.e. all pound parameters split off from one master (maybe >1 inst)
5119 * then defparam split off from either normal or pound split off
5120 */
__get_mast_mdp(struct mod_t * mdp)5121 extern struct mod_t *__get_mast_mdp(struct mod_t *mdp)
5122 {
5123 struct mod_t *mast_mdp;
5124
5125 if (mdp->msplit)
5126 {
5127 mast_mdp = mdp->mspltmst;
5128 if (mast_mdp->mpndsplit) mast_mdp = mast_mdp->mpndspltmst;
5129 }
5130 else if (mdp->mpndsplit) mast_mdp = mdp->mpndspltmst;
5131 else mast_mdp = mdp;
5132 return(mast_mdp);
5133 }
5134
5135 /*
5136 * check all instances of downward relative instance selects
5137 *
5138 * only called for downward relative that have inst array selects
5139 */
chk_downrel_inst_sels(struct gref_t * grp)5140 static void chk_downrel_inst_sels(struct gref_t *grp)
5141 {
5142 register int32 ii;
5143 struct mod_t *mdp;
5144 struct itree_t *itp;
5145 int32 sav_ecnt;
5146
5147 sav_ecnt = __pv_err_cnt;
5148 mdp = grp->gin_mdp;
5149 for (ii = 0; ii < mdp->flatinum; ii++)
5150 {
5151 itp = mdp->moditps[ii];
5152 /* DBG remove --- */
5153 if (itp == NULL) __misc_terr(__FILE__, __LINE__);
5154 /* --- */
5155 /* this does checking for each and emits error */
5156 __find_unrt_targitp(grp, itp, 0);
5157 /* once error is found stop */
5158 if (__pv_err_cnt > sav_ecnt) break;
5159 }
5160 }
5161
5162 /*
5163 * SPECIFY SECTION PREPARATION ROUTINES
5164 */
5165
/*
 * prepare all specify section constructs: timing checks first, then
 * module path delays
 */
extern void __prep_specify(void)
{
 prep_tchks();
 prep_pths();
}
5171
5172 /*
5173 * TIMING CHECK REPRESENTATION CHANGE ROUTINES
5174 */
5175
5176 /*
5177 * routine to prepare timing checks
5178 * for vectors in timing check event slots must split into 1 bit wide
5179 * checks since each bit timing reference and data events separate
5180 *
5181 * notice timing checks cannot be xmrs (must be I/O ports) so no xmr
5182 * processing net pin elements
5183 */
prep_tchks(void)5184 static void prep_tchks(void)
5185 {
5186 register struct tchk_t *tcp;
5187 register int32 i1, i2;
5188 int32 starti1, starti2, chki1, chki2;
5189 struct mod_t *mdp;
5190 struct net_t *startnp, *chknp;
5191 struct tchg_t *start_tchgp;
5192 struct chktchg_t *chk_tchgp;
5193 struct gate_t gwrk;
5194
5195 for (mdp = __modhdr; mdp != NULL; mdp = mdp->mnxt)
5196 {
5197
5198 /* multiple blocks each with different symbol table legal */
5199 /* if option to ignore, specify will be remove before here */
5200 if (mdp->mspfy == NULL) continue;
5201
5202 __push_wrkitstk(mdp, 0);
5203 for (tcp = mdp->mspfy->tchks; tcp != NULL; tcp = tcp->tchknxt)
5204 {
5205 /* do not convert since always will not get initialized - invisible */
5206 if (tcp->tc_gone) continue;
5207
5208 /* convert ref. and data events to wire and range */
5209 /* know events is non concatente lhs (wire, const. bit/part select) */
5210 __xtract_wirng(tcp->startxp, &startnp, &starti1, &starti2);
5211 if (tcp->tchktyp == TCHK_PERIOD || tcp->tchktyp == TCHK_WIDTH)
5212 {
5213 chknp = startnp; chki1 = starti1; chki2 = starti2;
5214 /* need to copy to make timing check violation check code work */
5215 tcp->chkxp = __copy_expr(tcp->startxp);
5216 tcp->chkcondx = __copy_expr(tcp->startcondx);
5217 if (tcp->tchktyp == TCHK_WIDTH)
5218 {
5219 /* opposite edge */
5220 if (tcp->startedge == E_NEGEDGE) tcp->chkedge = E_POSEDGE;
5221 else if (tcp->startedge == E_POSEDGE) tcp->chkedge = E_NEGEDGE;
5222 else __case_terr(__FILE__, __LINE__);
5223 }
5224 /* same edge */
5225 else tcp->chkedge = tcp->startedge;
5226 }
5227 else __xtract_wirng(tcp->chkxp, &chknp, &chki1, &chki2);
5228
5229 switch ((byte) tcp->tchktyp) {
5230 /* SJM 01/16/04 - added $removal - reversed terms of $recovery */
5231 case TCHK_SETUP: case TCHK_HOLD: case TCHK_SKEW: case TCHK_RECOVERY:
5232 case TCHK_SETUPHOLD: case TCHK_REMOVAL: case TCHK_RECREM:
5233 /* timing check range all full - every bit time checked against */
5234 for (i1 = starti1; i1 >= starti2; i1--)
5235 {
5236 start_tchgp = bld_start_tchk_npp(tcp, startnp, i1);
5237 for (i2 = chki1; i2 >= chki1; i2--)
5238 {
5239 chk_tchgp = bld_check_tchk_npp(chknp, i2);
5240 /* check event needs access to start - also path to tchk master */
5241 chk_tchgp->startchgp = start_tchgp;
5242 }
5243 }
5244 break;
5245 case TCHK_WIDTH: case TCHK_PERIOD:
5246 /* after above fixup, like normal except ==> not *> form */
5247 /* maybe wrong and should be ? */
5248 for (i1 = starti1; i1 >= starti2; i1--)
5249 {
5250 /* for $period no reference event, data and reference the same */
5251 /* so this build ref. but no npp */
5252 start_tchgp = bld_start_tchk_npp(tcp, startnp, i1);
5253 chk_tchgp = bld_check_tchk_npp(chknp, i1);
5254 /* check event needs access to start - also path to tchk master */
5255 chk_tchgp->startchgp = start_tchgp;
5256 }
5257 break;
5258 default: __case_terr(__FILE__, __LINE__);
5259 }
5260 /* no delay preparation for added hold of setuphold */
5261 if (tcp->tc_supofsuphld || tcp->tc_recofrecrem) continue;
5262
5263 /* first set &&& conditional fields net still acessed from t event */
5264 /* width irrelevant know will always only be 1 delay */
5265 /* know every timing check has 1 limit field */
5266 __add_tchkdel_pnp(tcp, TRUE);
5267 __prep_delay(&gwrk, tcp->tclim_du.pdels, FALSE, FALSE,
5268 "first timing check limit", TRUE, tcp->tcsym, TRUE);
5269 if (__nd_neg_del_warn)
5270 {
5271 __gfwarn(601, tcp->tcsym->syfnam_ind, tcp->tcsym->sylin_cnt,
5272 "timing check negative delay changed to 0 (ok for timing verifier)");
5273 __nd_neg_del_warn = FALSE;
5274 }
5275 tcp->tc_delrep = gwrk.g_delrep;
5276 tcp->tclim_du = gwrk.g_du;
5277
5278 /* notice for setuphold (actually hold part) this always on */
5279 if (tcp->tc_haslim2)
5280 {
5281 /* width irrelevant know will always only be 1 delay */
5282 __add_tchkdel_pnp(tcp, FALSE);
5283 __prep_delay(&gwrk, tcp->tclim2_du.pdels, FALSE, FALSE,
5284 "2nd timing check limit", TRUE, tcp->tcsym, TRUE);
5285 if (__nd_neg_del_warn)
5286 {
5287 __gfwarn(601, tcp->tcsym->syfnam_ind, tcp->tcsym->sylin_cnt,
5288 "timing check negative delay changed to 0 (ok for timing verifier)");
5289 __nd_neg_del_warn = FALSE;
5290 }
5291 tcp->tc_delrep2 = gwrk.g_delrep;
5292 tcp->tclim2_du = gwrk.g_du;
5293 }
5294 }
5295 __pop_wrkitstk();
5296 }
5297 }
5298
5299 /*
5300 * extract wire and range
5301 * know ranges always constants
5302 */
/*
 * extract wire and bit range from a wire/bit select/part select expr
 * know select ranges are always constants; whole wire encoded as [-1:-1]
 */
extern void __xtract_wirng(struct expr_t *xp, struct net_t **np,
 int32 *i1, int32 *i2)
{
 word32 *wp;

 if (xp->optyp == ID)
  {
   /* plain wire - whole net range */
   *np = xp->lu.sy->el.enp;
   *i1 = *i2 = -1;
   return;
  }
 if (xp->optyp == LSB)
  {
   /* constant bit select - degenerate one bit range */
   *np = xp->lu.x->lu.sy->el.enp;
   wp = &(__contab[xp->ru.x->ru.xvi]);
   *i1 = *i2 = (int32) wp[0];
   return;
  }
 if (xp->optyp == PARTSEL)
  {
   /* constant part select - high then low bound */
   *np = xp->lu.x->lu.sy->el.enp;
   *i1 = (int32) __contab[xp->ru.x->lu.x->ru.xvi];
   *i2 = (int32) __contab[xp->ru.x->ru.x->ru.xvi];
   return;
  }
 __case_terr(__FILE__, __LINE__);
}
5326
5327 /*
5328 * build the start (earliest reference) event
5329 * for period need the start change (ref.) event but no npp
5330 */
static struct tchg_t *bld_start_tchk_npp(struct tchk_t *tcp,
 struct net_t *startnp, int32 bi1)
{
 register int32 ii;
 word64 t;
 struct tchg_t *start_tchgp;

 /* per check-bit reference event record: tchk master, per inst old value */
 /* and per inst last change time table (init'ed to time 0) */
 start_tchgp = (struct tchg_t *) __my_malloc(sizeof(struct tchg_t));
 start_tchgp->chgu.chgtcp = tcp;
 start_tchgp->oldval = bld_npp_oldval(startnp, __inst_mod);
 start_tchgp->lastchg = (word64 *)
  __my_malloc(__inst_mod->flatinum*sizeof(word64));
 /* FIXME - why are 2nd later change time and t chg rec unused here */
 t = 0ULL;
 for (ii = 0; ii < __inst_mod->flatinum; ii++) start_tchgp->lastchg[ii] = t;

 /* for $period, no npp just place holder set when data event occurs */
 if (tcp->tchktyp != TCHK_PERIOD)
  {
   __cur_npnum = 0;
   /* scalar nets use the whole-net (-1) range form */
   if (!startnp->n_isavec) bi1 = -1;
   __conn_npin(startnp, bi1, bi1, FALSE, NP_TCHG, (struct gref_t *) NULL,
    NPCHG_TCSTART, (char *) start_tchgp);

   /* SJM - 04/04/02 need to turn on chg processing if only load */
   /* DBG remove -- */
   if (startnp->nlds == NULL) __misc_terr(__FILE__, __LINE__);
   /* --- */
   /* just-added npp is the only load - enable chg processing for net */
   if (startnp->nlds->npnxt == NULL)
    {
     startnp->nchg_has_lds = TRUE;
     startnp->nchg_nd_chgstore = TRUE;

     /* if also no dces, now when add tchk load, must turn off all chged */
     if (startnp->dcelst == NULL)
      {
       for (ii = 0; ii < __inst_mod->flatinum; ii++)
        { startnp->nchgaction[ii] &= ~(NCHG_ALL_CHGED); }
      }
    }
  }
 return(start_tchgp);
}
5374
5375 /*
5376 * build the check net pin event (later data event)
5377 */
static struct chktchg_t *bld_check_tchk_npp(struct net_t *chknp, int32 bi1)
{
 register int32 ii;
 word64 t;
 struct chktchg_t *chk_tchgp;

 /* per check-bit data event record - old value plus per inst last change */
 /* time table (init'ed to time 0) */
 chk_tchgp = (struct chktchg_t *) __my_malloc(sizeof(struct chktchg_t));
 /* link to start and tchk master accessed through start but caller sets */
 /* tchk master accessed through union in start tim chg */
 /* FIXME - why are 2nd later change time and t chg rec unused here */
 chk_tchgp->chklastchg = (word64 *)
  __my_malloc(__inst_mod->flatinum*sizeof(word64));
 t = 0ULL;
 for (ii = 0; ii < __inst_mod->flatinum; ii++) chk_tchgp->chklastchg[ii] = t;
 chk_tchgp->chkoldval = bld_npp_oldval(chknp, __inst_mod);
 __cur_npnum = 0;
 /* scalar nets use the whole-net (-1) range form */
 if (!chknp->n_isavec) bi1 = -1;
 __conn_npin(chknp, bi1, bi1, FALSE, NP_TCHG, (struct gref_t *) NULL,
  NPCHG_TCCHK, (char *) chk_tchgp);

 /* SJM - 04/04/02 need to turn on chg processing if only load */
 /* DBG remove -- */
 if (chknp->nlds == NULL) __misc_terr(__FILE__, __LINE__);
 /* --- */
 /* just-added npp is the only load - enable chg processing for net */
 if (chknp->nlds->npnxt == NULL)
  {
   chknp->nchg_has_lds = TRUE;
   chknp->nchg_nd_chgstore = TRUE;

   /* if also no dces, now when add tchk load, must turn off all chged */
   if (chknp->dcelst == NULL)
    {
     for (ii = 0; ii < __inst_mod->flatinum; ii++)
      { chknp->nchgaction[ii] &= ~(NCHG_ALL_CHGED); }
    }
  }
 return(chk_tchgp);
}
5416
5417 /*
5418 * build net pin old value for detecting path source and tc event bit chgs
5419 */
bld_npp_oldval(struct net_t * np,struct mod_t * mdp)5420 static byte *bld_npp_oldval(struct net_t *np, struct mod_t *mdp)
5421 {
5422 int32 stval, ival, insts;
5423 byte sval, *bp;
5424
5425 insts = mdp->flatinum;
5426 if (np->n_stren)
5427 {
5428 bp = (byte *) __my_malloc(insts);
5429 __get_initval(np, &stval);
5430 sval = (byte) stval;
5431 set_byteval_(bp, insts, sval);
5432 return(bp);
5433 }
5434 bp = (byte *) __my_malloc(insts);
5435 ival = __get_initval(np, &stval);
5436 set_byteval_(bp, insts, ival);
5437 return(bp);
5438 }
5439
5440 /*
5441 * build net pin old value for detecting path source and tc event bit chgs
5442 */
static void reinit_npp_oldval(byte *bp, struct net_t *np, struct mod_t *mdp)
{
 int32 strenval, logicval, ninsts;

 ninsts = mdp->flatinum;
 /* init routine returns logic value and fills strength byte form */
 logicval = __get_initval(np, &strenval);
 /* strength nets store the strength byte form, others the logic value */
 if (np->n_stren) set_byteval_(bp, ninsts, (byte) strenval);
 else set_byteval_(bp, ninsts, logicval);
}
5462
5463 /*
5464 * prepare path elements and delays
5465 * 1) convert path expr. lists to path elements
5466 * 2) convert delays - know delay expression is 1, 2, 3, or 6 numbers
5467 * here delay expr. must still be scaled
5468 * 3) check inout ports that are path srcs to be sure has input driver
5469 * pattern and path and path dest. has output driver pattern only
5470 *
5471 * LOOKATME - also somewhere back annotation can create IS forms ?
5472 */
static void prep_pths(void)
{
 register struct spcpth_t *pthp;
 int32 rv, mod_has_path;
 struct mod_t *mdp;
 struct gate_t gwrk;

 for (mdp = __modhdr; mdp != NULL; mdp = mdp->mnxt)
  {
   /* only modules with a specify section have paths */
   if (mdp->mspfy == NULL) continue;

   __push_wrkitstk(mdp, 0);

   mod_has_path = FALSE;
   /* know each delay is NUMBER or REALNUM */
   for (pthp = __inst_mod->mspfy->spcpths; pthp != NULL;
    pthp = pthp->spcpthnxt)
    {
     /* do not convert since always will not get initialized - invisible */
     if (pthp->pth_gone) continue;

     /* element and per bit dest checks - bad path is marked gone */
     if (!chk_pthels(pthp)) { pthp->pth_gone = TRUE; continue; }
     if (!bldchk_pb_pthdsts(pthp)) { pthp->pth_gone = TRUE; continue; }

     /* prepare the delay - notice this uses __inst_mod */
     __add_pathdel_pnp(pthp);
     __prep_delay(&gwrk, pthp->pth_du.pdels, TRUE, FALSE, "path delay",
      TRUE, pthp->pthsym, TRUE);
     if (__nd_neg_del_warn)
      {
       __gferr(981, pthp->pthsym->syfnam_ind, pthp->pthsym->sylin_cnt,
        "path delay negative (used 0)");
       __nd_neg_del_warn = FALSE;
      }
     pthp->pth_delrep = gwrk.g_delrep;
     pthp->pth_du = gwrk.g_du;
     /* polarity (+/-) data source exprs have no simulation meaning */
     if (pthp->datasrcx != NULL)
      {
       __gfinform(481, pthp->pthsym->syfnam_ind, pthp->pthsym->sylin_cnt,
        "path polarity operator ignored meaningless in simulator");
      }
     /* and check for any illegal 0 delays */
     if ((rv = __chk_0del(pthp->pth_delrep, pthp->pth_du, __inst_mod))
      != DGOOD)
      {
       if (rv != DBAD_MAYBE0 && rv != DBAD_0)
        {
         __gferr(964, pthp->pthsym->syfnam_ind, pthp->pthsym->sylin_cnt,
          "path delay illegal (%s delay form)", __to_deltypnam(__xs,
          pthp->pth_delrep));
        }
       else
        {
         /* for IO PAD cells some 0 delays common */
         if (rv == DBAD_MAYBE0)
          {
           __gfinform(483, pthp->pthsym->syfnam_ind, pthp->pthsym->sylin_cnt,
            "path delay (%s form) contains some zero delays - primitive delay may be better",
            __to_deltypnam(__xs, pthp->pth_delrep));
          }
         else
          {
           __gfinform(484, pthp->pthsym->syfnam_ind, pthp->pthsym->sylin_cnt,
            "path delay (%s form) all zeros - no effect unless annotated to",
            __to_deltypnam(__xs, pthp->pth_delrep));
          }
        }
      }
     mod_has_path = TRUE;
    }
   /* per module summary informs for path destination bits */
   if (mod_has_path) emit_pthdst_bit_informs(__inst_mod);
   __pop_wrkitstk();
  }
}
5547
5548 /*
5549 * check all path components - check things that apply to path elements
5550 * check for bit by bit things when building sim path d.s.
5551 */
chk_pthels(struct spcpth_t * pthp)5552 static int32 chk_pthels(struct spcpth_t *pthp)
5553 {
5554 register int32 pei;
5555 struct pathel_t *pep;
5556 struct net_t *np;
5557 int32 gd_path;
5558
5559 gd_path = TRUE;
5560 for (pei = 0; pei <= pthp->last_pein; pei++)
5561 {
5562 pep = &(pthp->peins[pei]);
5563 np = pep->penp;
5564 /* DBG remove */
5565 if (!np->n_isapthsrc) __misc_terr(__FILE__, __LINE__);
5566 /* --- */
5567 /* path source can be vectored or scalared (can be reg) */
5568 /* since source just timing check ref. event - record time of any chg */
5569 }
5570 for (pei = 0; pei <= pthp->last_peout; pei++)
5571 {
5572 pep = &(pthp->peouts[pei]);
5573 np = pep->penp;
5574 if (np->n_isavec && !np->vec_scalared)
5575 {
5576 __gferr(825, pthp->pthsym->syfnam_ind, pthp->pthsym->sylin_cnt,
5577 "vectored path destination wire %s illegal - must be scalared",
5578 np->nsym->synam);
5579 gd_path = FALSE;
5580 }
5581 /* if both has wire delay and dest., pth dst bit was turned off */
5582 /* LOOKATME - not sure what to check here */
5583 if (np->ntyp == N_TRIREG)
5584 {
5585 __gferr(826, pthp->pthsym->syfnam_ind, pthp->pthsym->sylin_cnt,
5586 "path destination wire %s illegal trireg type", np->nsym->synam);
5587 gd_path = FALSE;
5588 }
5589 else if (!np->n_isapthdst
5590 || (np->nrngrep == NX_DWIR && np->nu.rngdwir->n_delrep != DT_PTHDST))
5591 {
5592 __gferr(826, pthp->pthsym->syfnam_ind, pthp->pthsym->sylin_cnt,
5593 "path destination wire %s illegal - has wire delay", np->nsym->synam);
5594 gd_path = FALSE;
5595 }
5596 }
5597 return(gd_path);
5598 }
5599
5600 /*
5601 * build and check the per bit but not per inst path dest table lists
5602 * all non bit specific path properties already checked
5603 */
static int32 bldchk_pb_pthdsts(struct spcpth_t *pthp)
{
 register int32 spi, dpi;
 register struct pathel_t *spep, *dpep;
 int32 pbiwid, pbowid;
 int32 sbi, sbi2, dbi, dbi2, pb_gd, nxt_spep, nxt_dpep;
 struct net_t *snp, *dnp;
 struct tchg_t *src_tchg;

 pb_gd = TRUE;
 /* needed since loop initialization indirect and lint cannot detect */
 sbi = sbi2 = dbi = dbi2 = -1;
 snp = dnp = NULL;
 src_tchg = NULL;
 if (pthp->pthtyp == PTH_PAR)
  {
   /* parallel path: input and output bit widths must match exactly, */
   /* bits then pair up one to one (high to low) across the two lists */
   get_pthbitwidths(pthp, &pbiwid, &pbowid);
   if (pbiwid != pbowid)
    {
     __gferr(839, pthp->pthsym->syfnam_ind, pthp->pthsym->sylin_cnt,
      "parallel path input bit length %d and output %d differ",
      pbiwid, pbowid);
     return(FALSE);
    }
   /* know bit lengths the same and if vectors or selects scalared */
   /* walk src and dest bit streams in lock step - nxt_[sd]pep flags */
   /* request advance to next path element when its bits are exhausted */
   for (spi = dpi = -1, nxt_spep = nxt_dpep = TRUE;;)
    {
     if (nxt_spep)
      {
       /* before moving to next bit always add net pin element */
       if (++spi > pthp->last_pein)
        {
         /* src exhausted - dest must also be exhausted (same width) */
         if (!nxt_dpep || (dpi + 1) <= pthp->last_peout)
          __misc_terr(__FILE__, __LINE__);
         break;
        }
       spep = &(pthp->peins[spi]);
       snp = spep->penp;
       /* scalar uses bit 0; whole vector runs nwid-1 down to 0; part */
       /* select runs pthi1 down to pthi2 (ranges normalized to h:l) */
       if (!snp->n_isavec) sbi = sbi2 = 0;
       else
        {
         if (spep->pthi1 == -1) { sbi = snp->nwid - 1; sbi2 = 0; }
         else { sbi = spep->pthi1; sbi2 = spep->pthi2; }
        }
       /* build and add (if non already added) path dest. NP_DPTHSRC */
       src_tchg = try_add_npp_dpthsrc(pthp, snp, sbi);
       nxt_spep = FALSE;
      }
     if (nxt_dpep)
      {
       /* if done, will always exit on src over run of end */
       if (++dpi > pthp->last_peout) __misc_terr(__FILE__, __LINE__);
       dpep = &(pthp->peouts[dpi]);
       dnp = dpep->penp;
       if (!dnp->n_isavec) dbi = dbi2 = 0;
       else
        {
         if (dpep->pthi1 == -1) { dbi = dnp->nwid - 1; dbi2 = 0; }
         else { dbi = dpep->pthi1; dbi2 = dpep->pthi2; }
        }
       nxt_dpep = FALSE;
      }
     /* connect and check the current (source bit, dest bit) pair */
     if (!bldchk_1bit_pthdst(pthp, snp, sbi, dnp, dbi, dnp->nwid, src_tchg))
      pb_gd = FALSE;
     if (--sbi < sbi2) nxt_spep = TRUE;
     if (--dbi < dbi2) nxt_dpep = TRUE;
    }
   return(pb_gd);
  }
 /* handle full path case - for every source bit */
 for (spi = 0; spi <= pthp->last_pein; spi++)
  {
   spep = &(pthp->peins[spi]);
   snp = spep->penp;
   if (!snp->n_isavec) sbi = sbi2 = 0;
   else
    {
     if (spep->pthi1 == -1) { sbi = snp->nwid - 1; sbi2 = 0; }
     else { sbi = spep->pthi1; sbi2 = spep->pthi2; }
    }
   for (; sbi >= sbi2; sbi--)
    {
     src_tchg = try_add_npp_dpthsrc(pthp, snp, sbi);
     /* for given bit of input path - for every output path */
     for (dpi = 0; dpi <= pthp->last_peout; dpi++)
      {
       dpep = &(pthp->peouts[dpi]);
       dnp = dpep->penp;
       if (!dnp->n_isavec) dbi = dbi2 = 0;
       else
        {
         if (dpep->pthi1 == -1) { dbi = dnp->nwid - 1; dbi2 = 0; }
         else { dbi = dpep->pthi1; dbi2 = dpep->pthi2; }
        }
       /* for every bit of destination path element */
       for (; dbi >= dbi2; dbi--)
        {
         /* notice here, must check every sbits by dbits combination */
         if (!bldchk_1bit_pthdst(pthp, snp, sbi, dnp, dbi, dnp->nwid,
          src_tchg)) pb_gd = FALSE;
        }
      }
    }
  }
 return(pb_gd);
}
5710
5711 /*
5712 * build and add the path source net pin entry - like tc ref. event
5713 * just records latest change
5714 *
 * for cases with multiple sources for 1 path only add first time
5716 * here special indirect npp that allows indexing by bit and comparison
5717 *
5718 * if non empty or separate bit lists make sense
5719 * best is to put in pass that checks to see if needed and adds after here
5720 */
static struct tchg_t *try_add_npp_dpthsrc(struct spcpth_t *pthp,
 struct net_t *snp, int32 sbi)
{
 register int32 ii;
 struct net_pin_t *npp;
 word64 t;
 struct tchg_t *start_tchgp;

 /* if a path src time change npp already exists for this net bit */
 /* (several paths may share one source), reuse its tchg record */
 if ((npp = find_1timchg_psnpp(snp, sbi, NPCHG_PTHSRC)) != NULL)
  return(npp->elnpp.etchgp);

 start_tchgp = (struct tchg_t *) __my_malloc(sizeof(struct tchg_t));
 start_tchgp->chgu.chgpthp = pthp;
 start_tchgp->oldval = bld_npp_oldval(snp, __inst_mod);
 /* one last-change time slot per flat instance of the module */
 start_tchgp->lastchg = (word64 *)
  __my_malloc(__inst_mod->flatinum*sizeof(word64));

 t = 0ULL;
 for (ii = 0; ii < __inst_mod->flatinum; ii++) start_tchgp->lastchg[ii] = t;
 __cur_npnum = 0;
 /* sbi access inst. table so must be 0 but must be -1 for npp */
 if (!snp->n_isavec) sbi = -1;

 /* with table for moving down to col. from inst. */
 __conn_npin(snp, sbi, sbi, FALSE, NP_TCHG, (struct gref_t *) NULL,
  NPCHG_PTHSRC, (char *) start_tchgp);
 return(start_tchgp);
}
5749
5750 /*
5751 * find a path source time change npp
5752 * know __inst_mod set
5753 */
/*
 * scan net snp's load list for a NP_TCHG npp of the given change subtype
 * whose aux record selects exactly bit bi - NULL if none present
 * know __inst_mod set
 */
static struct net_pin_t *find_1timchg_psnpp(struct net_t *snp, int32 bi,
 int32 subtyp)
{
 register struct net_pin_t *ldp;
 struct npaux_t *auxp;

 ldp = snp->nlds;
 while (ldp != NULL)
  {
   if (ldp->npntyp == NP_TCHG && ldp->chgsubtyp == subtyp)
    {
     auxp = ldp->npaux;
     if (auxp != NULL && auxp->nbi1 == bi) return(ldp);
    }
   ldp = ldp->npnxt;
  }
 return(NULL);
}
5767
5768 /*
5769 * build 1 bit path dest. pair simulation path and do 1 bit path checking
5770 * if error path dest. not built
5771 *
5772 * for scalar dbi will be 0
5773 */
static int32 bldchk_1bit_pthdst(struct spcpth_t *pthp, struct net_t *s_np,
 int32 sbi, struct net_t *d_np, int32 dbi, int32 dnwid,
 struct tchg_t *src_tchg)
{
 register int32 i;
 int32 pb_gd;
 struct pthdst_t *pdp;
 char s1[RECLEN], s2[RECLEN];

 /* for inouts - source and dest. cannot be the same */
 pb_gd = TRUE;
 if (d_np->nsym == s_np->nsym && dbi == sbi)
  {
   __gfwarn(617, pthp->pthsym->syfnam_ind, pthp->pthsym->sylin_cnt,
    "ignoring path from %s%s to %s%s because source and destination same wire - simulators that split inout ports results may differ",
    s_np->nsym->synam, bld_bitref(s1, s_np, sbi), d_np->nsym->synam,
    bld_bitref(s2, d_np, dbi));
   return(FALSE);
  }
 /* if previous error, net marked as gone here */
 if (d_np->n_gone || s_np->n_gone) return(FALSE);

 /* check for no more than one driver per bit */
 /* LOOKATME - now driver for 1364 can be anything and */
 /* tran or inouts are not drivers (not hard), does this work? */
 if (!chk_biti_pthdst_driver(pthp, d_np, dbi)) return(FALSE);

 /* know for dest. rng dwir already allocated */
 /* if first bit of this wire, allocate table and mark all unused */
 if (d_np->nu.rngdwir->n_du.pb_pthdst == NULL)
  {
   d_np->nu.rngdwir->n_du.pb_pthdst = (struct pthdst_t **)
    __my_malloc(dnwid*sizeof(struct pthdst_t *));
   for (i = 0; i < dnwid; i++) d_np->nu.rngdwir->n_du.pb_pthdst[i] = NULL;
  }
 /* if this is path with different source but same destination, allocate */
 /* new path dest record is pushed onto the front of this bit's list */
 pdp = (struct pthdst_t *) __my_malloc(sizeof(struct pthdst_t));
 pdp->pstchgp = src_tchg;
 pdp->pdnxt = d_np->nu.rngdwir->n_du.pb_pthdst[dbi];
 d_np->nu.rngdwir->n_du.pb_pthdst[dbi] = pdp;
 return(pb_gd);
}
5816
5817 /*
5818 * compute a path's input and output bit widths
5819 */
/*
 * compute a path's total input and output bit widths
 * a path element with pthi1 == -1 contributes its net's whole width;
 * otherwise it contributes the normalized h:l select width
 */
static void get_pthbitwidths(struct spcpth_t *pthp, int32 *pinwid,
 int32 *poutwid)
{
 register int32 pei;
 int32 wsum;
 struct pathel_t *pep;

 /* sum the widths of all input path elements */
 wsum = 0;
 for (pei = 0; pei <= pthp->last_pein; pei++)
  {
   pep = &(pthp->peins[pei]);
   /* notice all ranges normalized to h:l here */
   wsum += (pep->pthi1 == -1) ? pep->penp->nwid
    : (pep->pthi1 - pep->pthi2 + 1);
  }
 *pinwid = wsum;
 /* sum the widths of all output path elements */
 wsum = 0;
 for (pei = 0; pei <= pthp->last_peout; pei++)
  {
   pep = &(pthp->peouts[pei]);
   wsum += (pep->pthi1 == -1) ? pep->penp->nwid
    : (pep->pthi1 - pep->pthi2 + 1);
  }
 *poutwid = wsum;
}
5843
5844 /*
5845 * build a bit reference of form [<bit>] or ""
5846 */
/*
 * build a bit reference string of form [<bit>] into caller's buffer s
 * scalars and whole-vector (-1) references produce the empty string
 */
static char *bld_bitref(char *s, struct net_t *np, int32 bi)
{
 if (np->n_isavec && bi != -1) sprintf(s, "[%d]", bi);
 else s[0] = '\0';
 return(s);
}
5853
5854 /*
5855 * check driver of bit to make sure bit has exactly one driver
5856 */
static int32 chk_biti_pthdst_driver(struct spcpth_t *pthp, struct net_t *np,
 int32 biti)
{
 register struct net_pin_t *npp;
 register struct npaux_t *npauxp;
 int32 num_drvs;
 char s1[RECLEN];

 if (!np->n_isavec)
  {
   /* scalar wire: count all local drivers - more than one is an error */
   if (np->ndrvs == NULL) goto no_drv;
   for (num_drvs = 0, npp = np->ndrvs; npp != NULL; npp = npp->npnxt)
    {
     /* npntyp values above NP_MDPRT are not real drivers - skip them */
     if (npp->npntyp > NP_MDPRT) continue;
     if (npp->npntyp <= NP_MDPRT && npp->np_xmrtyp != XNP_LOC)
      goto pthdst_xmr;
     if (++num_drvs > 1)
      {
       __gferr(827, pthp->pthsym->syfnam_ind, pthp->pthsym->sylin_cnt,
        "path destination %s illegal - scalar wire has more than one driver",
        np->nsym->synam);
       return(FALSE);
      }
    }
   if (num_drvs == 0) goto no_drv;
  }

 /* here only check drivers of this bit */
 for (num_drvs = 0, npp = np->ndrvs; npp != NULL; npp = npp->npnxt)
  {
   /* no aux record or -1 index means driver of whole wire - matches */
   if ((npauxp = npp->npaux) == NULL || npauxp->nbi1 == -1) goto got_match;
   /* must never see IS form driver here - must cause copying earlier */
   /* DBG remove --- */
   if (npauxp->nbi1 == -2) __misc_terr(__FILE__, __LINE__);
   /* --- */

   /* skip drivers whose normalized h:l range does not contain biti */
   if (biti > npauxp->nbi1 || biti < npauxp->nbi2.i) continue;

got_match:
   /* found driver of bit - know only 1 */
   if (npp->npntyp <= NP_MDPRT && npp->np_xmrtyp != XNP_LOC)
    {
pthdst_xmr:
     __gferr(961, pthp->pthsym->syfnam_ind, pthp->pthsym->sylin_cnt,
      "path destination %s%s cross module reference driver illegal",
      np->nsym->synam, bld_bitref(s1, np, biti));
     return(FALSE);
    }
   /* NOTE(review): multi-driver on a vector bit emits error 827 but */
   /* still falls through to return TRUE - presumably so every bad bit */
   /* gets reported; confirm this asymmetry with scalar case intended */
   if (++num_drvs > 1)
    {
     __gferr(827, pthp->pthsym->syfnam_ind, pthp->pthsym->sylin_cnt,
      "path destination %s%s illegal - more than one driver",
      np->nsym->synam, bld_bitref(s1, np, biti));
    }
  }
 if (num_drvs != 0) return(TRUE);

no_drv:
 /* no driver at all - legal but path delay can never have an effect */
 __gfwarn(616, pthp->pthsym->syfnam_ind, pthp->pthsym->sylin_cnt,
  "path destination %s%s does not have a driver - path delay no effect",
  np->nsym->synam, bld_bitref(s1, np, biti));
 return(TRUE);
}
5920
5921 /*
5922 * emit path dest bit informs where bits not in any path for vectors
5923 * processes path dests in current module.
5924 * know module has specify section and at least one path
5925 *
5926 * SJM 06/06/00 - also emitting inform for inout as both path src/dest
5927 */
emit_pthdst_bit_informs(struct mod_t * mdp)5928 static void emit_pthdst_bit_informs(struct mod_t *mdp)
5929 {
5930 register int32 ni, bi;
5931 register struct net_t *np;
5932
5933 if (mdp->mnnum == 0) return;
5934 for (ni = 0, np = &(mdp->mnets[0]); ni < mdp->mnnum; ni++, np++)
5935 {
5936 /* SJM 06/06/00 - emit inform if inout used as path src and dst */
5937 /* SJM 07/16/01 - removed warning, it was wrong - inout paths now work */
5938
5939 if (!np->n_isapthdst || !np->n_isavec) continue;
5940 /* DBG remove --- */
5941 if (np->nu.rngdwir->n_delrep != DT_PTHDST)
5942 __misc_terr(__FILE__, __LINE__);
5943 /* --- */
5944
5945 for (bi = 0; bi < np->nwid; bi++)
5946 {
5947 if (np->nu.rngdwir->n_du.pb_pthdst[bi] == NULL)
5948 {
5949 __gfinform(474, np->nsym->syfnam_ind, np->nsym->sylin_cnt,
5950 "for delay path destination %s, bit %d is not destionation of any path",
5951 np->nsym->synam, bi);
5952 }
5953 }
5954 }
5955 }
5956
5957 /*
5958 * check path delay non zero and non expression
5959 */
__chk_0del(word32 drep,union del_u du,struct mod_t * mdp)5960 extern int32 __chk_0del(word32 drep, union del_u du, struct mod_t *mdp)
5961 {
5962 register int32 i;
5963 int32 some_0, all_0;
5964
5965 some_0 = FALSE;
5966 all_0 = TRUE;
5967 switch ((byte) drep) {
5968 /* think ,, form will work right here for timing checks */
5969 case DT_NONE: return(DBAD_NONE);
5970 case DT_1X: case DT_4X: return(DBAD_EXPR);
5971 case DT_1V:
5972 if (*du.d1v == 0ULL) return(DBAD_0);
5973 return(DGOOD);
5974 case DT_IS1V:
5975 for (i = 0; i < mdp->flatinum; i++)
5976 { if (du.dis1v[i] == 0ULL) some_0 = TRUE; else all_0 = FALSE; }
5977 break;
5978 case DT_IS1V1:
5979 for (i = 0; i < mdp->flatinum; i++)
5980 { if (du.dis1v1[i] == 0) some_0 = TRUE; else all_0 = FALSE; }
5981 break;
5982 case DT_IS1V2:
5983 for (i = 0; i < mdp->flatinum; i++)
5984 { if (du.dis1v2[i] == 0) some_0 = TRUE; else all_0 = FALSE; }
5985 break;
5986 case DT_4V:
5987 for (i = 0; i < 4; i++)
5988 { if (du.d4v[i] == 0ULL) some_0 = TRUE; else all_0 = FALSE; }
5989 break;
5990 case DT_IS4V:
5991 for (i = 0; i < 4*mdp->flatinum; i++)
5992 { if (du.dis4v[i] == 0ULL) some_0 = TRUE; else all_0 = FALSE; }
5993 break;
5994 case DT_IS4V1:
5995 some_0 = FALSE;
5996 all_0 = TRUE;
5997 for (i = 0; i < 4*mdp->flatinum; i++)
5998 { if (du.dis4v1[i] == 0) some_0 = TRUE; else all_0 = FALSE; }
5999 break;
6000 case DT_IS4V2:
6001 some_0 = FALSE;
6002 all_0 = TRUE;
6003 for (i = 0; i < 4*mdp->flatinum; i++)
6004 { if (du.dis4v2[i] == 0) some_0 = TRUE; else all_0 = FALSE; }
6005 break;
6006 case DT_16V:
6007 for (i = 1; i < 16; i++)
6008 {
6009 /* must skip unused that will be 0 */
6010 if (i == 5 || i == 10 || i == 15) continue;
6011 if (du.d16v[i] == 0ULL) some_0 = TRUE; else all_0 = FALSE;
6012 }
6013 break;
6014 case DT_IS16V:
6015 for (i = 1; i < 16*mdp->flatinum; i++)
6016 {
6017 if ((i % 16) == 5 || (i % 16) == 10 || (i % 16) == 15) continue;
6018 if (du.dis16v[i] == 0ULL) some_0 = TRUE; else all_0 = FALSE;
6019 }
6020 break;
6021 case DT_IS16V1:
6022 for (i = 1; i < 16*mdp->flatinum; i++)
6023 {
6024 if ((i % 16) == 5 || (i % 16) == 10 || (i % 16) == 15) continue;
6025 if (du.dis16v1[i] == 0) some_0 = TRUE; else all_0 = FALSE;
6026 }
6027 break;
6028 case DT_IS16V2:
6029 for (i = 0; i < 16*mdp->flatinum; i++)
6030 {
6031 if ((i % 16) == 5 || (i % 16) == 10 || (i % 16) == 15) continue;
6032 if (du.dis16v2[i] == 0) some_0 = TRUE; else all_0 = FALSE;
6033 }
6034 break;
6035 default: __case_terr(__FILE__, __LINE__);
6036 }
6037 if (all_0) return(DBAD_0);
6038 if (some_0) return(DBAD_MAYBE0);
6039 return(DGOOD);
6040 }
6041
6042 /*
6043 * DESIGN PART FREE ROUTINES
6044 */
6045
6046 /*
6047 * free one statement
6048 * this requires set __inst_mod
6049 * LOOKATME - how come only called from interactive and always 1 flat inst?
6050 */
extern void __free_1stmt(struct st_t *stp)
{
 int32 fji;
 struct for_t *frp;
 struct qconta_t *qcafs;
 struct st_t *fjstp;

 if (stp == NULL) return;

 /* free the statement's internal parts per type - the st_t record */
 /* itself is not freed here (caller owns it) */
 switch ((byte) stp->stmttyp) {
  case S_NULL: case S_STNONE: break;
  case S_PROCA: case S_FORASSGN: case S_RHSDEPROCA: case S_NBPROCA:
   __free_xtree(stp->st.spra.lhsx);
   __free_xtree(stp->st.spra.rhsx);
   break;
  case S_IF:
   __free_xtree(stp->st.sif.condx);
   __free_stlst(stp->st.sif.thenst);
   __free_stlst(stp->st.sif.elsest);
   break;
  case S_CASE:
   __free_xtree(stp->st.scs.csx);
   /* this also frees default: (maybe just place holder) and list els */
   free_csitemlst(stp->st.scs.csitems);
   break;
  case S_REPEAT:
   __free_xtree(stp->st.srpt.repx);
   /* if after preparation need to free temp. repeat count array */
   /* LOOKATME - why is this only per. inst. stor. place in stmts? */
   if (stp->st.srpt.reptemp != NULL)
    __my_free((char *) stp->st.srpt.reptemp,
     __inst_mod->flatinum*sizeof(word32 *));
   __free_stlst(stp->st.srpt.repst);
   break;
  case S_FOREVER:
  case S_WHILE:
   __free_xtree(stp->st.swh.lpx);
   __free_stlst(stp->st.swh.lpst);
   break;
  case S_WAIT:
   __free_xtree(stp->st.swait.lpx);
   /* free statement list since del. ctrl. points to wait itself */
   __free_stlst(stp->st.swait.lpst);
   /* do not free action statement which is wait itself */
   free_dctrl(stp->st.swait.wait_dctp, FALSE);
   break;
  case S_FOR:
   frp = stp->st.sfor;
   /* notice for assign already freed */
   __free_xtree(frp->fortermx);
   __free_1stmt(frp->forinc);
   __free_stlst(frp->forbody);
   __my_free((char *) frp, sizeof(struct for_t));
   break;
  case S_DELCTRL: free_dctrl(stp->st.sdc, TRUE); break;
  case S_UNBLK:
   __free_stlst(stp->st.sbsts);
   break;
  case S_UNFJ:
   /* fork-join arm statement table is NULL terminated */
   for (fji = 0;; fji++)
    {
     if ((fjstp = stp->st.fj.fjstps[fji]) == NULL) break;
     __free_stlst(fjstp);
    }
   __my_free((char *) stp->st.fj.fjstps, (fji + 1)*sizeof(struct st_t *));
   __my_free((char *) stp->st.fj.fjlabs, (fji + 1)*sizeof(int32));
   break;
  case S_TSKCALL:
   __free_xtree(stp->st.stkc.targs);
   /* cannot free symbol */
   /* only interactive can be freed so cannot have tfrec */
   if (stp->st.stkc.tkcaux.trec != NULL) __misc_terr(__FILE__, __LINE__);
   break;
  case S_QCONTA:
   qcafs = stp->st.sqca;
   /* notice for assign already freed */
   __free_xtree(qcafs->qclhsx);
   __free_xtree(qcafs->qcrhsx);
   __my_free((char *) qcafs, sizeof(struct qconta_t));
   break;
  case S_QCONTDEA:
   __free_xtree(stp->st.sqcdea.qcdalhs);
   break;
  case S_CAUSE: break;
  case S_DSABLE:
   __free_xtree(stp->st.sdsable.dsablx);
   break;
  /* statement added for execution */
  case S_REPSETUP:
   /* union field unused */
   stp->st.scausx = NULL;
   break;
  case S_REPDCSETUP:
   /* union field unused */
   stp->st.scausx = NULL;
   break;
  case S_GOTO: break;
  /* notice named block non freeable (at least for now) */
  default: __case_terr(__FILE__, __LINE__);
 }
}
6152
6153 /*
6154 * free a delay control record
6155 */
/*
 * free one delay control record and (optionally) its action stmt list
 * wait stmts pass free_action FALSE because the action is the wait itself
 */
static void free_dctrl(struct delctrl_t *dctp, int32 free_action)
{
 /* release the delay value or the event control expression */
 __free_del(dctp->dc_du, dctp->dc_delrep, __inst_mod->flatinum);

 /* SJM - 08/03/01 - also free repeat cout expr if present */
 if (dctp->repcntx != NULL) __free_xtree(dctp->repcntx);

 /* plain delay (not event) controls never allocate scheduled tevs */
 if (dctp->dceschd_tevs != NULL)
  {
   __my_free((char *) dctp->dceschd_tevs,
    __inst_mod->flatinum*sizeof(struct tev_t *));
  }
 dctp->dceschd_tevs = NULL;
 if (free_action) __free_stlst(dctp->actionst);
 __my_free((char *) dctp, sizeof(struct delctrl_t));
}
6172
6173 /*
6174 * free case item list
6175 */
free_csitemlst(register struct csitem_t * csip)6176 static void free_csitemlst(register struct csitem_t *csip)
6177 {
6178 struct csitem_t *csip2;
6179
6180 for (;csip != NULL;)
6181 {
6182 csip2 = csip->csinxt;
6183 /* nil expr list always nil for default */
6184 if (csip->csixlst != NULL) __free_xprlst(csip->csixlst);
6185 /* if no default, stmt of first nil */
6186 if (csip->csist != NULL) __free_stlst(csip->csist);
6187 __my_free((char *) csip, sizeof(struct csitem_t));
6188 csip = csip2;
6189 }
6190 }
6191
6192 /*
6193 * free a list of statements - i.e. next fields connect to make block list
6194 */
__free_stlst(register struct st_t * stp)6195 extern void __free_stlst(register struct st_t *stp)
6196 {
6197 register struct st_t *stp2;
6198
6199 for (; stp != NULL;) { stp2 = stp->stnxt; __free_1stmt(stp); stp = stp2; }
6200 }
6201
6202 /*
6203 * free an expression list
6204 */
__free_xprlst(struct exprlst_t * xplp)6205 extern void __free_xprlst(struct exprlst_t *xplp)
6206 {
6207 register struct exprlst_t *xplp2;
6208
6209 for (; xplp != NULL;)
6210 {
6211 xplp2 = xplp->xpnxt;
6212 __free_xtree(xplp->xp);
6213 __my_free((char *) xplp, sizeof(struct exprlst_t));
6214 xplp = xplp2;
6215 }
6216 }
6217
6218 /*
 * ROUTINES TO TRANSFORM INTERNAL NET LIST FOR COMPILATION
6220 */
6221
6222 /*
6223 * process all net list data structures tranforming for compilation
6224 */
extern void __xform_nl_to_modtabs(void)
{
 struct mod_t *mdp;

 for (mdp = __modhdr; mdp != NULL; mdp = mdp->mnxt)
  {
   /* make mdp the current working instance context (__inst_mod) */
   __push_wrkitstk(mdp, 0);

   /* allocate table filled with all exprs in module */
   if (__inst_mod->mexprnum == 0) __inst_mod->mexprtab = NULL;
   else
    {
     __inst_mod->mexprtab = (struct expr_t *)
      __my_malloc(__inst_mod->mexprnum*sizeof(struct expr_t));
    }
   /* __last_modxi indexes last used mexprtab slot, -1 means empty */
   __last_modxi = -1;

   /* allocate table filled with all stmts in module */
   if (__inst_mod->mstnum == 0) __inst_mod->msttab = NULL;
   else
    {
     __inst_mod->msttab = (struct st_t *)
      __my_malloc(__inst_mod->mstnum*sizeof(struct st_t));
    }
   __last_modsti = -1;

   /* move each class of netlist objects into the new tables */
   cmp_xform_ports();
   cmp_xform_ialst();
   cmp_xform_inst_conns();
   cmp_xform_gates();
   cmp_xform_contas();
   cmp_xform_tasks();

   if (__inst_mod->mspfy != NULL) cmp_xform_specify();

   /* LOOKATME - do analog block statement and expressions need to be */
   /* xformed since not accessed in digital sim, think not */

   /* many expressions (say from xmrs) not copied since not needed during */
   /* exec - check and adjust mod expr no. here */
   if (__last_modxi + 1 > __inst_mod->mexprnum)
    __misc_terr(__FILE__, __LINE__);
   __inst_mod->mexprnum = __last_modxi + 1;

   /* DBG remove --
   if (__last_modsti + 1 != __inst_mod->mstnum)
    __misc_terr(__FILE__, __LINE__);
   --- */
   __pop_wrkitstk();

   /* DBG remove ---
   if (__debug_flg)
    {
     __dmp_exprtab(mdp, mdp->mexprnum);
     __dmp_msttab(mdp, mdp->mstnum);
    }
   --- */
  }
}
6283
6284 /*
6285 * compile transform all port lists for one module
6286 */
cmp_xform_ports(void)6287 static void cmp_xform_ports(void)
6288 {
6289 register int32 pi, pnum;
6290 register struct mod_pin_t *mpp;
6291
6292 mpp = &(__inst_mod->mpins[0]);
6293 pnum = __inst_mod->mpnum;
6294 for (pi = 0; pi < pnum; pi++, mpp++)
6295 {
6296 mpp->mpref = mv1_expr_totab(mpp->mpref);
6297 }
6298 }
6299
6300 /*
6301 * copy a statement list (linked list of statements)
6302 */
cmp_xform_lstofsts(register struct st_t * ostp)6303 static struct st_t *cmp_xform_lstofsts(register struct st_t *ostp)
6304 {
6305 register struct st_t *nstp_hdr, *nstp, *last_nstp, *ostp2;
6306
6307 nstp_hdr = NULL;
6308 for (last_nstp = NULL; ostp != NULL;)
6309 {
6310 nstp = cmp_xform1_stmt(ostp, last_nstp);
6311 /* must not free internal parts of old statement */
6312
6313 if (last_nstp == NULL) nstp_hdr = nstp; else last_nstp->stnxt = nstp;
6314 nstp->stnxt = NULL;
6315 last_nstp = nstp;
6316 ostp2 = ostp->stnxt;
6317 __my_free((char *) ostp, sizeof(struct st_t));
6318 ostp = ostp2;
6319 }
6320 return(nstp_hdr);
6321 }
6322
6323 /*
6324 * compile transform one statement
6325 */
static struct st_t *cmp_xform1_stmt(register struct st_t *ostp,
 struct st_t *last_stp)
{
 register struct st_t *nstp;
 int32 fji;
 struct sy_t *syp;
 struct systsk_t *stbp;
 struct st_t *fjstp;

 /* DBG remove --
 if (__debug_flg)
  {
   -* --
   __dbg_msg("%04d: AT %s %s - STMT XFORM (%s)\n", ostp->stalloc_ndx,
    __bld_lineloc(__xs, ostp->stfnam_ind, ostp->stlin_cnt),
    __inst_mod->msym->synam, __to_sttyp(__xs2, ostp->stmttyp));
   -- *-
   -* -- *-
   __dbg_msg("AT %s %s - STMT XFORM %04d (%s)\n",
    __bld_lineloc(__xs, ostp->stfnam_ind, ostp->stlin_cnt),
    __inst_mod->msym->synam, __last_modsti + 1,
    __to_sttyp(__xs2, ostp->stmttyp));
   -* -- *-
  }
 --- */

 if (ostp == NULL) return(NULL);

 /* DBG remove -- */
 if (__last_modsti + 2 > __inst_mod->mstnum) __misc_terr(__FILE__, __LINE__);
 /* ---*/

 /* claim the next free slot in the module statement table */
 nstp = &(__inst_mod->msttab[++__last_modsti]);

 /* because union for common case now in statement, gets copied here */
 *nstp = *ostp;
 nstp->stnxt = NULL;

 /* per type fixups: move contained exprs to the expr table and */
 /* recursively move contained statement lists */
 switch ((byte) ostp->stmttyp) {
  /* null just has type value and NULL pointer (i.e. ; by itself) */
  case S_NULL: case S_STNONE: break;
  /* SJM 09/24/01 - for fj, must not make for assign separate stmt */
  case S_PROCA: case S_RHSDEPROCA: case S_NBPROCA: case S_FORASSGN:
   nstp->st.spra.lhsx = mv1_expr_totab(ostp->st.spra.lhsx);
   nstp->st.spra.rhsx = mv1_expr_totab(ostp->st.spra.rhsx);
   break;
  case S_IF:
   nstp->st.sif.condx = mv1_expr_totab(ostp->st.sif.condx);
   nstp->st.sif.thenst = cmp_xform_lstofsts(ostp->st.sif.thenst);
   nstp->st.sif.elsest = cmp_xform_lstofsts(ostp->st.sif.elsest);
   break;
  case S_CASE:
   nstp->st.scs.castyp = ostp->st.scs.castyp;
   nstp->st.scs.maxselwid = ostp->st.scs.maxselwid;
   nstp->st.scs.csx = mv1_expr_totab(ostp->st.scs.csx);

   /* if case has no default, st and expr list fields nil */
   /* no copy but must transform st lists and expr inside */
   nstp->st.scs.csitems = ostp->st.scs.csitems;
   cmp_xform_csitemlst(nstp->st.scs.csitems);
   break;
  case S_REPEAT:
   nstp->st.srpt.repx = mv1_expr_totab(ostp->st.srpt.repx);
   /* move ptr to new, old freed so no wrong cross links */
   nstp->st.srpt.reptemp = ostp->st.srpt.reptemp;
   nstp->st.srpt.repst = cmp_xform_lstofsts(ostp->st.srpt.repst);
   break;
  case S_REPSETUP:
   /* no contents just indicator for setting up next actual rep stmt */
   break;

  case S_FOREVER: case S_WHILE:
   nstp->st.swh.lpx = mv1_expr_totab(ostp->st.swh.lpx);
   nstp->st.swh.lpst = cmp_xform_lstofsts(ostp->st.swh.lpst);
   break;
  case S_WAIT:
   nstp->st.swait.lpx = mv1_expr_totab(ostp->st.swait.lpx);
   nstp->st.swait.lpst = cmp_xform_lstofsts(ostp->st.swait.lpst);
   /* here just move pointer since needed by new statement in table */
   /* LOOKATME - is this already copied */
   nstp->st.swait.wait_dctp = ostp->st.swait.wait_dctp;
   /* also point dctp statement to new one */
   nstp->st.swait.wait_dctp->actionst = nstp->st.swait.lpst;
   break;
  case S_FOR:
   /* for stmt relies on the previously moved S_FORASSGN (last_stp) */
   /* DBG remove --- */
   if (last_stp == NULL) __misc_terr(__FILE__, __LINE__);
   if (last_stp->stmttyp != S_FORASSGN) __misc_terr(__FILE__, __LINE__);
   /* --- */
   /* notice using already allocated storage - assumes dynamic dlopen so */
   /* no need to save and copy storage to runtime */
   nstp->st.sfor = ostp->st.sfor;
   nstp->st.sfor->forassgn = last_stp;
   /* still need to xform expressions */
   nstp->st.sfor->fortermx = mv1_expr_totab(ostp->st.sfor->fortermx);

   /* for inc xformed in here */
   nstp->st.sfor->forbody = cmp_xform_lstofsts(ostp->st.sfor->forbody);

   {
    struct st_t *stp2, *last_stp2, *last_stp3;

    /* find next-to-last body stmt - that is the increment assign */
    stp2 = nstp->st.sfor->forbody;
    for (last_stp2 = last_stp3 = NULL; stp2 != NULL; stp2 = stp2->stnxt)
     {
      last_stp3 = last_stp2;
      last_stp2 = stp2;
     }
    /* DBG remove --- */
    if (last_stp3->stmttyp != S_PROCA) __misc_terr(__FILE__, __LINE__);
    /* --- */
    nstp->st.sfor->forinc = last_stp3;
   }
   break;
  case S_REPDCSETUP:
   /* no contents just indicator for setting up next actual rep stmt */
   break;
  case S_DELCTRL:
   /* stmt contents do not fit in record so link old ptr to new */
   nstp->st.sdc = ostp->st.sdc;
   /* here fix but no need to copy dellst */
   cmp_xform_delay(nstp->st.sdc->dc_delrep, nstp->st.sdc->dc_du);
   nstp->st.sdc->actionst = cmp_xform_lstofsts(ostp->st.sdc->actionst);
   break;
  case S_NAMBLK:
   /* back link the task record to the moved named block stmt */
   nstp->st.snbtsk->tsksyp->el.etskp->st_namblkin = nstp;
   /* for named block, no continuation - must be subthread except in func */
   nstp->st.snbtsk->tskst = cmp_xform_lstofsts(ostp->st.snbtsk->tskst);
   break;
  case S_UNBLK:
   nstp->st.sbsts = cmp_xform_lstofsts(ostp->st.sbsts);
   break;
  case S_UNFJ:
   /* arm table is NULL terminated - rebuild each arm's stmt list */
   for (fji = 0;; fji++)
    {
     if ((fjstp = ostp->st.fj.fjstps[fji]) == NULL) break;
     nstp->st.fj.fjstps[fji] = cmp_xform_lstofsts(fjstp);
    }
   break;
  case S_TSKCALL:
   /* find new task through old to new symbol */
   /* for system tasks since points to itself gets same (right) sym */
   nstp->st.stkc.tsksyx = mv1_expr_totab(ostp->st.stkc.tsksyx);
   nstp->st.stkc.targs = mv1_expr_totab(ostp->st.stkc.targs);
   /* task itself xforms in xform task routine */

   /* need to fixup PLI tf systf cross linked tf recs only - not vpi */
   syp = nstp->st.stkc.tsksyx->lu.sy;
   if (syp->sytyp == SYM_STSK)
    {
     stbp = syp->el.esytbp;
     /* only way to tell if tf_ is by range of tsk veriusertf index */
     if (stbp->stsknum >= BASE_VERIUSERTFS
      && (int32) stbp->stsknum <= __last_veriusertf)
      {
       xform_tf_syst_enable(nstp);
      }
    }
   /* for vpi registered and built-in system tasks nothing to do */
   /* for vpi systasks accessing vpi_ systf has index and values */
   /* needed in vpi_ t vpi systf data record */
   break;
  case S_QCONTA:
   /* SJM 02/23/02 - now malloc aux info, but can still use - no new malloc */
   nstp->st.sqca->qcatyp = ostp->st.sqca->qcatyp;
   nstp->st.sqca->regform = ostp->st.sqca->regform;
   nstp->st.sqca->qclhsx = mv1_expr_totab(ostp->st.sqca->qclhsx);
   nstp->st.sqca->qcrhsx = mv1_expr_totab(ostp->st.sqca->qcrhsx);
   nstp->st.sqca->rhs_qcdlstlst = ostp->st.sqca->rhs_qcdlstlst;
   break;
  case S_QCONTDEA:
   nstp->st.sqcdea.qcdatyp = ostp->st.sqcdea.qcdatyp;
   nstp->st.sqcdea.regform = ostp->st.sqcdea.regform;
   nstp->st.sqcdea.qcdalhs = mv1_expr_totab(ostp->st.sqcdea.qcdalhs);
   break;
  case S_CAUSE:
   /* must copy expr. even though know just event name */
   nstp->st.scausx = mv1_expr_totab(ostp->st.scausx);
   break;
  case S_DSABLE:
   nstp->st.sdsable.dsablx = mv1_expr_totab(ostp->st.sdsable.dsablx);
   break;
  case S_GOTO:
   /* must 0 out dest. since fixed later with right target */
   nstp->st.sgoto = NULL;
   break;
  /* LOOKATME - need name resolving statement type no? */
  default: __case_terr(__FILE__, __LINE__);
 }
 return(nstp);
}
6517
6518 /*
6519 * xform tf_ style sys task enable by updating tf rec
6520 *
6521 * not needed for vpi style syst enables because compiletf already called
6522 *
6523 * LOOKATME - could pass systf ptrs since already computed
6524 */
static void xform_tf_syst_enable(struct st_t *nstp)
{
 register int32 ai;
 struct tskcall_t *tkcp;
 struct tfrec_t *tfrp;
 struct expr_t *argxp;
 struct tfarg_t *tfap;

 tkcp = &(nstp->st.stkc);
 /* DBG remove --- */
 if (tkcp->tkcaux.trec == NULL) __misc_terr(__FILE__, __LINE__);
 /* --- */
 /* tsk call record contains tfrec for PLI 1.0 registered */
 tfrp = tkcp->tkcaux.trec;

 /* back link tfrec for systask enable to new moved stmt */
 tfrp->tfu.tfstp = nstp;
 /* notice for tsk, targs 1st left has actual argument - no need for next */
 argxp = tkcp->targs;
 /* update all the tfarg expr ptrs to moved expr - args form a right */
 /* chain with each actual arg expr in the node's lu.x */
 for (ai = 1; ai < tfrp->tfanump1; ai++, argxp = argxp->ru.x)
  {
   /* DBG remove -- */
   if (argxp == NULL) __misc_terr(__FILE__, __LINE__);
   /* --- */

   tfap = &(tfrp->tfargs[ai]);
   tfap->arg.axp = argxp->lu.x;
  }
}
6555
6556 /*
6557 * ROUTINES TO XFORM (MOVE TO INDEXABLE TABLE) EXPRESSIONS
6558 */
6559 /*
6560 * routine to move one leaf or non leaf expression into table
6561 */
static struct expr_t *mv1_expr_totab(struct expr_t *oxp)
{
 struct expr_t *nxp;
 struct sy_t *syp;
 struct sysfunc_t *sfbp;

 if (oxp == NULL) return(NULL);

 /* DBG remove -- */
 if (__last_modxi + 2 > __inst_mod->mexprnum) __misc_terr(__FILE__, __LINE__);
 /* ---*/

 /* claim next free slot in the module expr table */
 nxp = &(__inst_mod->mexprtab[++__last_modxi]);
 switch ((byte) oxp->optyp) {
  case ID: case OPEMPTY:
   /* op empty works since both lu and ru are nil */
   *nxp = *oxp;
   /* notice symbol name in symbol table so ptr can be subtracted from */
   /* symbol table base */
   break;
  case GLBREF:
   /* for global ru is gref and lu is symbol in target mod symbol table */
   /* know ru.grp always in module and already built as table */
   *nxp = *oxp;
   nxp->ru.grp->gxndp = nxp;
   break;
  case NUMBER: case ISNUMBER: case REALNUM: case ISREALNUM:
   /* know constant table already built so can just copy */
   *nxp = *oxp;
   break;
  case FCALL:
   syp = oxp->lu.x->lu.sy;
   *nxp = *oxp;
   /* start by copying call expr. */
   if (oxp->lu.x != NULL) nxp->lu.x = mv1_expr_totab(oxp->lu.x);
   if (oxp->ru.x != NULL) nxp->ru.x = mv1_expr_totab(oxp->ru.x);

   /* need to fixup PLI systf cross linked records when change repl expr */
   if (syp->sytyp == SYM_SF)
    {
     sfbp = syp->el.esyftbp;
     if (sfbp->tftyp == SYSF_TF) xform_tf_sysf_call(nxp);
    }
   /* for vpi registered and built-in system function nothing to do */
   /* for vpi sysfuncs accessing vpi_ sysfunc t has index and values */
   /* needed in vpi_ t vpi systf data record */
   break;
  case TOK_NONE: case BADOBJ: case LITSTR:
   __misc_terr(__FILE__, __LINE__); break;
  default:
   /* operator node: copy then recursively move both operand subtrees */
   *nxp = *oxp;
   if (oxp->lu.x != NULL) nxp->lu.x = mv1_expr_totab(oxp->lu.x);
   if (oxp->ru.x != NULL) nxp->ru.x = mv1_expr_totab(oxp->ru.x);
 }
 /* last step - contained in expr. link in parents right dir ptr */
 __my_free((char *) oxp, sizeof(struct expr_t));
 return(nxp);
}
6620
6621 /*
6622 * xform tf_ style sysf call by updating sysf call record
6623 *
6624 * not needed for vpi style sysf calls because compiletf already called
6625 */
xform_tf_sysf_call(struct expr_t * nxp)6626 static void xform_tf_sysf_call(struct expr_t *nxp)
6627 {
6628 register int32 ai;
6629 struct sy_t *syp;
6630 struct sysfunc_t *sfbp;
6631 struct tfrec_t *tfrp;
6632 struct expr_t *argxp;
6633 struct tfarg_t *tfap;
6634
6635 syp = nxp->lu.x->lu.sy;
6636 sfbp = syp->el.esyftbp;
6637
6638 /* DBG remove --- */
6639 if (sfbp->syfnum < BASE_VERIUSERTFS
6640 || sfbp->syfnum > __last_veriusertf) __misc_terr(__FILE__, __LINE__);
6641 /* --- */
6642 /* szu field used for ptr to the tfrec - nothing to set in new expr*/
6643 tfrp = nxp->lu.x->szu.xfrec;
6644 /* back link tfrec for sysf call to new moved expr */
6645 tfrp->tfu.callx = nxp;
6646 /* update all the tfarg expr ptrs to moved expr */
6647 argxp = nxp->ru.x;
6648 for (ai = 1; ai < tfrp->tfanump1; ai++, argxp = argxp->ru.x)
6649 {
6650 /* DBG remove -- */
6651 if (argxp == NULL) __misc_terr(__FILE__, __LINE__);
6652 /* --- */
6653
6654 tfap = &(tfrp->tfargs[ai]);
6655 tfap->arg.axp = argxp->lu.x;
6656 }
6657 }
6658
6659 /*
6660 * compile transform list of case items
6661 *
6662 * no need to copy but must replace statement lists and exprs inside
6663 */
cmp_xform_csitemlst(register struct csitem_t * csip)6664 static void cmp_xform_csitemlst(register struct csitem_t *csip)
6665 {
6666 register struct exprlst_t *xplp;
6667
6668 for (; csip != NULL; csip = csip->csinxt)
6669 {
6670 if (csip->csist != NULL) csip->csist = cmp_xform_lstofsts(csip->csist);
6671 /* know for first default, selector x list nil */
6672 for (xplp = csip->csixlst; xplp != NULL; xplp = xplp->xpnxt)
6673 {
6674 xplp->xp = mv1_expr_totab(xplp->xp);
6675 }
6676 }
6677 }
6678
6679 /*
6680 * must replace expressions in case dellst one of the expr forms
6681 *
6682 * by here delays already converted to one of 16 forms
6683 */
static void cmp_xform_delay(int32 drep, union del_u du)
{
 struct expr_t *tmpxp;

 /* NOTE(review): du is passed by value - the DT_1X assignment below */
 /* updates only this local copy of the union; presumably the back links */
 /* set during the move make this work - confirm against callers */
 switch ((byte) drep) {
  /* nothing to do for the delay value table forms */
  case DT_NONE:
  case DT_1V: case DT_IS1V: case DT_IS1V1: case DT_IS1V2: case DT_4V:
  case DT_IS4V: case DT_IS4V1: case DT_IS4V2: case DT_16V: case DT_IS16V:
  case DT_IS16V1: case DT_IS16V2:
   break;
  case DT_1X:
   /* since must copy from something that gets freed, must first */
   /* copy expr to tmp that is then immediately freed by expr mv */
   tmpxp = __sim_copy_expr(du.d1x);
   du.d1x = mv1_expr_totab(tmpxp);
   break;
  case DT_4X:
   /* move each present per transition delay expr (up to 4) */
   if (du.d4x[0] != NULL)
    { tmpxp = __sim_copy_expr(du.d4x[0]); du.d4x[0] = mv1_expr_totab(tmpxp); }
   if (du.d4x[1] != NULL)
    { tmpxp = __sim_copy_expr(du.d4x[1]); du.d4x[1] = mv1_expr_totab(tmpxp); }
   if (du.d4x[2] != NULL)
    { tmpxp = __sim_copy_expr(du.d4x[2]); du.d4x[2] = mv1_expr_totab(tmpxp); }
   if (du.d4x[3] != NULL)
    { tmpxp = __sim_copy_expr(du.d4x[3]); du.d4x[3] = mv1_expr_totab(tmpxp); }
   break;
  case DT_CMPLST:
   /* LOOKATME - is this legal */
   __misc_terr(__FILE__, __LINE__);
   break;
  default: __case_terr(__FILE__, __LINE__);
 }
}
6718
6719 /*
6720 * GOTO FIXUP ROUTINES
6721 */
6722
6723 /*
6724 * compile transform all initial/always statement lists
6725 */
cmp_xform_ialst(void)6726 static void cmp_xform_ialst(void)
6727 {
6728 register struct ialst_t *ialp;
6729
6730 __processing_func = FALSE;
6731 for (ialp = __inst_mod->ialst; ialp != NULL; ialp = ialp->ialnxt)
6732 {
6733 ialp->iastp = cmp_xform_lstofsts(ialp->iastp);
6734 }
6735 __prpsti = 0;
6736 __nbsti = -1;
6737 __prpstk[0] = NULL;
6738 for (ialp = __inst_mod->ialst; ialp != NULL; ialp = ialp->ialnxt)
6739 {
6740 if (ialp->iatyp == ALWAYS)
6741 {
6742 cxf_fixup_loopend_goto(ialp->iastp, ialp->iastp);
6743 }
6744 cxf_fixup_lstofsts_gotos(ialp->iastp, TRUE);
6745
6746 /* DBG remove --- */
6747 if (__prpsti != 0) __misc_terr(__FILE__, __LINE__);
6748 /* --- */
6749 }
6750 }
6751
6752 /*
6753 * fixup loop end gotos
6754 */
cxf_fixup_loopend_goto(struct st_t * begstp,struct st_t * targstp)6755 static void cxf_fixup_loopend_goto(struct st_t *begstp, struct st_t *targstp)
6756 {
6757 register struct st_t *stp;
6758 struct st_t *last_stp;
6759
6760 /* DBG remove --- */
6761 if (begstp == NULL) __arg_terr(__FILE__, __LINE__);
6762 /* --- */
6763
6764 /* find last statement in loop - know has at least one */
6765 for (stp = begstp, last_stp = NULL; stp != NULL; stp = stp->stnxt)
6766 last_stp = stp;
6767 /* DBG remove --- */
6768 if (last_stp == NULL) __arg_terr(__FILE__, __LINE__);
6769 if (last_stp->stmttyp != S_GOTO) __arg_terr(__FILE__, __LINE__);
6770 if (!last_stp->lpend_goto) __arg_terr(__FILE__, __LINE__);
6771 if (targstp == NULL) __misc_terr(__FILE__, __LINE__);
6772 /* --- */
6773 last_stp->st.sgoto = targstp;
6774
6775 if (__debug_flg)
6776 {
6777 char s1[RECLEN], s2[RECLEN], s3[RECLEN];
6778
6779 __dbg_msg("++ loop: xform goto after %s at %s back to stmt %s at %s\n",
6780 __to_sttyp(s1, last_stp->stmttyp), __bld_lineloc(__xs,
6781 last_stp->stfnam_ind, last_stp->stlin_cnt), __to_sttyp(s2,
6782 begstp->stmttyp), __bld_lineloc(s3, targstp->stfnam_ind,
6783 targstp->stlin_cnt));
6784 }
6785 }
6786
6787 /*
6788 * fixup all gotos in list of stmts
6789 *
6790 * know all statement transformed - sets correct new goto dest.
6791 */
static void cxf_fixup_lstofsts_gotos(struct st_t *hdrstp, int32 has_endgoto)
{
 register struct st_t *stp;
 int32 fji;
 struct for_t *forp;
 struct st_t *stp2, *fjstp;

 for (stp = hdrstp; stp != NULL; stp = stp->stnxt)
  {
   /* record source location for any error/debug messages */
   __sfnam_ind = stp->stfnam_ind;
   __slin_cnt = stp->stlin_cnt;

   switch ((byte) stp->stmttyp) {
    case S_PROCA: case S_FORASSGN: case S_RHSDEPROCA: case S_NBPROCA: break;
    case S_IF:
     /* continuation of both arms is the stmt after the if (if any) */
     if (stp->stnxt != NULL) push_prpstmt(stp->stnxt);
     cxf_fixup_lstofsts_gotos(stp->st.sif.thenst, TRUE);
     if (stp->st.sif.elsest != NULL)
      cxf_fixup_lstofsts_gotos(stp->st.sif.elsest, TRUE);
     if (stp->stnxt != NULL) pop_prpstmt();
     break;
    case S_CASE:
     cxf_fixup_case_gotos(stp);
     break;
    case S_FOR:
     forp = stp->st.sfor;
     /* LOOKATME - is prep change needed */
     /* goto to for itself */
     cxf_fixup_loopend_goto(forp->forbody, stp);
     cxf_fixup_lstofsts_gotos(forp->forbody, FALSE);
     break;
    case S_FOREVER: case S_WHILE:
     /* loop body's trailing goto loops back to the loop stmt itself */
     cxf_fixup_loopend_goto(stp->st.swh.lpst, stp);
     cxf_fixup_lstofsts_gotos(stp->st.swh.lpst, FALSE);
     break;
    case S_REPEAT:
     /* allocate per inst. count storage */
     /* add loop back to repeat header */
     cxf_fixup_loopend_goto(stp->st.srpt.repst, stp);
     cxf_fixup_lstofsts_gotos(stp->st.srpt.repst, FALSE);
     break;
    case S_WAIT:
     if (stp->stnxt != NULL) push_prpstmt(stp->stnxt);
     cxf_fixup_lstofsts_gotos(stp->st.swait.lpst, TRUE);
     if (stp->stnxt != NULL) pop_prpstmt();
     break;
    case S_DELCTRL:
     /* first find end of decltrl chain */
     /* if no action statement - nothing to do */
     if (stp->st.sdc->actionst == NULL) break;
     for (stp2 = stp->st.sdc->actionst;; stp2 = stp2->st.sdc->actionst)
      {
       /* keep going until delay control has no action statement or */
       /* a non delay control action statement */
       /* case "#10 begin #20 ..." - is not delay control chain */
       if (stp2 == NULL || stp2->stmttyp != S_DELCTRL || stp2->st_unbhead)
        break;
      }
     /* ??? 11/10/99 - if (stp2 == NULL || stp2->stmttyp != S_GOTO) break; */
     if (stp2 == NULL) break;
     /* do the goto fix up */
     if (stp->stnxt != NULL) push_prpstmt(stp->stnxt);
     cxf_fixup_lstofsts_gotos(stp2, TRUE);
     /* only end of chain can have "real" statement */
     if (stp->stnxt != NULL) pop_prpstmt();
     break;
    case S_NAMBLK:
     /* for named block, no continuation - must be subthread except in func */
     __push_nbstk(stp);
     if (__processing_func)
      {
       /* for function no task scheduled - run inline to need goto to cont */
       /* at end */
       if (stp->stnxt != NULL) push_prpstmt(stp->stnxt);
       cxf_fixup_lstofsts_gotos(stp->st.snbtsk->tskst, TRUE);
       if (stp->stnxt != NULL) pop_prpstmt();
      }
     else
      {
       push_prpstmt((struct st_t *) NULL);
       cxf_fixup_lstofsts_gotos(stp->st.snbtsk->tskst, FALSE);
       pop_prpstmt();
      }
     __pop_nbstk();
     break;
    case S_UNBLK:
     if (stp->stnxt != NULL) push_prpstmt(stp->stnxt);
     /* need continuation for simple block */
     cxf_fixup_lstofsts_gotos(stp->st.sbsts, TRUE);
     if (stp->stnxt != NULL) pop_prpstmt();
     break;
    case S_UNFJ:
     push_prpstmt((struct st_t *) NULL);
     /* continuation inside these must be NULL, not stacked val */
     for (fji = 0;; fji++)
      {
       if ((fjstp = stp->st.fj.fjstps[fji]) == NULL) break;
       cxf_fixup_lstofsts_gotos(fjstp, FALSE);
      }
     pop_prpstmt();
     break;
    case S_DSABLE:
     /* inside function disables are gotos to next statement in up block */
     if (__processing_func) cxf_fixup_func_dsabl_gotos(stp);
     /* any other disable is task scheduled */
     break;
    case S_GOTO:
     /* goto can only be last */
     /* DBG remove --- */
     if (stp->stnxt != NULL) __misc_terr(__FILE__, __LINE__);
     /* --- */
     /* if loopend dest. set and nothing to do here */
     if (stp->lpend_goto) return;
     if (has_endgoto && __prpstk[__prpsti] != NULL)
      {
       stp->st.sgoto = __prpstk[__prpsti];
       /* in this rare case will not have line number - can it happen */
      }
     /* DBG remove -- */
     else __case_terr(__FILE__, __LINE__);
     /* -- */
     return;
    default:
     /* statements with no processing ignored thru here */
     break;
   }
  }
 /* block with no loop end goto returns thru here */
}
6921
6922 /*
6923 * fixup all gotos in case stmt
6924 */
cxf_fixup_case_gotos(struct st_t * stp)6925 static void cxf_fixup_case_gotos(struct st_t *stp)
6926 {
6927 register struct csitem_t *csip;
6928 struct csitem_t *dflt_csip;
6929
6930 if (stp->stnxt != NULL) push_prpstmt(stp->stnxt);
6931
6932 dflt_csip = stp->st.scs.csitems;
6933 /* this will move up stack to add goto after ending stp */
6934 for (csip = dflt_csip->csinxt; csip != NULL; csip = csip->csinxt)
6935 {
6936 cxf_fixup_lstofsts_gotos(csip->csist, TRUE);
6937 }
6938
6939 /* this will move up stack to connect ending stnxt to next exec. place */
6940 if (dflt_csip->csist != NULL)
6941 cxf_fixup_lstofsts_gotos(dflt_csip->csist, TRUE);
6942 if (stp->stnxt != NULL) pop_prpstmt();
6943 }
6944
6945 /*
6946 *
 * inside a function, disables are like C continue and are just gotos
6948 */
cxf_fixup_func_dsabl_gotos(struct st_t * stp)6949 static void cxf_fixup_func_dsabl_gotos(struct st_t *stp)
6950 {
6951 register int32 i;
6952 struct expr_t *dsxp;
6953 struct sy_t *syp;
6954 struct task_t *dsatskp;
6955
6956 dsxp = stp->st.sdsable.dsablx;
6957 syp = dsxp->lu.sy;
6958 /* disable of func. indicated by nil next statmenet - use fcall stack */
6959 /* system function disable no next */
6960 if (syp->sytyp == SYM_F) return;
6961
6962 dsatskp = syp->el.etskp;
6963 /* know every named block when entered in function name block is stacked */
6964 for (i = __nbsti; i >= 0; i--)
6965 {
6966 if (__nbstk[i]->st.snbtsk == dsatskp)
6967 {
6968 /* this can be nil */
6969 stp->st.sdsable.func_nxtstp = __nbstk[i]->stnxt;
6970 return;
6971 }
6972 }
6973 /* know always enclosing, or will not get here - earlier error */
6974 __case_terr(__FILE__, __LINE__);
6975 }
6976
6977 /*
6978 * compile transform instance connections
6979 */
cmp_xform_inst_conns(void)6980 static void cmp_xform_inst_conns(void)
6981 {
6982 register int32 ii, pi;
6983 register struct inst_t *ip;
6984 int32 pnum;
6985
6986 if (__inst_mod->minum == 0) return;
6987
6988 for (ii = 0; ii < __inst_mod->minum; ii++)
6989 {
6990 ip = &(__inst_mod->minsts[ii]);
6991 pnum = ip->imsym->el.emdp->mpnum;
6992 for (pi = 0; pi < pnum; pi++)
6993 ip->ipins[pi] = mv1_expr_totab(ip->ipins[pi]);
6994 }
6995 }
6996
6997 /*
6998 * compile transform gates
6999 */
cmp_xform_gates(void)7000 static void cmp_xform_gates(void)
7001 {
7002 register int32 gi, pi;
7003 register struct gate_t *gp;
7004 int32 pnum;
7005
7006 for (gi = 0; gi < __inst_mod->mgnum; gi++)
7007 {
7008 gp = &(__inst_mod->mgates[gi]);
7009 /* fixex (moves expr) only delay expression forms */
7010 cmp_xform_delay(gp->g_delrep, gp->g_du);
7011 pnum = gp->gpnum;
7012 for (pi = 0; pi < pnum; pi++)
7013 gp->gpins[pi] = mv1_expr_totab(gp->gpins[pi]);
7014 }
7015 }
7016
7017 /*
7018 * compile transform cont assigns
7019 */
cmp_xform_contas(void)7020 static void cmp_xform_contas(void)
7021 {
7022 register int32 bi;
7023 register struct conta_t *cap, *pbcap;
7024 int32 cai;
7025
7026 for (cap = &(__inst_mod->mcas[0]), cai = 0; cai < __inst_mod->mcanum;
7027 cai++, cap++)
7028 {
7029 /* fixex (moves expr) only delay expression forms */
7030 cmp_xform_delay(cap->ca_delrep, cap->ca_du);
7031
7032 cap->lhsx = mv1_expr_totab(cap->lhsx);
7033 cap->rhsx = mv1_expr_totab(cap->rhsx);
7034 if (cap->ca_pb_sim)
7035 {
7036 for (bi = 0; bi < cap->lhsx->szu.xclen; bi++)
7037 {
7038 pbcap = &(cap->pbcau.pbcaps[bi]);
7039 pbcap->lhsx = mv1_expr_totab(pbcap->lhsx);
7040 pbcap->rhsx = mv1_expr_totab(pbcap->rhsx);
7041 }
7042 }
7043 }
7044 }
7045
7046 /*
7047 * compile transform tasks
7048 *
 * this also handles tasks - task specific task field (one for now)
 * set when statements are transformed
7051 *
7052 * task args do not have expr. forms
7053 */
cmp_xform_tasks(void)7054 static void cmp_xform_tasks(void)
7055 {
7056 register struct task_t *tskp;
7057
7058 /* finally check user tasks and function statements */
7059 for (tskp = __inst_mod->mtasks; tskp != NULL; tskp = tskp->tsknxt)
7060 {
7061 /* named blocks handled as statement where they occur */
7062 if (tskp->tsktyp == FUNCTION) __processing_func = TRUE;
7063 else if (tskp->tsktyp == TASK) __processing_func = FALSE;
7064 else continue;
7065 /* DBG remove -- */
7066 if (__debug_flg)
7067 {
7068 __dbg_msg("+++ xforming task %s\n", tskp->tsksyp->synam);
7069 }
7070 /* --- */
7071
7072 tskp->tskst = cmp_xform_lstofsts(tskp->tskst);
7073 }
7074 __prpsti = 0;
7075 __nbsti = -1;
7076 __prpstk[0] = NULL;
7077 for (tskp = __inst_mod->mtasks; tskp != NULL; tskp = tskp->tsknxt)
7078 {
7079 /* named blocks handled as statement where they occur */
7080 if (tskp->tsktyp == FUNCTION) __processing_func = TRUE;
7081 else if (tskp->tsktyp == TASK) __processing_func = FALSE;
7082 else continue;
7083
7084 cxf_fixup_lstofsts_gotos(tskp->tskst, FALSE);
7085 }
7086 /* DBG remove --- */
7087 if (__prpsti != 0) __misc_terr(__FILE__, __LINE__);
7088 /* --- */
7089 /* AIV 10/20/05 - must reset __processing_func to FALSE */
7090 __processing_func = FALSE;
7091 }
7092
7093 /*
7094 * compile transform specify sections expressions
7095 *
7096 * only called if module has specify section
7097 * maybe do not need to copy these expressions
7098 *
7099 * notice by here pathel's no longer expressions and expr. delay forms
7100 * illegal in paths (error before here)
7101 */
cmp_xform_specify(void)7102 static void cmp_xform_specify(void)
7103 {
7104 register struct spcpth_t *pthp;
7105 register struct tchk_t *tcp;
7106
7107 /* prepare paths */
7108 for (pthp = __inst_mod->mspfy->spcpths; pthp != NULL; pthp = pthp->spcpthnxt)
7109 {
7110 /* data source not really used in simulator but moved still */
7111 pthp->datasrcx = mv1_expr_totab(pthp->datasrcx);
7112 /* need to move this since condition eval can eliminates path */
7113 pthp->pthcondx = mv1_expr_totab(pthp->pthcondx);
7114 }
7115 /* copy these because needed at run time for timing check error msgs */
7116 for (tcp = __inst_mod->mspfy->tchks; tcp != NULL; tcp = tcp->tchknxt)
7117 {
7118 if (tcp->startxp != NULL) tcp->startxp = mv1_expr_totab(tcp->startxp);
7119
7120 if (tcp->startcondx != NULL)
7121 tcp->startcondx = mv1_expr_totab(tcp->startcondx);
7122
7123 if (tcp->chkxp != NULL) tcp->chkxp = mv1_expr_totab(tcp->chkxp);
7124
7125 if (tcp->chkcondx != NULL) tcp->chkcondx = mv1_expr_totab(tcp->chkcondx);
7126
7127 /* both sides of setuphold determined from hold half and both sides of*/
7128 /* recrem determined removal half */
7129 if (tcp->tc_supofsuphld || tcp->tc_recofrecrem) continue;
7130
7131 /* LOOKATME - are these needed since think spec delays can't be exprs */
7132 cmp_xform_delay(tcp->tc_delrep, tcp->tclim_du);
7133 if (tcp->tc_haslim2) cmp_xform_delay(tcp->tc_delrep2, tcp->tclim2_du);
7134 }
7135 }
7136
7137 /*
7138 * dump a module's statement table
7139 *
7140 * number to dump is arg so can use on partially built table
7141 */
extern void __dmp_msttab(struct mod_t *mdp, int32 snum)
{
 register int32 sti;
 register struct st_t *stp;
 int32 desti;

 if (mdp->mstnum == 0)
  {
   __dbg_msg("+++ module %s has no statements +++\n", mdp->msym->synam);
   return;
  }
 /* DBG remove --- */
 if (mdp->msttab == NULL) __misc_terr(__FILE__, __LINE__);
 /* --- */

 __dbg_msg("+++ module %s has %d statements +++\n", mdp->msym->synam,
  mdp->mstnum);
 /* push itree context so per inst. forms dump correctly */
 __push_wrkitstk(mdp, 0);
 for (sti = 0; sti < snum; sti++)
  {
   stp = &(mdp->msttab[sti]);
   if (stp->stmttyp != S_GOTO) __dbg_msg("STMT %d:\n", sti);
   else
    {
     /* goto dest. shown as index into this same table */
     desti = stp->st.sgoto - &(mdp->msttab[0]);
     __dbg_msg("GOTO STMT %d (goto %d):\n", sti, desti);
    }
   __dmp_stmt(stdout, stp, FALSE);
   __dbg_msg("\n");
  }
 __pop_wrkitstk();
}
7174
7175 /*
7176 * dump a module's expr table
7177 *
7178 * number to dump is arg so can use on partially built table
7179 */
extern void __dmp_exprtab(struct mod_t *mdp, int32 xnum)
{
 register int32 xi;
 register struct expr_t *xp;
 int32 li, ri;
 char opstr[RECLEN];

 if (mdp->mexprnum == 0)
  {
   __dbg_msg("+++ module %s has no expressions +++\n", mdp->msym->synam);
   return;
  }
 /* DBG remove --- */
 if (mdp->mexprtab == NULL) __misc_terr(__FILE__, __LINE__);
 /* --- */

 __dbg_msg("+++ module %s has %d expression nodes +++\n", mdp->msym->synam,
  mdp->mexprnum);
 /* push itree context so per inst. forms dump correctly */
 __push_wrkitstk(mdp, 0);
 for (xi = 0; xi < xnum; xi++)
  {
   xp = &(mdp->mexprtab[xi]);
   if (__isleaf(xp))
    {
     __dbg_msg("EXPR %d: leaf %s\n", xi, bld_opname(opstr, xp));
     continue;
    }
   /* operand table indices (-1 when that side is nil) */
   li = ri = -1;
   if (xp->lu.x != NULL) li = xp->lu.x - &(mdp->mexprtab[0]);
   if (xp->ru.x != NULL) ri = xp->ru.x - &(mdp->mexprtab[0]);
   __dbg_msg("EXPR %d: op %s left [%d] right [%d]\n", xi,
    bld_opname(opstr, xp), li, ri);
  }
 __pop_wrkitstk();
}
7218
7219 /*
7220 * build name of an operator in a string
7221 *
7222 * LOOKATME - this is almost same as to_xndnam but here names in symbol
7223 * table and expridtab not filled
7224 */
bld_opname(char * s,struct expr_t * ndp)7225 static char *bld_opname(char *s, struct expr_t *ndp)
7226 {
7227 int32 wlen;
7228 word32 *ap, *bp;
7229 char s1[2*IDLEN], s2[RECLEN];
7230
7231 switch ((byte) ndp->optyp) {
7232 case ID:
7233 sprintf(s1, "IDENTIFIER: %s", ndp->lu.sy->synam);
7234 break;
7235 /* LOOKATME - can GLBREF occur here */
7236 case GLBREF:
7237 sprintf(s1, "XMR: %s", ndp->ru.grp->gnam);
7238 break;
7239 case NUMBER:
7240 case ISNUMBER:
7241 ap = &(__contab[ndp->ru.xvi]);
7242 wlen = wlen_(ndp->szu.xclen);
7243 bp = &ap[wlen];
7244 sprintf(s1, "NUMBER: %s", __xregab_tostr(s2, ap, bp, ndp->szu.xclen, ndp));
7245 break;
7246 case REALNUM: case ISREALNUM:
7247 /* just pass a for both here */
7248 /* LOOKATME - should just format as double */
7249 ap = &(__contab[ndp->ru.xvi]);
7250 sprintf(s1, "REAL: %s", __regab_tostr(s2, ap, ap, ndp->szu.xclen, BDBLE,
7251 FALSE));
7252 break;
7253 case OPEMPTY:
7254 strcpy(s, "<EMPTY>");
7255 return(s);
7256 case UNDEF:
7257 strcpy(s, "<EXPR END>");
7258 return(s);
7259 default:
7260 strcpy(s, __to_opname(ndp->optyp)); return(s);
7261 }
7262 if ((int32) strlen(s1) >= RECLEN - 1) s1[RECLEN - 1] = '\0';
7263 strcpy(s, s1);
7264 return(s);
7265 }
7266