1 /* Copyright (c) 1991-2007 Pragmatic C Software Corp. */
2
3 /*
4 This program is free software; you can redistribute it and/or modify it
5 under the terms of the GNU General Public License as published by the
6 Free Software Foundation; either version 2 of the License, or (at your
7 option) any later version.
8
9 This program is distributed in the hope that it will be useful, but
10 WITHOUT ANY WARRANTY; without even the implied warranty of
11 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 General Public License for more details.
13
14 You should have received a copy of the GNU General Public License along
15 with this program; if not, write to the Free Software Foundation, Inc.,
16 59 Temple Place, Suite 330, Boston, MA, 02111-1307.
17
18 We are selling our new Verilog compiler that compiles to X86 Linux
19 assembly language. It is at least two times faster for accurate gate
20 level designs and much faster for procedural designs. The new
21 commercial compiled Verilog product is called CVC. For more information
22 on CVC visit our website at www.pragmatic-c.com/cvc.htm or contact
23 Andrew at avanvick@pragmatic-c.com
24
25 */
26
27
28 /*
 * run time execution routines - lhs stores and gate evaluation routines
30 */
31
32 #include <stdio.h>
33 #include <stdlib.h>
34 #include <string.h>
35
36 #ifdef __DBMALLOC__
37 #include "../malloc.h"
38 #endif
39
40 #include "v.h"
41 #include "cvmacros.h"
42
43 /* local prototypes */
44 static void do_qc_assign(struct st_t *, struct expr_t *, int32,
45 struct dceauxlstlst_t *);
46 static void do_qc_deassign(struct expr_t *);
47 static void do_qc_regforce(struct st_t *, struct expr_t *, int32,
48 struct dceauxlstlst_t *);
49 static void do_qc_regrelease(struct expr_t *);
50 static void do_qc_wireforce(struct st_t *, struct expr_t *, int32,
51 struct dceauxlstlst_t *);
52 static void do_1bit_wireforce(struct st_t *, struct net_t *, int32, int32,
53 int32, struct itree_t *itp, struct dceauxlst_t *);
54 static void do_qc_wirerelease(struct expr_t *);
55 static void assign_alllhs_bits(struct expr_t *, struct xstk_t *);
56 static void do_qc2_regstore(struct net_t *, struct qcval_t *,
57 struct xstk_t *);
58 static void do_qc2_wirestore(struct net_t *, struct qcval_t *,
59 struct xstk_t *);
60 static void trace_conta_assign(struct expr_t *, word32 *, word32 *);
61 static void sched_conta_assign(struct expr_t *, register word32 *,
62 register word32 *);
63 static void evtr_wdel_schd_1wirebit(register struct net_t *, register int32,
64 register word32, register word32, int32);
65 static void schd_1pthwirebit(register struct net_t *, register int32,
66 register word32, register word32);
67 static void evtr_schd_1pthwirebit(register struct net_t *, register int32,
68 register word32, register word32);
69 static void prt_dbgpthtrmsg(struct spcpth_t *, word64);
70 static void get_impth_del(word64 *, struct net_t *, int32, struct mipd_t *);
71 static void prt_dbgimpthtrmsg(struct net_t *, int32, word64, word64);
72 static void evtr_sched_mipd_nchg(struct net_t *, int32, struct mipd_t *);
73 static void cancel_1mipdev(struct tev_t *);
74 static i_tev_ndx reschedule_1mipd(struct net_t *, int32, i_tev_ndx,
75 word64, word64);
76 static void st_vecval(word32 *, int32, register word32 *, register word32 *);
77 static void chg_st_vecval(register word32 *, int32, register word32 *,
78 register word32 *);
79 static void schedassign_to_bit(struct net_t *, struct expr_t *,
80 struct expr_t *, register word32 *, register word32 *);
81 static void get_unknown_biti_val(struct net_t *, word32 *, word32 *, word32 *,
82 word32 *, word32);
83 static void setx_ifnotval(word32 *, word32 *, word32);
84 static void chg_lhsbsel(register word32 *, int32, word32);
85 static int32 forced_assign_to_psel(struct expr_t *, int32, int32,
86 struct net_t *, register word32 *, register word32 *);
87 static void schedassign_to_psel(struct expr_t *, register word32 *,
88 register word32 *);
89 static void ins_walign(register word32 *, register word32 *, register int32);
90 static void cp_dofs_wval(register word32 *, register word32 *, int32, int32);
91 static void chg_st_unpckpsel(word32 *, int32, int32, int32, register word32 *,
92 register word32 *);
93 static void chg_ins_wval(register word32 *, register int32, register word32 *,
94 register int32);
95 static int32 chg_ofs_cmp(register word32 *, register word32 *, int32, int32);
96 static void eval_wide_gate(struct gate_t *, struct xstk_t *);
97 static void st_psel(struct net_t *, int32, int32, register word32 *,
98 register word32 *);
99 static void chg_st_psel(struct net_t *, int32, int32, register word32 *,
100 register word32 *);
101
102 /* extern prototypes (maybe defined in this module) */
103 extern void __exec2_proc_assign(struct expr_t *, register word32 *,
104 register word32 *);
105 extern void __exec2_proc_concat_assign(struct expr_t *, word32 *, word32 *);
106 extern void __exec_qc_assign(struct st_t *, int32);
107 extern void __exec_qc_deassign(struct st_t *, int32);
108 extern void __exec_qc_wireforce(struct st_t *);
109 extern void __exec_qc_wirerelease(struct st_t *);
110 extern void __assign_qcaf(struct dcevnt_t *);
111 extern void __do_qc_store(struct net_t *, struct qcval_t *, int32);
112 extern void __xmrpush_refgrp_to_targ(struct gref_t *);
113 extern struct itree_t *__find_unrt_targitp(struct gref_t *,
114 register struct itree_t *, int32);
115 extern struct inst_t *__get_gref_giarr_ip(struct gref_t *, int32,
116 struct itree_t *);
117 extern int32 __match_push_targ_to_ref(word32, struct gref_t *);
118 extern void __exec_ca_concat(struct expr_t *, register word32 *,
119 register word32 *, int32);
120 extern void __stren_exec_ca_concat(struct expr_t *, byte *, int32);
121 extern void __exec_conta_assign(struct expr_t *, register word32 *,
122 register word32 *, int32);
123 extern int32 __correct_forced_newwireval(struct net_t *, word32 *, word32 *);
124 extern void __bld_forcedbits_mask(word32 *, struct net_t *);
125 extern void __pth_schd_allofwire(struct net_t *, register word32 *,
126 register word32 *, int32);
127 extern void __wdel_schd_allofwire(struct net_t *, register word32 *,
128 register word32 *, int32);
129 extern void __pth_stren_schd_allofwire(struct net_t *, register byte *, int32);
130 extern void __wdel_schd_1wirebit(register struct net_t *, register int32,
131 register word32, register word32, int32);
132 extern void __wdel_stren_schd_allofwire(struct net_t *, register byte *,
133 int32);
134 extern void __emit_path_distinform(struct net_t *, struct pthdst_t *,
135 word64 *);
136 extern void __emit_path_samewarn(struct net_t *, int32, struct tev_t *,
137 word64 *, char *, word32);
138 extern void __emit_path_pulsewarn(struct pthdst_t *, struct tev_t *,
139 word64 *, word64 *, char *, word32);
140 extern struct pthdst_t *__get_path_del(struct rngdwir_t *, int32, word64 *);
141 extern void __schedule_1wev(struct net_t *, int32, int32, word64, word64,
142 word32, i_tev_ndx *, int32);
143 extern void __reschedule_1wev(i_tev_ndx, word32, word64, word64, i_tev_ndx *);
144 extern void __cancel_1wev(struct tev_t *);
145 extern void __st_val(struct net_t *, register word32 *, register word32 *);
146 extern void __st_perinst_val(union pck_u, int32, register word32 *,
147 register word32 *);
148 extern void __chg_st_val(register struct net_t *, word32 *, word32 *);
149 extern void __assign_to_bit(struct net_t *, struct expr_t *, struct expr_t *,
150 register word32 *, register word32 *);
151 extern void __assign_to_arr(struct net_t *, struct expr_t *, struct expr_t *,
152 register word32 *, register word32 *);
153 extern int32 __forced_inhibit_bitassign(struct net_t *, struct expr_t *,
154 struct expr_t *);
155 extern void __stren_schedorassign_unknown_bit(struct net_t *, word32, int32);
156 extern void __schedorassign_unknown_bit(struct net_t *np, word32 av,
157 word32 bv, int32 schd_wire);
158 extern void __lhsbsel(register word32 *, register int32, word32);
159 extern void __chg_st_bit(struct net_t *, int32, register word32,
160 register word32);
161 extern void __st_bit(struct net_t *, int32, register word32, register word32);
162 extern void __st_arr_val(union pck_u, int32, int32, int32, register word32 *,
163 register word32 *);
164 extern void __chg_st_arr_val(union pck_u, int32, int32, int32,
165 register word32 *, register word32 *);
166 extern void __assign_to_psel(struct expr_t *, int32, int32, struct net_t *,
167 register word32 *, register word32 *);
168 extern void __lhspsel(register word32 *, register int32, register word32 *,
169 register int32);
170 extern void __cp_sofs_wval(register word32 *, register word32 *,
171 register int32, register int32);
172 extern void __chg_lhspsel(register word32 *, register int32,
173 register word32 *, register int32);
174 extern void __sizchgxs(register struct xstk_t *, int32);
175 extern void __narrow_to1bit(register struct xstk_t *);
176 extern void __narrow_to1wrd(register struct xstk_t *);
177 extern void __fix_widened_tozs(struct xstk_t *, int32);
178 extern void __fix_widened_toxs(register struct xstk_t *, int32);
179 extern void __strenwiden_sizchg(struct xstk_t *, int32);
180 extern int32 __eval_logic_gate(struct gate_t *, word32, int32 *);
181 extern void __ld_gate_wide_val(word32 *, word32 *, word32 *, int32);
182 extern int32 __eval_bufif_gate(register struct gate_t *, word32, int32 *);
183 extern void __eval_nmos_gate(register word32);
184 extern void __eval_rnmos_gate(register word32);
185 extern void __eval_pmos_gate(register word32);
186 extern void __eval_rpmos_gate(register word32);
187 extern void __eval_cmos_gate(struct gate_t *);
188 extern char *__to_gassign_str(char *, struct expr_t *);
189 extern int32 __eval_udp(register struct gate_t *, word32, int32 *, int32);
190 extern void __grow_xstk(void);
191 extern void __chg_xstk_width(struct xstk_t *, int32);
192 extern void __rhspsel(register word32 *, register word32 *, register int32,
193 register int32);
194 extern char *__to_idnam(struct expr_t *);
195 extern char *__to_wtnam(char *, struct net_t *);
196 extern char *__bld_lineloc(char *, word32, int32);
197 extern char *__msg2_blditree(char *, struct itree_t *);
198 extern char *__to_timstr(char *, word64 *);
199 extern char *__my_malloc(int32);
200 extern void __my_free(char *, int32);
201 extern void __find_call_force_cbs(struct net_t *, int32);
202 extern void __cb_all_rfs(struct net_t *, int32, int32);
203 extern void __find_call_rel_cbs(struct net_t *, int32);
204 extern char *__msgexpr_tostr(char *, struct expr_t *);
205 extern int32 __get_const_bselndx(register struct expr_t *);
206 extern void __assign_1mdrwire(register struct net_t *);
207 extern struct xstk_t *__eval_assign_rhsexpr(register struct expr_t *,
208 register struct expr_t *);
209 extern char *__xregab_tostr(char *, word32 *, word32 *, int32,
210 struct expr_t *);
211 extern char *__regab_tostr(char *, word32 *, word32 *, int32, int32, int32);
212 extern void __st_standval(register byte *, register struct xstk_t *, byte);
213 extern char *__st_regab_tostr(char *, byte *, int32);
214 extern int32 __vval_is1(register word32 *, int32);
215 extern int32 __wide_vval_is0(register word32 *, int32);
216 extern void __ld_wire_val(register word32 *, register word32 *,
217 struct net_t *);
218 extern void __ld_bit(register word32 *, register word32 *,
219 register struct net_t *, int32);
220 extern void __add_nchglst_el(register struct net_t *);
221 extern void __add_select_nchglst_el(register struct net_t *, register int32,
222 register int32);
223 extern void __wakeup_delay_ctrls(register struct net_t *, register int32,
224 register int32);
225 extern void __get_del(register word64 *, register union del_u, word32);
226 extern char *__to_evtrwnam(char *, struct net_t *, int32, int32,
227 struct itree_t *);
228 extern char *__to_vnam(char *, word32, word32);
229 extern int32 __em_suppr(int32);
230 extern void __insert_event(register i_tev_ndx);
231 extern int32 __comp_ndx(register struct net_t *, register struct expr_t *);
232 extern int32 __get_arrwide(struct net_t *);
233 extern void __ld_psel(register word32 *, register word32 *,
234 register struct net_t *, int32, int32);
235 extern struct xstk_t *__eval2_xpr(register struct expr_t *);
236 extern word32 __wrd_redxor(word32);
237 extern void __lunredand(int32 *, int32 *, word32 *, word32 *, int32);
238 extern void __lunredor(int32 *, int32 *, word32 *, word32 *, int32);
239 extern void __lunredxor(int32 *, int32 *, word32 *, word32 *, int32);
240 extern char *__gstate_tostr(char *, struct gate_t *, int32);
241 extern word32 __comb_1bitsts(word32, register word32, register word32);
242 extern struct xstk_t *__ndst_eval_xpr(struct expr_t *);
243 extern void __add_dmpv_chglst_el(struct net_t *);
244 extern void __qc_tran_wireforce(struct net_t *, int32, int32, int32,
245 struct itree_t *, struct st_t *);
246 extern void __qc_tran_wirerelease(struct net_t *, int32, int32,
247 struct itree_t *, struct expr_t *lhsx);
248 extern void __eval_tran_1bit(register struct net_t *, register int32);
249 extern char *__to_ptnam(char *, word32);
250 extern int32 __eval_1wide_gate(struct gate_t *, int32);
251 extern void __ins_wval(register word32 *, register int32, register word32 *,
252 int32);
253 extern void __rem_stren(word32 *ap, word32 *bp, byte *, int32);
254 extern void __get_qc_wirrng(struct expr_t *, struct net_t **, int32 *,
255 int32 *, struct itree_t **);
256 extern void __dcelst_on(struct dceauxlst_t *);
257 extern void __dcelst_off(struct dceauxlst_t *);
258
259 extern void __tr_msg(char *, ...);
260 extern void __gfwarn(int32, word32, int32, char *, ...);
261 extern void __sgfwarn(int32, char *, ...);
262 extern void __pv_warn(int32, char *,...);
263 extern void __gfinform(int32, word32, int32, char *, ...);
264 extern void __dbg_msg(char *, ...);
265 extern void __sgfinform(int32, char *, ...);
266 extern void __gferr(int32, word32, int32, char *, ...);
267 extern void __arg_terr(char *, int32);
268 extern void __case_terr(char *, int32);
269 extern void __misc_terr(char *, int32);
270
271 extern word32 __masktab[];
272
273 /*
274 * LHS ASSIGN ROUTINES
275 */
276
277 /*
278 * PROCEDURAL ASSIGNMENT ROUTINES
279 */
280
281 /*
282 * immediate procedural assign from stacked value to lhs expr. xlhs
283 * if assign changes, sets lhs change to T
284 * know rhs side width same as lhs destination
285 *
286 * notice every path through here must add the net change element - called
287 * this routine must be only possible way lhs procedural reg can be changed
288 */
/*
 * Immediate procedural assignment of the stacked a/b value (ap/bp word
 * groups) to lhs expression xlhs.  Dispatches on the lhs form: whole
 * reg (possibly XMR), bit select, part select, or lhs concatenate.
 *
 * ap/bp: rhs value in the simulator's two-word-group (a=value, b=x/z)
 * encoding; caller guarantees the rhs width equals the lhs width.
 */
extern void __exec2_proc_assign(struct expr_t *xlhs, register word32 *ap,
 register word32 *bp)
{
 register struct net_t *np;
 int32 nd_itpop, ri1, ri2;
 struct expr_t *idndp, *ndx1;
 struct gref_t *grp;

 /* nd_itpop records that an XMR pushed a target itree context that must */
 /* be popped before leaving (or before a helper that pushes its own) */
 nd_itpop = FALSE;
 switch ((byte) xlhs->optyp) {
  case GLBREF:
   /* cross-module reference: switch to the target instance context */
   grp = xlhs->ru.grp;
   __xmrpush_refgrp_to_targ(grp);
   nd_itpop = TRUE;
   /* FALLTHRU */
  case ID:
   np = xlhs->lu.sy->el.enp;
   /* skip the store if a qc assign/force is pending on this instance */
   if (np->frc_assgn_allocated && reg_fr_inhibit_(np)) goto chk_itpop;

   /* this add the changed wire to nchglst if needed */
   /* SJM 03/15/01 - change to fields in net record */
   if (np->nchg_nd_chgstore) __chg_st_val(np, ap, bp);
   else __st_val(np, ap, bp);

chk_itpop:
   if (nd_itpop) __pop_itstk();
   break;
  case LSB:
   /* for now first determine if array index */
   idndp = xlhs->lu.x;
   ndx1 = xlhs->ru.x;
   np = idndp->lu.sy->el.enp;
   /* notice can never assign or force arrays */
   if (np->n_isarr) __assign_to_arr(np, idndp, ndx1, ap, bp);
   else
    {
     if (np->frc_assgn_allocated)
      {
       /* push target context only to test the per-instance inhibit flag */
       if (idndp->optyp == GLBREF)
        {
         grp = idndp->ru.grp;
         __xmrpush_refgrp_to_targ(grp);
         nd_itpop = TRUE;
        }
       if (reg_fr_inhibit_(np)) goto chk_itpop;
       /* assign to bit handles own grep itree stack pushing */
       if (nd_itpop) __pop_itstk();
      }
     __assign_to_bit(np, idndp, ndx1, ap, bp);
    }
   break;
  case PARTSEL:
   idndp = xlhs->lu.x;
   np = idndp->lu.sy->el.enp;
   if (np->frc_assgn_allocated)
    {
     if (idndp->optyp == GLBREF)
      { grp = idndp->ru.grp; __xmrpush_refgrp_to_targ(grp); nd_itpop = TRUE; }
     /* do not assign if assign or force pending on inst. of wire */
     if (reg_fr_inhibit_(np)) goto chk_itpop;
     /* assign to psel handles own grep itree stack pushing */
     if (nd_itpop) __pop_itstk();
    }
   /* part select bounds are compile-time constants read from __contab */
   ri1 = (int32) __contab[xlhs->ru.x->lu.x->ru.xvi];
   ri2 = (int32) __contab[xlhs->ru.x->ru.x->ru.xvi];
   __assign_to_psel(idndp, ri1, ri2, np, ap, bp);
   break;
  case LCB:
   /* know evaluated rhs (maybe concatenate) in top of stack reg. */
   __exec2_proc_concat_assign(xlhs, ap, bp);
   break;
  default: __case_terr(__FILE__, __LINE__);
 }
}
363
364 /*
 * execute an assignment or schedule to a concatenate (know non strength)
366 * rhs value on stack apportioned into parts of concatenate
367 * know xsp width same as lhs destination
368 * caller must pop stack on return
369 */
/*
 * Assign a stacked rhs value (ap/bp) piecewise to every component of a
 * lhs concatenate, walking the components left to right.
 *
 * The rhs is known to be exactly as wide as the whole concatenate; each
 * component's slice is extracted onto a fresh xstk value and handed to
 * __exec2_proc_assign.  Caller owns ap/bp; this routine balances every
 * xstk push with a pop.
 */
extern void __exec2_proc_concat_assign(struct expr_t *xlhs, word32 *ap, word32 *bp)
{
 register struct expr_t *cnp;
 register int32 partwid;
 int32 rhslowbi;
 struct expr_t *lhspart;
 struct xstk_t *tmpxsp;

 for (cnp = xlhs->ru.x; cnp != NULL; cnp = cnp->ru.x)
  {
   lhspart = cnp->lu.x;
   partwid = lhspart->szu.xclen;
   /* cnp's length is the distance from this section's high bit to the */
   /* right end, so subtracting the part width gives the rhs low bit */
   rhslowbi = cnp->szu.xclen - partwid;

   /* copy the matching-width rhs slice onto a new top-of-stack value */
   push_xstk_(tmpxsp, partwid);
   if (partwid != 1)
    {
     __rhspsel(tmpxsp->ap, ap, rhslowbi, partwid);
     __rhspsel(tmpxsp->bp, bp, rhslowbi, partwid);
    }
   else
    {
     tmpxsp->ap[0] = rhsbsel_(ap, rhslowbi);
     tmpxsp->bp[0] = rhsbsel_(bp, rhslowbi);
    }

   /* reals and nested lhs concatenates are illegal here, so the slice */
   /* always assigns directly; immediate assign - no assign counter inc */
   __exec2_proc_assign(lhspart, tmpxsp->ap, tmpxsp->bp);
   __pop_xstk();
  }
}
412
413 /*
414 * QUASI CONTINUOUS REG ASSIGN/DEASSIGN/FORCE/RELEASE ROUTINES
415 */
416
417 /*
418 * exec a quasi-continuous assign or force of register (same as qc assign)
419 * this is for both reg force and reg assign
420 */
/*
 * Execute a quasi-continuous assign (is_force FALSE) or reg force
 * (is_force TRUE) statement stp.
 *
 * The lhs is either a plain ID/XMR or a concatenate of them; for the
 * concatenate case each component is dispatched separately together
 * with its own dce list from the per-statement list of lists.
 */
extern void __exec_qc_assign(struct st_t *stp, int32 is_force)
{
 register struct expr_t *cnp;
 register struct dceauxlstlst_t *dclp;
 int32 partwid, lowbi;
 struct expr_t *lhsx, *lhspart;

 lhsx = stp->st.sqca->qclhsx;
 /* per-statement list of ptrs to per-instance/bit dce lists */
 dclp = stp->st.sqca->rhs_qcdlstlst;

 if (lhsx->optyp == LCB)
  {
   /* lhs concatenate - each component gets its slice of the rhs */
   for (cnp = lhsx->ru.x; cnp != NULL;
    cnp = cnp->ru.x, dclp = dclp->dcelstlstnxt)
    {
     lhspart = cnp->lu.x;
     partwid = lhspart->szu.xclen;
     /* low rhs bit index for this component */
     lowbi = cnp->szu.xclen - partwid;
     if (!is_force) do_qc_assign(stp, lhspart, lowbi, dclp);
     else do_qc_regforce(stp, lhspart, lowbi, dclp);
    }
  }
 else
  {
   /* simple lhs: -1 rhs bit index means use the whole rhs */
   if (!is_force) do_qc_assign(stp, lhsx, -1, dclp);
   else do_qc_regforce(stp, lhsx, -1, dclp);
  }
}
451
452 /*
453 * execute a quasi-continuous deassign
454 * inverse of assign
455 */
/*
 * Execute a quasi-continuous deassign (is_force FALSE) or reg release
 * (is_force TRUE) statement stp - the inverse of __exec_qc_assign.
 *
 * Lhs is a plain ID/XMR or a concatenate of them; each concatenate
 * component is released/deassigned independently.
 */
extern void __exec_qc_deassign(struct st_t *stp, int32 is_force)
{
 register struct expr_t *cnp;
 struct expr_t *lhsx, *lhspart;

 /* SJM 07/19/02 - was wrongly accessing qconta not qcontdea record */
 lhsx = stp->st.sqcdea.qcdalhs;

 if (lhsx->optyp == LCB)
  {
   /* concatenate case - handle every component in turn */
   for (cnp = lhsx->ru.x; cnp != NULL; cnp = cnp->ru.x)
    {
     lhspart = cnp->lu.x;
     if (!is_force) do_qc_deassign(lhspart);
     else do_qc_regrelease(lhspart);
    }
  }
 else
  {
   if (!is_force) do_qc_deassign(lhsx);
   else do_qc_regrelease(lhsx);
  }
}
480
481 /*
482 * exec quasi continuous assign for one expr in one inst.
483 *
484 * know lhs always entire register - no assign for wires - lhs can be xmr
485 *
486 * if active force do nothing but save assign rhs expr. so if force released
487 * assign expr. evaluated and activated
488 *
489 * SJM 06/23/02 - new qcaf algorithm build qcaf lists during prep and moves
490 * to and from stmt sqca fields and turns on/off when needed
491 */
/*
 * Perform the quasi-continuous assign of one lhs expr (whole reg, maybe
 * XMR) for the current instance.
 *
 * qcastp: the qc assign statement; lhsx: ID or GLBREF lhs component;
 * rhsbi: low rhs bit index for a lhs-concat component (-1 = whole rhs);
 * dcllp: per-statement list of per-instance dce lists for the rhs.
 *
 * If a force is active on the reg, only the assign qcval record is
 * filled (marked overridden) so the assign takes effect when the force
 * is released; otherwise the assign is activated, the value stored, and
 * the rhs dces turned on.
 */
static void do_qc_assign(struct st_t *qcastp, struct expr_t *lhsx, int32 rhsbi,
 struct dceauxlstlst_t *dcllp)
{
 int32 nd_itpop, stmt_inum;
 struct net_t *np;
 struct gref_t *grp;
 struct qcval_t *frc_qcp, *assgn_qcp;

 /* assign to lhs itree loc. */
 nd_itpop = FALSE;
 /* SJM 05/23/03 - need to access stmt info from original inum if XMR */
 stmt_inum = __inum;
 if (lhsx->optyp == GLBREF)
  { grp = lhsx->ru.grp; __xmrpush_refgrp_to_targ(grp); nd_itpop = TRUE; }
 else if (lhsx->optyp != ID) __case_terr(__FILE__, __LINE__);
 np = lhsx->lu.sy->el.enp;

 /* for reg var 2 records always allocated, first is force and 2nd assign */
 frc_qcp = &(np->nu2.qcval[2*__inum]);
 assgn_qcp = &(np->nu2.qcval[2*__inum + 1]);
 /* if active force of wire just fill assign qcval so when force removed */
 /* assign becomes active - but do not make qc assign active */
 if (frc_qcp->qc_active)
  {
   /* notice this can replace other over-ridden */
   assgn_qcp->qc_overridden = TRUE;
   assgn_qcp->qcstp = qcastp;
   assgn_qcp->qcrhsbi = rhsbi;
   assgn_qcp->qclhsbi = -1;
   /* need lhs target inst. loc including non xmr same for change assign */
   assgn_qcp->lhsitp = __inst_ptr;
   /* assign group of dces (usually 1) - if lhs concat this lhs expr's one */
   /* but don't turn on yet */
   /* SJM 05/23/03 - dcllp is linked off stmt and has stmt instances */
   assgn_qcp->qcdcep = dcllp->dcelsttab[stmt_inum];

   if (nd_itpop) __pop_itstk();

   if (__debug_flg && __ev_tracing)
    {
     char s1[RECLEN];

     /* context for trace message is stmt not possible xmr */
     __tr_msg(
      ":: quasi-continuous assign to reg %s at %s in %s now %s - no effect active force\n",
      __to_idnam(lhsx), __bld_lineloc(__xs, (word32) __sfnam_ind, __slin_cnt),
      __msg2_blditree(__xs2, __inst_ptr), __to_timstr(s1, &__simtime));
    }
   return;
  }
 /* if active assign, deactivate before setting new - know fields replaced */
 if (assgn_qcp->qc_active)
  {
   assgn_qcp->qc_active = FALSE;
   /* turn on dces after doing store if rhs dces */
   /* SJM 08/18/02 - bug - this was turning on but must turn off */
   if (assgn_qcp->qcdcep != NULL) __dcelst_off(assgn_qcp->qcdcep);
  }

 /* SJM 07/19/02 - was not making assign active */
 assgn_qcp->qc_active = TRUE;
 /* DBG remove - can't be over-ridden by force if get here */
 if (assgn_qcp->qc_overridden) __misc_terr(__FILE__, __LINE__);
 /* --- */
 /* but still save in case reg var force removed */
 assgn_qcp->qcstp = qcastp;
 assgn_qcp->qcrhsbi = rhsbi;
 assgn_qcp->qclhsbi = -1;
 /* do store and build dces in ref. itree loc. */
 assgn_qcp->lhsitp = __inst_ptr;
 /* assign group of dces (usually 1) - if lhs concat this lhs expr's one */
 assgn_qcp->qcdcep = dcllp->dcelsttab[stmt_inum];

 if (nd_itpop) __pop_itstk();

 if (__debug_flg && __ev_tracing)
  {
   char s1[RECLEN];

   __tr_msg(":: quasi-continuous assign to reg %s at %s in %s now %s\n",
    __to_idnam(lhsx), __bld_lineloc(__xs, (word32) __sfnam_ind, __slin_cnt),
    __msg2_blditree(__xs2, __inst_ptr), __to_timstr(s1, &__simtime));
  }

 /* these routines need ref. itree loc - they set right context for xmr */
 __do_qc_store(np, assgn_qcp, TRUE);

 /* turn on dces after doing store if rhs dces */
 if (assgn_qcp->qcdcep != NULL) __dcelst_on(assgn_qcp->qcdcep);

 /* FIXME ??? - assign callbacks go here */
}
584
585 /*
586 * do the quasi continuous deassign for one lhs expr.
587 *
588 * know lhs always entire register and cannot be wire
589 * notice lhs here can be xmr
 * semantics: a deassign leaves the current value in place until the next
 * assign statement executes
591 * value of wire not changed here
592 * for xmr (reg only possible), must exec in target itree loc.
593 */
/*
 * Perform the quasi-continuous deassign for one lhs expr (whole reg,
 * maybe XMR).
 *
 * Deactivates the assign qcval record and turns off its rhs dces; the
 * reg keeps its current value.  If a force is also active, only the
 * tracing annotation changes - the force stays in effect.
 */
static void do_qc_deassign(struct expr_t *lhsx)
{
 int32 nd_itpop;
 struct net_t *np;
 struct gref_t *grp;
 struct qcval_t *frc_qcp, *assgn_qcp;
 char s1[RECLEN];

 /* s1 accumulates an optional annotation for the trace message */
 strcpy(s1, "");
 /* must work in itree loc. of lhs if xmr */
 nd_itpop = FALSE;
 if (lhsx->optyp == GLBREF)
  { grp = lhsx->ru.grp; __xmrpush_refgrp_to_targ(grp); nd_itpop = TRUE; }
 else if (lhsx->optyp != ID) __case_terr(__FILE__, __LINE__);
 np = lhsx->lu.sy->el.enp;

 /* for reg var 2 records always allocated, first is force and 2nd assign */
 frc_qcp = &(np->nu2.qcval[2*__inum]);
 assgn_qcp = &(np->nu2.qcval[2*__inum + 1]);
 if (!assgn_qcp->qc_active && !assgn_qcp->qc_overridden)
  {
   /* nothing assigned - emit informatory and leave */
   __sgfinform(462,
    "attempted deassign of reg %s in instance %s failed - not assigned",
    __to_idnam(lhsx), __msg2_blditree(__xs, __inst_ptr));
   goto done;
  }
 assgn_qcp->qc_active = FALSE;
 /* turn off dces but do not empty qcval rec - will get refilled if needed */
 /* if over-ridden were not turned so do not need to turn off */
 if (!assgn_qcp->qc_overridden)
  {
   if (assgn_qcp->qcdcep != NULL) __dcelst_off(assgn_qcp->qcdcep);
  }
 assgn_qcp->qcdcep = NULL;
 assgn_qcp->qc_overridden = FALSE;
 __assign_active = FALSE;
 /* LOOKATME - extra work to set force flag only used for tracing */
 if (frc_qcp->qc_active)
  {
   /* dce list can be empty if forced to constant */
   __force_active = TRUE;
   strcpy(s1, " force active");
  }
 if (__debug_flg && __ev_tracing)
  {
   char s2[RECLEN];

   /* messages needs itree context of stmt - pop here then return, */
   /* which makes the done: pop unnecessary on this path */
   if (nd_itpop) __pop_itstk();

   __tr_msg(":: quasi-continuous deassign of reg %s%s at %s in %s now %s\n",
    __to_idnam(lhsx), s1, __bld_lineloc(__xs, (word32) __sfnam_ind,
    __slin_cnt), __msg2_blditree(__xs2, __inst_ptr),
    __to_timstr(s2, &__simtime));

   return;
  }

done:
 if (nd_itpop) __pop_itstk();
}
655
656 /*
657 * do the quasi continuous force for reg variables
658 *
659 * know lhs always entire register
660 * lhs here can be xmr
661 * force of entire reg only overrides possible active reg assign
662 *
663 * SJM 06/15/02 - new algorithm leaves dce list always linked on, turns on/off
664 * when active and keeps one different qc dce lists for each lhs concat el
665 * so can reg release each reg listed in lhs concats separately
666 */
/*
 * Perform the quasi-continuous force of one reg lhs expr (maybe XMR).
 *
 * qcastp: the force statement; lhsx: ID or GLBREF component; rhsbi: low
 * rhs bit index for a lhs-concat component (-1 = whole rhs); dcllp:
 * per-statement list of per-instance dce lists.
 *
 * A pending force is replaced; a pending qc assign is deactivated but
 * marked overridden so it is re-established on release.  The new force
 * value is stored, its rhs dces turned on, and any vpi force callbacks
 * invoked in the lhs itree context.
 */
static void do_qc_regforce(struct st_t *qcastp, struct expr_t *lhsx,
 int32 rhsbi, struct dceauxlstlst_t *dcllp)
{
 int32 nd_itpop, stmt_inum;
 struct net_t *np;
 struct gref_t *grp;
 struct qcval_t *assgn_qcp, *frc_qcp;
 struct itree_t *itp;
 char s1[RECLEN];

 /* s1 accumulates annotations for the trace message */
 strcpy(s1, "");
 /* if lhs xmr, change to target since forcing in target instance */
 nd_itpop = FALSE;
 /* for XMR need stmt context inum for getting dcellst linked on stmt */
 stmt_inum = __inum;
 itp = NULL;
 if (lhsx->optyp == GLBREF)
  {
   grp = lhsx->ru.grp;
   __xmrpush_refgrp_to_targ(grp);
   nd_itpop = TRUE;
  }
 else if (lhsx->optyp != ID) __case_terr(__FILE__, __LINE__);
 np = lhsx->lu.sy->el.enp;

 /* for reg var 2 records always allocated, first is force and 2nd assign */
 frc_qcp = &(np->nu2.qcval[2*__inum]);
 assgn_qcp = &(np->nu2.qcval[2*__inum + 1]);

 /* case 1, force pending */
 if (frc_qcp->qc_active)
  {
   strcat(s1, " replace force");
   /* turn off current (if lhs concat many) list of rhs dces */
   if (frc_qcp->qcdcep != NULL) __dcelst_off(frc_qcp->qcdcep);
   frc_qcp->qcdcep = NULL;
   frc_qcp->qc_active = FALSE;
   goto setup_force;
  }

 /* if qc assign pending, inactivate but leave ptrs and set bit */
 if (assgn_qcp->qc_active)
  {
   /* turn off the assign list - will be turned on if reg force released */
   if (assgn_qcp->qcdcep != NULL) __dcelst_off(assgn_qcp->qcdcep);
   assgn_qcp->qc_active = FALSE;
   assgn_qcp->qc_overridden = TRUE;
   strcat(s1, " override assign");
  }
setup_force:
 /* setup the new force */
 frc_qcp->qc_active = TRUE;
 frc_qcp->qcstp = qcastp;
 frc_qcp->qcrhsbi = rhsbi;
 frc_qcp->qclhsbi = -1;
 frc_qcp->lhsitp = __inst_ptr;
 /* SJM 06/23/02 - add qc dcep list (right one if lhs cat) to qcval rec */
 /* one needed for each lhs element because reg release can do separately */
 frc_qcp->qcdcep = dcllp->dcelsttab[stmt_inum];

 /* remember target context in itp so it can be re-pushed for callbacks */
 if (nd_itpop) { itp = __inst_ptr; __pop_itstk(); }
 if (__debug_flg && __ev_tracing)
  {
   char s2[RECLEN];

   /* message needs itree context of stmt not lhs if xmr */
   __tr_msg(":: quasi-continuous force of reg %s%s at %s in %s now %s\n",
    __to_idnam(lhsx), s1, __bld_lineloc(__xs, (word32) __sfnam_ind,
    __slin_cnt), __msg2_blditree(__xs2, __inst_ptr),
    __to_timstr(s2, &__simtime));
  }

 /* start force by storing rhs of force - dces will cause dce chges */
 /* these routines need ref itree loc not lhs xmr */
 __do_qc_store(np, frc_qcp, TRUE);

 /* SJM 07/19/02 - must not turn on any rhs dces until store done */
 /* turn on reg force for this set of dces if non constant rhs */
 if (frc_qcp->qcdcep != NULL) __dcelst_on(frc_qcp->qcdcep);

 /* but these need to run in itree context of lhs */
 if (nd_itpop) __push_itstk(itp);

 /* notice can have both many wire specific and many all cbs */
 if (__num_vpi_force_cbs > 0) __find_call_force_cbs(np, -1);
 if (__vpi_force_cb_always) __cb_all_rfs(np, -1, TRUE);

 if (nd_itpop) __pop_itstk();
}
756
757 /*
758 * do a quasi continuous release for a reg lhs (lvalue)
759 *
760 * know lhs always entire register
761 * lhs here can be xmr
762 * releasing reg with pending active assign re-establishes assign
763 *
764 * SJM 06/23/02 - new qcaf algorithm build qcaf lists during prep and moves
765 * to and from stmt sqca fields and turns on/off when needed
766 */
/*
 * Perform the quasi-continuous release of one forced reg lhs expr
 * (maybe XMR).
 *
 * Deactivates the force and its rhs dces.  If a qc assign was
 * overridden by the force, it is re-stored and reactivated.  Finally
 * any vpi release callbacks run in the lhs itree context.
 */
static void do_qc_regrelease(struct expr_t *lhsx)
{
 int32 nd_itpop;
 struct net_t *np;
 struct gref_t *grp;
 struct qcval_t *assgn_qcp, *frc_qcp;
 struct itree_t *itp;
 char s1[RECLEN];

 /* s1 accumulates an optional annotation for the trace message */
 strcpy(s1, "");
 /* must release reg in itree loc. of lhs if xmr */
 nd_itpop = FALSE;
 itp = NULL;
 if (lhsx->optyp == GLBREF)
  { grp = lhsx->ru.grp; __xmrpush_refgrp_to_targ(grp); nd_itpop = TRUE; }
 else if (lhsx->optyp != ID) __case_terr(__FILE__, __LINE__);
 np = lhsx->lu.sy->el.enp;

 /* for reg var 2 records always allocated, first is force and 2nd assign */
 frc_qcp = &(np->nu2.qcval[2*__inum]);
 assgn_qcp = &(np->nu2.qcval[2*__inum + 1]);

 /* if no force, nothing to do */
 if (!frc_qcp->qc_active)
  {
   /* message here needs lhs xmr context */
   __sgfinform(465,
    "attempted release of reg %s in instance %s failed - never forced",
    __to_idnam(lhsx), __msg2_blditree(__xs, __inst_ptr));
   if (nd_itpop) __pop_itstk();
   return;
  }
 frc_qcp->qc_active = FALSE;
 /* turn off active force dces */
 if (frc_qcp->qcdcep != NULL) __dcelst_off(frc_qcp->qcdcep);
 frc_qcp->qcdcep = NULL;
 __force_active = FALSE;

 /* if pending but inactive assign - must reactivate it */
 if (assgn_qcp->qc_overridden)
  {
   /* these need to run in itree context of stmt not lhs if xmr */
   if (nd_itpop) { itp = __inst_ptr; __pop_itstk(); }

   __do_qc_store(np, assgn_qcp, TRUE);
   /* build the QCAF dcelst - this must be build in rhs ref. itree loc. */

   assgn_qcp->qc_active = TRUE;
   assgn_qcp->qc_overridden = FALSE;
   /* turn stored last dce list on */
   /* FIXME - this is never first time */
   if (assgn_qcp->qcdcep != NULL) __dcelst_on(assgn_qcp->qcdcep);
   __assign_active = TRUE;
   strcpy(s1, " reactivating assign");
  }
 else { if (nd_itpop) { itp = __inst_ptr; __pop_itstk(); } }

 /* message need stmt itree context */
 if (__debug_flg && __ev_tracing)
  {
   char s2[RECLEN];

   __tr_msg(":: quasi-continuous release of reg %s%s at %s in %s now %s\n",
    __to_idnam(lhsx), s1, __bld_lineloc(__xs, (word32) __sfnam_ind,
    __slin_cnt), __msg2_blditree(__xs2, __inst_ptr),
    __to_timstr(s2, &__simtime));
  }

 /* these must run in lhs itree context for xmr */
 if (nd_itpop) __push_itstk(itp);

 /* notice can have both many wire specific and many all cbs */
 if (__num_vpi_rel_cbs > 0) __find_call_rel_cbs(np, -1);
 if (__vpi_rel_cb_always) __cb_all_rfs(np, -1, FALSE);

 if (nd_itpop) __pop_itstk();
}
844
845 /*
846 * QUASI CONTINUOUS WIRE FORCE/RELEASE ROUTINES
847 */
848
849 /*
850 * execute a quasi-continuous force on a wire
851 * possibilities here are wire, constant bit select, part select
852 * also concat of above
853 * wire must be scalared and everything decomposed to bits
854 */
__exec_qc_wireforce(struct st_t * stp)855 extern void __exec_qc_wireforce(struct st_t *stp)
856 {
857 register struct expr_t *catndp;
858 register struct dceauxlstlst_t *dcllp;
859 int32 catxlen, bi1;
860 struct expr_t *lhsx, *catlhsx;
861
862 /* DBG remove --
863 struct itree_t *sav_itp = __inst_ptr;
864 ---*/
865
866 lhsx = stp->st.sqca->qclhsx;
867 dcllp = stp->st.sqca->rhs_qcdlstlst;
868 /* only possibilities are concat and ID */
869 if (lhsx->optyp != LCB) do_qc_wireforce(stp, lhsx, -1, dcllp);
870 else
871 {
872 /* concatenate case know lhs full wire - tricky extractions of rhs */
873 for (catndp = lhsx->ru.x; catndp != NULL; catndp = catndp->ru.x,
874 dcllp = dcllp->dcelstlstnxt)
875 {
876 catlhsx = catndp->lu.x;
877 catxlen = catlhsx->szu.xclen;
878 bi1 = catndp->szu.xclen - catxlen;
879 do_qc_wireforce(stp, catlhsx, bi1, dcllp);
880 }
881 }
882 /* DBG remove --
883 if (sav_itp != __inst_ptr) __misc_terr(__FILE__, __LINE__);
884 ---*/
885 }
886
887 /*
888 * execute a quasi-continuous release
889 * only scalared wires or selects or cats not regs
890 * wire force/release is one level only
891 * called in itree context of release stmt
892 */
__exec_qc_wirerelease(struct st_t * stp)893 extern void __exec_qc_wirerelease(struct st_t *stp)
894 {
895 register struct expr_t *catndp;
896 struct expr_t *lhsx, *catlhsx;
897
898 /* DBG remove --
899 struct itree_t *sav_itp = __inst_ptr;
900 ---*/
901
902 lhsx = stp->st.sqcdea.qcdalhs;
903 /* only possibilities are concat and ID */
904 if (lhsx->optyp != LCB) do_qc_wirerelease(lhsx);
905 else
906 {
907 /* concatenate case know lhs full wire - tricky extractions of rhs */
908 for (catndp = lhsx->ru.x; catndp != NULL; catndp = catndp->ru.x)
909 { catlhsx = catndp->lu.x; do_qc_wirerelease(catlhsx); }
910 }
911 /* DBG remove --
912 if (sav_itp != __inst_ptr) __misc_terr(__FILE__, __LINE__);
913 ---*/
914 }
915
916 /*
917 * after possible concat unwinding, exec the wire force
918 *
919 * wire force is bit by bit unless vectored wire (when only entire wire)
920 *
921 * force which is for debugging overrides any wire delay assign
922 * when wire change happens (wire event process) if force active, no assign
923 * rhsbi is low bit of possible rhs section select (0 for not concat)
924 * this is called with stmt itree loc even if lhs xmr and handled push/pop
925 *
926 * SJM 11/14/00 - tran channel (inout port) force now is separate routine
927 * LOOKATME - think could simplify since for wire force always one bit
928 */
static void do_qc_wireforce(struct st_t *qcfstp, struct expr_t *lhsx,
 int32 rhsbi, struct dceauxlstlst_t *dcllp)
{
 register int32 bi, ibase;
 int32 biti, bitj, rhsbi2, lhsbi2, ndx;
 struct net_t *np;
 struct itree_t *itp;
 struct dceauxlst_t *qcdcep;

 /* step 1: get the wire range */
 /* for psel or vector, range is biti down to bitj - for scalar 0,0 */
 /* this computes any xmr new itp but does not push it */
 __get_qc_wirrng(lhsx, &np, &biti, &bitj, &itp);

 /* SJM 11/14/00 - if wire in tran chan, force all wires in it*/
 if (np->ntraux != NULL)
  {
   /* this pushes and pops lhs xmr itree context itp if needed */
   /* routine also handles any PLI force callbacks */
   /* SJM 02/26/02 - no dces so new pre-build qcaf dces does not chg this */
   __qc_tran_wireforce(np, biti, bitj, rhsbi, itp, qcfstp);

   /* SJM - 04/15/01 - must eval tran chan in lhs xmr itree context */
   if (itp != NULL) __push_itstk(itp);
   /* SJM - 03/15/01 - must re-eval all bits if this is vector range */
   /* new tran force algorithm - force wire in tran channel and then */
   /* re-eval channel */
   for (bi = biti; bi >= bitj; bi--) { __eval_tran_1bit(np, bi); }
   if (itp != NULL) __pop_itstk();

   /* but trace message must use stmt context */
   if (__debug_flg && __ev_tracing)
    {
     char s1[RECLEN], s2[RECLEN];

     __tr_msg(":: quasi-continuous force of wire %s at %s in %s now %s\n",
      __msgexpr_tostr(__xs, lhsx), __bld_lineloc(__xs2,
      (word32) __sfnam_ind, __slin_cnt), __msg2_blditree(s1, __inst_ptr),
      __to_timstr(s2, &__simtime));
    }
   return;
  }

 /* SJM 07/22/02 - need to access dce list form stmt inum not lhs if xmr */
 /* access dce list from stmt not lhs itree context */
 qcdcep = dcllp->dcelsttab[__inum];
 /* wire force must run in lhs itree context */
 if (itp != NULL) __push_itstk(itp);
 /* ibase is lhs xmr qcval base */
 ibase = __inum*np->nwid;
 if (!np->n_isavec)
  {
   /* DBG remove */
   if (biti != 0 || bitj != 0) __misc_terr(__FILE__, __LINE__);
   /* --- */

   /* scalar wire - one record; this pops then re-pushes itstk if needed */
   do_1bit_wireforce(qcfstp, np, ibase, -1, rhsbi, itp, qcdcep);
   ndx = -1;
   goto done;
  }

 /* force every bit in range using same rhs dce list for each */
 for (bi = bitj; bi <= biti; bi++)
  {
   /* rhsbi is low bit of possible lhs concat caused rhs select */
   if (rhsbi == -1) rhsbi2 = bi - bitj;
   else rhsbi2 = rhsbi + (bi - bitj);
   lhsbi2 = bi;

   do_1bit_wireforce(qcfstp, np, ibase, lhsbi2, rhsbi2, itp, qcdcep);
  }

 /* ndx passed to vpi callbacks: -1 means whole wire, else the one bit */
 if (biti != bitj) ndx = -1; else ndx = biti;

 done:
 if (__debug_flg && __ev_tracing)
  {
   char s1[RECLEN], s2[RECLEN];

   /* this must run in itree context of stmt not possible lhs xmr */
   if (itp != NULL) __pop_itstk();
   __tr_msg(":: quasi-continuous force of wire %s at %s in %s now %s\n",
    __msgexpr_tostr(__xs, lhsx), __bld_lineloc(__xs2, (word32) __sfnam_ind,
    __slin_cnt), __msg2_blditree(s1, __inst_ptr),
    __to_timstr(s2, &__simtime));
   if (itp != NULL) __push_itstk(itp);
  }

 /* these need to run in itree context of possible lhs xmr */
 /* notice can have both many wire specific and many all cbs */
 if (__num_vpi_force_cbs > 0) __find_call_force_cbs(np, ndx);
 if (__vpi_force_cb_always) __cb_all_rfs(np, ndx, TRUE);

 /* on return, itree context of force stmt needed */
 if (itp != NULL) __pop_itstk();
}
1026
1027 /*
1028 * do 1 bit wire force
1029 *
1030 * this is called with itree context of lhs that is passed in itp if lhs xmr
1031 * it handles it own popping and pushing and leave itree same as called
1032 */
static void do_1bit_wireforce(struct st_t *qcstp, struct net_t *np,
 int32 ibase, int32 lhsbi, int32 rhsbi, struct itree_t *itp,
 struct dceauxlst_t *qcdcep)
{
 int32 biti;
 struct qcval_t *frc_qcp;

 /* lhsbi of -1 means scalar wire - use record 0 */
 if (lhsbi == -1) biti = 0; else biti = lhsbi;

 /* wires have one qcval force record per bit - ibase is this instance's */
 /* base index (inum * wire width) into the per bit qcval table */
 frc_qcp = &(np->nu2.qcval[ibase + biti]);
 /* forcing to different expr */
 if (frc_qcp->qc_active)
  {
   /* already forced - disconnect the previous force's rhs dces first */
   if (frc_qcp->qcdcep != NULL) __dcelst_off(frc_qcp->qcdcep);
   frc_qcp->qcdcep = NULL;
  }

 /* setup the new force */
 frc_qcp->qcstp = qcstp;
 /* rhsbi is low bit of rhs range in case lhs concatenate */
 frc_qcp->qcrhsbi = rhsbi;
 frc_qcp->qclhsbi = lhsbi;
 /* store and build dces in lhs ref. itree location */
 frc_qcp->lhsitp = __inst_ptr;
 frc_qcp->qcdcep = qcdcep;

 /* store and setup dces needs to run in stmt itree context */
 if (itp != NULL) __pop_itstk();

 /* start force by storing rhs of force - dces will cause dce chges */
 __do_qc_store(np, frc_qcp, FALSE);
 frc_qcp->qc_active = TRUE;

 /* turn on dces after doing store */
 if (frc_qcp->qcdcep != NULL) __dcelst_on(frc_qcp->qcdcep);

 /* restore the lhs itree context the caller expects on return */
 if (itp != NULL) __push_itstk(itp);
}
1072
1073 /*
1074 * get qc wire element (after lhs concat separation)
1075 * this sets needed itree loc. to itpp, also sets wire, and range
1076 */
/*
 * decompose lhs element into wire np, bit range [*biti:*bitj] and, for an
 * xmr, the target itree loc. in *itpp (NULL when lhs is a local reference)
 *
 * caller is responsible for pushing *itpp if it needs lhs itree context
 */
extern void __get_qc_wirrng(struct expr_t *lhsx, struct net_t **nnp,
 int32 *biti, int32 *bitj, struct itree_t **itpp)
{
 int32 nd_itpop;
 word32 *wp;
 struct gref_t *grp;
 struct net_t *np;
 struct expr_t *idndp, *ndx;

 *itpp = NULL;
 np = NULL;
 nd_itpop = FALSE;
 switch ((byte) lhsx->optyp) {
  case GLBREF:
   grp = lhsx->ru.grp;
   __xmrpush_refgrp_to_targ(grp);
   nd_itpop = TRUE;
   /* FALLTHRU */
  case ID:
   /* whole wire - range is entire declared width */
   np = lhsx->lu.sy->el.enp;
   *biti = np->nwid - 1;
   *bitj = 0;
   break;
  case LSB: case PARTSEL:
   idndp = lhsx->lu.x;
   if (idndp->optyp == GLBREF)
    {
     grp = idndp->ru.grp;
     __xmrpush_refgrp_to_targ(grp);
     nd_itpop = TRUE;
    }
   np = idndp->lu.sy->el.enp;
   /* know error before here if non in range constant value */
   if (lhsx->optyp == LSB) *bitj = *biti = __get_const_bselndx(lhsx);
   else
    {
     /* part select bounds are constants stored in __contab */
     /* SJM - removed no-op self assignment of __inst_mod here */
     ndx = lhsx->ru.x->lu.x;
     wp = &(__contab[ndx->ru.xvi]);
     *biti = wp[0];
     ndx = lhsx->ru.x->ru.x;
     wp = &(__contab[ndx->ru.xvi]);
     *bitj = wp[0];
    }
   break;
  default: __case_terr(__FILE__, __LINE__);
 }
 /* for xmr, record the pushed target loc. and pop back to reference */
 if (nd_itpop) { *itpp = __inst_ptr; __pop_itstk(); }
 *nnp = np;
}
1127
1128 /*
1129 * after possible concat unwinding, exec the wire section release
1130 *
1131 * tricky part is need to force evaluation and store of all drivers
1132 * LOOKATME - is there any reason cannot just call multi driver eval
1133 * even for 1 or no driver case
1134 */
do_qc_wirerelease(struct expr_t * lhsx)1135 static void do_qc_wirerelease(struct expr_t *lhsx)
1136 {
1137 register int32 bi, ibase;
1138 int32 biti, bitj, all_forced, ndx;
1139 struct net_t *np;
1140 struct itree_t *itp;
1141 struct qcval_t *frc_qcp;
1142 char s1[RECLEN];
1143
1144 /* step 1: get the wire range */
1145 /* for psel or vector, range is biti down to bitj - for scalar 0,0 */
1146 __get_qc_wirrng(lhsx, &np, &biti, &bitj, &itp);
1147
1148 /* SJM 11/14/00 - if wire in tran chan, force all wires in it*/
1149 if (np->ntraux != NULL)
1150 {
1151 __qc_tran_wirerelease(np, biti, bitj, itp, lhsx);
1152
1153 /* SJM 04/15/01 - need to eval tran in lhs itree context */
1154 if (itp != NULL) __push_itstk(itp);
1155
1156 /* SJM - 03/15/01 - must re-eval all bits if this is vector range */
1157 /* new tran force algorithm - force wire in tran channel and then */
1158 /* re-eval channel */
1159 for (bi = biti; bi >= bitj; bi--)
1160 {
1161 __eval_tran_1bit(np, bi);
1162 }
1163 if (itp != NULL) __pop_itstk();
1164
1165 /* but messages needs stmt itree context */
1166 if (__debug_flg && __ev_tracing)
1167 {
1168 char s2[RECLEN];
1169
1170 __tr_msg(":: quasi-continuous force of wire %s at %s in %s now %s\n",
1171 __msgexpr_tostr(__xs, lhsx), __bld_lineloc(__xs2,
1172 (word32) __sfnam_ind, __slin_cnt), __msg2_blditree(s1, __inst_ptr),
1173 __to_timstr(s2, &__simtime));
1174 }
1175 return;
1176 }
1177
1178 /* SJM 07/23/02 - this needs lhs expr context */
1179 if (itp != NULL) __push_itstk(itp);
1180
1181 ibase = __inum*np->nwid;
1182 for (bi = 0; bi < np->nwid; bi++)
1183 {
1184 frc_qcp = &(np->nu2.qcval[ibase + bi]);
1185 if (frc_qcp->qc_active) goto some_bit_forced;
1186 }
1187 strcpy(s1, " - no bits forced");
1188 __sgfinform(465,
1189 "attempted release of %s in instance %s failed%s",
1190 __msgexpr_tostr(__xs2, lhsx), __msg2_blditree(__xs, __inst_ptr), s1);
1191 /* SJM 04/15/01 - if no bits forced, do not try to exec call backs */
1192 if (itp != NULL) __pop_itstk();
1193 return;
1194
1195 some_bit_forced:
1196 if (__debug_flg && __ev_tracing)
1197 {
1198 char s2[RECLEN];
1199
1200 __tr_msg(":: quasi-continuous release of wire %s at %s in %s now %s\n",
1201 __msgexpr_tostr(__xs, lhsx), __bld_lineloc(__xs2, (word32) __sfnam_ind,
1202 __slin_cnt), __msg2_blditree(s1, __inst_ptr),
1203 __to_timstr(s2, &__simtime));
1204 }
1205
1206 /* know some forced or will not get here - release all in range */
1207 /* notice wire force is per bit but no second qc assigns for wires */
1208 all_forced = TRUE;
1209 for (bi = biti; bi >= bitj; bi--)
1210 {
1211 frc_qcp = &(np->nu2.qcval[ibase + bi]);
1212 if (!frc_qcp->qc_active) { all_forced = FALSE; continue; }
1213 frc_qcp->qc_active = FALSE;
1214
1215 /* turn off dces after doing store if rhs non constant */
1216 if (frc_qcp->qcdcep != NULL) __dcelst_off(frc_qcp->qcdcep);
1217 frc_qcp->qcdcep = NULL;
1218 }
1219
1220 /* assign expected value now that force removed by evaling all drivers */
1221 /* must re-eval entire wire since other drivers may overlap forced range */
1222 /* notice this must be called from target of xmr and/or col. to */
1223 /* it handles moving back to references */
1224 __assign_1mdrwire(np);
1225
1226 if (!all_forced)
1227 {
1228 if (itp != NULL) __pop_itstk();
1229 strcpy(s1, " - some bits forced");
1230 __sgfinform(465, "attempted release of %s in instance %s failed%s",
1231 __msgexpr_tostr(__xs2, lhsx), __msg2_blditree(__xs, __inst_ptr), s1);
1232 if (itp != NULL) __push_itstk(itp);
1233
1234 /* here still need to try to exec PLI callbacks */
1235 }
1236
1237 /* must exec call backs in possible lhs xmr itree context */
1238 /* FIXME - why not calling for every bit in range - only 1 bit possible? */
1239 if (biti != bitj) ndx = -1; else ndx = biti;
1240 /* notice can have both many wire specific and many all cbs */
1241 if (__num_vpi_rel_cbs > 0) __find_call_rel_cbs(np, ndx);
1242 if (__vpi_rel_cb_always) __cb_all_rfs(np, ndx, FALSE);
1243
1244 if (itp != NULL) __pop_itstk();
1245 }
1246
1247 /*
1248 * ROUTINES TO EXEC QUASI-CONTINOUS ASSIGN OPERATIONS
1249 */
1250
1251 /*
1252 * exec the quasi-continuous assign or force under control of qcval
1253 * change may be in rhs xmr target, but must exec this is ref.
1254 * rhs wire np changed
1255 *
1256 * SJM 06/16/02 - new algorithm that builds qcval and qc dces once
1257 * works because dce points to corresponding qcval record
1258 */
extern void __assign_qcaf(struct dcevnt_t *dcep)
{
 register struct expr_t *catndp;
 int32 nd_itpop, nd_itpop2;
 struct qcval_t *qcvalp;
 struct st_t *stp;
 struct expr_t *lhsx, *lhsx2, *rhsx;
 struct xstk_t *xsp;
 struct net_t *np;

 nd_itpop = FALSE;
 /* first must move itree loc. back to ref. (where lhs and rhs are) */
 if (dcep->dce_1inst) { nd_itpop = TRUE; __push_itstk(dcep->dce_refitp); }

 /* know some bit in rhs changed, get one qcval */
 /* here using fmon field really as union since unused in qca case */
 qcvalp = dcep->dceu2.dce_qcvalp;

 /* get the qc statement */
 stp = qcvalp->qcstp;
 /* evaluate the rhs */
 rhsx = stp->st.sqca->qcrhsx;
 lhsx = stp->st.sqca->qclhsx;
 /* this converts rhs if needed and makes right lhs width */
 xsp = __eval_assign_rhsexpr(rhsx, lhsx);

 if (__debug_flg && __ev_tracing)
  {
   char s1[RECLEN], s2[RECLEN];

   __tr_msg(
":: quasi-continuous rhs at %s changed in %s now %s - assign/force to %s\n",
    __bld_lineloc(__xs, stp->stfnam_ind, stp->stlin_cnt),
    __msg2_blditree(__xs2, __inst_ptr), __to_timstr(s1, &__simtime),
    __msgexpr_tostr(s2, stp->st.sqca->qclhsx));
  }
 /* if reg form and not concatenate, easy just use changed qcval */
 if (stp->st.sqca->regform)
  {
   if (lhsx->optyp != LCB)
    {
     np = lhsx->lu.sy->el.enp;
     do_qc2_regstore(np, qcvalp, xsp);
    }
   else
    {
     /* concatenate required finding qcval for each */
     for (catndp = lhsx->ru.x; catndp != NULL; catndp = catndp->ru.x)
      {
       lhsx2 = catndp->lu.x;
       /* each lhs concatenate component may be xmr */
       if (lhsx2->optyp == GLBREF)
        { __xmrpush_refgrp_to_targ(lhsx2->ru.grp); nd_itpop2 = TRUE; }
       else nd_itpop2 = FALSE;

       np = lhsx2->lu.sy->el.enp;
       /* reg qcval pair per instance: [2*inum] force, [2*inum + 1] assign */
       if (stp->st.sqca->qcatyp == ASSIGN)
        qcvalp = &(np->nu2.qcval[2*__inum + 1]);
       else qcvalp = &(np->nu2.qcval[2*__inum]);
       do_qc2_regstore(np, qcvalp, xsp);
       if (nd_itpop2) __pop_itstk();
      }
    }
  }
 else
  {
   /* for every lhs concat part and bit do the assign */
   if (lhsx->optyp != LCB) assign_alllhs_bits(lhsx, xsp);
   else
    {
     for (catndp = lhsx->ru.x; catndp != NULL; catndp = catndp->ru.x)
      assign_alllhs_bits(catndp->lu.x, xsp);
    }
  }
 __pop_xstk();
 if (nd_itpop) __pop_itstk();
}
1336
1337 /*
1338 * assign to all qcval bits of passed lhs expr (know not concat)
1339 */
assign_alllhs_bits(struct expr_t * lhsx,struct xstk_t * xsp)1340 static void assign_alllhs_bits(struct expr_t *lhsx, struct xstk_t *xsp)
1341 {
1342 register int32 bi, ibase;
1343 int32 biti, bitj, nd_itpop;
1344 struct net_t *np;
1345 struct itree_t *lhsitp;
1346 struct qcval_t *frc_qcp;
1347
1348 nd_itpop = FALSE;
1349 __get_qc_wirrng(lhsx, &np, &biti, &bitj, &lhsitp);
1350 if (lhsitp != NULL) { nd_itpop = TRUE; __push_itstk(lhsitp); }
1351 /* must run this in lhs itree loc. */
1352 ibase = np->nwid*__inum;
1353 for (bi = biti; bi >= bitj; bi--, frc_qcp--)
1354 {
1355 frc_qcp = &(np->nu2.qcval[ibase + bi]);
1356 do_qc2_wirestore(np, frc_qcp, xsp);
1357 }
1358 if (nd_itpop) __pop_itstk();
1359 }
1360
1361 /*
1362 * execute a qc assign under control of qc val record into net np
1363 *
1364 * this must be called from ref. stmt (not xmr lhs) itree location
1365 * notice assign is either entire reg or bit of wire
1366 */
extern void __do_qc_store(struct net_t *np, struct qcval_t *qcvalp, int32 is_reg)
{
 struct expr_t *rhsx;
 struct xstk_t *xsp;

 /* evaluate rhs - conversion and width adjust to match lhs done inside */
 rhsx = qcvalp->qcstp->st.sqca->qcrhsx;
 xsp = __eval_assign_rhsexpr(rhsx, qcvalp->qcstp->st.sqca->qclhsx);
 /* dispatch to the per bit wire store or the whole reg store */
 if (!is_reg) do_qc2_wirestore(np, qcvalp, xsp);
 else do_qc2_regstore(np, qcvalp, xsp);
 __pop_xstk();
}
1379
1380 /*
1381 * store q qc value with rhs in xsp for reg entire wire only
1382 * ths must run in assign/force stmt context since get rhs
1383 */
do_qc2_regstore(struct net_t * np,struct qcval_t * qcvalp,struct xstk_t * xsp)1384 static void do_qc2_regstore(struct net_t *np, struct qcval_t *qcvalp,
1385 struct xstk_t *xsp)
1386 {
1387 int32 nd_itpop, nd_xpop;
1388 struct xstk_t *xsp2;
1389
1390 /* know lhs always entire reg but rhs may need select out */
1391 /* rhsbi field is low bit of section from lhs concatenate if needed */
1392 if (qcvalp->qcrhsbi != -1)
1393 {
1394 push_xstk_(xsp2, np->nwid);
1395 __rhspsel(xsp2->ap, xsp->ap, qcvalp->qcrhsbi, np->nwid);
1396 __rhspsel(xsp2->bp, xsp->bp, qcvalp->qcrhsbi, np->nwid);
1397 nd_xpop = TRUE;;
1398 }
1399 else { xsp2 = xsp; nd_xpop = FALSE; }
1400
1401 if (qcvalp->lhsitp != NULL)
1402 { nd_itpop = TRUE; __push_itstk(qcvalp->lhsitp); }
1403 else nd_itpop = FALSE;
1404
1405 /* emit debug tracing message if needed */
1406 if (__debug_flg && __ev_tracing)
1407 {
1408 __tr_msg(" QC immediate store of %s into reg %s\n",
1409 __xregab_tostr(__xs2, xsp2->ap, xsp2->bp, xsp2->xslen,
1410 qcvalp->qcstp->st.sqca->qcrhsx), np->nsym->synam);
1411 }
1412 __chg_st_val(np, xsp2->ap, xsp2->bp);
1413
1414 if (nd_xpop) __pop_xstk();
1415 if (nd_itpop) __pop_itstk();
1416 }
1417
1418 /*
1419 * store qc value with rhs in xsp for wire either scalar or 1 bit select only
1420 */
do_qc2_wirestore(struct net_t * np,struct qcval_t * qcvalp,struct xstk_t * xsp)1421 static void do_qc2_wirestore(struct net_t *np, struct qcval_t *qcvalp,
1422 struct xstk_t *xsp)
1423 {
1424 int32 ind, nd_itpop, nd_xpop;
1425 byte *sbp;
1426 struct xstk_t *xsp2, *xsp3;
1427
1428 /* DBG remove ---
1429 __dbg_msg(" +++ before QC cat rhs value %s rhsbi=%d, lhsbi=%d\n",
1430 __regab_tostr(__xs, xsp->ap, xsp->bp, xsp->xslen, BHEX, FALSE),
1431 qcvalp->qcrhsbi, qcvalp->qclhsbi);
1432 --- */
1433
1434 /* 2 cases that require selecting a bit from evaled rhs xsp */
1435 /* rhs bi or lhs bi not -1 because lhs concatenate */
1436 if (qcvalp->qcrhsbi != -1 || qcvalp->qclhsbi != -1)
1437 {
1438 ind = ((qcvalp->qcrhsbi == -1) ? 0 : qcvalp->qcrhsbi);
1439 push_xstk_(xsp2, 1);
1440 xsp2->ap[0] = rhsbsel_(xsp->ap, ind);
1441 xsp2->bp[0] = rhsbsel_(xsp->bp, ind);
1442 nd_xpop = TRUE;
1443 }
1444 else { xsp2 = xsp; nd_xpop = FALSE; }
1445
1446 /* now know lot bit of xsp is value to assign */
1447 /* DBG remove ---
1448 if (__debug_flg)
1449 {
1450 __dbg_msg(" +++ after QC cat rhs value %s rhsbi=%d, lhsbi=%d\n",
1451 __regab_tostr(__xs, xsp2->ap, xsp2->bp, xsp2->xslen, BHEX, FALSE),
1452 qcvalp->qcrhsbi, qcvalp->qclhsbi);
1453 }
1454 --- */
1455
1456 if (qcvalp->lhsitp != NULL)
1457 { nd_itpop = TRUE; __push_itstk(qcvalp->lhsitp); }
1458 else nd_itpop = FALSE;
1459
1460 /* emit debug tracing message if needed */
1461 if (__debug_flg && __ev_tracing)
1462 {
1463 if (qcvalp->qclhsbi == -1) strcpy(__xs, "");
1464 else sprintf(__xs, "[%d]", qcvalp->qclhsbi);
1465 __tr_msg(" QC immediate store of %s into wire %s%s\n",
1466 __xregab_tostr(__xs2, xsp2->ap, xsp2->bp, xsp->xslen,
1467 qcvalp->qcstp->st.sqca->qcrhsx), np->nsym->synam, __xs);
1468 }
1469
1470 /* quasi-continuous assign to strength wire always strong */
1471 if (np->n_stren)
1472 {
1473 push_xstk_(xsp3, 4);
1474 sbp = (byte *) xsp3->ap;
1475 /* LOOKATME could simpify since know only 1 bit */
1476 __st_standval(sbp, xsp2, ST_STRVAL);
1477 /* notice if vector for wire know lhs bi always non zero */
1478
1479 /* SJM 11/14/00 slightly better to just pass a part as sbp */
1480 /* notice if vector for wire know lhs bi always non zero */
1481 if (np->n_isavec) __chg_st_bit(np, qcvalp->qclhsbi, (word32) sbp[0], 0L);
1482
1483 /* AIV 07/09/04 - was calling macro but macro never checked record nchg */
1484 /* notice for non stren case was calling chg st as needed */
1485 else __chg_st_val(np, (word32 *) sbp, 0L);
1486 __pop_xstk();
1487 }
1488 else
1489 {
1490 if (np->n_isavec)
1491 __chg_st_bit(np, qcvalp->qclhsbi, xsp2->ap[0], xsp2->bp[0]);
1492 else __chg_st_val(np, xsp2->ap, xsp2->bp);
1493 }
1494 if (nd_xpop) __pop_xstk();
1495 if (nd_itpop) __pop_itstk();
1496 }
1497
1498 /*
1499 * ROUTINES TO MOVE ITREE LOCATION (PUSH/POP ITREE STACK)
1500 */
1501
1502 /*
1503 * routine to push something onto itstk when there is no itree context
1504 */
extern void __push_wrkitstk(struct mod_t *mdp, int32 winum)
{
 struct itree_t *witp;
 struct inst_t *wip;

 /* recycle a dummy itree/inst pair from the free lists when possible */
 if (__tmpitp_freelst != NULL)
  {
   witp = __tmpitp_freelst;
   wip = __tmpip_freelst;
   __tmpitp_freelst = witp->up_it;
   __tmpip_freelst = (struct inst_t *) wip->imsym;
  }
 else
  {
   witp = (struct itree_t *) __my_malloc(sizeof(struct itree_t));
   wip = (struct inst_t *) __my_malloc(sizeof(struct inst_t));
  }

 /* fill in the work pair - no parent, no children */
 witp->itinum = winum;
 witp->up_it = NULL;
 witp->in_its = NULL;
 witp->itip = wip;
 wip->imsym = mdp->msym;
 /* NULL isym indicates dummy work itp */
 wip->isym = NULL;
 __push_itstk(witp);
}
1540
1541 /*
1542 * routine to pop something from itstk when there is no itree context
1543 * must pair with push_wrkistk
1544 */
__pop_wrkitstk(void)1545 extern void __pop_wrkitstk(void)
1546 {
1547 struct itree_t *tmpitp;
1548 struct inst_t *tmpip;
1549
1550 tmpitp = __inst_ptr;
1551 /* REMOVE ---
1552 __dbg_msg("### popping inst mod (before) = %s\n", __inst_mod->msym->synam);
1553 --- */
1554 tmpip = tmpitp->itip;
1555 __pop_itstk();
1556 /* DBG REMOVE ---
1557 if (__inst_mod != NULL)
1558 __dbg_msg("### popping inst mod (after) = %s\n", __inst_mod->msym->synam);
1559 -- */
1560 tmpip = tmpitp->itip;
1561 tmpitp->up_it = __tmpitp_freelst;
1562 __tmpitp_freelst = tmpitp;
1563 tmpip->imsym = (struct sy_t *) __tmpitp_freelst;
1564 __tmpip_freelst = tmpip;
1565 }
1566
1567 /*
1568 * push new current itp and set current inst num
1569 * LOOKATME - maybe these could be macros (see cvmacros.h)
1570 * SJM 04/20/01 - this now used instead of macros since do not slow down much
1571 */
1572 /* ---- */
__push_itstk(struct itree_t * itp)1573 extern void __push_itstk(struct itree_t *itp)
1574 {
1575 /* DBG remove --
1576 if (itp == NULL) __misc_terr(__FILE__, __LINE__);
1577 if (__itspi + 1 >= MAXITDPTH) __misc_terr(__FILE__, __LINE__);
1578 --- */
1579 __itstk[++__itspi] = __inst_ptr = itp;
1580 /* DBG remove --
1581 if (__inst_ptr == NULL) __misc_terr(__FILE__, __LINE__);
1582 --- */
1583 __inst_mod = __inst_ptr->itip->imsym->el.emdp;
1584 __inum = __inst_ptr->itinum;
1585 /* DBG REMOVE ---
1586 if (__debug_flg)
1587 {
1588 if (__inst_ptr->itip == NULL) strcpy(__xs, "**no itip (dummy)**");
1589 else if (__inst_ptr->itip->isym == NULL) strcpy(__xs, "*no ip (wrkitp?)");
1590 else __msg2_blditree(__xs, __inst_ptr);
1591 __dbg_msg(
1592 "+++ pushing itree stack to height %d - inum %d, inst %s mod %s\n",
1593 __itspi + 1, __inum, __xs, __inst_mod->msym->synam);
1594 }
1595 -- */
1596 /* DBG remove - this can happen but need to study example */
1597 /* if (__itspi > 6) __misc_terr(__FILE__, __LINE__); */
1598 }
1599 /* --- */
1600
1601 /*
1602 * pop cur. itp - module itree place
1603 *
1604 * this is for debugging normally use this macro
1605 */
1606 /* --- */
__pop_itstk(void)1607 extern void __pop_itstk(void)
1608 {
1609 /* DBG remove ---
1610 if (__itspi < 0) __misc_terr(__FILE__, __LINE__);
1611 -- */
1612 if (--__itspi < 0)
1613 {
1614 __inst_ptr = NULL;
1615 __inst_mod = NULL;
1616 __inum = 0xffffffff;
1617 }
1618 else
1619 {
1620 __inst_ptr = __itstk[__itspi];
1621
1622 /* DBG remove ---
1623 if (__inst_ptr == NULL || __inum == 0xffffffff)
1624 __misc_terr(__FILE__, __LINE__);
1625 -- */
1626 __inst_mod = __inst_ptr->itip->imsym->el.emdp;
1627 __inum = __inst_ptr->itinum;
1628 }
1629
1630 /* DBG remove ---
1631 if (__debug_flg)
1632 __dbg_msg("+++ popping itree stack to height %d\n", __itspi + 1);
1633 --- */
1634 }
1635 /* --- */
1636
1637 /*
1638 * push an itree stack entry that is the global inst. place
1639 * current itp place is place xmr appears - push target after tracing
1640 *
1641 * this pushes target given reference xmr location
1642 * by here all selects of instance arrays have been resolved to right symbol
1643 */
__xmrpush_refgrp_to_targ(struct gref_t * grp)1644 extern void __xmrpush_refgrp_to_targ(struct gref_t *grp)
1645 {
1646 struct itree_t *itp;
1647
1648 /* rooted case */
1649 if (grp->is_rooted)
1650 {
1651 __push_itstk(grp->targu.targitp);
1652 /* DBG remove ---
1653 if (__debug_flg)
1654 __dbg_msg("== pushing rooted global %s target (itree=%s)\n",
1655 grp->gnam, __msg2_blditree(__xs, __inst_ptr));
1656 --- */
1657 return;
1658 }
1659 /* upward relative case - requires linear list search */
1660 if (grp->upwards_rel)
1661 {
1662 __push_itstk(grp->targu.uprel_itps[__inum]);
1663 /* --- DBG remove */
1664 if (__debug_flg)
1665 __dbg_msg("== pushing upward relative global %s target (itree=%s)\n",
1666 grp->gnam, __msg2_blditree(__xs, __inst_ptr));
1667 /* -- */
1668 return;
1669 }
1670 /* normal downward instance path case */
1671 itp = __find_unrt_targitp(grp, __inst_ptr, 0);
1672 __push_itstk(itp);
1673
1674 /* DBG remove ---
1675 if (__debug_flg)
1676 __dbg_msg("== pushing downward global %s target (itree=%s) \n",
1677 grp->gnam, __msg2_blditree(__xs, __inst_ptr));
1678 --- */
1679 }
1680
1681 /*
1682 * return target itp given current itp and downward xmr (gref)
1683 *
1684 * by here all selects of instance arrays resolved to right inst symbol
1685 */
extern struct itree_t *__find_unrt_targitp(struct gref_t *grp,
 register struct itree_t *itp, int32 startii)
{
 register int32 gri;
 int32 ii;
 byte *bp1, *bp2;
 struct inst_t *ip;
 struct mod_t *imdp;

 /* notice first is inst. in module gref appears in */
 imdp = itp->itip->imsym->el.emdp;
 for (gri = startii;;)
  {
   /* component may be an instance array select - resolve to one inst */
   if (grp->grxcmps[gri] != NULL) ip = __get_gref_giarr_ip(grp, gri, itp);
   else ip = grp->grcmps[gri]->el.eip;

   /* DBG remove RELEASE REMOVEME -- */
   if (imdp->minsts == NULL) __misc_terr(__FILE__, __LINE__);
   /* -- */

   /* making use of c pointer subtraction correction object size here */
   /* changing to byte ptr because not sure of c ptr size object rules */
   bp1 = (byte *) ip;
   bp2 = (byte *) imdp->minsts;
   /* DBG remove ---
   if (bp2 > bp1)
    {
     __dbg_msg("== global %s comp=%d mod=%s inst=%s itp=%s(%s).\n",
      grp->gnam, gri, imdp->msym->synam, ip->isym->synam,
      itp->itip->isym->synam, itp->itip->imsym->synam);
     __arg_terr(__FILE__, __LINE__);
    }
   --- */
   /* ii is the inst's index in parent's minsts; itree children parallel it */
   ii = (bp1 - bp2)/sizeof(struct inst_t);
   itp = &(itp->in_its[ii]);
   /* DBG remove ---
   if (__debug_flg)
    {
     __dbg_msg("== global %s comp=%d mod=%s inst=%s num=%d, itp=%s(%s).\n",
      grp->gnam, gri, imdp->msym->synam, ip->isym->synam, ii,
      itp->itip->isym->synam, itp->itip->imsym->synam);
    }
   --- */
   /* descend until the last path component has been consumed */
   if (++gri > grp->last_gri) break;
   imdp = itp->itip->imsym->el.emdp;
  }
 return(itp);
}
1734
1735 /*
1736 * access a gref inst array index and return the right expanded instance
1737 *
1738 * assumes grxcmps folded to 32 bit non x/z constants (maybe IS)
1739 * LOOKATME - this always checks ranges - maybe should have separate routine
1740 */
extern struct inst_t *__get_gref_giarr_ip(struct gref_t *grp, int32 gri,
 struct itree_t *itp)
{
 int32 indx, ii2, ii3;
 byte *bp1, *bp2;
 struct sy_t *syp;
 struct xstk_t *xsp;
 struct giarr_t *giap;
 struct inst_t *ip;
 struct mod_t *imdp;

 syp = grp->grcmps[gri];
 /* DBG remove --- */
 if (!syp->sy_giabase) __arg_terr(__FILE__, __LINE__);
 /* --- */
 /* select expr must be evaluated in the itree context of the reference */
 __push_itstk(itp);
 /* evaluate - this just loads constant but may be IS form constant */
 /* know checked so will be non x/z 32 bit value or will not get here */
 xsp = __eval_xpr(grp->grxcmps[gri]);
 indx = (int32) xsp->ap[0];
 __pop_xstk();
 __pop_itstk();
 imdp = itp->itip->imsym->el.emdp;
 /* syp points to first instance of expanded */
 ip = syp->el.eip;
 /* ii2 is offset of that first instance within module's minsts table */
 bp1 = (byte *) ip;
 bp2 = (byte *) imdp->minsts;
 ii2 = (bp1 - bp2)/sizeof(struct inst_t);
 giap = imdp->miarr[ii2];
 /* map declared index to 0 based offset, handling both range directions */
 if (giap->gia1 > giap->gia2)
  {
   if (indx > giap->gia1 || indx < giap->gia2) goto bad_ref;
   else ii3 = giap->gia_bi + (giap->gia1 - indx);
  }
 else
  {
   if (indx < giap->gia1 || indx > giap->gia2) goto bad_ref;
   ii3 = giap->gia_bi + (indx - giap->gia1);
  }
done:
 ip = &(imdp->minsts[ii3]);
 return(ip);

bad_ref:
 /* out of range: emit error then recover using first element of array */
 __gferr(680, grp->grfnam_ind, grp->grflin_cnt,
  "hierarchical reference %s of %s index %d (comp. %d) out of range [%d:%d]",
  grp->gnam, syp->synam, indx, gri + 1, giap->gia1, giap->gia2);
 ii3 = giap->gia_bi;
 goto done;
}
1791
1792 /*
1793 * routine to push ref. itstk place onto inst. stack when at define place
1794 * propagation caller will pop back to target place - only called if xmr
1795 * notice if this is called, caller will need to pop
1796 */
extern int32 __match_push_targ_to_ref(word32 xmrtyp, struct gref_t *grp)
{
 register int32 i, gri;
 struct itree_t *itp;
 struct inst_t *ip;

 switch ((byte) xmrtyp) {
  case XNP_DOWNXMR:
   /* since current itp is target - back up to reference itp */
   itp = __inst_ptr;
   /* SJM 04/17/03 - must move up and check that each one on way up */
   /* matches inst above since down can be multiply instanciated */
   for (gri = grp->last_gri; gri >= 0; gri--)
    {
     /* need to use inst array select expr which is itree loc. specific */
     if (grp->grxcmps[gri] != NULL) ip = __get_gref_giarr_ip(grp, gri, itp);
     else ip = grp->grcmps[gri]->el.eip;

     /* mismatch means this target instance is not reached through this */
     /* gref path - caller must not push, return F without pushing */
     if (ip != itp->itip)
      {
       /* DBG remove --- */
       if (__debug_flg)
        {
         __dbg_msg(
          "== down glb drive/load of %s (in %s) mismatch instance %s (comp %s != %s)\n",
          grp->gnam, grp->gin_mdp->msym->synam, __msg2_blditree(__xs,
          __inst_ptr), ip->isym->synam, itp->itip->isym->synam);
        }
       /* --- */
       return(FALSE);
      }
     /* DBG remove --- */
     if (itp->up_it == NULL) __misc_terr(__FILE__, __LINE__);
     /* --- */
     /* DBG remove --- */
     if (__debug_flg)
      {
       __dbg_msg(
        "== down glb %s drive/load move from decl in targ %s to ref in %s\n",
        grp->gnam, __msg2_blditree(__xs, itp),
        __msg2_blditree(__xs2, itp->up_it));
      }
     /* --- */
     itp = itp->up_it;
    }
   break;
  case XNP_RTXMR:
   /* rooted not part of np union field - never called uses filter fld */
   __case_terr(__FILE__, __LINE__);
   return(FALSE);
  case XNP_UPXMR:
   /* SJM 09/14/00 - must search for current target place to move back */
   /* to location referenced in */
   /* SJM 07/01/03 - index was one too many */
   for (i = 0; i < grp->gin_mdp->flatinum; i++)
    {
     if (__inst_ptr == grp->targu.uprel_itps[i])
      {
       /* DBG remove --- */
       if (__debug_flg)
        {
         __dbg_msg(
          "== up rel glb %s drive/load move from decl in targ %s to ref in %s\n",
          grp->gnam, __msg2_blditree(__xs, __inst_ptr),
          __msg2_blditree(__xs2, grp->gin_mdp->moditps[i]));
        }
       /* --- */
       goto got_itp;
      }
    }
   /* SJM 05/23/03 - possible for down declared in upwards rel to all go */
   /* to only some instances so this declared in may not exist */
   if (__debug_flg)
    {
     __dbg_msg(
      "== uprel glb %s drive/load of %s (in %s) no matching uprel inst\n",
      grp->gnam, grp->gin_mdp->msym->synam, __msg2_blditree(__xs,
      __inst_ptr));
    }
   return(FALSE);
got_itp:
   /* i still indexes the matched slot - use same index into moditps */
   itp = grp->gin_mdp->moditps[i];
   break;
  default: __case_terr(__FILE__, __LINE__); return(TRUE);
 }
 /* caller must pop this pushed reference itree location */
 __push_itstk(itp);
 return(TRUE);
}
1885
1886 /*
1887 * CONTINUOUS ASSIGNMENT ROUTINES
1888 */
1889
1890 /*
1891 * execute concat assign part of non strength lhs wire continuous assign
1892 *
1893 * know rhs non strength and no component of lhs needs strength
1894 * i.e. continuous assign does not drive strength (strong0, strong1)
1895 * caller must pop stack on return
1896 */
extern void __exec_ca_concat(struct expr_t *xlhs, register word32 *ap,
 register word32 *bp, int32 must_schedule)
{
 register struct expr_t *commap;
 register int32 lhswid;
 register struct xstk_t *tmpxsp;
 int32 lowbi;
 struct expr_t *lhsxp;

 /* walk comma list - lhs concat components assigned left to right */
 for (commap = xlhs->ru.x; commap != NULL; commap = commap->ru.x)
  {
   lhsxp = commap->lu.x;
   lhswid = lhsxp->szu.xclen;
   /* comma node width is remaining distance to low bit - difference */
   /* gives low bit index of rhs slice for this component */
   lowbi = commap->szu.xclen - lhswid;

   /* copy matching width slice of rhs value onto a new tos register */
   /* assignment is always exactly component width */
   push_xstk_(tmpxsp, lhswid);
   if (lhswid != 1)
    {
     __rhspsel(tmpxsp->ap, ap, lowbi, lhswid);
     __rhspsel(tmpxsp->bp, bp, lowbi, lhswid);
    }
   else
    {
     tmpxsp->ap[0] = rhsbsel_(ap, lowbi);
     tmpxsp->bp[0] = rhsbsel_(bp, lowbi);
    }
   /* nested lhs concatenates illegal - component is simple lvalue */
   /* part of immediate assign so must not inc assign counter */
   __exec_conta_assign(lhsxp, tmpxsp->ap, tmpxsp->bp, must_schedule);
   __pop_xstk();
  }
}
1938
1939 /*
1940 * execute assign part of strength lhs wire continuous assign concat
1941 *
1942 * know rhs has strength - maybe only (strong0, strong1) because at least
1943 * one lhs wire needs strength - if any lhs concat component non stren
1944 * that section converted back to value here
1945 * caller must pop stack on return
1946 */
extern void __stren_exec_ca_concat(struct expr_t *xlhs, byte *sbp,
 int32 must_schedule)
{
 register int32 catxlen;
 register struct expr_t *catndp;
 register int32 sbi, sbi2;
 int32 bi1;
 byte *sbp2;
 struct expr_t *catlhsx;
 struct xstk_t *catxsp, *xsp;

 /* do lhs concatenate assigns from left to right */
 for (catndp = xlhs->ru.x; catndp != NULL; catndp = catndp->ru.x)
  {
   catlhsx = catndp->lu.x;
   catxlen = catlhsx->szu.xclen;
   /* bi1 is low bit of rhs evaluted value part select */
   bi1 = catndp->szu.xclen - catxlen;

   /* --- DBG remove
   if (__debug_flg)
    __dbg_msg(
     "+++lhs proc: total cat wid=%u, low index=%d, wid=%u, remaining wid=%u\n",
     xlhs->szu.xclen, bi1, catxlen, catndp->szu.xclen);
   --- */

   /* select current pos. right width piece from rhs and put on tos reg */
   /* notice assignment always same width */
   /* 4x width because each bit is one 8 bit strength byte in ap region */
   push_xstk_(catxsp, 4*catxlen);
   sbp2 = (byte *) catxsp->ap;
   if (catxlen == 1) sbp2[0] = sbp[bi1];
   else
    {
     /* sbp2 and sbi2 is section selected from concatenate rhs */
     for (sbi = bi1, sbi2 = 0; sbi2 < catxlen; sbi++, sbi2++)
      sbp2[sbi2] = sbp[sbi];
    }
   if (!catlhsx->x_stren)
    {
     /* component wire has no strength - strip slice back to a/b form */
     push_xstk_(xsp, catxlen);
     __rem_stren(xsp->ap, xsp->bp, sbp2, catxlen);
     /* DBG remove */
     if (__debug_flg && __ev_tracing)
      __tr_msg("+++ strength concat assign - needed to remove strength?");
     /* --- */
     __exec_conta_assign(catlhsx, xsp->ap, xsp->bp, must_schedule);
     __pop_xstk();
    }
   else
    {
     /* also nested lhs concatenates illegal - will never appear */
     /* this is part of immediate assign must not inc assign counter */
     __exec_conta_assign(catlhsx, catxsp->ap, catxsp->bp, must_schedule);
    }
   /* pop the slice register pushed at loop top */
   __pop_xstk();
  }
}
2004
2005 /*
2006 * actually execute the continuous assign store
2007 *
2008 * concatenates removed before here
2009 * the various store routines assume xsp stren consistent with wire type
2010 * i.e. this routine handles both stren and non stren
2011 * know xlhs is same width as ap/bp that is new rhs value
2012 *
2013 * key here is that mutiple driver or supply0/1 or tri0/1 or tran chan nets
2014 * never just assigned here without fi>1 eval. all drivers evaluated
2015 *
2016 * notice that if this is called from force mechanism caller must
2017 * turn off any bits before or will not really change value
2018 */
extern void __exec_conta_assign(struct expr_t *xlhs, register word32 *ap,
 register word32 *bp, int32 must_schedule)
{
 int32 nd_itpop, ri1, ri2;
 struct expr_t *idndp, *ndx1;
 struct net_t *np;
 struct gref_t *grp;

 /* notice if forced still must schedule since force maybe off later */
 if (must_schedule) { sched_conta_assign(xlhs, ap, bp); return; }

 nd_itpop = FALSE;
 switch ((byte) xlhs->optyp) {
  case GLBREF:
   /* xmr lhs: move itree place from reference to target instance */
   grp = xlhs->ru.grp;
   __xmrpush_refgrp_to_targ(grp);
   nd_itpop = TRUE;
   /* FALLTHRU */
  case ID:
   np = xlhs->lu.sy->el.enp;
   /* this add the changed wire to nchglst if needed */
   if (np->frc_assgn_allocated)
    {
     /* return F if all of wire forced, nothing to do */
     if (!__correct_forced_newwireval(np, ap, bp))
      { if (nd_itpop) __pop_itstk(); return; }
    }
   /* SJM 03/15/01 - change to fields in net record */
   if (np->nchg_nd_chgstore) __chg_st_val(np, ap, bp);
   else __st_val(np, ap, bp);

   if (nd_itpop) __pop_itstk();
   break;
  case LSB:
   /* for now first determine if array index */
   idndp = xlhs->lu.x;
   ndx1 = xlhs->ru.x;
   np = idndp->lu.sy->el.enp;
   /* the 1 bit is forced nothing to do else normal assign */
   if (np->frc_assgn_allocated
    && __forced_inhibit_bitassign(np, idndp, ndx1)) return;
   __assign_to_bit(np, idndp, ndx1, ap, bp);
   break;
  case PARTSEL:
   idndp = xlhs->lu.x;
   np = idndp->lu.sy->el.enp;
   /* part select bounds are constants folded into __contab */
   ri1 = (int32) __contab[xlhs->ru.x->lu.x->ru.xvi];
   ri2 = (int32) __contab[xlhs->ru.x->ru.x->ru.xvi];

   /* if all bits of lhs part select range forced, do not do assign */
   /* this also update ap and bp to include forced values */
   if (np->frc_assgn_allocated
    && !forced_assign_to_psel(idndp, ri1, ri2, np, ap, bp)) return;

   /* know part select here in range and converted to h:0 index form */
   __assign_to_psel(idndp, ri1, ri2, np, ap, bp);
   break;
  default: __case_terr(__FILE__, __LINE__);
 }
 /* DBG remove --- */
 if (__debug_flg && __ev_tracing) trace_conta_assign(xlhs, ap, bp);
 /* --- */
}
2082
2083 /*
2084 * trace conta assign
2085 */
static void trace_conta_assign(struct expr_t *xlhs, word32 *ap, word32 *bp)
{
 char valstr[RECLEN], xprstr[RECLEN], itstr[RECLEN];

 /* format new value - strength byte form if lhs wire carries strength */
 if (!xlhs->x_stren)
  {
   __regab_tostr(valstr, ap, bp, xlhs->szu.xclen, BHEX, FALSE);
  }
 else
  {
   __st_regab_tostr(valstr, (byte *) ap, xlhs->szu.xclen);
  }
 /* SJM 08/24/03 - printing chg state wrong since this is not chg */
 __tr_msg("## declarative assign to %s (itree=%s) value %s\n",
  __msgexpr_tostr(xprstr, xlhs), __msg2_blditree(itstr, __inst_ptr), valstr);
}
2096
2097 /*
2098 * correct an entire wire about to be assigned value for forced bits
2099 * if no assign needed return F else T
2100 * must be called from correct itree location
2101 * know has qcval or will not get here
2102 * this does not leave anything on stack
2103 */
extern int32 __correct_forced_newwireval(struct net_t *np, word32 *ap, word32 *bp)
{
 register int32 bi, ibase, wi;
 int32 nd_assign, wlen;
 byte *sbp, *sbp2;
 struct xstk_t *xsp, *xsp2;

 /* if scalar or 1 bit only, no correction but maybe skip assign */
 if (!np->n_isavec)
  {
   if (np->nu2.qcval[__inum].qc_active) return(FALSE);
   return(TRUE);
  }
 /* per bit force records for this instance start at __inum*width */
 ibase = __inum*np->nwid;
 if (np->n_stren)
  {
   sbp = (byte *) ap;
   get_stwire_addr_(sbp2, np);
   /* trick is to replace forced bits so new assign is same as forced val */
   for (nd_assign = FALSE, bi = 0; bi < np->nwid; bi++)
    {
     /* some bits not forced - so need assign */
     if (np->nu2.qcval[ibase + bi].qc_active) sbp[bi] = sbp2[bi];
     else nd_assign = TRUE;
    }
   return(nd_assign);
  }

 /* non strength vector: build 1 bit per forced bit mask in xsp->ap */
 push_xstk_(xsp, np->nwid);
 __bld_forcedbits_mask(xsp->ap, np);
 zero_allbits_(xsp->bp, np->nwid);

 /* if all bits forced nothing to do */
 if (__vval_is1(xsp->ap, np->nwid)) { __pop_xstk(); return(FALSE); }
 /* if no bits forced, just assign ap */
 if (vval_is0_(xsp->ap, np->nwid)) { __pop_xstk(); return(TRUE); }

 /* only load wire if some bits forced */
 push_xstk_(xsp2, np->nwid);
 __ld_wire_val(xsp2->ap, xsp2->bp, np);

 /* take new value and merge in some forced bits */
 wlen = wlen_(np->nwid);
 for (wi = 0; wi < wlen; wi++)
  {
   /* remove forced bits from new value */
   ap[wi] &= ~(xsp->ap[wi]);
   bp[wi] &= ~(xsp->ap[wi]);
   /* remove non forced bits from new wire */
   xsp2->ap[wi] &= (xsp->ap[wi]);
   xsp2->bp[wi] &= (xsp->ap[wi]);
   /* combine old maybe forced bits into new value - so will be same */
   ap[wi] |= (xsp2->ap[wi]);
   bp[wi] |= (xsp2->ap[wi]);
  }
 /* pop both the mask and the loaded wire value registers */
 __pop_xstk();
 __pop_xstk();
 return(TRUE);
}
2163
2164 /*
2165 * convert the per bit forced table to a per bit forced vector
2166 */
extern void __bld_forcedbits_mask(word32 *ap, struct net_t *np)
{
 register int32 bitn;
 int32 base;

 /* start from all zeros then set a 1 in ap for every forced bit */
 zero_allbits_(ap, np->nwid);
 /* per bit force records for this instance start at __inum*width */
 base = __inum*np->nwid;
 for (bitn = 0; bitn < np->nwid; bitn++)
  {
   if (!np->nu2.qcval[base + bitn].qc_active) continue;
   __lhsbsel(ap, bitn, 1L);
  }
}
2180
2181 /*
2182 * schedule assignment of all bits from wire that is lhs of conta
2183 * processing conta assign event - this delays wire value change
2184 */
static void sched_conta_assign(struct expr_t *xlhs, register word32 *ap,
 register word32 *bp)
{
 int32 nd_itpop;
 byte *sbp;
 struct gref_t *grp;
 struct net_t *np;

 nd_itpop = FALSE;
 switch ((byte) xlhs->optyp) {
  case GLBREF:
   /* xmr lhs: move itree place from reference to target instance */
   grp = xlhs->ru.grp;
   __xmrpush_refgrp_to_targ(grp);
   nd_itpop = TRUE;
   /* FALLTHRU */
  case ID:
   np = xlhs->lu.sy->el.enp;
   /* NOTE(review): assumes lhs wire has delay so nu union is rngdwir */
   /* form here - callers appear to only schedule for delayed wires */
   if (np->nu.rngdwir->n_delrep == DT_PTHDST)
    {
     /* path destination wire - use path delay scheduling */
     if (np->n_stren)
      {
       /* for strength wire, ap region is really 1 byte per bit */
       sbp = (byte *) ap;
       __pth_stren_schd_allofwire(np, sbp, xlhs->szu.xclen);
      }
     else __pth_schd_allofwire(np, ap, bp, xlhs->szu.xclen);
    }
   else
    {
     /* normal wire delay scheduling */
     if (np->n_stren)
      {
       sbp = (byte *) ap;
       __wdel_stren_schd_allofwire(np, sbp, xlhs->szu.xclen);
      }
     else __wdel_schd_allofwire(np, ap, bp, xlhs->szu.xclen);
    }
   if (nd_itpop) __pop_itstk();
   break;
  case LSB:
   /* single bit select lhs */
   np = xlhs->lu.x->lu.sy->el.enp;
   schedassign_to_bit(np, xlhs->lu.x, xlhs->ru.x, ap, bp);
   break;
  case PARTSEL:
   schedassign_to_psel(xlhs, ap, bp);
   break;
  default: __case_terr(__FILE__, __LINE__);
 }
}
2232
2233 /*
2234 * convert conta where lhs is strength wire to strength form
2235 * trick here is that conta always eats strength and maybe generates its own
2236 */
extern void __rem_stren(word32 *ap, word32 *bp, byte *sbp, int32 blen)
{
 register int32 bpos, sbi, wrdi;
 int32 nwrds, wbits;
 word32 aacum, bacum;

 nwrds = wlen_(blen);
 /* top word holds ubits_ bits unless width is an exact word multiple */
 wbits = ubits_(blen);
 if (wbits == 0) wbits = WBITS;
 /* consume strength bytes from high bit down, filling words high to low */
 sbi = blen - 1;
 for (wrdi = nwrds - 1; wrdi >= 0; wrdi--)
  {
   aacum = 0L;
   bacum = 0L;
   for (bpos = wbits - 1; bpos >= 0; bpos--, sbi--)
    {
     /* low 2 bits of each strength byte are the a/b value bit pair */
     aacum |= ((sbp[sbi] & 1L) << bpos);
     bacum |= (((sbp[sbi] & 2L) >> 1) << bpos);
    }
   ap[wrdi] = aacum;
   bp[wrdi] = bacum;
   /* every word below the top word is full width */
   wbits = WBITS;
  }
}
2258
2259 /*
2260 * INTERMEDIATE WIRE SCHEDULING ROUTINES
2261 */
2262
2263 /*
2264 * schedule an entire non strength wire with delay
2265 * this always requires z extension
2266 *
2267 * blen here is real not 4x too big
2268 */
extern void __pth_schd_allofwire(struct net_t *np, register word32 *ap,
 register word32 *bp, int32 blen)
{
 register int32 bi;
 register word32 aval, bval;
 word32 av, bv;

 /* scalar: pack new/old values as 2 bit a|b<<1 codes for the one bit */
 if (!np->n_isavec)
  {
   aval = ap[0] | (bp[0] << 1);
   /* must load wire value here in case packed */
   __ld_wire_val(&av, &bv, np);
   av |= (bv << 1);
   /* must pass index of 0, since only bit is 0th here */
   schd_1pthwirebit(np, 0, aval, av);
   return;
  }

 /* case 1: same or truncate */
 /* loop bounded by wire width np->nwid not blen - wire only has nwid */
 /* bits; old bound of blen indexed past the wire when rhs was wider */
 /* (matches bound used in __pth_stren_schd_allofwire) */
 if (blen >= np->nwid)
  {
   for (bi = 0; bi < np->nwid; bi++)
    {
     aval = rhsbsel_(ap, bi);
     bval = rhsbsel_(bp, bi);
     aval |= (bval << 1);
     __ld_bit(&av, &bv, np, bi);
     schd_1pthwirebit(np, bi, aval, (av | (bv << 1)));
    }
   return;
  }
 /* case 2: widen rhs - bits beyond rhs width schedule to z (code 2) */
 for (bi = 0; bi < blen; bi++)
  {
   aval = rhsbsel_(ap, bi);
   bval = rhsbsel_(bp, bi);
   aval |= (bval << 1);
   __ld_bit(&av, &bv, np, bi);
   schd_1pthwirebit(np, bi, aval, (av | (bv << 1)));
  }
 for (bi = blen; bi < np->nwid; bi++)
  {
   __ld_bit(&av, &bv, np, bi);
   schd_1pthwirebit(np, bi, (word32) 2, (av | (bv << 1)));
  }
}
2315
2316 /*
2317 * schedule an entire non strength delay with wire
2318 * this always requires z extension
2319 * ap/bp is new value to assign
2320 *
2321 * blen here is real not 4x too big
2322 */
extern void __wdel_schd_allofwire(struct net_t *np, register word32 *ap,
 register word32 *bp, int32 blen)
{
 register int32 bi;
 register word32 aval, bval;
 word32 av, bv;

 /* scalar: pack new/old values as 2 bit a|b<<1 codes for the one bit */
 if (!np->n_isavec)
  {
   aval = ap[0] | (bp[0] << 1);
   /* must load wire value here in case packed */
   __ld_wire_val(&av, &bv, np);
   av |= (bv << 1);
   /* must pass index of 0, since only bit is 0th here */
   __wdel_schd_1wirebit(np, 0, aval, av, FALSE);
   return;
  }

 /* case 1: same or truncate */
 /* loop bounded by wire width np->nwid not blen - wire only has nwid */
 /* bits; old bound of blen indexed past the wire when rhs was wider */
 /* (matches bound used in __wdel_stren_schd_allofwire) */
 if (blen >= np->nwid)
  {
   for (bi = 0; bi < np->nwid; bi++)
    {
     aval = rhsbsel_(ap, bi);
     bval = rhsbsel_(bp, bi);
     aval |= (bval << 1);
     __ld_bit(&av, &bv, np, bi);
     __wdel_schd_1wirebit(np, bi, aval, (av | (bv << 1)), FALSE);
    }
   return;
  }
 /* case 2: widen rhs - bits beyond rhs width schedule to z (code 2) */
 for (bi = 0; bi < blen; bi++)
  {
   aval = rhsbsel_(ap, bi);
   bval = rhsbsel_(bp, bi);
   aval |= (bval << 1);
   __ld_bit(&av, &bv, np, bi);
   __wdel_schd_1wirebit(np, bi, aval, (av | (bv << 1)), FALSE);
  }
 for (bi = blen; bi < np->nwid; bi++)
  {
   __ld_bit(&av, &bv, np, bi);
   __wdel_schd_1wirebit(np, bi, (word32) 2, (av | (bv << 1)), FALSE);
  }
}
2369
/* table (copied in other places) to convert from cap to 6 bit stren */
/* 0 is impossible any error caught before here */
/* indexed by net's n_capsiz field - NOTE(review): presumably 1/2/3 map */
/* to small/medium/large trireg charge sizes - confirm against decl code */
word32 __cap_to_stren[] = { 0, 0x24, 0x48, 0x90 };
2373
2374 /*
2375 * schedule an entire strength path delay wire
2376 * this always requires z extension
2377 *
2378 * blen here is real not 4x too big
2379 * this handles change from z to previous value plus cap. size
2380 * never see trireg here since cannot be path destination
2381 */
extern void __pth_stren_schd_allofwire(struct net_t *np, register byte *sbp,
 int32 sblen)
{
 register int32 bidx;
 register byte *owsbp;
 int32 ncopy;

 /* DBG remove --- */
 if (np->ntyp == N_TRIREG) __misc_terr(__FILE__, __LINE__);
 /* --- */

 /* old value comes from the wire's current strength byte storage */
 get_stwire_addr_(owsbp, np);
 if (!np->n_isavec)
  {
   schd_1pthwirebit(np, 0, (word32) sbp[0], (word32) owsbp[0]);
   return;
  }

 /* schedule from new value up to the narrower of rhs and wire width */
 ncopy = (sblen >= np->nwid) ? np->nwid : sblen;
 for (bidx = 0; bidx < ncopy; bidx++)
  schd_1pthwirebit(np, bidx, (word32) sbp[bidx], (word32) owsbp[bidx]);
 /* any remaining high wire bits widen to z (strength value 2) */
 for (bidx = ncopy; bidx < np->nwid; bidx++)
  schd_1pthwirebit(np, bidx, (word32) 2, (word32) owsbp[bidx]);
}
2413
2414 /*
2415 * schedule an entire strength wire with delay
2416 * this always requires z extension
2417 *
2418 * blen here is real not 4x too big
2419 * this handles change from z to previous value plus cap. size
2420 *
2421 * this is only place need to schedule trireg decay since trireg always fi>1
2422 * non fi>1 lhs select assigns only for non fi>1
2423 */
extern void __wdel_stren_schd_allofwire(struct net_t *np, register byte *sbp,
 int32 sblen)
{
 register int32 bi;
 register byte *sbp2;

 /* get strength wire address */
 get_stwire_addr_(sbp2, np);

 /* trireg: a to-z driver change becomes an immediate cap. strength */
 /* store plus a scheduled charge decay event */
 if (np->ntyp == N_TRIREG)
  {
   int32 tr_decay;
   byte ntrival;
   word64 ndel;

   /* DBG remove ---
   if (np->nwid != sblen) __misc_terr(__FILE__, __LINE__);
   --- */
   for (bi = 0; bi < np->nwid; bi++)
    {
     if (sbp[bi] == ST_HIZ)
      {
       /* immediately assign the cap. size strength (same old value) */
       ntrival = (sbp2[bi] & 3) | __cap_to_stren[np->n_capsiz];
       if (ntrival != sbp2[bi])
        {
         sbp2[bi] = ntrival;
         /* since otherwise scheduling must indicate immed. net changed */
         /* also schedule decay */
         record_sel_nchg_(np, bi, bi);
        }
       /* schedule decay to cap. size x value change if z delay not 0 */
       /* trireg charge decay time is third to-z delay - 0 means never */
       /* decays to z */
       /* globals must be set before __get_del can compute delay */
       __new_gateval = 2;
       __old_gateval = sbp2[bi];
       /* index in get_del removes any indexing */
       __get_del(&ndel, np->nu.rngdwir->n_du, np->nu.rngdwir->n_delrep);
       if (ndel == 0ULL) continue;
       /* decay target is x at the cap. strength */
       sbp[bi] = 3 | __cap_to_stren[np->n_capsiz];
       tr_decay = TRUE;
      }
     /* use normal to 0, 1, or x delay if not all z drivers of bit */
     else tr_decay = FALSE;
     __wdel_schd_1wirebit(np, bi, (word32) sbp[bi], (word32) sbp2[bi], tr_decay);
    }
   return;
  }

 /* scalar: only one bit 0 to schedule */
 if (!np->n_isavec)
  {
   __wdel_schd_1wirebit(np, 0, (word32) sbp[0], (word32) sbp2[0], FALSE);
   return;
  }

 /* case 1: same or truncate */
 if (sblen >= np->nwid)
  {
   for (bi = 0; bi < np->nwid; bi++)
    __wdel_schd_1wirebit(np, bi, (word32) sbp[bi], (word32) sbp2[bi], FALSE);
   return;
  }
 /* case 2: widen rhs - high bits beyond rhs width schedule to z (2) */
 for (bi = 0; bi < sblen; bi++)
  __wdel_schd_1wirebit(np, bi, (word32) sbp[bi], (word32) sbp2[bi], FALSE);
 for (bi = sblen; bi < np->nwid; bi++)
  __wdel_schd_1wirebit(np, bi, (word32) 2, (word32) sbp2[bi], FALSE);
}
2492
2493 /*
2494 * ROUTINES TO SCHEDULE AND PROCESS WIRE EVENTS - ALWAYS BIT BY BIT
2495 */
2496
2497 /*
2498 * schedule a 1 bit wire change - know wire has delay to get here
2499 *
2500 * no spike analysis because even though inertial modeling here
2501 * wires do not switch
2502 *
2503 * this works for both strength 8 bit nval and oval and non strength
2504 * if scalar biti must be 0 (i.e. biti can not be -1)
2505 * nval is new value to schedule change to, old value is current wire value
2506 *
2507 * for trireg - if nval is weak previous, ndselval is 2 and nval for
2508 * all z drivers is built cap. stren plus current value
2509 */
extern void __wdel_schd_1wirebit(register struct net_t *np, register int32 biti,
 register word32 nval, register word32 oval, int32 tr_decay)
{
 word64 ndel, schtim;
 i_tev_ndx tevpi, *itevpi;
 struct tev_t *tevp;
 struct rngdwir_t *dwirp;

 /* tracing variant emits messages but performs the same scheduling */
 if (__ev_tracing)
  { evtr_wdel_schd_1wirebit(np, biti, nval, oval, tr_decay); return; }

 /* per bit pending event table for this instance */
 dwirp = np->nu.rngdwir;
 itevpi = &(dwirp->wschd_pbtevs[np->nwid*__inum]);
 tevpi = itevpi[biti];

 /* since always use last changed value, if last same as current */
 /* because gate style spike nothing to do since already right value */
 if (tevpi == -1 && nval == oval) return;

 /* get delay and if path delay immediate (distributed longer) store */
 /* globals must be set for get del routine */
 __old_gateval = oval;
 /* notice new gateval is not value set but to-z needed for get del */
 __new_gateval = (tr_decay) ? 2 : nval;

 /* normal wire delay */
 /* notice old and new gate values always set before here */
 __get_del(&ndel, dwirp->n_du, dwirp->n_delrep);
 schtim = __simtime + ndel;

 if (tevpi == -1)
  {
   /* if nothing pending can just schedule */
   __schedule_1wev(np, biti, TE_WIRE, ndel, schtim, nval, itevpi, tr_decay);
   return;
  }
 tevp = &(__tevtab[tevpi]);
 /* DBG remove -- */
 if (tevp->tetyp != TE_WIRE) __misc_terr(__FILE__, __LINE__);
 /* --- */

 /* there is a pending unmatured event */
 /* case 1: real pulse (aka spike or glitch) just cancel */
 if (nval == oval)
  {
   /* cancel */
   __cancel_1wev(tevp);
   itevpi[biti] = -1;
  }
 /* this handles cancel too */
 else __reschedule_1wev(tevpi, nval, ndel, schtim, itevpi);
}
2562
2563 /*
2564 * trace version sched 1 bit wire change - know wire has delay to get here
2565 *
2566 * no spike analysis but normal inertial rescheduling
2567 * this works for both strength 8 bit nval and oval and non strength
2568 *
2569 * for trireg - if nval is weak previous, ndselval is 2 and nval for
2570 * all z drivers is built cap. stren plus current value
2571 */
static void evtr_wdel_schd_1wirebit(register struct net_t *np,
 register int32 biti, register word32 nval, register word32 oval, int32 tr_decay)
{
 word32 is_stren;
 i_tev_ndx tevpi, *itevpi;
 word64 ndel, schtim;
 struct tev_t *tevp;
 struct rngdwir_t *dwirp;
 char s1[RECLEN], vs1[10], vs2[10], vs3[10];

 /* per bit pending event table for this instance */
 dwirp = np->nu.rngdwir;
 itevpi = &(dwirp->wschd_pbtevs[np->nwid*__inum]);
 tevpi = itevpi[biti];
 is_stren = np->n_stren;

 /* if no change and do not need schedule time for cancel, done */
 __tr_msg("-- delayed wire %s changed:\n",
  __to_evtrwnam(__xs, np, biti, biti, __inst_ptr));

 /* since always use last changed value, if last same as current */
 /* because gate style spike nothing to do since already right value */
 if (tevpi == -1 && nval == oval)
  {
   __tr_msg("   NOPEND, NOCHG <OV=%s>\n", __to_vnam(vs1, is_stren, nval));
   return;
  }

 /* get delay and if path delay immediate (distributed longer) store */
 /* these globals must be set for get del routine */
 /* for trireg decay the to-z (code 2) transition selects the delay */
 __new_gateval = (tr_decay) ? 2 : nval;
 __old_gateval = oval;
 /* normal wire delay */
 /* notice old and new gate values always set before here */
 __get_del(&ndel, dwirp->n_du, dwirp->n_delrep);
 schtim = __simtime + ndel;

 if (tevpi == -1)
  {
   __tr_msg("   SCHD AT %s <OV=%s, NSV=%s>\n",
    __to_timstr(s1, &schtim), __to_vnam(vs1, is_stren, oval),
    __to_vnam(vs2, is_stren, nval));
   /* if nothing pending can just schedule */
   __schedule_1wev(np, biti, TE_WIRE, ndel, schtim, nval, itevpi,
    tr_decay);
   return;
  }

 tevp = &(__tevtab[tevpi]);
 /* DBG remove -- */
 if (tevp->tetyp != TE_WIRE) __misc_terr(__FILE__, __LINE__);
 /* --- */

 /* pending event - no spike analysis but inertial reschedule */
 /* current driving and schedule same causes scheduled to be removed since */
 /* output value correct */
 if (nval == oval)
  {
   __tr_msg("   PENDING EVENT, SAME <OV=NSV=%s, OSV=%s AT %s CANCEL>\n",
    __to_vnam(vs1, is_stren, nval), __to_vnam(vs2, is_stren, oval),
    __to_timstr(s1, &schtim));

   /* cancel */
   __cancel_1wev(tevp);
   itevpi[biti] = -1;
  }
 else
  {
   /* different value pending - inertial: replacement not second event */
   __tr_msg(
    "   PENDING EVENT, RESCHD <OV=%s, OSV=%s AT %s, NSV=%s AT %s REPLACES>\n",
    __to_vnam(vs1, is_stren, oval), __to_vnam(vs2, is_stren,
    (word32) tevp->outv), __to_timstr(s1, &(tevp->etime)), __to_vnam(vs3,
    is_stren, nval), __to_timstr(__xs, &schtim));

   __reschedule_1wev(tevpi, nval, ndel, schtim, itevpi);
  }
}
2648
2649 /*
2650 * schedule a 1 bit path delay
2651 *
2652 * implements show cancel e analysis including non path distributed delay
2653 *
2654 * this works for both strength 8 bit nval and oval and non strength
2655 * if scalar biti must be 0 (i.e. biti can not be -1)
2656 * nval is new value to schedule change to, old value is current wire value
2657 */
static void schd_1pthwirebit(register struct net_t *np, register int32 biti,
 register word32 nval, register word32 oval)
{
 word32 is_stren;
 word64 schtim;
 i_tev_ndx tevpi, *itevpi;
 struct tev_t *tevp;
 struct rngdwir_t *dwirp;
 struct pthdst_t *pdp;

 /* trace version implements the same algorithm plus trace messages */
 if (__ev_tracing)
  {
   evtr_schd_1pthwirebit(np, biti, nval, oval);
   return;
  }
 dwirp = np->nu.rngdwir;
 /* per inst. table of pending wire change events - one slot per bit */
 itevpi = &(dwirp->wschd_pbtevs[np->nwid*__inum]);
 tevpi = itevpi[biti];

 /* since always use last changed value, if last same as current */
 /* because gate style glitch nothing to do since already right value */
 if (tevpi == -1 && nval == oval)
  return;

 /* DBG remove --
 if (tevpi != -1 && __tevtab[tevpi].tetyp != TE_WIRE)
  __misc_terr(__FILE__, __LINE__);
 --- */

 /* these globals must be set for get del routine */
 is_stren = (word32) np->n_stren;
 __new_gateval = nval;
 __old_gateval = oval;
 /* possible for some bits to not be path destinations - just immed assign */
 /* SJM 11/24/00 - if values same won't find path or delay since need */
 /* transition */
 if (nval != oval)
  {
   if ((pdp = __get_path_del(dwirp, biti, &schtim)) == NULL)
    {
     /* bit is not a path destination - assign immediately */
     if (is_stren) __chg_st_bit(np, biti, nval, 0L);
     else __chg_st_bit(np, biti, nval & 1L, (nval >> 1) & 1L);
     return;
    }
  }
 else { pdp = NULL; schtim = 0ULL; }

 /* special case 0 - distributed delay longer - immediate assign */
 /* normal cause is path (probably from multiple input final driving gate) */
 /* that has not path delay on it - this may be ok */
 if (pdp != NULL && schtim <= __simtime)
  {
   /* problem with modeling - distributed delay longer than path */
   if (!__no_informs) __emit_path_distinform(np, pdp, &__pdmindel);

   /* modeling anomaly style spike possible - know immed. assign earlier */
   if (tevpi != -1)
    {
     tevp = &(__tevtab[tevpi]);

     if (__warn_cancel_e && !__no_warns && !__em_suppr(592))
      __emit_path_pulsewarn(pdp, tevp, &(tevp->etime), NULL,
       "distributed longer or path destination driver unstable",
       is_stren);
     /* always cancel pending */
     __cancel_1wev(tevp);
     itevpi[biti] = -1;

     /* this is same for on detect and on event */
     if (__show_cancel_e)
      {
       /* this is special case where immediate assign must be to x */
       /* and cancel future event that can be scheduled for now */
       /* label also target of spike/unstable cases below */
set_on_detect_x:
       if (is_stren) __chg_st_bit(np, biti, (word32) ST_STRONGX, 0L);
       else __chg_st_bit(np, biti, 1L, 1L);
       return;
      }
     /* if no show canceled e, just assign */
    }

   if (is_stren) __chg_st_bit(np, biti, nval, 0L);
   else __chg_st_bit(np, biti, nval & 1L, (nval >> 1) & 1L);
   return;
  }

 /* no pending event - know nval not = oval or will not get here */
 if (tevpi == -1)
  {
   /* because no pending event must be different */
   __schedule_1wev(np, biti, TE_WIRE, __pdmindel, schtim, nval, itevpi,
    FALSE);
   return;
  }

 /* pending event */
 /* new and old same but know scheduled different - classic pulse/glitch */
 tevp = &(__tevtab[tevpi]);
 if (nval == oval)
  {
   /* spike analysis, know scheduled different - tell user */
   /* this is classical spike analysis */
   /* do not have delay to use to select path */
   if (__warn_cancel_e && !__no_warns && !__em_suppr(592))
    {
     __emit_path_samewarn(np, biti, tevp, &(tevp->etime), "pulse", is_stren);
    }

   /* if spike, suppress future but schedule to x at currently scheduled */
   if (__show_cancel_e)
    {
     /* on event: keep the pending event but change its value to x */
     if (__showe_onevent) { tevp->outv = (is_stren) ? ST_STRONGX : 3; return; }
     /* on detect: cancel pending and assign x right now */
     __cancel_1wev(tevp);
     itevpi[biti] = -1;
     goto set_on_detect_x;
    }
   /* remove pulse */
   __cancel_1wev(tevp);
   itevpi[biti] = -1;
   return;
  }
 /* SJM 11/24/00 - now know has pdp delay since old and new not same */

 /* new schedule to same value case */
 /* here delay can be different because different path selected */
 /* and maybe other reasons */
 /* done silently here - trace message only below */
 if (tevp->outv == (byte) nval) return;

 /* inertial reschedule */
 if (__warn_cancel_e && !__no_warns && !__em_suppr(592))
  __emit_path_pulsewarn(pdp, tevp, &(tevp->etime), &schtim, "unstable",
   is_stren);

 /* easy show cancel (set to x case) - no new event may or may not switch */
 if (__show_cancel_e)
  {
   /* LOOKATME - maybe need to check old tevp and new schd time and if 2nd */
   /* input change results in earlier edge cancel and schedule earlier */
   if (__showe_onevent)
    { tevp->outv = (is_stren) ? ST_STRONGX : 3; return; }

   /* remove pulse */
   __cancel_1wev(tevp);
   itevpi[biti] = -1;
   goto set_on_detect_x;
  }
 /* inertial reschedule, this handles cancel if needed */
 __reschedule_1wev(tevpi, nval, __pdmindel, schtim, itevpi);
}
2808
2809 /*
2810 * trace version - schedule a 1 bit path delay
2811 *
2812 * schedule a 1 bit path delay change
2813 *
2814 * show cancel e analysis including non path distributed delay
2815 *
2816 * this works for both strength 8 bit nval and oval and non strength
2817 * if scalar biti must be 0 (i.e. biti can not be -1)
2818 * nval is new value to schedule change to, old value is current wire value
2819 */
static void evtr_schd_1pthwirebit(register struct net_t *np, register int32 biti,
 register word32 nval, register word32 oval)
{
 word32 is_stren;
 word32 outval;
 word64 schtim, distdel, tevptim;
 i_tev_ndx tevpi, *itevpi;
 struct tev_t *tevp;
 struct rngdwir_t *dwirp;
 struct pthdst_t *pdp;
 struct spcpth_t *pthp;
 char s1[RECLEN], s2[RECLEN], vs1[10], vs2[10], vs3[10];

 is_stren = np->n_stren;
 dwirp = np->nu.rngdwir;
 /* per inst. table of pending wire change events - one slot per bit */
 itevpi = &(dwirp->wschd_pbtevs[np->nwid*__inum]);
 tevpi = itevpi[biti];

 if (tevpi != -1)
  {
   /* DBG remove -- */
   if (__tevtab[tevpi].tetyp != TE_WIRE) __misc_terr(__FILE__, __LINE__);
   /* --- */
   strcpy(s1, " (pending event)");
  }
 else strcpy(s1, "");

 /* if no change and do not need schedule time for cancel, done */
 __tr_msg("-- path delay destination %s driver change%s now %s:\n",
  __to_evtrwnam(__xs, np, biti, biti, __inst_ptr), s1,
  __to_timstr(__xs2, &__simtime));

 /* since always use last changed value, if last same as current */
 /* because gate style glitch nothing to do since already right value */
 if (tevpi == -1 && nval == oval)
  {
   __tr_msg(" PATHDEL, NOCHG <OV=%s> now %s\n",
    __to_vnam(vs1, is_stren, nval), __to_timstr(__xs, &__simtime));
   return;
  }

 /* these globals must be set for get del routine */
 __new_gateval = nval;
 __old_gateval = oval;

 if (nval != oval)
  {
   /* possible for some bits to not be path destinations - just immed assign */
   if ((pdp = __get_path_del(dwirp, biti, &schtim)) == NULL)
    {
     __tr_msg(" BIT %d NOT PATH DEST: IMMED ASSIGN <OV=%s, NV=%s>\n",
      biti, __to_vnam(vs1, is_stren, oval), __to_vnam(vs2, is_stren, nval));

     if (is_stren) __chg_st_bit(np, biti, nval, 0L);
     else __chg_st_bit(np, biti, nval & 1L, (nval >> 1) & 1L);
     return;
    }
   pthp = pdp->pstchgp->chgu.chgpthp;
   __tr_msg(" PATH (at line %s) SRC CHG TIME %s\n",
    __bld_lineloc(s1, pthp->pthsym->syfnam_ind, pthp->pthsym->sylin_cnt),
    __to_timstr(__xs, &__pdlatechgtim));
  }
 else { pdp = NULL; schtim = 0ULL; }


 /* special case 0 - distributed delay longer - immediate assign */
 if (pdp != NULL && schtim <= __simtime)
  {
   /* problem with modeling - distributed delay longer than path */
   /* or changed path has no path delay */
   if (!__no_informs) __emit_path_distinform(np, pdp, &__pdmindel);

   /* modeling anomaly style spike possible - know immed. assign earlier */
   if (tevpi != -1)
    {
     tevp = &(__tevtab[tevpi]);
     if (__warn_cancel_e && !__no_warns && !__em_suppr(592))
      __emit_path_pulsewarn(pdp, tevp, &(tevp->etime), NULL,
       "distributed longer or path destination driver unstable",
       is_stren);

     /* save scheduled value and time for messages before canceling */
     outval = (word32) tevp->outv;
     tevptim = tevp->etime;
     /* always cancel pending */
     __cancel_1wev(tevp);
     itevpi[biti] = -1;

     /* this is same for on detect and on event since immed. assign */
     if (__show_cancel_e)
      {
       /* this is special case where immediate assign must be to x */
       /* and cancel future event that can be scheduled for now */
       __tr_msg(
        " PATH, DIST DELAY PULSE <OV=%s, OSV=%s at %s NV=%s SHOWING X FROM NOW>\n",
        __to_vnam(vs1, is_stren, oval), __to_vnam(vs2, is_stren, outval),
        __to_timstr(s1, &tevptim), __to_vnam(vs3, is_stren, nval));

       /* label also target of spike/unstable cases below */
set_on_detect_x:
       if (is_stren) __chg_st_bit(np, biti, (word32) ST_STRONGX, 0L);
       else __chg_st_bit(np, biti, 1L, 1L);
       return;
      }
     __tr_msg(
      " PATH, DIST DELAY PULSE <OV=%s, OSV=%s at %s - NV=%s ASSIGN AND CANCEL>\n",
      __to_vnam(vs1, is_stren, oval), __to_vnam(vs2, is_stren, outval),
      __to_timstr(s1, &tevptim), __to_vnam(vs3, is_stren, nval));
     if (is_stren) __chg_st_bit(np, biti, nval, 0L);
     else __chg_st_bit(np, biti, nval & 1L, (nval >> 1) & 1L);
     return;
    }
   /* no pending event store - know must be different */
   distdel = __simtime - __pdlatechgtim;
   __tr_msg(
    " DIST DELAY %s LONGER THAN PATH %s: IMMED ASSIGN <OV=%s, NV=%s>\n",
    __to_timstr(__xs2, &distdel), __to_timstr(s1, &__pdmindel),
    __to_vnam(vs1, is_stren, oval), __to_vnam(vs2, is_stren, nval));
   if (is_stren) __chg_st_bit(np, biti, nval, 0L);
   else __chg_st_bit(np, biti, nval & 1L, (nval >> 1) & 1L);
   return;
  }

 /* real path delay */
 /* case 1: no pending event - know have different new value */
 if (tevpi == -1)
  {
   /* because no pending event must be different */
   __tr_msg(" PATH DEL, SCHD AT %s <OV=%s, NSV=%s>\n",
    __to_timstr(s1, &schtim), __to_vnam(vs1, is_stren, oval),
    __to_vnam(vs2, is_stren, nval));
   __schedule_1wev(np, biti, TE_WIRE, __pdmindel, schtim, nval,
    itevpi, FALSE);
   return;
  }

 /* pending event */
 tevp = &(__tevtab[tevpi]);
 /* new and old same but know scheduled different - classic pulse/glitch */
 if (nval == oval)
  {
   /* show cancel e analysis, know scheduled different - tell user */
   /* this is classical spike analysis */
   if (__warn_cancel_e && !__no_warns && !__em_suppr(592))
    {
     __emit_path_samewarn(np, biti, tevp, &(tevp->etime), "pulse", is_stren);
    }

   /* if spike, suppress future but schedule to x at currently scheduled */
   if (__show_cancel_e)
    {
     if (__showe_onevent) sprintf(s1, "%s (on event)", __to_timstr(__xs,
      &(tevp->etime)));
     else sprintf(s1, "%s (on detect)", __to_timstr(__xs, &__simtime));

     /* LOOKATME - think on event pulse should use schedule if earlier? */
     __tr_msg(
      " PATH DEL, PEND AT %s, PULSE <OV=NSV=%s, OSV=%s SHOWING X FROM %s>\n",
      __to_timstr(__xs, &(tevp->etime)), __to_vnam(vs1, is_stren, oval),
      __to_vnam(vs2, is_stren, (word32) tevp->outv), s1);

     /* on event: keep pending event but change its value to x */
     if (__showe_onevent)
      { tevp->outv = (is_stren) ? ST_STRONGX : 3; return; }

     /* cancel pending */
     __cancel_1wev(tevp);
     itevpi[biti] = -1;
     goto set_on_detect_x;
    }
   /* remove pulse */
   __tr_msg(" PATH DEL, PEND, PULSE, INERTIAL CANCEL AT %s <OV=%s, OSV=%s>\n",
    __to_timstr(s1, &(tevp->etime)), __to_vnam(vs1, is_stren, oval),
    __to_vnam(vs2, is_stren, (word32) tevp->outv));
   /* no spike, but newly scheduled to same so no event - cancel */
   __cancel_1wev(tevp);
   itevpi[biti] = -1;
   return;
  }

 /* new schedule to same value case - know have pdp and delay */
 /* know that delay same and later so just discard new event */
 /* done silently here - trace message only */
 if (tevp->outv == (byte) __new_gateval)
  {
   __tr_msg(
    " PATH DEL, MODEL ANOMALLY IGNORE SCHED TO SAME <OSV=NSV=%s> OLD AT %s NEW %s\n",
    __to_vnam(vs1, is_stren, nval), __to_timstr(s1, &(tevp->etime)),
    __to_timstr(s2, &schtim));
   return;
  }

 /* inertial reschedule */
 if (__warn_cancel_e && !__no_warns && !__em_suppr(592))
  __emit_path_pulsewarn(pdp, tevp, &(tevp->etime), &schtim, "unstable",
   is_stren);

 /* easy show cancel (set to x case) - no new event may or may not switch */
 if (__show_cancel_e)
  {
   if (__showe_onevent) sprintf(s2, "%s (on event)", __to_timstr(__xs,
    &(tevp->etime)));
   else sprintf(s2, "%s (on detect)", __to_timstr(__xs, &__simtime));

   __tr_msg(
    " PATH DEL, PEND AT %s, UNSTABLE <OV=%s, OSV=%s, NSV=%s SHOWING X FROM %s>\n",
    __to_timstr(s1, &(tevp->etime)), __to_vnam(vs1, is_stren, oval),
    __to_vnam(vs2, is_stren, (word32) tevp->outv), __to_vnam(vs3, is_stren,
    nval), s2);
   /* on event: keep pending event but change its value to x */
   if (__showe_onevent)
    { tevp->outv = (is_stren) ? ST_STRONGX : 3; return; }

   __cancel_1wev(tevp);
   itevpi[biti] = -1;
   goto set_on_detect_x;
  }

 /* inertial reschedule, this handles cancel if needed */
 __tr_msg(
  " PATH DEL, PEND, UNSTABLE, INERTIAL RESCHD <OV=%s, OSV=%s AT %s, NSV=%s AT %s>\n",
  __to_vnam(vs1, is_stren, oval), __to_vnam(vs2, is_stren, (word32) tevp->outv),
  __to_timstr(s1, &(tevp->etime)), __to_vnam(vs3, is_stren, nval),
  __to_timstr(s2, &schtim));

 __reschedule_1wev(tevpi, nval, __pdmindel, schtim, itevpi);
}
3043
3044 /*
3045 * print a distributed delay longer than path warning
3046 * normally caused by path not having path delay which is maybe ok
3047 *
3048 * algorithm for path delays is: 1) record path source changes, 2) only when
3049 * destination changes (attempt to assign value to wire) schedule wire delay
3050 * at src change time plus path delay, 3) if dest. assign to wire after
3051 * time when path would have changed immediate assign with warning
3052 *
3053 * this is questionable inform because happens when source change
3054 * does not propagate to output
3055 */
extern void __emit_path_distinform(struct net_t *np, struct pthdst_t *pdp,
 word64 *pdmindel)
{
 word64 elapsed;
 word32 strenval;
 struct spcpth_t *srcpthp;
 char instnam[RECLEN], valnam[RECLEN];

 /* time elapsed since the last recorded path source change */
 elapsed = __simtime - __pdlatechgtim;
 srcpthp = pdp->pstchgp->chgu.chgpthp;
 strenval = (np->n_stren) ? TRUE : FALSE;

 __gfinform(470, srcpthp->pthsym->syfnam_ind, srcpthp->pthsym->sylin_cnt,
  "path (in %s) distributed delay %s longer than path %s (path without path delay? or destination driver unstable) - storing %s",
  __msg2_blditree(instnam, __inst_ptr), __to_timstr(__xs, &elapsed),
  __to_timstr(__xs2, pdmindel),
  __to_vnam(valnam, (unsigned) strenval, (word32) __new_gateval));
}
3072
3073 /*
3074 * emit path pulse warning if not turned off
3075 */
extern void __emit_path_pulsewarn(struct pthdst_t *pdp, struct tev_t *tevp,
 word64 *etim, word64 *newetim, char *sptnam, word32 is_stren)
{
 char *xnote;
 struct spcpth_t *srcpthp;
 char valmsg[RECLEN], edgemsg[RECLEN], instnam[RECLEN];
 char ovnam[10], osvnam[10], nvnam[10];

 /* when spike analysis on, note how the canceled edge will show as x */
 if (!__show_cancel_e) xnote = "";
 else xnote = (__showe_onevent) ? " - edge event to x" : " - now detect to x";

 sprintf(valmsg, "old %s, scheduled %s, new %s%s",
  __to_vnam(ovnam, is_stren, (word32) __old_gateval),
  __to_vnam(osvnam, is_stren, (word32) tevp->outv),
  __to_vnam(nvnam, is_stren, (word32) __new_gateval), xnote);

 /* newetim nil means edge removed, non nil means inertial replace */
 if (newetim != NULL)
  {
   sprintf(edgemsg, "(edge at %s replaced by new at %s)",
    __to_timstr(__xs, etim), __to_timstr(__xs2, newetim));
  }
 else sprintf(edgemsg, "(edge at %s removed)", __to_timstr(__xs2, etim));

 srcpthp = pdp->pstchgp->chgu.chgpthp;
 /* notice spike means new and old the same */
 __gfwarn(592, srcpthp->pthsym->syfnam_ind, srcpthp->pthsym->sylin_cnt,
  "path (in %s) %s %s - %s", __msg2_blditree(instnam, tevp->teitp), sptnam,
  edgemsg, valmsg);
}
3107
3108 /*
3109 * emit path pulse warning for same value unstable (no new path)
3110 */
extern void __emit_path_samewarn(struct net_t *np, int32 biti,
 struct tev_t *tevp, word64 *etim, char *sptnam, word32 is_stren)
{
 char *xnote;
 char valmsg[RECLEN], edgemsg[RECLEN], dstnam[RECLEN];
 char ovnam[10], osvnam[10], nvnam[10];

 /* when spike analysis on, note how the canceled edge will show as x */
 if (!__show_cancel_e) xnote = "";
 else xnote = (__showe_onevent) ? " - edge event to x" : " - now detect to x";

 sprintf(valmsg, "old %s, scheduled %s, new %s%s",
  __to_vnam(ovnam, is_stren, (word32) __old_gateval),
  __to_vnam(osvnam, is_stren, (word32) tevp->outv),
  __to_vnam(nvnam, is_stren, (word32) __new_gateval), xnote);

 sprintf(edgemsg, "(edge at %s removed)", __to_timstr(__xs2, etim));

 /* destination name includes bit select only for vectors */
 if (np->n_isavec)
  {
   sprintf(dstnam, "%s %s[%d]", __to_ptnam(__xs, np->iotyp),
    np->nsym->synam, biti);
  }
 else sprintf(dstnam, "%s %s", __to_ptnam(__xs, np->iotyp), np->nsym->synam);

 /* notice spike means new and old the same */
 __pv_warn(592, "path destination %s: %s %s - %s", dstnam, sptnam, edgemsg,
  valmsg);
}
3137
3138 /*
3139 * compute path delay and imputed schedule time (sets delay in global)
3140 *
3141 * caller determines action if immediate assign needed
3142 * return nil for bit not path dest. (other bits are), pschtim not set
3143 *
3144 * rules for multiple paths with this destination
3145 * 1) select latest change (inertial pattern - schedule with latest)
3146 * 2) if 2 sources changed at same time use shortest path - open path end
3147 * spigot as soon possible
3148 *
3149 * this assumes globals __new_gateval and __old_gateval have out transition
3150 * do not need delay because here know never pnd0
3151 * notice transition that selects delay here is output change
3152 *
3153 * this finds last change path even though maybe no input changed and
3154 * this is just path dest. change from non path delay cause
3155 *
3156 * LOOKATME - contrary to P1364 LRM, ifnone paths just like other paths
3157 * in selecting path delays to use. filtering for sdps done on input
3158 * and because input changes may not propagate to output, if ifnone selected
3159 * because latest change (and shortest delay if tie) must use - ifnone only
3160 * used to distinguish exact ties in last change and delay time
3161 */
extern struct pthdst_t *__get_path_del(struct rngdwir_t *dwirp, int32 biti,
 word64 *pschtim)
{
 register struct pthdst_t *pdp, *latepdp;
 register struct spcpth_t *latepthp, *newpthp;
 word64 chgtim, newdel;

 /* this bit may not be a path destination even when other bits are */
 if ((pdp = dwirp->n_du.pb_pthdst[biti]) == NULL) return(NULL);

 /* list of paths terminating on wire np is same for all insts */
 /* seed selection globals (__pdlatechgtim/__pdmindel) from first path */
 __pdlatechgtim = pdp->pstchgp->lastchg[__inum];
 latepdp = pdp;
 latepthp = latepdp->pstchgp->chgu.chgpthp;
 if (__pth_tracing || (__debug_flg && __ev_tracing))
  prt_dbgpthtrmsg(latepthp, __pdlatechgtim);
 __get_del(&__pdmindel, latepthp->pth_du, latepthp->pth_delrep);
 /* common only 1 path ends on net case */
 if ((pdp = pdp->pdnxt) == NULL)
  {
   *pschtim = __pdlatechgtim + __pdmindel;
   return(latepdp);
  }

 /* complicated case where more than one path end on this dest. wire np */
 for (;pdp != NULL; pdp = pdp->pdnxt)
  {
   /* get source change time */
   chgtim = pdp->pstchgp->lastchg[__inum];
   if (__pth_tracing || (__debug_flg && __ev_tracing))
    prt_dbgpthtrmsg(pdp->pstchgp->chgu.chgpthp, chgtim);

   /* case 1: change time earlier, always select latest */
   if (chgtim < __pdlatechgtim) continue;

   newpthp = pdp->pstchgp->chgu.chgpthp;
   __get_del(&newdel, newpthp->pth_du, newpthp->pth_delrep);

   /* if change times are the same, use the shortest path */
   if (chgtim == __pdlatechgtim)
    {
     /* ifnone path never wins an exact (change time and delay) tie */
     if (newpthp->pth_ifnone && newdel == __pdmindel) continue;

     /* newdel larger implies not shorter path, do not change */
     /* if same try to replace since for ties must replace ifnone */
     if (newdel > __pdmindel) continue;
    }
   /* this path's change time later(< handled above), just use */
   else __pdlatechgtim = chgtim;

   __pdmindel = newdel;
   latepdp = pdp;
   latepthp = newpthp;
  }
 /* schedule time is latest source change plus selected path delay */
 *pschtim = __pdlatechgtim + __pdmindel;
 return(latepdp);
}
3218
3219 /*
3220 * print a debugging or path trace message
3221 */
static void prt_dbgpthtrmsg(struct spcpth_t *newpthp, word64 chgtim)
{
 word64 pthdel;
 char delstr[RECLEN];

 /* compute this path's delay just for the trace message */
 __get_del(&pthdel, newpthp->pth_du, newpthp->pth_delrep);
 __tr_msg("## path (line %s) last change %s delay %s.\n",
  __bld_lineloc(__xs, newpthp->pthsym->syfnam_ind,
  newpthp->pthsym->sylin_cnt), __to_timstr(__xs2, &chgtim),
  __to_timstr(delstr, &pthdel));
}
3232
3233 /*
3234 * compute intermodule path delay and imputed schedule time
3235 * only called if at least one inter module interconnect path
3236 *
3237 * this assumes new and old gate vals glbs set
3238 * using same algorithm as used for normal specify path delays
3239 * each inst/bit for mipd src-dst delays different (outside inst struct)
3240 * LOOKATME - is this algorithm right for intra-module paths
3241 */
static void get_impth_del(word64 *pschtim, struct net_t *np, int32 bi,
 struct mipd_t *mipdp)
{
 register struct impth_t *pp;
 word64 srcchg, pdel;
 int32 tracing;

 tracing = (__pth_tracing || (__debug_flg && __ev_tracing));

 /* first path on list seeds the latest change/min delay globals */
 pp = mipdp->impthtab[__inum];
 __pdlatechgtim = pp->lastchg;
 /* know delay is non IS since src-dst delays stored outside inst tree */
 __get_del(&__pdmindel, pp->impth_du, pp->impth_delrep);
 if (tracing) { prt_dbgimpthtrmsg(np, bi, __pdlatechgtim, __pdmindel); }

 /* any further paths ending on this dest bit: keep latest source */
 /* change, breaking exact time ties with the shortest delay */
 for (pp = pp->impthnxt; pp != NULL; pp = pp->impthnxt)
  {
   srcchg = pp->lastchg;

   if (tracing)
    {
     /* know this will be non IS delay */
     __get_del(&pdel, pp->impth_du, pp->impth_delrep);
     prt_dbgimpthtrmsg(np, bi, srcchg, pdel);
    }

   /* earlier source change can never replace the latest */
   if (srcchg < __pdlatechgtim) continue;

   /* know this will be non IS delay */
   __get_del(&pdel, pp->impth_du, pp->impth_delrep);

   if (srcchg == __pdlatechgtim)
    {
     /* tie on change time - only shorter (or equal) delay replaces */
     if (pdel > __pdmindel) continue;
    }
   /* this path's change time is later - it wins outright */
   else __pdlatechgtim = srcchg;

   __pdmindel = pdel;
  }
 /* schedule time is latest source change plus selected delay */
 *pschtim = __pdlatechgtim + __pdmindel;
}
3296
3297 /*
3298 * print a debugging or path trace message
3299 * passed bi as 0 for scalar and corrects in here
3300 */
static void prt_dbgimpthtrmsg(struct net_t *np, int32 bi, word64 chgtim,
 word64 newdel)
{
 int32 trbi;
 char delstr[RECLEN];

 /* scalars trace as plain name - pass -1 as the bit index */
 trbi = (np->n_isavec) ? bi : -1;
 __tr_msg("## intermodule src-dest path end on %s last change %s delay %s.\n",
  __to_evtrwnam(__xs, np, trbi, trbi, __inst_ptr), __to_timstr(__xs2, &chgtim),
  __to_timstr(delstr, &newdel));
}
3312
3313 /*
3314 * schedule 1 wire event
3315 *
3316 * passed in np but may be mpp (no cast needed) user must access right union
3317 */
extern void __schedule_1wev(struct net_t *np, int32 bi, int32 etyp, word64 ndel,
 word64 schtim, word32 newval, i_tev_ndx *itevpi, int32 tr_decay)
{
 register i_tev_ndx newtevi;
 register struct tev_t *newtevp;
 register struct tenp_t *wirerec;

 alloc_tev_(newtevi, etyp, __inst_ptr, schtim);
 if (ndel != 0ULL) __insert_event(newtevi);
 else
  {
   /* zero delay (#0) still needs an event - append to the #0 queue */
   if (__p0_te_hdri == -1) __p0_te_hdri = __p0_te_endi = newtevi;
   else { __tevtab[__p0_te_endi].tenxti = newtevi; __p0_te_endi = newtevi; }
  }
 newtevp = &(__tevtab[newtevi]);
 newtevp->outv = (byte) newval;
 newtevp->te_trdecay = tr_decay;
 /* record as the pending event for this bit */
 itevpi[bi] = newtevi;

 /* attach the target wire (may really be mpp) and bit to the event */
 wirerec = (struct tenp_t *) __my_malloc(sizeof(struct tenp_t));
 newtevp->tu.tenp = wirerec;
 wirerec->tenu.np = np;
 wirerec->nbi = bi;
}
3343
3344 /*
3345 * take wire del event and new val and update if time same or cancel and
3346 * create new event if later
3347 */
extern void __reschedule_1wev(i_tev_ndx tevpi, word32 newval, word64 ndel,
 word64 newtim, i_tev_ndx *itevpi)
{
 struct tev_t *oldtevp;
 struct tenp_t *wirerec;

 oldtevp = &(__tevtab[tevpi]);
 /* #0 (pnd0) reschedule is always to the same time - just swap in the */
 /* new value and keep the pending event (itevpi slot stays right) */
 if (ndel == 0ULL)
  {
   __newval_rescheds++;
   oldtevp->outv = (byte) newval;
   return;
  }
 /* otherwise build a fresh event at the new time (replaces the pending */
 /* event in itevpi) and cancel the old one */
 wirerec = oldtevp->tu.tenp;
 __schedule_1wev(wirerec->tenu.np, wirerec->nbi, (int32) oldtevp->tetyp, ndel,
  newtim, newval, itevpi, (int32) oldtevp->te_trdecay);
 __cancel_1wev(oldtevp);
}
3369
3370 /*
3371 * cancel 1 wev - after process or really cancel to free storage
3372 * know itevp new event already adjusted
3373 *
3374 * this must be event not index since need old here
3375 */
__cancel_1wev(struct tev_t * tevp)3376 extern void __cancel_1wev(struct tev_t *tevp)
3377 {
3378 tevp->te_cancel = TRUE;
3379 __inertial_cancels++;
3380 /* DBG remove --- */
3381 if (tevp->tu.tenp == NULL) __misc_terr(__FILE__, __LINE__);
3382 /* --- */
3383 __my_free((char *) tevp->tu.tenp, sizeof(struct tenp_t));
3384 tevp->tu.tenp = NULL;
3385 }
3386
3387 /*
3388 * ROUTINES TO PROCESS MIPD LOAD SCHEDULING AND EV PROCESSING
3389 */
3390
3391 /*
3392 * schedule one simple MIPD delay nchg propagate event
3393 * this runs with itree instance location pushed
3394 *
3395 * works by stopping normal net change load propagation and scheduling it
3396 * after MIPD delay has elaspsed
3397 */
extern void __sched_mipd_nchg(struct net_t *np, int32 bi, struct mipd_t *mipdp)
{
 register i_tev_ndx tevpi;
 register word32 nval, oval;
 word32 nav, nbv;
 word64 ndel, schtim;
 struct tev_t *tevp;
 struct tenp_t *tenp;

 /* trace version implements the same algorithm plus trace messages */
 if (__ev_tracing)
  { evtr_sched_mipd_nchg(np, bi, mipdp); return; }

 /* load new wire value - need this call in case packed */
 if (!np->n_isavec)
  {
   /* BEWARE - this depends on all scalars stored as byte array */
   /* need to preserve strens for change check */
   nval = (word32) np->nva.bp[__inum];
  }
 else
  {
   if (np->srep == SR_SVEC)
    {
     /* BEWARE - this depends on stren vec stored as byte array */
     nval = (word32) np->nva.bp[__inum*np->nwid + bi];
    }
   else
    {
     /* packed non strength vector - build the 2 bit a/b bit value */
     __ld_bit(&nav, &nbv, np, bi);
     nval = (nav & 1) | ((nbv & 1) << 1);
    }
  }

 tevpi = mipdp->mipdschd_tevs[__inum];
 oval = mipdp->oldvals[__inum];

 /* if no pending event and this bit unchanged nothing to schedule */
 /* know at least one bit chged or will not get here but maybe not this one */
 if (tevpi == -1 && nval == oval) return;

 mipdp->oldvals[__inum] = nval;
 /* delay only uses logic not stren part of value */
 __new_gateval = nval & 3;
 __old_gateval = oval & 3;

 /* notice old and new gate values must be set before here */
 if (!mipdp->pth_mipd || mipdp->impthtab == NULL
  || mipdp->impthtab[__inum] == NULL)
  {
   /* non src-dst path delay for this simple MIPD case */
   __get_del(&ndel, mipdp->pb_mipd_du, mipdp->pb_mipd_delrep);
   schtim = __simtime + ndel;
  }
 else
  {
   /* use same algorithm as specify path delay algorithm to get last chged */
   get_impth_del(&schtim, np, bi, mipdp);
   ndel = schtim - __simtime;
  }

 if (tevpi == -1)
  {
   /* if nothing pending can just schedule */
   alloc_tev_(tevpi, TE_MIPD_NCHG, __inst_ptr, schtim);
   if (ndel == 0ULL)
    {
     /* this is #0, but must still build tev */
     if (__p0_te_hdri == -1) __p0_te_hdri = __p0_te_endi = tevpi;
     else { __tevtab[__p0_te_endi].tenxti = tevpi; __p0_te_endi = tevpi; }
    }
   else __insert_event(tevpi);
   mipdp->mipdschd_tevs[__inum] = tevpi;
   tevp = &(__tevtab[tevpi]);
   /* attach the target wire and bit to the event */
   tenp = (struct tenp_t *) __my_malloc(sizeof(struct tenp_t));
   tevp->tu.tenp = tenp;
   tenp->tenu.np = np;
   tenp->nbi = bi;
   return;
  }

 tevp = &(__tevtab[tevpi]);
 /* DBG remove --- */
 if (tevp->tetyp != TE_MIPD_NCHG)
  {
   char s1[RECLEN], s2[RECLEN];
   extern char *__to_tetyp(char *, word32);

   __dbg_msg("^^%s event index %d in inst. %s at %s cancel=%d\n",
    __to_tetyp(s1, tevp->tetyp), tevpi, __msg2_blditree(s2, tevp->teitp),
    __to_timstr(__xs, &__simtime), tevp->te_cancel);
   __misc_terr(__FILE__, __LINE__);
  }

 /* --- */

 /* pending event - no spike analysis but inertial reschedule */
 /* case 1a: pending event earlier than newly scheduled */
 if (tevp->etime <= schtim)
  {
   /* current driving and schedule same, new later inertial value just */
   /* causes scheduled to be removed since output at right value */
   if (nval == oval)
    {
     /* cancel */
     cancel_1mipdev(tevp);
     mipdp->mipdschd_tevs[__inum] = -1;
    }
   else
    {
     /* reschedule - cancel and sched new or replace if pound 0 */
     mipdp->mipdschd_tevs[__inum] = reschedule_1mipd(np, bi, tevpi, ndel,
      schtim);
    }
   /* fall through since next case does nothing */
  }
 /* case 1b: pending event later (rare modeling anomaly?) */
 /* since inertial just ignore new change */
}
3516
3517 /*
3518 * cancel 1 mipd ev - after process or really cancel to free storage
3519 * this must be event not index since need old here
3520 *
 * since a wire ev also uses the tenp malloced field this is the same as cancel 1 wev
3522 */
static void cancel_1mipdev(struct tev_t *tevp)
{
 /* a MIPD event carries the same malloced tenp record as a wire event, */
 /* so canceling is identical - delegate instead of duplicating the */
 /* mark-canceled/free logic (keeps the two cancel paths in sync) */
 __cancel_1wev(tevp);
}
3533
3534 /*
3535 * take wire del event and new val and update if time same or cancel and
3536 * create new event if later
3537 */
static i_tev_ndx reschedule_1mipd(struct net_t *np, int32 bi, i_tev_ndx tevpi,
 word64 ndel, word64 newtim)
{
 register struct tev_t *oldtevp, *newtevp;
 register struct tenp_t *wirerec;
 i_tev_ndx newtevi;

 /* NOTE(review): old event pointer taken before alloc_tev_ below - */
 /* assumes event table cannot move during alloc; confirm */
 oldtevp = &(__tevtab[tevpi]);
 /* #0 (pnd0) reschedule always lands on the same time - keep the */
 /* pending event as is */
 if (ndel == 0ULL)
  {
   __newval_rescheds++;
   return(tevpi);
  }

 /* build and insert the replacement event at the new time */
 alloc_tev_(newtevi, TE_MIPD_NCHG, __inst_ptr, newtim);
 __insert_event(newtevi);
 newtevp = &(__tevtab[newtevi]);
 wirerec = (struct tenp_t *) __my_malloc(sizeof(struct tenp_t));
 newtevp->tu.tenp = wirerec;
 wirerec->tenu.np = np;
 wirerec->nbi = bi;

 /* old event no longer needed - caller stores returned new index */
 cancel_1mipdev(oldtevp);
 return(newtevi);
}
3566
3567 /*
3568 * tracing version schedule one simple MIPD delay nchg propagate event
3569 *
3570 * no spike analysis but normal inertial rescheduling
3571 * this works for both strength 8 bit nval and oval and non strength
3572 */
static void evtr_sched_mipd_nchg(struct net_t *np, int32 bi,
 struct mipd_t *mipdp)
{
 register i_tev_ndx tevpi;
 register word32 nval, oval;
 word32 nav, nbv;
 word64 ndel, schtim;
 word32 is_stren;
 struct tev_t *tevp;
 struct tenp_t *tenp;
 char s1[RECLEN], vs1[10], vs2[10], vs3[10];

 is_stren = np->n_stren;
 /* tevpi is pending MIPD nchg event index for this inst., -1 if none */
 tevpi = mipdp->mipdschd_tevs[__inum];

 __tr_msg("-- scheduling MIPD for %s:\n",
  __to_evtrwnam(__xs, np, bi, bi, __inst_ptr));

 /* load new wire value - need this call in case packed */
 if (!np->n_isavec)
  {
   /* BEWARE - this depends on all scalars stored as byte array */
   /* need to preserve strens for change check */
   nval = (word32) np->nva.bp[__inum];
  }
 else
  {
   if (np->srep == SR_SVEC)
    {
     /* BEWARE - this depends on stren vec stored as byte array */
     nval = (word32) np->nva.bp[__inum*np->nwid + bi];
    }
   else
    {
     /* non strength vector - build 2 bit (b,a) value for bit bi */
     __ld_bit(&nav, &nbv, np, bi);
     nval = (nav & 1) | ((nbv & 1) << 1);
    }
  }
 oval = mipdp->oldvals[__inum];

 /* if no change and no pending event (nothing to cancel), done */
 /* since always use last changed value, if last same as current */
 /* because gate style spike nothing to do since already right value */
 if (tevpi == -1 && nval == oval)
  {
   __tr_msg(" NOPEND, NOCHG <OV=%s>\n", __to_vnam(vs1, is_stren, nval));
   return;
  }

 mipdp->oldvals[__inum] = nval;
 /* delay only uses logic not stren part of value */
 __new_gateval = nval & 3;
 __old_gateval = oval & 3;
 /* notice old and new gate values must be set before here */
 if (!mipdp->pth_mipd || mipdp->impthtab == NULL
  || mipdp->impthtab[__inum] == NULL)
  {
   /* non src-dst path delay for this simple MIPD case */
   __get_del(&ndel, mipdp->pb_mipd_du, mipdp->pb_mipd_delrep);
   schtim = __simtime + ndel;
  }
 else
  {
   /* use same algorithm as specify path delay algorithm to get last chged */
   get_impth_del(&schtim, np, bi, mipdp);
   ndel = schtim - __simtime;
  }

 if (tevpi == -1)
  {
   __tr_msg(" SCHD AT %s <OV=%s, NSV=%s>\n", __to_timstr(s1, &schtim),
    __to_vnam(vs1, is_stren, oval), __to_vnam(vs2, is_stren, nval));

   /* if nothing pending can just schedule */
   alloc_tev_(tevpi, TE_MIPD_NCHG, __inst_ptr, schtim);
   if (ndel == 0ULL)
    {
     /* this is #0, but must still build tev - append to pound 0 queue */
     if (__p0_te_hdri == -1) __p0_te_hdri = __p0_te_endi = tevpi;
     else { __tevtab[__p0_te_endi].tenxti = tevpi; __p0_te_endi = tevpi; }
    }
   else __insert_event(tevpi);

   mipdp->mipdschd_tevs[__inum] = tevpi;
   /* NOTE - tevp fetched after alloc_tev_ since alloc may move table */
   tevp = &(__tevtab[tevpi]);
   tenp = (struct tenp_t *) __my_malloc(sizeof(struct tenp_t));
   tevp->tu.tenp = tenp;
   tenp->tenu.np = np;
   tenp->nbi = bi;
   return;
  }

 tevp = &(__tevtab[tevpi]);
 /* DBG remove --- */
 if (tevp->tetyp != TE_MIPD_NCHG)
  {
   char s2[RECLEN];
   extern char *__to_tetyp(char *, word32);

   tevp = &(__tevtab[tevpi]);
   __dbg_msg("^^%s event index %d in inst. %s at %s cancel=%d\n",
    __to_tetyp(s1, tevp->tetyp), tevpi, __msg2_blditree(s2, tevp->teitp),
    __to_timstr(__xs, &__simtime), tevp->te_cancel);
   __misc_terr(__FILE__, __LINE__);
  }
 /* --- */

 /* pending event - no spike analysis but inertial reschedule */
 /* case 1a: pending event earlier than newly scheduled */
 if (tevp->etime <= schtim)
  {
   /* current driving and schedule same, new later inertial value just */
   /* causes scheduled to be removed since output at right value */
   if (nval == oval)
    {
     __tr_msg(" PENDING NCHG EVENT, SAME <NV==%s, OV=%s AT %s CANCEL>\n",
      __to_vnam(vs1, is_stren, nval), __to_vnam(vs2, is_stren, oval),
      __to_timstr(s1, &schtim));
     /* cancel */
     cancel_1mipdev(tevp);
     mipdp->mipdschd_tevs[__inum] = -1;
    }
   else
    {
     __tr_msg(
      " PENDING NCHG EVENT, RESCHD <OV=%s, AT %s, NV=%s AT %s REPLACES>\n",
      __to_vnam(vs1, is_stren, oval), __to_timstr(s1, &(tevp->etime)),
      __to_vnam(vs3, is_stren, nval), __to_timstr(__xs, &schtim));

     /* reschedule - cancel and sched new or replace if pound 0 */
     mipdp->mipdschd_tevs[__inum] = reschedule_1mipd(np, bi, tevpi, ndel,
      schtim);
    }
   /* although next case does nothing, must not emit the message */
   return;
  }
 /* case 1b: pending event later (rare modeling anomaly?) */
 /* since inertial just ignore new change */
 __tr_msg(
  " PENDING NCHG EVENT, NEW EARLY <OV=%s AT %s, INERTIAL IGNORE NV=%s AT %s>\n",
  __to_vnam(vs1, is_stren, oval), __to_timstr(s1, &(tevp->etime)),
  __to_vnam(vs3, is_stren, nval), __to_timstr(__xs, &schtim));
}
3716
3717 /*
3718 * VALUE STORE ROUTINES FOR ASSIGNS
3719 */
3720
3721 /*
3722 * store (copy) an entire value from rgap and rgbp into wp of length blen
3723 * from current instance
3724 * stored according to representation srep from from rgap and rgbp
3725 * but cannot be used to store strength values
3726 *
3727 * stored using representation srep
3728 * separate rgap and rgbp into code contigous wp since stack size changes
3729 * cause non contigous storage form
3730 * know rgap and rgbp exact np nwid width
3731 */
__st_val(struct net_t * np,register word32 * rgap,register word32 * rgbp)3732 extern void __st_val(struct net_t *np, register word32 *rgap,
3733 register word32 *rgbp)
3734 {
3735 register word32 *dwp;
3736 register int32 wlen;
3737 byte *newsbp;
3738
3739 switch ((byte) np->srep) {
3740 case SR_SCAL:
3741 st_scalval_(np->nva.bp, rgap[0], rgbp[0]);
3742 break;
3743 case SR_VEC:
3744 wlen = wlen_(np->nwid);
3745 dwp = &(np->nva.wp[2*wlen*__inum]);
3746 st_vecval(dwp, np->nwid, rgap, rgbp);
3747 break;
3748 case SR_SVEC:
3749 /* this must be strong */
3750 memcpy(&(np->nva.bp[np->nwid*__inum]), rgap, np->nwid);
3751 break;
3752 case SR_SSCAL:
3753 newsbp = (byte *) rgap;
3754 np->nva.bp[__inum] = *newsbp;
3755 break;
3756 default: __case_terr(__FILE__, __LINE__); return;
3757 }
3758 }
3759
3760 /*
3761 * store a per instance value into a word32 location
3762 * mostly for storing cont. assign driver values
3763 * bit width determines form
3764 *
3765 * caller must make sure source blen and vblen same
3766 */
extern void __st_perinst_val(union pck_u pckv, int32 vblen, register word32 *rgap,
 register word32 *rgbp)
{
 int32 nwlen;

 /* 1 bit case - know rgap/rgbp always scalar form here */
 if (vblen == 1)
  {
   st_scalval_(pckv.bp, rgap[0], rgbp[0]);
   return;
  }
 /* SJM - 07/15/00 - all vectors now not packed - min 2 words */
 nwlen = wlen_(vblen);
 st_vecval(&(pckv.wp[2*nwlen*__inum]), vblen, rgap, rgbp);
}
3780
3781 /*
3782 * store into value at dwp of length destination length from ap and bp of
3783 * srcblen
3784 *
3785 * any high bits of destination must be zeroed - not for lhs selects
3786 * know vectors always occupy number of words (no packing to less then word32)
3787 * routine called after array or instance decoding completed to get dwp
3788 */
static void st_vecval(word32 *dwp, int32 blen, register word32 *ap,
 register word32 *bp)
{
 int32 words_per_part;

 /* a part occupies the first wlen words, b part follows immediately */
 words_per_part = wlen_(blen);
 /* know copy macro zeroes any unused high bits of the top word32 */
 cp_walign_(dwp, ap, blen);
 cp_walign_(dwp + words_per_part, bp, blen);
}
3800
3801 /*
3802 * store if changed (and set flag) know rgap and rgbp are adjusted and
3803 * z extended (if needed) to exact wire width
3804 * also any 1 bit cases must already be adjusted to a part only form
3805 */
extern void __chg_st_val(register struct net_t *np, word32 *rgap, word32 *rgbp)
{
 register word32 *destwp;
 int32 nwlen;
 byte *curbp, *srcbp;

 switch ((byte) np->srep) {
  case SR_SCAL:
   /* non strength scalar - compare/store macro sets __lhs_changed */
   chg_st_scalval_(np->nva.bp, rgap[0], rgbp[0]);
   break;
  case SR_VEC:
   /* non strength vector - per instance 2*wlen word area */
   nwlen = wlen_(np->nwid);
   destwp = &(np->nva.wp[2*nwlen*__inum]);
   chg_st_vecval(destwp, np->nwid, rgap, rgbp);
   break;
  case SR_SVEC:
   /* strength vector - byte per bit, compare whole then copy if differs */
   curbp = &(np->nva.bp[np->nwid*__inum]);
   srcbp = (byte *) rgap;
   if (memcmp(curbp, srcbp, np->nwid) != 0)
    {
     memcpy(curbp, srcbp, np->nwid);
     __lhs_changed = TRUE;
    }
   break;
  case SR_SSCAL:
   /* strength scalar - single byte compare/store */
   curbp = &(np->nva.bp[__inum]);
   srcbp = (byte *) rgap;
   if (*curbp != *srcbp) { *curbp = *srcbp; __lhs_changed = TRUE; }
   break;
  default: __case_terr(__FILE__, __LINE__); return;
 }
 /* on any change, add the net to the net change list (macro form) */
 if (__lhs_changed) record_nchg_(np);
}
3841
3842 /*
3843 * routine to record net change - non macro for debugging
3844 */
3845 /* DBG ??? add ---
3846 extern void __record_nchg(struct net_t *np)
3847 {
3848 -* SJM 08/08/03 - can't assume caller turns off chged flag any more *-
3849 -* but one record called, it must be off for dctrl processing - not needed *-
3850 __lhs_changed = FALSE;
3851
3852 -* --- DBG remove
3853 if (__debug_flg)
3854 {
3855 strcpy(__xs2, "");
3856 if (np->nlds == NULL) strcat(__xs2, "[no lds, ");
3857 else strcat(__xs2, "[ld, ");
3858 if (np->dcelst == NULL) strcat(__xs2, "no dces, ");
3859 else strcat(__xs2, "dces, ");
3860 if (np->ndrvs == NULL) strcat(__xs2, "no drvs]");
3861 else strcat(__xs2, "drvs]");
3862
3863 -* emit for inst 0 (all should be same) *-
3864 __dbg_msg("record nchg for net %s type %s nchgaction=%x conn=%s\n",
3865 np->nsym->synam, __to_wtnam(__xs, np), np->nchgaction[__inum], __xs2);
3866 }
3867 --- *-
3868
3869 -* SJM 07/24/00 - has dces only on for regs *-
3870 -* SJM 03/15/01 - change to fields in net record *-
3871 if (np->nchg_has_dces) __wakeup_delay_ctrls(np, -1, -1);
3872
3873 if ((np->nchgaction[__inum] & NCHG_ALL_CHGED) == 0)
3874 __add_nchglst_el(np);
3875
3876 if ((np->nchgaction[__inum] & (NCHG_DMPVARNOW | NCHG_DMPVNOTCHGED))
3877 == (NCHG_DMPVARNOW | NCHG_DMPVNOTCHGED))
3878 {
3879 np->nchgaction[__inum] &= ~(NCHG_DMPVNOTCHGED);
3880 __add_dmpv_chglst_el(np);
3881 }
3882 }
3883 --- */
3884
3885 /*
3886 * routine to record net select change - non macro for debugging
3887 */
3888 /* DBG ??? add ---
3889 extern void __record_sel_nchg(struct net_t *np, int32 i1, int32 i2)
3890 {
3891 -* SJM 08/08/03 - can't assume caller turns off chged flag any more *-
3892 -* but one record called, it must be off for dctrl processing - not needed *-
3893 __lhs_changed = FALSE;
3894
3895 -* --- DBG remove *-
3896 if (__debug_flg)
3897 {
3898 strcpy(__xs2, "");
3899 if (np->nlds == NULL) strcat(__xs2, "[no lds, ");
3900 else strcat(__xs2, "[ld, ");
3901 if (np->dcelst == NULL) strcat(__xs2, "no dces, ");
3902 else strcat(__xs2, "dces, ");
3903 if (np->ndrvs == NULL) strcat(__xs2, "no drvs]");
3904 else strcat(__xs2, "drvs]");
3905
3906 __dbg_msg("record nchg for net %s[%d:%d] type %s nchgaction=%x conn=%s\n",
3907 np->nsym->synam, i1, i2, __to_wtnam(__xs, np), np->nchgaction[__inum],
3908 __xs2);
3909 }
3910 --- *-
3911
3912 -* --- *-
3913 -* SJM 07/24/00 - has dces only on for regs *-
3914 if (np->nchg_has_dces) __wakeup_delay_ctrls(np, i1, i2);
3915
3916 if ((np->nchgaction[__inum] & NCHG_ALL_CHGED) == 0)
3917 __add_select_nchglst_el(np, i1, i2);
3918
3919 if ((np->nchgaction[__inum] & (NCHG_DMPVARNOW | NCHG_DMPVNOTCHGED))
3920 == (NCHG_DMPVARNOW | NCHG_DMPVNOTCHGED))
3921 {
3922 np->nchgaction[__inum] &= ~(NCHG_DMPVNOTCHGED);
3923 __add_dmpv_chglst_el(np);
3924 }
3925 }
3926 --- */
3927
3928 /*
3929 * change form of store vector value - know stacked ap and bp width
3930 * same as destination
3931 * know blen at least 16 bits or wider
3932 *
3933 * caller must adjust
3934 * notice this does not add changed net to change list
3935 */
static void chg_st_vecval(register word32 *dwp, int32 blen,
 register word32 *ap, register word32 *bp)
{
 int32 nwlen;
 word32 *bdwp;

 if (blen <= WBITS)
  {
   /* narrow case - one word per part: dwp[0] is a part, dwp[1] b part */
   if (dwp[0] != ap[0])
    {
     /* AIV 09/19/06 - if a part differs store b part unconditionally */
     /* since that is cheaper than a second compare */
     dwp[0] = ap[0];
     dwp[1] = bp[0];
     __lhs_changed = TRUE;
    }
   /* AIV 09/19/06 - b part must come from bp[0] not ap[1] because */
   /* caller's a and b parts may not be contiguous (local av/bv words) */
   else if (dwp[1] != bp[0])
    {
     dwp[1] = bp[0];
     __lhs_changed = TRUE;
    }
   return;
  }
 /* wide case - compare and copy each part as an aligned word block */
 nwlen = wlen_(blen);
 if (cmp_wvval_(dwp, ap, nwlen) != 0)
  { cp_walign_(dwp, ap, blen); __lhs_changed = TRUE; }
 bdwp = &(dwp[nwlen]);
 if (cmp_wvval_(bdwp, bp, nwlen) != 0)
  { cp_walign_(bdwp, bp, blen); __lhs_changed = TRUE; }
}
3969
3970 /*
3971 * assign to a bit
3972 * if np is stren ap will point to array with one byte
3973 * know ap/bp either strength 8 bits in ap part or exactly 1 bit
3974 */
__assign_to_bit(struct net_t * np,struct expr_t * idndp,struct expr_t * ndx1,register word32 * ap,register word32 * bp)3975 extern void __assign_to_bit(struct net_t *np, struct expr_t *idndp,
3976 struct expr_t *ndx1, register word32 *ap, register word32 *bp)
3977 {
3978 int32 biti, nd_itpop;
3979 byte *sbp;
3980 struct gref_t *grp;
3981
3982 biti = __comp_ndx(np, ndx1);
3983 nd_itpop = FALSE;
3984 if (idndp->optyp == GLBREF)
3985 { grp = idndp->ru.grp; __xmrpush_refgrp_to_targ(grp); nd_itpop = TRUE; }
3986 if (biti == -1)
3987 {
3988 if (np->n_stren)
3989 {
3990 sbp = (byte *) ap;
3991 __stren_schedorassign_unknown_bit(np, (word32) sbp[0], FALSE);
3992 }
3993 else __schedorassign_unknown_bit(np, ap[0], bp[0], FALSE);
3994 }
3995 else
3996 {
3997 /* notice best to use change form since it has fast macro checking */
3998 /* need to record change - non change bit store not much better */
3999 /* SJM 03/15/01 - change to fields in net record */
4000 if (np->nchg_nd_chgstore)
4001 {
4002 if (np->n_stren)
4003 {
4004 sbp = (byte *) ap;
4005 __chg_st_bit(np, biti, (word32) sbp[0], (word32) 0);
4006 }
4007 else __chg_st_bit(np, biti, ap[0], bp[0]);
4008 }
4009 else
4010 {
4011 if (np->n_stren)
4012 {
4013 sbp = (byte *) ap;
4014 __st_bit(np, biti, (word32) sbp[0], (word32) 0);
4015 }
4016 else __st_bit(np, biti, ap[0], bp[0]);
4017 }
4018 }
4019 if (nd_itpop) __pop_itstk();
4020 }
4021
4022 /*
4023 * if 1 bit is forced return T (nothing to do) if need assign returns F
4024 */
__forced_inhibit_bitassign(struct net_t * np,struct expr_t * idndp,struct expr_t * ndx1)4025 extern int32 __forced_inhibit_bitassign(struct net_t *np, struct expr_t *idndp,
4026 struct expr_t *ndx1)
4027 {
4028 int32 biti, nd_itpop, rv;
4029 struct gref_t *grp;
4030
4031 /* SJM 10/11/02 - should compute index in assign itree cntxt not xmr */
4032 biti = __comp_ndx(np, ndx1);
4033
4034 nd_itpop = FALSE;
4035 if (idndp->optyp == GLBREF)
4036 { grp = idndp->ru.grp; __xmrpush_refgrp_to_targ(grp); nd_itpop = TRUE; }
4037
4038 if (biti == -1) __arg_terr(__FILE__, __LINE__);
4039 /* if the one bit is forced, no need to do assign */
4040 if (np->nu2.qcval[np->nwid*__inum + biti].qc_active) rv = TRUE;
4041 else rv = FALSE;
4042 if (nd_itpop) __pop_itstk();
4043 return(rv);
4044 }
4045
4046 /*
4047 * schedule assignment to a bit
4048 * ap and bp may be wider than 1 bit
4049 */
static void schedassign_to_bit(struct net_t *np, struct expr_t *idndp,
 struct expr_t *ndx1, register word32 *ap, register word32 *bp)
{
 int32 biti, nd_itpop;
 byte *sbp, *sbp2;
 word32 av, bv, nval;
 struct gref_t *grp;

 /* normalized bit index, -1 if index x/z */
 biti = __comp_ndx(np, ndx1);
 nd_itpop = FALSE;
 /* for xmr lhs, move itree context to the target instance */
 if (idndp->optyp == GLBREF)
  { grp = idndp->ru.grp; __xmrpush_refgrp_to_targ(grp); nd_itpop = TRUE; }
 if (biti == -1)
  {
   /* x/z index - here same routine for both sched and immediate */
   if (np->n_stren)
    {
     sbp = (byte *) ap;
     __stren_schedorassign_unknown_bit(np, (word32) sbp[0], TRUE);
    }
   else __schedorassign_unknown_bit(np, ap[0], bp[0], TRUE);
   if (nd_itpop) __pop_itstk();
   return;
  }

 if (np->n_stren)
  {
   /* get strength wire address */
   get_stwire_addr_(sbp, np);
   sbp2 = (byte *) ap;
   /* path destination bits use path delay schedule, else wire delay */
   if (np->nu.rngdwir->n_delrep == DT_PTHDST)
    schd_1pthwirebit(np, biti, (word32) sbp2[0], (word32) sbp[biti]);
   else
    __wdel_schd_1wirebit(np, biti, (word32) sbp2[0], (word32) sbp[biti], FALSE);
  }
 else
  {
   /* non strength - build 2 bit (b,a) new and old values for schedule */
   __ld_bit(&av, &bv, np, biti);
   nval = (ap[0] & 1L) | ((bp[0] << 1) & 2L);
   if (np->nu.rngdwir->n_delrep == DT_PTHDST)
    schd_1pthwirebit(np, biti, nval, (av | (bv << 1)));
   else __wdel_schd_1wirebit(np, biti, nval, (av | (bv << 1)), FALSE);
  }
 if (nd_itpop) __pop_itstk();
}
4095
4096 /*
4097 * assign or schedule to an unknown - bit - all x that differs from value
4098 *
4099 * strength version
4100 */
extern void __stren_schedorassign_unknown_bit(struct net_t *np, word32 bval,
 int32 schd_wire)
{
 register int32 bi;
 byte *stbp;
 word32 nv;

 /* get strength wire address */
 get_stwire_addr_(stbp, np);
 /* even if value the same - strength here always strong */
 if (schd_wire)
  {
   /* case 1a: schedule for delay wire - know not a path source */
   for (bi = 0; bi < np->nwid; bi++)
    {
     /* any bit differing from new value goes to strong x, else kept */
     nv = (stbp[bi] != (byte) bval) ? (word32) ST_STRONGX : bval;
     if (nv != (word32) stbp[bi])
      {
       if (np->nu.rngdwir->n_delrep == DT_PTHDST)
        schd_1pthwirebit(np, bi, nv, (word32) stbp[bi]);
       else __wdel_schd_1wirebit(np, bi, nv, (word32) stbp[bi], FALSE);
      }
    }
   return;
  }
 /* case 1b: immediate assign */
 for (bi = 0; bi < np->nwid; bi++)
  {
   /* SJM 01/18/01 - only mark lhs changed when bit actually differs */
   nv = (stbp[bi] != (byte) bval) ? (word32) ST_STRONGX : bval;
   if (nv != (word32) stbp[bi])
    { stbp[bi] = (byte) nv; __lhs_changed = TRUE; }
  }
 /* this could be lots of 1 bit schedules */
 if (__lhs_changed) record_nchg_(np);
}
4141
4142 /*
4143 * assign or schedule to an unknown - bit - all x that differs from value
4144 *
4145 * non strength version
4146 */
extern void __schedorassign_unknown_bit(struct net_t *np, word32 av, word32 bv,
 int32 schd_wire)
{
 register int32 i;
 word32 bval, newval, oval, w1, w2;
 struct xstk_t *oxsp, *nxsp;

 /* bval is the 2 bit (b,a) encoded new value */
 bval = av | (bv << 1);
 push_xstk_(oxsp, np->nwid);
 /* know net width > 1 */
 __ld_wire_val(oxsp->ap, oxsp->bp, np);
 push_xstk_(nxsp, np->nwid);
 /* build new value: every bit differing from bval forced to x */
 get_unknown_biti_val(np, nxsp->ap, nxsp->bp, oxsp->ap, oxsp->bp, bval);

 /* bit by bit select comparison if needed for source else just store */
 /* case 2a: schedule delay wire - know cannot be path source */
 if (schd_wire)
  {
   for (i = 0; i < np->nwid; i++)
    {
     /* 2 bit (b,a) new and old values for this bit */
     w1 = rhsbsel_(nxsp->ap, i);
     w2 = rhsbsel_(nxsp->bp, i);
     newval = w1 | (w2 << 1);
     w1 = rhsbsel_(oxsp->ap, i);
     w2 = rhsbsel_(oxsp->bp, i);
     oval = w1 | (w2 << 1);
     if (newval != oval)
      {
       if (np->nu.rngdwir->n_delrep == DT_PTHDST)
        schd_1pthwirebit(np, i, newval, oval);
       else __wdel_schd_1wirebit(np, i, newval, oval, FALSE);
      }
    }
  }
 /* normal store entire value - faster than bit by bit */
 /* know store and both are wire width */
 else __chg_st_val(np, nxsp->ap, nxsp->bp);
 /* pop both the new and old value stack temporaries */
 __pop_xstk();
 __pop_xstk();
}
4187
4188 /*
4189 * build the new unknown biti value on stack where new bit value is newval
4190 * this is not needed for strength case where stored as bytes
4191 */
static void get_unknown_biti_val(struct net_t *np, word32 *nap, word32 *nbp,
 word32 *oap, word32 *obp, word32 newval)
{
 register int32 wi;
 int32 nwlen, hibits;

 /* if new value x, then all bits of var. changed to x */
 if (newval == 3)
  {
   one_allbits_(nap, np->nwid);
   one_allbits_(nbp, np->nwid);
   return;
  }
 /* otherwise make all bits that differ from value x */
 nwlen = wlen_(np->nwid);
 for (wi = 0; wi < nwlen; wi++)
  {
   nap[wi] = oap[wi];
   nbp[wi] = obp[wi];
   setx_ifnotval(&(nap[wi]), &(nbp[wi]), newval);
  }
 /* mask off unused bits of the high word */
 hibits = ubits_(np->nwid);
 nap[nwlen - 1] &= __masktab[hibits];
 nbp[nwlen - 1] &= __masktab[hibits];
}
4213
4214 /*
4215 * set new a word32 and b word32 so that if value leave as is else x
4216 */
/*
 * for each bit: keep it if it equals 2-bit value val, else force to x
 * (a=1,b=1); a/b parts are one word32 each
 */
static void setx_ifnotval(word32 *ap, word32 *bp, word32 val)
{
 word32 mask;

 switch ((byte) val) {
  case 0: mask = ap[0] | bp[0]; ap[0] = bp[0] = mask; break;
  case 1:
   /* SJM - must compute new b part from ORIGINAL a part before a is */
   /* clobbered - old code set ap[0] = ALL1W first which made the b */
   /* update a no-op so 0 bits wrongly became 1 instead of x */
   /* (compare case 2 below which computes from originals first) */
   mask = ~(ap[0] | bp[0]) | bp[0];
   ap[0] = ALL1W;
   bp[0] = mask;
   break;
  case 2: ap[0] = ~(ap[0] | bp[0]) | ap[0]; bp[0] = ALL1W; break;
  case 3: ap[0] = bp[0] = ALL1W; break;
  default: __case_terr(__FILE__, __LINE__);
 }
}
4229
4230 /*
4231 * set bit dbit of dwp from low bit of word32 sw
4232 * know dbit in range
4233 * this does not assume dbit in 1st word32 of dwp
4234 * also sw may have high bits beside low bit on
4235 * dwp must be set to right place using current instance
4236 *
4237 * these should be macros in asm
4238 */
extern void __lhsbsel(register word32 *dwp, register int32 dbit, word32 sw)
{
 register int32 bpos, wpos;

 /* dbit in word32 0 is 0-31, word32 1 is 32-63, etc */
 wpos = get_wofs_(dbit);
 /* bpos is index with 0 rightmost bit and 31 high bit in select word32 */
 bpos = get_bofs_(dbit);
 /* clear the target bit then or in only the low bit of sw */
 dwp[wpos] = (dwp[wpos] & ~(1L << bpos)) | ((sw & 1L) << bpos);
}
4250
4251 /*
4252 * change versions of store bit
4253 * know biti in range
4254 */
extern void __chg_st_bit(struct net_t *np, int32 biti, register word32 av,
 register word32 bv)
{
 register word32 *basewp;
 byte *curbp;
 int32 nwlen;

 switch ((byte) np->srep) {
  /* same as full value store - biti 0 or will not get here */
  case SR_SCAL:
   chg_st_scalval_(np->nva.bp, av, bv);
   break;
  case SR_VEC:
   /* basewp is base of vector for current inst */
   nwlen = wlen_(np->nwid);
   basewp = &(np->nva.wp[2*nwlen*__inum]);
   chg_lhsbsel(basewp, biti, av);
   chg_lhsbsel(&(basewp[nwlen]), biti, bv);
   break;
  case SR_SVEC:
   /* strength vector - one byte per bit for current inst */
   curbp = &(np->nva.bp[np->nwid*__inum]);
   if (curbp[biti] != (byte) av)
    { curbp[biti] = (byte) av; __lhs_changed = TRUE; }
   break;
  case SR_SSCAL:
   /* strength scalar - single byte compare/store */
   curbp = &(np->nva.bp[__inum]);
   if (curbp[0] != (byte) av)
    { *curbp = (byte) av; __lhs_changed = TRUE; }
   break;
  default: __case_terr(__FILE__, __LINE__);
 }
 /* on change, record the single bit select change for this net */
 if (__lhs_changed) record_sel_nchg_(np, biti, biti);
}
4295
4296 /*
4297 * immediate versions of store bit
4298 * know biti in range
4299 */
extern void __st_bit(struct net_t *np, int32 biti, register word32 av,
 register word32 bv)
{
 register word32 *basewp;
 int32 nwlen;

 switch ((byte) np->srep) {
  /* same as full value store - biti 0 or will not get here */
  case SR_SCAL:
   st_scalval_(np->nva.bp, av, bv);
   break;
  case SR_VEC:
   /* basewp is base of vector for current inst */
   nwlen = wlen_(np->nwid);
   basewp = &(np->nva.wp[2*nwlen*__inum]);
   __lhsbsel(basewp, biti, av);
   __lhsbsel(&(basewp[nwlen]), biti, bv);
   break;
  case SR_SVEC:
   /* strength vector - one byte per bit for current inst */
   np->nva.bp[np->nwid*__inum + biti] = (byte) av;
   break;
  case SR_SSCAL:
   /* strength scalar - single byte store */
   np->nva.bp[__inum] = (byte) av;
   break;
  default: __case_terr(__FILE__, __LINE__);
 }
}
4334
4335 /*
4336 * lhs bit select but do not store if same
4337 * sets global lhs changed to F if the same
4338 * LOOKATME - is it true that sw must be masked off - faster if not needed
4339 */
static void chg_lhsbsel(register word32 *dwp, int32 dbit, word32 sw)
{
 register word32 selmask, newbit;
 register int32 bpos, wpos;

 /* dbit in word32 0: 0-31, word32 1: 32-63, etc (31 left high, 0 right low) */
 wpos = get_wofs_(dbit);
 bpos = get_bofs_(dbit);
 selmask = 1L << bpos;
 newbit = (sw & 1L) << bpos;
 /* store (and mark lhs changed) only when the selected bit differs */
 if (((dwp[wpos] & selmask) ^ newbit) != 0L)
  {
   __lhs_changed = TRUE;
   dwp[wpos] = (dwp[wpos] & ~selmask) | newbit;
  }
}
4353
4354 /*
4355 * assign to an indexed array location (procedural only)
4356 */
__assign_to_arr(struct net_t * np,struct expr_t * idndp,struct expr_t * ndx1,register word32 * ap,register word32 * bp)4357 extern void __assign_to_arr(struct net_t *np, struct expr_t *idndp,
4358 struct expr_t *ndx1, register word32 *ap, register word32 *bp)
4359 {
4360 int32 arri, nd_itpop, arrwid;
4361 struct gref_t *grp;
4362
4363 /* arrwid is number of cells in array */
4364 arri = __comp_ndx(np, ndx1);
4365 /* for array - if index out of range - do not change array at all */
4366 if (arri == -1)
4367 {
4368 __sgfwarn(530,
4369 "left hand side array index %s of %s unknown or out of range - unchanged",
4370 __regab_tostr(__xs, &__badind_a, &__badind_b, __badind_wid, BHEX, FALSE),
4371 __to_idnam(idndp));
4372 return;
4373 }
4374 /* SJM DBG REMOVEME --- */
4375 if (arri == -2) __misc_terr(__FILE__, __LINE__);
4376 /* --- */
4377 arrwid = __get_arrwide(np);
4378 nd_itpop = FALSE;
4379 /* notice for xmr - symbol points to right wire - trick is to make */
4380 /* sure target itree place right */
4381 if (idndp->optyp == GLBREF)
4382 { grp = idndp->ru.grp; __xmrpush_refgrp_to_targ(grp); nd_itpop = TRUE; }
4383
4384 /* SJM 03/15/01 - change to fields in net record */
4385 if (np->nchg_nd_chgstore)
4386 {
4387 __chg_st_arr_val(np->nva, arrwid, np->nwid, arri, ap, bp);
4388
4389 /* SJM - 06/25/00 - lhs changed possible from change store */
4390 /* and must only trigger change for right array index */
4391 if (__lhs_changed) record_sel_nchg_(np, arri, arri);
4392 }
4393 else __st_arr_val(np->nva, arrwid, np->nwid, arri, ap, bp);
4394
4395 if (nd_itpop) __pop_itstk();
4396 }
4397
4398 /*
4399 * store into array map of len mlen with element length blen using index
4400 * arri value from current instance from rgap and rgbp
4401 *
4402 * notice arrays stored and normalized to h:0 just like vectors
4403 * this handles source (to be stored) value of wrong size
4404 */
extern void __st_arr_val(union pck_u pckv, int32 mlen, int32 blen, int32 arri,
 register word32 *rgap, register word32 *rgbp)
{
 register word32 uwrd;
 int32 indi, wlen, wi, bi;
 register word32 *vap, *rap;

 /* compute number of words used to store 1 array element */
 /* 17 or more bits cannot be packed with multiple elements per word32 */
 /* new real arrays fit here */
 if (blen > WBITS/2)
  {
   /* case 1: each vector element of array takes multiple words */

   wlen = wlen_(blen);
   /* find array for inst i with each vector wlen words wide */
   vap = &(pckv.wp[2*wlen*mlen*__inum]);
   /* find element arri that may be a vector */
   rap = &(vap[arri*2*wlen]);

   /* instance and array index used to decode into vector addr rap */
   st_vecval(rap, blen, rgap, rgbp);
   return;
  }

 /* case 2: array of 1 bit elements - 2 bits (b,a) packed per element */
 if (blen == 1)
  {
   /* indi is the 2 bit cell's bit offset from the base of the map */
   indi = 2*(__inum*mlen + arri);
   wi = get_wofs_(indi);
   bi = get_bofs_(indi);
   uwrd = pckv.wp[wi];
   /* clear the 2 bit field then or in new (b,a) pair */
   uwrd &= ~(3L << bi);
   uwrd |= (((rgap[0] & 1L) | ((rgbp[0] & 1L) << 1)) << bi);
   pckv.wp[wi] = uwrd;
   return;
  }
 /* case 3: array cells packed - a part in low blen bits, b part above */
 uwrd = (rgap[0] & __masktab[blen]) | ((rgbp[0] & __masktab[blen]) << blen);
 indi = __inum*mlen + arri;
 st_packintowrd_(pckv, indi, uwrd, blen);
}
4447
4448 /*
4449 * store a array value if changed only - reset lhs change if not changed
4450 *
4451 * notice this does not record change (caller must)
4452 */
extern void __chg_st_arr_val(union pck_u pckv, int32 mlen, int32 blen, int32 arri,
 register word32 *ap, register word32 *bp)
{
 register word32 *rap, uwrd, ouwrd;
 int32 wlen, wi, bi, indi;
 word32 *vap;

 /* compute number of words used to store 1 array element */
 /* 17 or more bits cannot be packed with multiple elements per word32 */
 if (blen > WBITS/2)
  {
   /* case 1: each vector element of array takes multiple words */
   /* new real arrays fit here */

   wlen = wlen_(blen);
   /* find array for inst i with each vector wlen words wide */
   vap = &(pckv.wp[2*wlen*mlen*__inum]);
   /* find element arri that may be a vector */
   rap = &(vap[arri*2*wlen]);
   /* instance and array indexed used to decode into vector addr rap */
   /* SJM 08/24/03 - caller check for lhs changed maybe set by this */
   chg_st_vecval(rap, blen, ap, bp);
   return;
  }

 /* case 2: array of 1 bit elements - 2 bits (b,a) packed per element */
 if (blen == 1)
  {
   /* indi is the 2 bit cell's bit offset from the base of the map */
   indi = 2*(__inum*mlen + arri);
   wi = get_wofs_(indi);
   bi = get_bofs_(indi);
   ouwrd = pckv.wp[wi];
   /* clear the 2 bit field then or in new (b,a) pair */
   uwrd = ouwrd & ~(3L << bi);
   uwrd |= (((ap[0] & 1L) | ((bp[0] & 1L) << 1)) << bi);
   /* only store and mark changed when the word actually differs */
   if (ouwrd != uwrd) { pckv.wp[wi] = uwrd; __lhs_changed = TRUE; }
   return;
  }
 /* case 3: array cells packed - a part in low blen bits, b part above */
 indi = __inum*mlen + arri;
 /* SJM 02/08/00 - since memory still need to get pack into word32 */
 ouwrd = get_packintowrd_(pckv, indi, blen);
 uwrd = (ap[0] & __masktab[blen]) | ((bp[0] & __masktab[blen]) << blen);
 if (uwrd != ouwrd)
  {
   st_packintowrd_(pckv, indi, uwrd, blen);
   __lhs_changed = TRUE;
  }
}
4501
4502 /*
4503 * assign to a part select
4504 * know xsp width exactly matches part select range
4505 */
extern void __assign_to_psel(struct expr_t *idndp, int32 ri1, int32 ri2,
 struct net_t *np, register word32 *ap, register word32 *bp)
{
 struct gref_t *grp;
 int32 did_itpush;

 did_itpush = FALSE;
 /* for xmr lhs, move itree context to the target instance */
 if (idndp->optyp == GLBREF)
  {
   grp = idndp->ru.grp;
   __xmrpush_refgrp_to_targ(grp);
   did_itpush = TRUE;
  }

 /* SJM - 12/14/05 - must not call chg store of psel unless needed */
 /* otherwise - next assign that needs chg store but doesn't chg */
 /* incorrectly looks like it changed */
 if (np->nchg_nd_chgstore)
  {
   /* if strength, know ap points to st bytes and array rhswid 4x to big */
   chg_st_psel(np, ri1, ri2, ap, bp);
  }
 else st_psel(np, ri1, ri2, ap, bp);

 if (did_itpush) __pop_itstk();
}
4529
4530 /*
4531 * assign to a part select
4532 * know ap/bp width exactly matches part select range
4533 * if returns F, caller does not do lhs assign, if T must do it
4534 */
forced_assign_to_psel(struct expr_t * idndp,int32 ri1,int32 ri2,struct net_t * np,register word32 * ap,register word32 * bp)4535 static int32 forced_assign_to_psel(struct expr_t *idndp, int32 ri1, int32 ri2,
4536 struct net_t *np, register word32 *ap, register word32 *bp)
4537 {
4538 register int32 bi, bi2;
4539 int32 wi, pswid, nd_itpop, wlen, nd_assign, ibase;
4540 byte *sbp, *sbp2;
4541 struct gref_t *grp;
4542 struct xstk_t *xsp, *xsp2;
4543
4544 pswid = ri1 - ri2 + 1;
4545 nd_itpop = FALSE;
4546 if (idndp->optyp == GLBREF)
4547 { grp = idndp->ru.grp; __xmrpush_refgrp_to_targ(grp); nd_itpop = TRUE; }
4548
4549 if (np->n_stren)
4550 {
4551 sbp = (byte *) ap;
4552 get_stwire_addr_(sbp2, np);
4553
4554 /* trick is to replace forced bits so new assign is same as forced val */
4555 /* if all bits forced, do not need assign */
4556 ibase = __inum*np->nwid;
4557 for (nd_assign = FALSE, bi = ri2, bi2 = 0; bi2 < pswid; bi++, bi2++)
4558 {
4559 /* some bits not forced - so need assign */
4560 if (np->nu2.qcval[ibase + bi].qc_active) sbp[bi] = sbp2[bi];
4561 else nd_assign = TRUE;
4562 }
4563 if (nd_itpop) __pop_itstk();
4564 return(nd_assign);
4565 }
4566
4567 push_xstk_(xsp, pswid);
4568 push_xstk_(xsp2, np->nwid);
4569 __bld_forcedbits_mask(xsp2->ap, np);
4570
4571 /* xsp has part select range forced bits */
4572 __rhspsel(xsp->ap, xsp2->ap, ri2, pswid);
4573 __pop_xstk();
4574
4575 /* if all bits forced nothing to do */
4576 if (__vval_is1(xsp->ap, pswid))
4577 { __pop_xstk(); if (nd_itpop) __pop_itstk(); return(FALSE); }
4578 /* if no bits in range forced, just do lhs part select */
4579 if (vval_is0_(xsp->ap, pswid))
4580 { __pop_xstk(); if (nd_itpop) __pop_itstk(); return(TRUE); }
4581
4582 push_xstk_(xsp2, pswid);
4583 /* xsp2 has value of old wire part selected range */
4584 __ld_psel(xsp2->ap, xsp2->bp, np, ri1, ri2);
4585 wlen = wlen_(pswid);
4586
4587 /* this changes new value so lhs part select will set right value */
4588 for (wi = 0; wi < wlen; wi++)
4589 {
4590 /* remove forced bits from new value */
4591 ap[wi] &= ~(xsp->ap[wi]);
4592 bp[wi] &= ~(xsp->ap[wi]);
4593 /* remove non forced bits from old (current value) */
4594 xsp2->ap[wi] &= xsp->ap[wi];
4595 xsp2->bp[wi] &= xsp->ap[wi];
4596 /* or old value forced into new value */
4597 ap[wi] |= xsp2->ap[wi];
4598 bp[wi] |= xsp2->ap[wi];
4599 }
4600 __pop_xstk();
4601 __pop_xstk();
4602 if (nd_itpop) __pop_itstk();
4603 return(TRUE);
4604 }
4605
4606 /*
4607 * schedule assignment to a part select
4608 * know xsp width exactly matches part select range
4609 */
/*
 * schedule assignment to a part select
 * know ap/bp value width exactly matches part select range
 *
 * decodes the constant range from the lhs expr, then schedules every bit
 * in the range through the per bit wire delay machinery
 */
static void schedassign_to_psel(struct expr_t *xlhs, register word32 *ap,
 register word32 *bp)
{
 register int32 bi, bi2;
 int32 ri1, ri2, pslen, nd_itpop;
 byte *sbp, *sbp2;
 word32 oav, obv, aval, bval;
 struct net_t *np;
 struct expr_t *idndp, *ndx1, *ndx2;
 struct gref_t *grp;

 /* lhs psel: left operand is the net ident, right operand the range pair */
 idndp = xlhs->lu.x;
 np = idndp->lu.sy->el.enp;

 /* both range indices are constants kept in __contab */
 ndx1 = xlhs->ru.x->lu.x;
 ri1 = (int32) __contab[ndx1->ru.xvi];
 ndx2 = xlhs->ru.x->ru.x;
 ri2 = (int32) __contab[ndx2->ru.xvi];

 nd_itpop = FALSE;
 /* for a cross module reference, move itree context to the target */
 if (idndp->optyp == GLBREF)
  { grp = idndp->ru.grp; __xmrpush_refgrp_to_targ(grp); nd_itpop = TRUE; }
 pslen = ri1 - ri2 + 1;
 if (np->n_stren)
  {
   /* strength net: new value is one byte per bit in ap */
   sbp = (byte *) ap;
   /* get strength wire address */
   get_stwire_addr_(sbp2, np);
   /* NOTE(review): new value indexed by net bit bi but current wire value */
   /* by select offset bi2 - confirm this asymmetry is intended */
   for (bi = ri2, bi2 = 0; bi < ri2 + pslen; bi++, bi2++)
    {
     /* path destination wires use the path delay scheduling form */
     if (np->nu.rngdwir->n_delrep == DT_PTHDST)
      schd_1pthwirebit(np, bi, (word32) sbp[bi], (word32) sbp2[bi2]);
     else __wdel_schd_1wirebit(np, bi, (word32) sbp[bi], (word32) sbp2[bi2],
      FALSE);
    }
  }
 else
  {
   /* schedule for each bit */
   for (bi = ri2; bi < ri2 + pslen; bi++)
    {
     /* isolate rhs bit - pack b part bit into bit 1 above a part bit 0 */
     aval = rhsbsel_(ap, bi);
     bval = rhsbsel_(bp, bi);
     aval |= (bval << 1);
     /* load old bit from wire - packed the same way for the schedulers */
     __ld_bit(&oav, &obv, np, bi);
     if (np->nu.rngdwir->n_delrep == DT_PTHDST)
      schd_1pthwirebit(np, bi, aval, (oav | (obv << 1)));
     else __wdel_schd_1wirebit(np, bi, aval, (oav | (obv << 1)), FALSE);
    }
  }
 if (nd_itpop) __pop_itstk();
}
4664
4665 /*
4666 * part select numbits from swp (starting at 0) into dwp starting at dbi
4667 * assume swp part starts at bit 0
4668 * preserves high unused bits of new high word32 of dwp
4669 */
__lhspsel(register word32 * dwp,register int32 dbi,register word32 * swp,register int32 numbits)4670 extern void __lhspsel(register word32 *dwp, register int32 dbi,
4671 register word32 *swp, register int32 numbits)
4672 {
4673 register int32 wi;
4674
4675 /* correct so part select goes into 1st word32 */
4676 if (dbi >= WBITS)
4677 { wi = get_wofs_(dbi); dwp = &(dwp[wi]); dbi = get_bofs_(dbi); }
4678 /* if swp too short must correct */
4679 if (dbi == 0) ins_walign(dwp, swp, numbits);
4680 else __ins_wval(dwp, dbi, swp, numbits);
4681 }
4682
4683 /*
4684 * insert aligned on word32 boudary dest. dwp numbits from swp
4685 * preserves any unused high bits of high destination word
4686 * and ignored any used high bits of swp (wider than numbits)
4687 * if swp too narror correction made before here
4688 */
ins_walign(register word32 * dwp,register word32 * swp,register int32 numbits)4689 static void ins_walign(register word32 *dwp, register word32 *swp,
4690 register int32 numbits)
4691 {
4692 register int32 ubits, wlen;
4693 word32 save_val;
4694
4695 if (numbits <= WBITS)
4696 {
4697 /* preserve high bits and zero low */
4698 *dwp &= ~__masktab[numbits];
4699 /* or in new low bits - aligned to low (right) bits of word32 */
4700 /* if high bits of swp word32 used - will be masked off here */
4701 *dwp |= (*swp & __masktab[numbits]);
4702 }
4703 else
4704 {
4705 ubits = ubits_(numbits);
4706 /* need set brackets here since macro is multiple statements */
4707 if (ubits == 0) { cp_walign_(dwp, swp, numbits); }
4708 else
4709 {
4710 wlen = wlen_(numbits);
4711 save_val = dwp[wlen - 1] & ~__masktab[ubits];
4712 /* in case high word32 of swp has 0 bits (too wide), */
4713 /* copy will mask those bits off - high word32 ignored */
4714 cp_walign_(dwp, swp, numbits);
4715 dwp[wlen - 1] |= save_val;
4716 }
4717 }
4718 }
4719
4720 /*
4721 * insert value into dwp at index dbi from swp with length sblen
4722 * assume swp part starts at bit 0 but dwp and dbi corrected so dbi < WBITS
4723 * preserves high unused bits of new high word32 of dwp
4724 */
/*
 * insert numbits from swp (from its bit 0) into dwp at bit offset dbi
 * dwp and dbi already corrected so 0 < dbi < WBITS
 * preserves high unused bits of new high word32 of dwp
 */
extern void __ins_wval(register word32 *dwp, register int32 dbi,
 register word32 *swp, int32 numbits)
{
 register word32 keep, mask;
 int32 totbits, hiwrd;

 if (dbi + numbits <= WBITS)
  {
   /* case 1: insert fits entirely inside the first word32 */
   mask = __masktab[numbits] << dbi;
   *dwp = (*dwp & ~mask) | ((*swp << dbi) & mask);
  }
 else if (numbits <= WBITS)
  {
   /* case 2: one source word32 split across two destination words */
   totbits = numbits - (WBITS - dbi);
   /* keep low dbi dest bits, fill the rest from the source low bits */
   *dwp = (*dwp & __masktab[dbi]) | (*swp << dbi);
   /* remaining totbits source bits go into the low end of word32 1 */
   dwp[1] = (dwp[1] & ~__masktab[totbits])
    | ((*swp >> (WBITS - dbi)) & __masktab[totbits]);
  }
 else
  {
   /* case 3: general multiword - the offset copy zeroes the unused high */
   /* dest bits, so save them and or them back afterwards */
   totbits = numbits + dbi;
   hiwrd = wlen_(totbits) - 1;
   keep = dwp[hiwrd] & ~__masktab[ubits_(totbits)];
   cp_dofs_wval(dwp, swp, dbi, numbits);
   dwp[hiwrd] |= keep;
  }
}
4758
4759 /*
4760 * routine to copy one value to another assuming dest. 1st bit is non 0
4761 * but copies from source bit 0
4762 * notice preserves bits to right of dbit1 but does not preserve high
4763 * bits of high word
4764 * dbit1 is offset in 1st word32 of dest. (>0 and <WBITS)
4765 *
4766 * copy from non 0 bit of source to 0 bitb of destionation use cp_sofs_wval
4767 *
4768 * chgs high unused bits of dest. word32 to 0's - caller must save if needed
4769 * this should probably be macro
4770 */
/*
 * copy numbits from bit 0 of swp into dwp starting at bit dbit1
 * dbit1 is the offset in the 1st dest word32 (>0 and <WBITS)
 * preserves bits to the right of dbit1 but zeroes unused high bits of the
 * high dest word32 - caller must save and restore those if needed
 */
static void cp_dofs_wval(register word32 *dwp, register word32 *swp,
 int32 dbit1, int32 numbits)
{
 int32 w1bits;

 /* 1st dest word32 special case: keep the low dbit1 bits, fill above */
 /* them from the source low bits */
 dwp[0] = (dwp[0] & __masktab[dbit1]) | (swp[0] << dbit1);
 w1bits = WBITS - dbit1;
 /* anything left is a source-offset copy starting at dest word32 1 */
 if (w1bits < numbits)
  __cp_sofs_wval(&(dwp[1]), swp, w1bits, numbits - w1bits);
}
4784
4785 /*
4786 * copy one value to another assuming source 1st bit is non 0
4787 * but copies into destination starting at bit 0
4788 * sbit1 is offset in 1st word32 (>0 and <WBITS)
4789 *
4790 * to copy into dwp non bit 0 copy use cp_dofs_wval
4791 * and then use this routine with WBITS - dbit1 for rest
4792 *
4793 * chgs high unused bits of dest. word32 to 0's - caller must save and restore
4794 * this should probably be macro
4795 */
__cp_sofs_wval(register word32 * dwp,register word32 * swp,register int32 sbit1,register int32 numbits)4796 extern void __cp_sofs_wval(register word32 *dwp, register word32 *swp,
4797 register int32 sbit1, register int32 numbits)
4798 {
4799 register int32 wi, bi, sbit2;
4800
4801 sbit2 = WBITS - sbit1;
4802 for (bi = 0, wi = 0; ; wi++)
4803 {
4804 dwp[wi] = (swp[wi] >> sbit1);
4805 if ((bi += sbit2) >= numbits) break;
4806 dwp[wi] |= (swp[wi + 1] << sbit2);
4807 if ((bi += sbit1) >= numbits) break;
4808 }
4809 /* bits in high source word32 will probably be on but must not be selected */
4810 dwp[wi] &= __masktab[ubits_(numbits)];
4811 }
4812
4813 /*
4814 * non change store assign to psel
4815 *
4816 * SJM 12/14/05 - can't use chg form for non chg since may wrongly leave
4817 * lhs changed on so next one wrongly appears to be changed
4818 */
/*
 * non change store assign to psel
 *
 * SJM 12/14/05 - can't use chg form for non chg since may wrongly leave
 * lhs changed on so next one wrongly appears to be changed
 */
static void st_psel(struct net_t *np, int32 ri1, int32 ri2,
 register word32 *ap, register word32 *bp)
{
 int32 pslen, vwlen;
 word32 *vwp;

 pslen = ri1 - ri2 + 1;
 if (np->srep != SR_VEC)
  {
   /* byte per bit representation - copy the selected byte range; new */
   /* value bytes are in ap */
   memcpy(&(np->nva.bp[np->nwid*__inum + ri2]), (byte *) ap, pslen);
   return;
  }
 /* SJM 07/15/00 - all non mem vecs in at least 2 wrds - scalars in byte */
 /* per instance storage: a part (wlen words) followed by b part */
 vwlen = wlen_(np->nwid);
 vwp = &(np->nva.wp[2*vwlen*__inum]);
 __lhspsel(vwp, ri2, ap, pslen);
 __lhspsel(&(vwp[vwlen]), ri2, bp, pslen);
}
4844
4845 /*
4846 * change store a part select - only for non strength vector
4847 *
4848 * LOOKATME - notice no non chg form store into psel
4849 */
/*
 * change store a part select - sets __lhs_changed if any bit differs and
 * records the changed range for net change processing
 */
static void chg_st_psel(struct net_t *np, int32 ri1, int32 ri2,
 register word32 *ap, register word32 *bp)
{
 register int32 ndi, psi;
 int32 pslen;
 byte *oldbp, *newbp;

 /* SJM 07/15/00 - all non mem vecs in at least 2 wrds - scalars in byte */
 if (np->srep == SR_VEC)
  chg_st_unpckpsel(np->nva.wp, np->nwid, ri1, ri2, ap, bp);
 else
  {
   /* byte per bit representation - compare/store each selected byte */
   oldbp = &(np->nva.bp[np->nwid*__inum]);
   newbp = (byte *) ap;
   pslen = ri1 - ri2 + 1;
   for (psi = 0, ndi = ri2; psi < pslen; ndi++, psi++)
    {
     if (oldbp[ndi] == newbp[psi]) continue;
     oldbp[ndi] = newbp[psi];
     __lhs_changed = TRUE;
    }
  }
 /* notice since know dce and npps never ranges unless possible */
 /* do not need to correct vectored or reg subrange to entire wire */
 /* SJM 03/15/01 - change to fields in net record */
 if (__lhs_changed && np->nchg_nd_chgstore)
  {
   record_sel_nchg_(np, ri1, ri2);
  }
}
4882
4883 /*
4884 * change version of store psel for unpacked vector
4885 */
/*
 * change version of store psel for unpacked vector
 * bith/bitl select the range within one blen-bit element
 */
static void chg_st_unpckpsel(word32 *wp, int32 blen, int32 bith, int32 bitl,
 register word32 *ap, register word32 *bp)
{
 register word32 *vwp;
 int32 vwlen, pslen;

 pslen = bith - bitl + 1;
 vwlen = wlen_(blen);
 /* per instance storage is 2*wlen words: a part then b part */
 vwp = &(wp[2*vwlen*__inum]);
 __chg_lhspsel(vwp, bitl, ap, pslen);
 __chg_lhspsel(&(vwp[vwlen]), bitl, bp, pslen);
}
4900
4901 /*
4902 * only store if change form of lhs psel
4903 */
__chg_lhspsel(register word32 * dwp,register int32 dbi,register word32 * swp,register int32 numbits)4904 extern void __chg_lhspsel(register word32 *dwp, register int32 dbi,
4905 register word32 *swp, register int32 numbits)
4906 {
4907 register int32 wi;
4908 int32 wlen, ubits;
4909
4910 /* correct so part select goes into 1st word32 */
4911 if (dbi >= WBITS)
4912 { wi = get_wofs_(dbi); dwp = &(dwp[wi]); dbi = ubits_(dbi); }
4913 /* if swp too short must correct */
4914 if (dbi == 0)
4915 {
4916 if (numbits <= WBITS)
4917 {
4918 if ((dwp[0] & __masktab[numbits]) != (swp[0] & __masktab[numbits]))
4919 {
4920 ins_walign(dwp, swp, numbits);
4921 __lhs_changed = TRUE;
4922 }
4923 }
4924 else
4925 {
4926 wlen = wlen_(numbits);
4927 ubits = ubits_(numbits);
4928
4929 /* if any differences copy all */
4930 /* when comparing high word32 - dest. high bits are masked off */
4931 if ((dwp[wlen - 1] & __masktab[ubits])
4932 != (swp[wlen - 1] & __masktab[ubits])
4933 || memcmp(dwp, swp, WRDBYTES*(wlen - 1)) != 0)
4934 {
4935 ins_walign(dwp, swp, numbits);
4936 __lhs_changed = TRUE;
4937 }
4938 }
4939 return;
4940 }
4941 /* unaligned case */
4942 chg_ins_wval(dwp, dbi, swp, numbits);
4943 }
4944
4945 /*
4946 * only assign if changed form of unaligned ins_wval
4947 */
chg_ins_wval(register word32 * dwp,register int32 dbi,register word32 * swp,register int32 numbits)4948 static void chg_ins_wval(register word32 *dwp, register int32 dbi,
4949 register word32 *swp, register int32 numbits)
4950 {
4951 register word32 mask;
4952 word32 save_val;
4953 int32 wlen, w2bits;
4954
4955 /* case 1a - fits in 1st actual word32 */
4956 if (dbi + numbits <= WBITS)
4957 {
4958 mask = __masktab[numbits] << dbi;
4959 save_val = (swp[0] << dbi) & mask;
4960 if ((dwp[0] & mask) != save_val)
4961 { dwp[0] = (dwp[0] & ~mask) | save_val; __lhs_changed = TRUE; }
4962 return;
4963 }
4964 /* case 2 - less than 32 bits but crosses word32 boundary */
4965 if (numbits <= WBITS)
4966 {
4967 w2bits = numbits - (WBITS - dbi);
4968
4969 /* compare 1st word32 dbi to 31 versus 32 - dbi low bits of source */
4970 /* compare 2nd word32 numbits - 32 - dbi to dbi to numbits of source */
4971 mask = __masktab[w2bits];
4972 if ((dwp[0] & ~__masktab[dbi]) != (swp[0] << dbi) ||
4973 ((dwp[1] & mask) != ((swp[0] >> (WBITS - dbi)) & mask)))
4974 {
4975 /* remove high (WBITS - dbi) bits (0s) */
4976 dwp[0] &= (dwp[0] & __masktab[dbi]);
4977 /* or into 0's low (WBITS - dbi) into range [31:dbi] */
4978 dwp[0] |= (swp[0] << dbi);
4979 /* remove low w2bits and or in all but low (WBITS - dbi) from new */
4980 dwp[1] = (dwp[1] & ~mask) | ((swp[0] >> (WBITS - dbi)) & mask);
4981 __lhs_changed = TRUE;
4982 }
4983 return;
4984 }
4985
4986 /* case 3 - general multiword case */
4987 if (chg_ofs_cmp(dwp, swp, dbi, numbits) == 0) return;
4988
4989 /* SJM 03/29/02 - handles restoring high big problem by saving and putting */
4990 /* back - these copies always zero high bits when done so or works */
4991 w2bits = numbits + dbi;
4992 /* w2bits is length in bits starting from start of dest word32 */
4993 wlen = wlen_(w2bits);
4994 save_val = dwp[wlen - 1] & ~__masktab[ubits_(w2bits)];
4995 cp_dofs_wval(dwp, swp, dbi, numbits);
4996 dwp[wlen - 1] |= save_val;
4997 __lhs_changed = TRUE;
4998 }
4999
5000 /*
5001 * compare source versus destination ofset by dbi bits
5002 * returns 0 for equal 1 for not
5003 * know source (new) always aligned to bit 0
5004 *
5005 * 03/17/02 - changed so saves and zeros high bits so compe works
5006 */
/*
 * compare source versus destination offset by dbi bits
 * returns 0 for equal 1 for not
 * know source (new) always aligned to bit 0
 *
 * 03/17/02 - changed so saves and zeros high bits so compare works
 */
static int32 chg_ofs_cmp(register word32 *dwp, register word32 *swp,
 int32 dbi, int32 numbits)
{
 register int32 bi;
 register word32 mask1, mask2;
 int32 wi, sbit2, w2bits, w2len, chged;
 word32 save_val;

 /* sbit2 bits of each source word32 land in the high part of a dest word32 */
 sbit2 = WBITS - dbi;

 mask1 = __masktab[dbi];
 mask2 = __masktab[sbit2];

 /* w2bits is total span in bits measured from the start of dest word32 0 */
 w2bits = numbits + dbi;
 w2len = wlen_(w2bits);
 /* trick here is to save and set high bits to 0's so do not need */
 /* high bit special case and can use saved val if copy needed */
 save_val = dwp[w2len - 1] & ~__masktab[ubits_(w2bits)];
 dwp[w2len - 1] &= __masktab[ubits_(w2bits)];

 /* assume chged */
 chged = 1;

 /* 1st word32 is special case - compare dest bits [WBITS-1:dbi] against */
 /* the low source bits shifted into place */
 if ((dwp[0] & ~__masktab[dbi]) != (swp[0] << dbi)) goto done_putback;
 numbits -= sbit2;

 /* walk remaining words, checking the two unaligned halves of each: */
 /* low dbi dest bits vs high bits of the previous source word32, then */
 /* high sbit2 dest bits vs low bits of the current source word32 */
 for (bi = 0, wi = 1; ; wi++)
  {
   if ((dwp[wi] & mask1) != ((swp[wi - 1] >> sbit2) & mask1))
    goto done_putback;
   if ((bi += dbi) >= numbits) break;

   if (((dwp[wi] >> dbi) & mask2) != (swp[wi] & mask2))
    goto done_putback;
   if ((bi += sbit2) >= numbits) break;
  }
 chged = 0;

done_putback:
 /* finally put back high bits of dest word32 */
 dwp[w2len - 1] |= save_val;
 return(chged);
}
5051
5052 /* SJM 07/15/00 - now 2 to 16 bit vectors not packed - in 2 words */
5053
5054 /*
5055 * SIZE CHANGE AND Z EXTENSION ROUTINES
5056 */
5057
5058 /*
5059 * widen or narrow a stack value (only for rhs exprs)
5060 * know bit widths differ or will not be called
5061 * this may need to widen stack value width (alloc-free)
5062 * also if widens zeros all bits
5063 *
5064 * this is not for z filling but for normal operators where no z filling
5065 * allowed - may z fill in assignment after this size change done
5066 * also does not work for strengths
5067 */
/*
 * widen or narrow a stack value (only for rhs exprs)
 * know bit widths differ or will not be called
 * this may need to widen stack value width (alloc-free)
 * also if widens zeros all new high bits
 *
 * this is not for z filling but for normal operators where no z filling
 * allowed - may z fill in assignment after this size change done
 * also does not work for strengths
 */
extern void __sizchgxs(register struct xstk_t *xsp, int32 nblen)
{
 register int32 wi;
 register word32 *wpna, *wpnb, *wpob;
 int32 nwlen, nubits, stkwlen, xtrabits;

 /* case 1: widening */
 if (xsp->xslen < nblen)
  {
   /* case 1a: stays in one word32 - only the length field changes */
   if (nblen <= WBITS) goto done;
   nwlen = wlen_(nblen);
   /* case 1b: multiword but into same number of words - does nothing */
   stkwlen = wlen_(xsp->xslen);
   if (nwlen == stkwlen) goto done;
   nubits = ubits_(nblen);
   /* case 1c: wider needs bigger area */
   if (nwlen > xsp->xsawlen)
    {
     /* SJM 05/16/04 sign change was wrong - need to copy low parts */
     /* of a and b separately */
     wpna = (word32 *) __my_malloc(2*WRDBYTES*nwlen);
     memcpy(wpna, xsp->ap, WRDBYTES*xsp->xsawlen);
     /* SJM 09/29/04 widening b part is new wlen offset from a part */
     /* SJM 10/02/04 wasn't fixed right was using wrong old xsp ap part */
     wpnb = &(wpna[nwlen]);
     memcpy(wpnb, xsp->bp, WRDBYTES*xsp->xsawlen);

     __my_free((char *) xsp->ap, 2*WRDBYTES*xsp->xsawlen);
     xsp->ap = wpna;
     xsp->bp = wpnb;
     xsp->xsawlen = nwlen;
    }
   else
    {
     /* case 1d: wider by adjusting loc in region of b part and copying */
     wpob = xsp->bp;
     /* SJM 09/29/04 widening b part should be new wlen offset from a part */
     wpnb = &(xsp->ap[nwlen]);

     /* must copy high to low to preserve high old (regions may overlap) */
     for (wi = stkwlen - 1; wi >= 0; wi--) wpnb[wi] = wpob[wi];
     xsp->bp = wpnb;
    }
   /* 0 wide new high bits - everything above the old word32 count */
   xtrabits = (nblen - WBITS*stkwlen);
   zero_allbits_(&(xsp->ap[stkwlen]), xtrabits);
   zero_allbits_(&(xsp->bp[stkwlen]), xtrabits);
   goto done;
  }
 /* case 2 narrowing case - know cannot be 1 bit to start */
 /* case 2a: narrow to 1 bit */
 nwlen = wlen_(nblen);
 nubits = ubits_(nblen);
 stkwlen = wlen_(xsp->xslen);
 /* case 2b: narrowing where narrower same number of words - just mask */
 /* off the bits above the new width in both high words */
 if (stkwlen == nwlen)
  {
   xsp->ap[nwlen - 1] &= __masktab[nubits];
   xsp->bp[nwlen - 1] &= __masktab[nubits];
   goto done;
  }
 /* case 2c: general narrowing - slide b part down next to shorter a part */
 /* SJM 09/29/04 widening b part should be new wlen offset from a part */
 wpnb = &(xsp->ap[nwlen]);
 wpob = xsp->bp;
 /* need loop because must make sure copy low first (regions may overlap) */
 for (wi = 0; wi < nwlen; wi++) wpnb[wi] = wpob[wi];
 xsp->bp = wpnb;
 xsp->ap[nwlen - 1] &= __masktab[nubits];
 xsp->bp[nwlen - 1] &= __masktab[nubits];
done:
 xsp->xslen = nblen;
}
5142
5143 /*
5144 * zero widen a stack value (only for rhs exprs) - not for sign extend
5145 * know need to widen or will not be called
5146 * this may need to widen stack value width (alloc-free)
5147 * also if widens zeros all bits (not for sign extend widening)
5148 */
/*
 * zero widen a stack value (only for rhs exprs) - not for sign extend
 * know need to widen or will not be called
 * this may need to widen stack value width (alloc-free)
 * also if widens zeros all new high bits (not for sign extend widening)
 */
extern void __sizchg_widen(register struct xstk_t *xsp, int32 nblen)
{
 register int32 wi;
 register word32 *wpna, *wpnb, *wpob;
 int32 nwlen, nubits, stkwlen, xtrabits;

 /* case 1: stays in one word32 - only the length field changes */
 if (nblen <= WBITS) { xsp->xslen = nblen; return; }
 nwlen = wlen_(nblen);
 /* case 2: multiword but into same number of words - does nothing */
 stkwlen = wlen_(xsp->xslen);
 if (nwlen == stkwlen) { xsp->xslen = nblen; return; }
 nubits = ubits_(nblen);
 /* case 1c: wider needs bigger area */
 if (nwlen > xsp->xsawlen)
  {
   /* SJM 05/16/04 - 9-29 sign change was wrong - need to copy low parts */
   /* of a and b separately */
   wpna = (word32 *) __my_malloc(2*WRDBYTES*nwlen);
   memcpy(wpna, xsp->ap, WRDBYTES*xsp->xsawlen);
   /* SJM 09/29/04 widening b part should be new wlen offset from a part */
   /* SJM 10/02/04 wasn't fixed right was using wrong old xsp ap part */
   wpnb = &(wpna[nwlen]);
   memcpy(wpnb, xsp->bp, WRDBYTES*xsp->xsawlen);

   __my_free((char *) xsp->ap, 2*WRDBYTES*xsp->xsawlen);
   xsp->ap = wpna;
   xsp->bp = wpnb;
   xsp->xsawlen = nwlen;
  }
 else
  {
   /* case 1d: wider by adjusting loc in region of b part and copying */
   wpob = xsp->bp;
   /* SJM 09/29/04 widening b part should be new wlen offset from a part */
   wpnb = &(xsp->ap[nwlen]);
   /* must copy high to low to preserve high old (regions may overlap) */
   for (wi = stkwlen - 1; wi >= 0; wi--) wpnb[wi] = wpob[wi];
   xsp->bp = wpnb;
  }
 /* 0 wide new high bits - everything above the old word32 count */
 xtrabits = (nblen - WBITS*stkwlen);
 zero_allbits_(&(xsp->ap[stkwlen]), xtrabits);
 zero_allbits_(&(xsp->bp[stkwlen]), xtrabits);
 xsp->xslen = nblen;
}
5195
5196 /*
5197 * widen a stack using signed extension (narrow case handled in other routine)
5198 *
5199 * know need widening (maybe within one word32) - checks and extends sign
5200 * if needed
5201 *
5202 * this may need to widen stack value width (alloc-free) but never pops/pushes
5203 * also routine does not work for strens
5204 */
/*
 * widen a stack using signed extension (narrow case handled in other routine)
 *
 * know need widening (maybe within one word32) - checks and extends sign
 * if needed
 * extension rule: a part extended with sign bit, b part extended with the
 * b part (x/z) sign bit - both tested independently
 *
 * this may need to widen stack value width (alloc-free) but never pops/pushes
 * also routine does not work for strens
 */
extern void __sgn_xtnd_widen(struct xstk_t *xsp, int32 nblen)
{
 register int32 wi, osgn_bofs;
 register word32 mask;
 word32 *wpna, *wpnb, *wpob;
 int32 nwlen, stkwlen, widen_amt, xtra_wbits, ival;

 /* case 1: old and new both fit in one word32 */
 if (nblen <= WBITS)
  {
   osgn_bofs = xsp->xslen - 1;
   /* AIV 01/18/06 - added parenthesis around minus amount */
   mask = __masktab[nblen - (osgn_bofs + 1)] << (osgn_bofs + 1);
   /* if a part sign bit on, sign extend a part */
   if ((xsp->ap[0] & (1 << osgn_bofs)) != 0) xsp->ap[0] |= mask;
   /* if b part sign bit on (x/z), x/z extend b part */
   if ((xsp->bp[0] & (1 << osgn_bofs)) != 0) xsp->bp[0] |= mask;
   xsp->xslen = nblen;
   return;
  }
 nwlen = wlen_(nblen);
 stkwlen = wlen_(xsp->xslen);

 /* case 2: multiword but into same number of words - extend in place */
 if (nwlen == stkwlen)
  {
   osgn_bofs = get_bofs_(xsp->xslen - 1);
   /* AIV 12/22/06 - masktab index was wrong */
   mask = ((__masktab[WBITS - (osgn_bofs + 1)]) << (osgn_bofs + 1));
   /* SJM 09/29/04 - was checking word32 after high end not high word32 */
   if ((xsp->ap[nwlen - 1] & (1 << osgn_bofs)) != 0)
    xsp->ap[nwlen - 1] |= mask;
   /* AIV 01/10/07 - was using bp[0] - wrong for wide case */
   if ((xsp->bp[nwlen - 1] & (1 << osgn_bofs)) != 0)
    xsp->bp[nwlen - 1] |= mask;
   xsp->xslen = nblen;
   /* SJM/fix - must return here - previously fell through into the */
   /* grow-area case 3 code which recomputed widen amt from the already */
   /* updated xslen (0) and could set bits above nblen in the high word32 */
   return;
  }

 /* case 3: wider - first create the larger area */
 if (nwlen > xsp->xsawlen)
  {
   /* SJM 05/16/04 - 9-29 sign change was wrong - need to copy low parts */
   /* of a and b separately */
   wpna = (word32 *) __my_malloc(2*WRDBYTES*nwlen);
   memcpy(wpna, xsp->ap, WRDBYTES*xsp->xsawlen);
   /* SJM 09/29/04 widening b part should be new wlen offset from a part */
   wpnb = &(wpna[nwlen]);
   memcpy(wpnb, xsp->bp, WRDBYTES*xsp->xsawlen);

   __my_free((char *) xsp->ap, 2*WRDBYTES*xsp->xsawlen);
   xsp->ap = wpna;
   xsp->bp = wpnb;
   xsp->xsawlen = nwlen;
  }
 else
  {
   /* area big enough - slide b part up to new wlen offset from a part */
   wpob = xsp->bp;
   wpnb = &(xsp->ap[nwlen]);
   /* must copy high to low to preserve high old (regions may overlap) */
   for (wi = stkwlen - 1; wi >= 0; wi--) wpnb[wi] = wpob[wi];
   xsp->bp = wpnb;
  }

 /* widen amount is number of bits to extend into */
 widen_amt = nblen - xsp->xslen;
 /* this is sign bit position in old narrower value's high word32 */
 osgn_bofs = get_bofs_(xsp->xslen - 1);
 /* xtra bits is widen bits with high bits of the old sign word32 removed */
 xtra_wbits = widen_amt - (WBITS - osgn_bofs - 1);

 /* AIV 06/23/05 - special case don't check for sign if 32 bits */
 /* just cast to int and copy high part */
 if (xsp->xslen == WBITS)
  {
   ival = (int32) xsp->ap[0];
   if (ival < 0) one_allbits_(&(xsp->ap[stkwlen]), xtra_wbits);
   else zero_allbits_(&(xsp->ap[stkwlen]), xtra_wbits);
   ival = (int32) xsp->bp[0];
   if (ival < 0) one_allbits_(&(xsp->bp[stkwlen]), xtra_wbits);
   else zero_allbits_(&(xsp->bp[stkwlen]), xtra_wbits);
   xsp->xslen = nblen;
   return;
  }

 /* now can set new widened size */
 xsp->xslen = nblen;

 /* sign extend if sign bit on, x/z extend if sign bit x/z, else 0 extend */
 if ((xsp->ap[stkwlen - 1] & (1 << osgn_bofs)) != 0)
  {
   mask = __masktab[WBITS - (osgn_bofs + 1)] << (osgn_bofs + 1);
   /* one high bits of this word32 */
   xsp->ap[stkwlen - 1] |= mask;
   /* then all bits of rest */
   one_allbits_(&(xsp->ap[stkwlen]), xtra_wbits);

   /* if x/z x/z extend */
   if ((xsp->bp[stkwlen - 1] & (1 << osgn_bofs)) != 0)
    {
     xsp->bp[stkwlen - 1] |= mask;
     one_allbits_(&(xsp->bp[stkwlen]), xtra_wbits);
    }
   /* know high bits of high old size word32 0, but 0 all new words */
   else zero_allbits_(&(xsp->bp[stkwlen]), xtra_wbits);
   return;
  }
 /* a part sign bit off, 0 all high a part words */
 zero_allbits_(&(xsp->ap[stkwlen]), xtra_wbits);
 if ((xsp->bp[stkwlen - 1] & (1 << osgn_bofs)) != 0)
  {
   mask = __masktab[WBITS - (osgn_bofs + 1)] << (osgn_bofs + 1);
   xsp->bp[stkwlen - 1] |= mask;
   one_allbits_(&(xsp->bp[stkwlen]), xtra_wbits);
   return;
  }
 /* 0 wide new high bits */
 zero_allbits_(&(xsp->bp[stkwlen]), xtra_wbits);
}
5353
5354 /*
5355 * sign extend widen within one word
5356 */
/*
 * sign extend widen within one word
 * a part extended if its sign bit is on, b part extended if the b part
 * (x/z) sign bit is on - tested independently
 */
extern void __sgn_xtnd_wrd(register struct xstk_t *xsp, int32 nblen)
{
 register int32 obits;
 register word32 ext;

 obits = xsp->xslen;
 /* all bits from the old width up to WBITS */
 ext = (__masktab[WBITS - obits]) << obits;
 /* AIV - 10/10/05 - b part was & should be | */
 if ((xsp->ap[0] & (1 << (obits - 1))) != 0) xsp->ap[0] |= ext;
 if ((xsp->bp[0] & (1 << (obits - 1))) != 0) xsp->bp[0] |= ext;
 xsp->xslen = nblen;
}
5380
5381 /*
5382 * special case narrow to 1 routine
5383 */
__narrow_to1bit(register struct xstk_t * xsp)5384 extern void __narrow_to1bit(register struct xstk_t *xsp)
5385 {
5386 register int32 stkwlen;
5387
5388 stkwlen = wlen_(xsp->xslen);
5389 /* case 1: narrowing within one word32 */
5390 if (stkwlen == 1) { xsp->ap[0] &= 1; xsp->bp[0] &= 1; }
5391 else
5392 {
5393 /* case 2: wide to 1 bit narrow */
5394 xsp->ap[0] &= 1;
5395 xsp->ap[1] = xsp->bp[0] & 1;
5396 xsp->bp = &(xsp->ap[1]);
5397 }
5398 xsp->xslen = 1;
5399 }
5400
5401 /*
5402 * special case narrow to WBITS routine
5403 */
__narrow_to1wrd(register struct xstk_t * xsp)5404 extern void __narrow_to1wrd(register struct xstk_t *xsp)
5405 {
5406 register int32 stkwlen;
5407
5408 stkwlen = wlen_(xsp->xslen);
5409 /* DBG remove -- */
5410 if (stkwlen == 1) __misc_terr(__FILE__, __LINE__);
5411 /* --- */
5412 xsp->ap[1] = xsp->bp[0];
5413 xsp->bp = &(xsp->ap[1]);
5414 xsp->xslen = WBITS;
5415 }
5416
5417 /*
5418 * narrow a stack value (only for rhs exprs)
5419 * know need to narrow or will not be called
5420 */
/*
 * narrow a stack value (only for rhs exprs)
 * know need to narrow or will not be called
 */
extern void __narrow_sizchg(register struct xstk_t *xsp, int32 nblen)
{
 register int32 wi;
 register word32 *bdst, *bsrc;
 int32 nwlen, hibits;

 /* know cannot be 1 bit to start */
 nwlen = wlen_(nblen);
 hibits = ubits_(nblen);
 if (wlen_(xsp->xslen) != nwlen)
  {
   /* fewer words: slide b part down so a and b parts stay contiguous */
   bdst = &(xsp->ap[nwlen]);
   bsrc = xsp->bp;
   /* copy low first since the regions may overlap */
   for (wi = 0; wi < nwlen; wi++) bdst[wi] = bsrc[wi];
   xsp->bp = bdst;
  }
 /* mask off bits above the new width in both high words */
 xsp->ap[nwlen - 1] &= __masktab[hibits];
 xsp->bp[nwlen - 1] &= __masktab[hibits];
 xsp->xslen = nblen;
}
5451
5452 /*
5453 * after widening stack, z extend new high bits if needed
5454 * know nblen greater than oblen
5455 *
5456 * this is only for non stren case since stren never assigned only added
5457 * from something that drives strength (gates are exception and handled as
5458 * special case elsewhere)
5459 */
__fix_widened_tozs(struct xstk_t * xsp,int32 oblen)5460 extern void __fix_widened_tozs(struct xstk_t *xsp, int32 oblen)
5461 {
5462 register int32 wi;
5463 int32 nblen, owlen, nwlen, oubits, nubits;
5464
5465 /* case 1: same number of words */
5466 nblen = xsp->xslen;
5467 nubits = ubits_(nblen);
5468 oubits = ubits_(oblen);
5469 owlen = wlen_(oblen);
5470 nwlen = wlen_(nblen);
5471 if (owlen == nwlen)
5472 {
5473 xsp->bp[owlen - 1] |= (__masktab[nubits] & ~__masktab[oubits]);
5474 return;
5475 }
5476 /* case 2 widen to more words */
5477 /* set b part high bits of high old word32 to 1s */
5478 xsp->bp[owlen - 1] |= ~__masktab[oubits];
5479 /* set b part of all high words */
5480 for (wi = owlen; wi < nwlen; wi++) xsp->bp[wi] = ALL1W;
5481 /* except unused high bit of new high word32 */
5482 xsp->bp[nwlen - 1] &= __masktab[nubits];
5483 }
5484
5485 /*
5486 * after widening stack, x extend new high bits if needed
5487 * know nblen greater than oblen
5488 * this is only for non strength initialization case
5489 */
__fix_widened_toxs(register struct xstk_t * xsp,int32 oblen)5490 extern void __fix_widened_toxs(register struct xstk_t *xsp, int32 oblen)
5491 {
5492 register int32 wi;
5493 int32 nblen, owlen, nwlen, oubits, nubits;
5494
5495 /* case 1: same number of words */
5496 nblen = xsp->xslen;
5497 nubits = ubits_(nblen);
5498 oubits = ubits_(oblen);
5499 owlen = wlen_(oblen);
5500 nwlen = wlen_(nblen);
5501 if (owlen == nwlen)
5502 {
5503 xsp->ap[owlen - 1] |= (__masktab[nubits] & ~__masktab[oubits]);
5504 xsp->bp[owlen - 1] |= (__masktab[nubits] & ~__masktab[oubits]);
5505 return;
5506 }
5507 /* case 2 widen to more words */
5508 /* set b part high bits of high old word32 to 1s */
5509 /* SJM 02/18/03 WRONG - also need to mask in 1's for a part */
5510 /* code came from widen to z's so a part was missing */
5511 xsp->ap[owlen - 1] |= ~__masktab[oubits];
5512 xsp->bp[owlen - 1] |= ~__masktab[oubits];
5513 /* set all high words to x */
5514 for (wi = owlen; wi < nwlen; wi++) xsp->ap[wi] = ALL1W;
5515 for (wi = owlen; wi < nwlen; wi++) xsp->bp[wi] = ALL1W;
5516 /* except unused high bit of new high word32 */
5517 xsp->ap[nwlen - 1] &= __masktab[nubits];
5518 xsp->bp[nwlen - 1] &= __masktab[nubits];
5519 }
5520
5521 /*
5522 * widen a stacked strength byte value (only for rhs exprs)
5523 * know bit widths differ or will not be called
5524 * this may need to widen stack value width (alloc-free)
5525 * also if widens z's 00z all bits
5526 *
5527 * 07/08/00 SJM - also used to widen fi>1 strength driver competition results
5528 *
5529 * for strength all extension must be z
5530 * for narrow just adjust xslen
5531 */
__strenwiden_sizchg(struct xstk_t * xsp,int32 nblen)5532 extern void __strenwiden_sizchg(struct xstk_t *xsp, int32 nblen)
5533 {
5534 register int32 bi;
5535 byte *sbp, *sbp2;
5536 int32 oblen, numavailbytes, wlen;
5537
5538 /* DBG remove -- */
5539 if ((xsp->xslen % 4) != 0) __arg_terr(__FILE__, __LINE__);
5540 /* --- */
5541 oblen = xsp->xslen/4;
5542 sbp = (byte *) xsp->ap;
5543 /* case 1: widening */
5544 if (oblen < nblen)
5545 {
5546 numavailbytes = 2*WRDBYTES*xsp->xsawlen;
5547 if (numavailbytes < nblen)
5548 {
5549 /* just widen to enough room plus 1 word32 */
5550 wlen = (nblen + WRDBYTES - 1)/WRDBYTES + 1;
5551 /* SJM 05/23/03 - freed and realloced with size 2 times - was not */
5552 /* allocing both a and b parts but was freeing */
5553 xsp->ap = (word32 *) __my_malloc(2*wlen*WRDBYTES);
5554 xsp->xsawlen = wlen;
5555 sbp2 = (byte *) xsp->ap;
5556 /* LOOKATME - think overlap possible here */
5557 memmove(sbp2, sbp, oblen);
5558 __my_free((char *) sbp, numavailbytes);
5559 sbp = sbp2;
5560 }
5561 for (bi = oblen; bi < nblen; bi++) sbp[bi] = ST_HIZ;
5562 xsp->xslen = 4*nblen;
5563 }
5564 /* case 2: narrow */
5565 else xsp->xslen = 4*nblen;
5566 }
5567
5568 /*
5569 * BUILT-IN GATE AND UDP EVALUATION ROUTINES
5570 */
5571
5572 /*
5573 * evaluate logic gate - know input changed and changes recorded in gstate
5574 * set new value in new gateval and if needs strength in new gate stren
5575 * and if return T sets old gateval
5576 * returns F if no change (new value and old the same)
5577 * gate values here never have strength - maybe added when stored in wire
5578 *
5579 * i is position of gate input expr. starting at 1, bi bit starting at 0
5580 */
extern int32 __eval_logic_gate(struct gate_t *gp, word32 i, int32 *out_chg)
{
 register word32 *rap, uwrd, ouwrd, ngav, ngbv;
 register word32 gav, gbv, mask;
 int32 gwid, wlen, bi, gatid;
 struct xstk_t *xsp;

 /* gpnum counts all gate terminals (output plus inputs) */
 gwid = gp->gpnum;
 /* evaluate the changed input expression - only its low bit is used */
 xsp = __eval_xpr(gp->gpins[i]);
 bi = i - 1;
 ngav = xsp->ap[0] & 1L;
 ngbv = xsp->bp[0] & 1L;
 /* record new 2 bit (b,a) input value in global */
 __new_inputval = ngav | (ngbv << 1);
 __pop_xstk();

 /* always set strength even if not needed - always constant here */
 /* rare wide case */
 if (gwid > 16)
  {
   wlen = wlen_(gwid);
   /* rap is base of vector for current inst */
   rap = &(gp->gstate.wp[2*wlen*__inum]);
   /* store changed bit into a part then b part of wide packed state */
   chg_lhsbsel(rap, bi, ngav);
   chg_lhsbsel(&(rap[wlen]), bi, ngbv);
   if (!__lhs_changed) return(FALSE);
   /* this set global new and old gate values */
   *out_chg = __eval_1wide_gate(gp, gwid);
   return(TRUE);
  }

 /* eval changed input expr. and store in gstate if needed */
 /* know packed both a and b sections in same word32 */
 /* SJM 12/16/99 still packing gate state as usual */
 ouwrd = get_packintowrd_(gp->gstate, __inum, gwid);
 /* clear old a bit (bi) and b bit (gwid + bi), then or in new values */
 uwrd = ouwrd & ~(1L << bi) & ~(1L << (gwid + bi));
 uwrd |= ((ngav << bi) | (ngbv << (gwid + bi)));
 if (uwrd == ouwrd) return(FALSE);
 st_packintowrd_(gp->gstate, __inum, uwrd, gwid);

 /* now need gate id */
 gatid = gp->gmsym->el.eprimp->gateid;
 /* buf (assign buf) or not short circuit */
 /* now reusing ngav/ngbv as new output value no longer new input value */
 /* FIXME not allowing multiple output nots and bufs yet */
 if (gwid == 2)
  {
   /* 1 input case - old gate value b part in bit 3 and a part in bit 1 */
   __old_gateval = ((uwrd >> 1) & 1L) | ((uwrd >> 2) & 2L);
   ngbv = (uwrd >> 2) & 1L;
   /* buf and not always convert z to x */
   if (gatid == G_NOT) ngav = !(uwrd & 1L) | ngbv;
   else if (gatid == G_BUF) ngav = (uwrd & 1L) | ngbv;
   /* but cont. ASSIGN passes z */
   else if (gatid == G_ASSIGN) ngav = (uwrd & 1L);
   else
    {
     /* 1 input reduction forms - and/or/xor act as buf, inverting as not */
     switch ((byte) gatid) {
      case G_BITREDAND: case G_BITREDOR: case G_BITREDXOR:
       ngav = (uwrd & 1L) | ngbv;
       break;
      case G_NAND: case G_NOR: case G_REDXNOR:
       ngav = !(uwrd & 1L) | ngbv;
       break;
     }
    }
   goto done;
  }

 /* need to handle 2 input gates as partial special case */
 if (gwid == 3)
  {
   mask = __masktab[2];
   gav = uwrd & 3L;
   gbv = (uwrd >> 3) & 3L;
   /* output a bit is state bit 2, output b bit is state bit 5 */
   __old_gateval = ((uwrd >> 2) & 1L) | ((uwrd >> 4) & 2L);
  }
 else
  {
   /* gav and gbv are inputs only */
   mask = __masktab[gwid - 1];
   /* this masks off a/b output bit - but gav/gbv all inputs */
   gav = uwrd & mask;
   gbv = (uwrd >> gwid) & mask;
   /* works since know n ins at least 1 - b shifts 1 less, goes b bit */
   __old_gateval = ((uwrd >> (gwid - 1)) & 1L) | ((uwrd >> (2*gwid - 2)) & 2L);
  }
 /* start from x (a and b both 1) - cases below lower to 0/1 if determined */
 ngav = ngbv = 1L;
 switch ((byte) gatid) {
  case G_BITREDAND:
   /* if even 1 0 value in any used bit, result is 0 */
   if (gbv == 0L) { ngav = (gav != mask) ? 0L : 1L; ngbv = 0L; }
   else if ((gav | gbv) != mask) ngav = ngbv = 0L;
   break;
  case G_NAND:
   /* if even 1 0 value in any used bit, result is 1 */
   if (gbv == 0L) { ngav = (gav != mask) ? 1L : 0L; ngbv = 0L; }
   else if ((gav | gbv) != mask) ngbv = 0L;
   break;
  case G_BITREDOR:
   /* if even 1 1 value in any used bit, result is 1 */
   if (gbv == 0L) { ngav = (gav != 0L) ? 1L : 0L; ngbv = 0L; }
   else if ((gav & ~gbv) != 0L) ngbv = 0L;
   break;
  case G_NOR:
   /* if even 1 1 value in any used bit, result is 0 */
   if (gbv == 0L) { ngav = (gav != 0L) ? 0L : 1L; ngbv = 0L; }
   else if ((gav & ~gbv) != 0L) ngav = ngbv = 0L;
   break;
  case G_BITREDXOR:
   /* any x/z input (b bit set) leaves result x - else parity of inputs */
   if (gbv == 0L) { ngbv = 0L; ngav = __wrd_redxor(gav); }
   break;
  case G_REDXNOR:
   if (gbv == 0L) { ngbv = 0L; ngav = !__wrd_redxor(gav); }
   break;
  default: __case_terr(__FILE__, __LINE__);
 }
 /* these gates can drive constant strengths on wire - handled at assign */
 /* need gate as 2 bit value for delay selection */
done:
 __new_gateval = ngav | (ngbv << 1);
 /* set to T (non 0) if not equal if changed (different) */
 *out_chg = (__old_gateval != __new_gateval);
 return(TRUE);
}
5705
5706 /*
5707 * evaluate a gate that has at least 15 inputs
5708 * could do 15 and 16 input gates slightly better since gate inputs fit
5709 * in 1 word32 but stored as 2 words per gate here
5710 */
extern int32 __eval_1wide_gate(struct gate_t *gp, int32 gwid)
{
 struct xstk_t *xsp;
 int32 bi, wi;

 push_xstk_(xsp, gwid);
 /* notice this includes output */
 __ld_gate_wide_val(xsp->ap, xsp->bp, gp->gstate.wp, gwid);

 /* notice width is total bits while bit is index */
 /* must access output value but in load must mask off high output bit */
 wi = get_wofs_(gwid - 1);
 bi = get_bofs_(gwid - 1);
 /* 2 shifts for b part because bit can be low bit in both halves */
 /* extract output value before masking off output */
 __old_gateval = ((xsp->ap[wi] >> bi) & 1L)
  | (((xsp->bp[wi] >> bi) & 1L) << 1);

 /* input state minus output for gate eval */
 xsp->ap[wi] &= __masktab[bi];
 xsp->bp[wi] &= __masktab[bi];
 /* this sets new gateval */
 eval_wide_gate(gp, xsp);
 __pop_xstk();
 /* return T (changed) only when new output value differs from old */
 if (__new_gateval == __old_gateval) return(FALSE);
 return(TRUE);
}
5738
5739 /*
5740 * load a wide form gate value from wp into ap and bp
5741 */
__ld_gate_wide_val(word32 * ap,word32 * bp,word32 * gsp,int32 gwid)5742 extern void __ld_gate_wide_val(word32 *ap, word32 *bp, word32 *gsp, int32 gwid)
5743 {
5744 int32 wlen;
5745 word32 *rap;
5746
5747 wlen = wlen_(gwid);
5748 rap = &(gsp[2*wlen*__inum]);
5749 memcpy(ap, rap, WRDBYTES*wlen);
5750 memcpy(bp, &(rap[wlen]), WRDBYTES*wlen);
5751 }
5752
5753 /*
5754 * evaluate a wide gate (> 15 inputs)
5755 * know xsp a and b parts have high output bit masked off
5756 * and operations here in place so state replaced by 1 bit value
5757 */
eval_wide_gate(struct gate_t * gp,struct xstk_t * xsp)5758 static void eval_wide_gate(struct gate_t *gp, struct xstk_t *xsp)
5759 {
5760 int32 rta, rtb;
5761 int32 nins;
5762
5763 nins = gp->gpnum - 1;
5764 switch (gp->gmsym->el.eprimp->gateid) {
5765 case G_BITREDAND: /* and */
5766 __lunredand(&rta, &rtb, xsp->ap, xsp->bp, nins);
5767 __narrow_to1bit(xsp);
5768 xsp->ap[0] = (word32) rta;
5769 xsp->bp[0] = (word32) rtb;
5770 break;
5771 case G_NAND: /* nand */
5772 __lunredand(&rta, &rtb, xsp->ap, xsp->bp, nins);
5773 __narrow_to1bit(xsp);
5774 xsp->ap[0] = (word32) rta;
5775 xsp->bp[0] = (word32) rtb;
5776 invert:
5777 __new_gateval = ((~xsp->ap[0] | xsp->bp[0]) & 1L) | (xsp->bp[0] << 1);
5778 return;
5779 case G_BITREDOR: /* or */
5780 __lunredor(&rta, &rtb, xsp->ap, xsp->bp, nins);
5781 __narrow_to1bit(xsp);
5782 xsp->ap[0] = (word32) rta;
5783 xsp->bp[0] = (word32) rtb;
5784 break;
5785 case G_NOR: /* nor */
5786 __lunredor(&rta, &rtb, xsp->ap, xsp->bp, nins);
5787 __narrow_to1bit(xsp);
5788 xsp->ap[0] = (word32) rta;
5789 xsp->bp[0] = (word32) rtb;
5790 goto invert;
5791 case G_BITREDXOR: /* xor */
5792 __lunredxor(&rta, &rtb, xsp->ap, xsp->bp, nins);
5793 __narrow_to1bit(xsp);
5794 xsp->ap[0] = (word32) rta;
5795 xsp->bp[0] = (word32) rtb;
5796 break;
5797 case G_REDXNOR: /* xnor */
5798 __lunredxor(&rta, &rtb, xsp->ap, xsp->bp, nins);
5799 __narrow_to1bit(xsp);
5800 xsp->ap[0] = (word32) rta;
5801 xsp->bp[0] = (word32) rtb;
5802 goto invert;
5803 default: __case_terr(__FILE__, __LINE__);
5804 }
5805 /* know stack value replaced to 1 bit result by here */
5806 __new_gateval = xsp->ap[0] | (xsp->bp[0] << 1);
5807 }
5808
5809 /* --
5810 -- value is 000111vv table is 6 bits lllhhh - 1 if has 0 strength --
 -- i.e. st0 is val shifted 5 left and st1 is val shifted 2 --
5812 0 - 15
5813 000 000 00 - 0
5814 000 000 01 - 0
5815 000 000 10 - 0x02
5816 000 000 11 - 0
5817 000 001 00 - 0x02
5818 000 001 01 - 001001 01 = 0x25
5819 000 001 10 - 0
5820 000 001 11 - 0x07
5821
5822 000 010 00 0x02
5823 000 010 01 01001001 = 0x49
5824 000 010 10 0
5825 000 010 11 0x0b
5826 000 011 00 0x02
5827 000 011 01 01101101 = 0x6d
5828 000 011 10 0
5829 000 011 11 0x0f
5830
5831 16-31
5832 000 100 00 - 0x02
5833 000 100 01 - 10010001 = 0x91
5834 000 100 10 - 0
5835 000 100 11 - 0x13
5836 000 101 00 - 0x02
5837 000 101 01 - 101101 01 = 0xb5
5838 000 101 10 - 0
5839 000 101 11 - 0x17
5840
5841 000 110 00 0x02
5842 000 110 01 11011001 = 0xd9
5843 000 110 10 0
5844 000 110 11 0x1b
5845 000 111 00 0x02
5846 000 111 01 11111101 = 0xfd
5847 000 111 10 0
5848 000 111 11 0x1f
5849
5850 32-47
5851 001 000 00 - 00100100 = 0x24
5852 001 000 01 - 0x02
5853 001 000 10 - 0
5854 001 000 11 - 0x23
5855 001 001 00 - 0x24
5856 001 001 01 - 0x25
5857 001 001 10 - 0
5858 001 001 11 - 0x27
5859
5860 *001 010 00 00100100 = 0x24 not 0x28
5861 * 001 010 01 01001001 = 0x49 not 0x29
5862 001 010 10 0
5863 001 010 11 0x2b
5864 *001 011 00 00100100 = 0x24 not 0x2c
5865 *001 011 01 01101101 = 0x6d not 0x2d
5866 001 011 10 0
5867 001 011 11 0x2f
5868
5869 48-63
5870 *001 100 00 - 00100100 = 0x24 not 0x30
5871 *001 100 01 - 10010001 = 0x91 not 0x31
5872 001 100 10 - 0
5873 001 100 11 - 0x33
5874 *001 101 00 - 00100100 = 0x24 not 0x34
5875 *001 101 01 - 10110101 = 0xb5 not 0x35
5876 001 101 10 - 0
5877 001 101 11 - 0x37
5878
5879 *001 110 00 - 00100100 = 0x24 not 0x38
5880 *001 110 01 - 11011001 = 0xd9 not 0x39
5881 001 110 10 0
5882 001 110 11 0x3b
5883 *001 111 00 - 00100100 = 0x24 not 0x3c
5884 *001 111 01 - 11111101 = 0xfd not 0x3d
5885 001 111 10 0
5886 001 111 11 0x3f
5887
5888 64-79
5889 010 000 00 - 01001000 = 0x48
5890 010 000 01 - 0x02
5891 010 000 10 - 0
5892 010 000 11 - 0x43
5893 *010 001 00 - 01001000 = 0x48 not 0x44
5894 *010 001 01 - 00100101 = 0x25 not 0x45
5895 010 001 10 - 0
5896 010 001 11 - 0x47
5897
5898 010 010 00 0x48
5899 010 010 01 0x49
5900 010 010 10 0
5901 010 010 11 0x4b
5902 *010 011 00 - 01001000 = 0x48 not 0x4c
5903 *010 011 01 - 01101101 = 0x6d not 0x4d
5904 010 011 10 0
5905 010 011 11 0x4f
5906
5907 80-95
5908 *010 100 00 - 01001000 = 0x48 not 0x50
5909 *010 100 01 - 10010001 = 0x91 not 0x51
5910 010 100 10 - 0
5911 010 100 11 - 0x53
5912 *010 101 00 - 01001000 = 0x48 not 0x54
5913 *010 101 01 - 10110101 = 0xb5 not 0x55
5914 010 101 10 - 0
5915 010 101 11 - 0x57
5916
5917 *010 110 00 - 01001000 = 0x48 not 0x58
5918 *010 110 01 - 11011001 = 0xd9 not 0x59
5919 010 110 10 0
5920 010 110 11 0x5b
*010 111 00 - 01001000 = 0x48 not 0x5c
5922 *010 111 01 - 11111101 = 0xfd not 0x5d
5923 010 111 10 0
5924 010 111 11 0x5f
5925
5926 96-111
5927 011 000 00 - 01101100 = 0x6c
5928 011 000 01 - 0x02
5929 011 000 10 - 0
5930 011 000 11 - 0x63
5931 *011 001 00 - 01101100 = 0x6c not 0x64
5932 *011 001 01 - 00100101 = 0x25 not 0x65
5933 011 001 10 - 0
5934 011 001 11 - 0x67
5935
5936 *011 010 00 - 01101100 = 0x6c not 0x68
5937 *011 010 01 - 01001001 = 0x49 not 0x69
5938 011 010 10 0
5939 011 010 11 0x6b
5940 011 011 00 0x6c
5941 011 011 01 0x6d
5942 011 011 10 0
5943 011 011 11 0x6f
5944
5945 112-127
5946 *011 100 00 - 01101100 = 0x6c not 0x70
5947 *011 100 01 - 10010001 = 0x91 not 0x71
5948 011 100 10 - 0
5949 011 100 11 - 0x73
5950 *011 101 00 - 01101100 = 0x6c not 0x74
5951 *011 101 01 - 10110101 = 0xb5 not 0x75
5952 011 101 10 - 0
5953 011 101 11 - 0x77
5954
5955 *011 110 00 - 01101100 = 0x6c not 0x78
5956 *011 110 01 - 11011001 = 0xd9 not 0x79
5957 011 110 10 0
5958 011 110 11 0x7b
5959 *011 111 00 - 01101100 = 0x6c not 0x7c
5960 *011 111 01 - 11111101 = 0xfd not 0x7d
5961 011 111 10 0
5962 011 111 11 0x7f
5963
5964 128-143
5965 100 000 00 - 10010000 = 0x90
5966 100 000 01 - 0x02
5967 100 000 10 - 0
5968 100 000 11 - 0x83
5969 *100 001 00 - 10010000 = 0x90 not 0x84
5970 *100 001 01 - 00100101 = 0x25 not 0x85
5971 100 001 10 - 0
5972 100 001 11 - 0x87
5973
5974 *100 010 00 - 10010000 = 0x90 not 0x88
5975 *100 010 01 - 01001001 = 0x49 not 0x89
5976 100 010 10 0
5977 100 010 11 0x8b
5978 *100 011 00 - 10010000 = 0x90 not 0x8c
5979 *100 011 01 - 01101101 = 0x6d not 0x8d
5980 100 011 10 0
5981 100 011 11 0x8f
5982
5983 144-159
5984 100 100 00 - 0x90
5985 100 100 01 - 0x91
5986 100 100 10 - 0
5987 100 100 11 - 0x93
5988 *100 101 00 - 10010000 = 0x90 not 0x94
5989 *100 101 01 - 10110101 = 0xb5 not 0x95
5990 100 101 10 - 0
5991 100 101 11 - 0x97
5992
5993 *100 110 00 - 10010000 = 0x90 not 0x98
5994 *100 110 01 - 11011001 = 0xd9 not 0x99
5995 100 110 10 0
5996 100 110 11 0x9b
5997 *100 111 00 - 10010000 = 0x90 not 0x9c
5998 *100 111 01 - 11111101 = 0xfd not 0x9d
5999 100 111 10 0
6000 100 111 11 0x9f
6001
6002 160-175
6003 101 000 00 - 10110100 = 0xb4
6004 101 000 01 - 0x02
6005 101 000 10 - 0
6006 101 000 11 - 0xa3
6007 *101 001 00 - 10110100 = 0xb4 not 0xa4
6008 *101 001 01 - 00100101 = 0x25 not 0xa5
6009 101 001 10 - 0
6010 101 001 11 - 0xa7
6011
6012 *101 010 00 - 10110100 = 0xb4 not 0xa8
6013 *101 010 01 - 01001001 = 0x49 not 0xa9
6014 101 010 10 0
6015 101 010 11 0xab
6016 *101 011 00 - 10110100 = 0xb4 not 0xac
6017 *101 011 01 - 01101101 = 0x6d not 0xad
6018 101 011 10 0
6019 101 011 11 0xaf
6020
6021 176-191
6022 *101 100 00 - 10110100 = 0xb4 not 0xb0
6023 *101 100 01 - 10010001 = 0x91 not 0xb1
6024 101 100 10 - 0
6025 101 100 11 - 0xb3
6026 101 101 00 - 0xb4
6027 101 101 01 - 0xb5
6028 101 101 10 - 0
6029 101 101 11 - 0xb7
6030
6031 *101 110 00 - 10110100 = 0xb4 not 0xb8
*101 110 01 - 11011001 = 0xd9 not 0xb9
6033 101 110 10 0
6034 101 110 11 0xbb
6035 *101 111 00 - 10110100 = 0xb4 not 0xbc
6036 *101 111 01 - 11111101 = 0xfd not 0xbd
6037 101 111 10 0
6038 101 111 11 0xbf
6039
6040 192-207
6041 110 000 00 - 11011000 = 0xd8
6042 110 000 01 - 0x02
6043 110 000 10 - 0
6044 110 000 11 - 0xc3
6045 *110 001 00 - 11011000 = 0xd8 not 0xc4
6046 *110 001 01 - 00100101 = 0x25 not 0xc5
6047 110 001 10 - 0
6048 110 001 11 - 0xc7
6049
6050 *110 010 00 - 11011000 = 0xd8 not 0xc8
6051 *110 010 01 - 01001001 = 0x49 not 0xc9
6052 110 010 10 0
6053 110 010 11 0xcb
6054 *110 011 00 - 11011000 = 0xd8 not 0xcc
6055 *110 011 01 - 01101101 = 0x6d not 0xcd
6056 110 011 10 0
6057 110 011 11 0xcf
6058
6059 208-223
6060 *110 100 00 - 11011000 = 0xd8 not 0xd0
6061 *110 100 01 - 10010001 = 0x91 not 0xd1
6062 110 100 10 - 0
6063 110 100 11 - 0xd3
6064 *110 101 00 - 11011000 = 0xd8 not 0xd4
6065 *110 101 01 - 10110101 = 0xb5 not 0xd5
6066 110 101 10 - 0
6067 110 101 11 - 0xd7
6068
6069 110 110 00 0xd8
6070 110 110 01 0xd9
6071 110 110 10 0
6072 110 110 11 0xdb
6073 *110 111 00 - 11011000 = 0xd8 not 0xdc
6074 *110 111 01 - 11111101 = 0xfd not 0xdd
6075 110 111 10 0
6076 110 111 11 0xdf
6077
6078 224-239
6079 111 000 00 - 11111100 = 0xfc
6080 111 000 01 - 0x02
6081 111 000 10 - 0
6082 111 000 11 - 0xe3
6083 *111 001 00 - 11111100 = 0xfc not 0xe4
6084 *111 001 01 - 00100101 = 0x25 not 0xe5
6085 111 001 10 - 0
6086 111 001 11 - 0xe7
6087
6088 *111 010 00 - 11111100 = 0xfc not 0xe8
6089 *111 010 01 - 01001001 = 0x49 not 0xe9
6090 111 010 10 0
6091 111 010 11 0xeb
6092 *111 011 00 - 11111100 = 0xfc not 0xec
6093 *111 011 01 - 01101101 = 0x6d not 0xed
6094 111 011 10 0
6095 111 011 11 0xef
6096
6097 240-255
6098 *111 100 00 - 11111100 = 0xfc not 0xf0
*111 100 01 - 10010001 = 0x91 not 0xf1
6100 111 100 10 - 0
6101 111 100 11 - 0xf3
6102 *111 101 00 - 11111100 = 0xfc not 0xf4
6103 *111 101 01 - 10110101 = 0xb5 not 0xf5
6104 111 101 10 - 0
6105 111 101 11 - 0xf7
6106
6107 *111 110 00 - 11111100 = 0xfc not 0xf8
6108 *111 110 01 - 11011001 = 0xd9 not 0xf9
6109 111 110 10 0
6110 111 110 11 0xfb
6111 111 111 00 0xfc
6112 111 111 01 0xfd
6113 111 111 10 0
6114 111 111 11 0xff
6115
6116 --- */
6117
/* table to determine if special strength delay calculation needed */
/* index is 6 stren bits - value removed */
/* index is lllhhh - entry 1 exactly when either 3 bit component is 0 */
/* i.e. first row (st0 == 0) and first column (st1 == 0) are all 1 */
byte __hizstren_del_tab[] = {
 1, 1, 1, 1, 1, 1, 1, 1,
 1, 0, 0, 0, 0, 0, 0, 0,
 1, 0, 0, 0, 0, 0, 0, 0,
 1, 0, 0, 0, 0, 0, 0, 0,
 1, 0, 0, 0, 0, 0, 0, 0,
 1, 0, 0, 0, 0, 0, 0, 0,
 1, 0, 0, 0, 0, 0, 0, 0,
 1, 0, 0, 0, 0, 0, 0, 0
};
6130
6131 /* SJM 08/07/01 - table always needed to map from gate/conta logic val */
6132 /* to driving stren (or connected net stren if only one driver) */
6133 /* old version just mapped the hiz strens to hiz instead of 0 or 1 */
6134 /* now also maps 0 and 1 and uses identity map for x */
6135
6136 /* table to map gate or conta driving stren to driving stren val */
6137 /* if stren component hiz, value replaced by hiz else if 0 or 1 selects */
6138 /* that component from the 0 and 1 components, if x uses both components */
6139 /* if 0, both stren components have stren0, i 1, both stren1, if x both */
6140 /* as coded in source if either component hiz logic value replaced by z */
6141
6142 /* for open collector nor gate, the 1 stren is highz1 so gate output val */
6143 /* of 1 causes gate to output hiz */
6144 byte __stren_map_tab[] = {
6145
6146 0, 0, 0x02, 0, 0x02, 0x25, 0, 0x07,
6147 0x02, 0x49, 0, 0x0b, 0x02, 0x6d, 0, 0x0f,
6148
6149 0x02, 0x91, 0, 0x13, 0x02, 0xb5, 0, 0x17,
6150 0x02, 0xd9, 0, 0x1b, 0x02, 0xfd, 0, 0x1f,
6151
6152 0x24, 0x02, 0, 0x23, 0x24, 0x25, 0, 0x27,
6153 0x24, 0x49, 0, 0x2b, 0x24, 0x6d, 0, 0x2f,
6154
6155 0x24, 0x91, 0, 0x33, 0x24, 0xb5, 0, 0x37,
6156 0x24, 0xd9, 0, 0x3b, 0x24, 0xfd, 0, 0x3f,
6157
6158 0x48, 0x02, 0, 0x43, 0x48, 0x25, 0, 0x47,
6159 0x48, 0x49, 0, 0x4b, 0x48, 0x6d, 0, 0x4f,
6160
6161 0x48, 0x91, 0, 0x53, 0x48, 0xb5, 0, 0x57,
6162 0x48, 0xd9, 0, 0x5b, 0x49, 0xfd, 0, 0x5f,
6163
6164 0x6c, 0x02, 0, 0x63, 0x6c, 0x25, 0, 0x67,
6165 0x6c, 0x49, 0, 0x6b, 0x6c, 0x6d, 0, 0x6f,
6166
6167 0x6c, 0x91, 0, 0x73, 0x6c, 0xb5, 0, 0x77,
6168 0x6c, 0xd9, 0, 0x7b, 0x6c, 0xfd, 0, 0x7f,
6169
6170 0x90, 0x02, 0, 0x83, 0x90, 0x25, 0, 0x87,
6171 0x90, 0x49, 0, 0x8b, 0x90, 0x6d, 0, 0x8f,
6172
6173 0x90, 0x91, 0, 0x93, 0x90, 0xb5, 0, 0x97,
6174 0x90, 0xd9, 0, 0x9b, 0x90, 0xfd, 0, 0x9f,
6175
6176 0xb4, 0x02, 0, 0xa3, 0xb4, 0x25, 0, 0xa7,
6177 0xb4, 0x49, 0, 0xab, 0xb4, 0x6d, 0, 0xaf,
6178
6179 0xb4, 0x91, 0, 0xb3, 0xb4, 0xb5, 0, 0xb7,
6180 0xb4, 0xb9, 0, 0xbb, 0xb4, 0xfd, 0, 0xbf,
6181
6182 0xd8, 0x02, 0, 0xc3, 0xd8, 0x25, 0, 0xc7,
6183 0xd8, 0x49, 0, 0xcb, 0xd8, 0x6d, 0, 0xcf,
6184
6185 0xd8, 0x91, 0, 0xd3, 0xd8, 0xb5, 0, 0xd7,
6186 0xd8, 0xd9, 0, 0xdb, 0xd8, 0xfd, 0, 0xdf,
6187
6188 0xfc, 0x02, 0, 0xe3, 0xfc, 0x25, 0, 0xe7,
6189 0xfc, 0x49, 0, 0xeb, 0xfc, 0x6d, 0, 0xef,
6190
6191 0xfc, 0x91, 0, 0xf3, 0xfc, 0xb5, 0, 0xf7,
6192 0xfc, 0xd9, 0, 0xfb, 0xfc, 0xfd, 0, 0xff
6193 };
6194
6195 /* bufif table */
6196 /* ---
6197 format is (cb)(ca)(db)(da)
6198 0 0 0 0 (d0,c0) 0x0
6199 0 0 0 1 (d1,c0) 0x1
6200 0 0 1 0 (dz,c0) 0x2
6201 0 0 1 1 (dx,c0) 0x3
6202 0 1 0 0 (d0,c1) 0x4
6203 0 1 0 1 (d1,c1) 0x5
6204 0 1 1 0 (dz,c1) 0x6
6205 0 1 1 1 (dx,c1) 0x7
6206 1 0 0 0 (d0,cz) 0x8
6207 1 0 0 1 (d1,cz) 0x9
6208 1 0 1 0 (dz,cz) 0xa
6209 1 0 1 1 (dx,cz) 0xb
6210 1 1 0 0 (d0,cx) 0xc
6211 1 1 0 1 (d1,cx) 0xd
6212 1 1 1 0 (dz,cx) 0xe
6213 1 1 1 1 (dx,cx) 0xf
6214 --- */
6215
/* assume value is strength and in and tab and or in or tab */
/* and tab no value is ff, or tab no value is 0 */
/* index is gate kind base (from __bufif_base_tab) plus 4 bit (cb ca db da) */
/* input state - result strength = (assumed stren & and tab) | or tab */
byte __bufif_and_tab[] = {
 /* 0-15 bufif0 */
 0xff, 0xff, 0xff, 0xff, 0, 0, 0, 0,
 0xe0, 0x1c, 0xff, 0xff, 0xe0, 0x1c, 0xff, 0xff,
 /* 16-31 bufif1 */
 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff,
 0xe0, 0x1c, 0xff, 0xff, 0xe0, 0x1c, 0xff, 0xff,
 /* 32-47 notif0 */
 0xff, 0xff, 0xff, 0xff, 0, 0, 0, 0,
 0x1c, 0xe0, 0xff, 0xff, 0x1c, 0xe0, 0xff, 0xff,
 /* 48-63 notif1 */
 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff,
 0x1c, 0xe0, 0xff, 0xff, 0x1c, 0xe0, 0xff, 0xff,
};
6232
/* or tab companion to and tab above - ors in the 2 bit logic value */
/* same indexing: gate kind base plus 4 bit (cb ca db da) input state */
byte __bufif_or_tab[] = {
 /* 0-15 bufif0 */
 0, 1, 3, 3, 2, 2, 2, 2,
 3, 3, 3, 3, 3, 3, 3, 3,
 /* 16-31 bufif1 */
 2, 2, 2, 2, 0, 1, 3, 3,
 3, 3, 3, 3, 3, 3, 3, 3,
 /* 32-47 notif0 */
 1, 0, 3, 3, 2, 2, 2, 2,
 3, 3, 3, 3, 3, 3, 3, 3,
 /* 48-63 notif1 */
 2, 2, 2, 2, 1, 0, 3, 3,
 3, 3, 3, 3, 3, 3, 3, 3
};
6247
/* interpreter bufif gate type base table */
/* BEWARE - assumes LAST_GSYM is 36 - if changes intialize at start */
/* and G_BUFIF0 is 13, G_BUFIF1 is 14, G_NOTIF0 is 20, G_NOTIF1 is 21 */
/* maps gateid to base offset in bufif and/or tabs - -1 for non bufif */
int32 __bufif_base_tab[] = {
 -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
 -1, -1, -1, 0, 16, -1, -1, -1, -1, -1,
 32, 48, -1, -1, -1, -1, -1, -1, -1, -1,
 -1, -1, -1, -1, -1, -1, -1 };
6256
6257 /*
6258 * evaluate bufif gate style gate where state stores output strength
6259 * this routine does not evaluate if input same
6260 *
6261 * input strength not passed thru but driven strength non constant
6262 * storage is data port 2 bits, control 2 bit and output 8 bits and
6263 * stored as half word32 that is access as such
6264 *
6265 * wires drives by bufifs must be marked as strength
6266 * i is gate expr. index (0th is out)
6267 * and since gate state stores strength need to correct for hiz strength
6268 *
6269 * if inputs differ and evals, it must set __new_gateval to value
6270 * with added in or driven strength
6271 *
6272 */
extern int32 __eval_bufif_gate(register struct gate_t *gp, word32 i,
 int32 *out_chg)
{
 register word32 gwrd;
 register struct xstk_t *xsp;
 int32 base;

 if (__debug_flg && __ev_tracing)
  __tr_msg("--changing bufif - old: %s\n",
   __gstate_tostr(__xs, gp, FALSE));

 /* this loads value part if driver has strength */
 xsp = __eval_xpr(gp->gpins[i]);
 /* gate inputs can be wider than 1 bit */
 xsp->ap[0] &= 1L;
 xsp->bp[0] &= 1L;

 /* state layout (per header comment): data 2 bits, control 2 bits, */
 /* then 8 bit strength form output */
 gwrd = (word32) gp->gstate.hwp[__inum];
 __old_inputval = (i == 1) ? (gwrd & 3L) : ((gwrd >> 2) & 3L);
 __new_inputval = xsp->ap[0] | (xsp->bp[0] << 1);
 __pop_xstk();
 if (__new_inputval == __old_inputval) return(FALSE);

 /* update the changed input state */
 if (i == 1) { gwrd &= ~3L; gwrd |= __new_inputval; }
 else { gwrd &= ~(3L << 2); gwrd |= (__new_inputval << 2); }
 gp->gstate.hwp[__inum] = (hword) gwrd;
 /* buf always has strength (maybe strong if no explicit) */
 __old_gateval = (gwrd >> 4);

 /* use input 4 bits as case access - output not just simple 8 bit value */
 gwrd &= 0xf;

 /* assume 0 value with driven strength (strong if none) */
 __new_gateval = (gp->g_stval << 2);

 base = __bufif_base_tab[gp->gmsym->el.eprimp->gateid];
 /* DEBUG remove ---
 if (base == -1) __misc_terr(__FILE__, __LINE__);
 --- */
 /* and/or tables combine control/data state with the assumed strength */
 __new_gateval &= __bufif_and_tab[gwrd + base];
 __new_gateval |= __bufif_or_tab[gwrd + base];

 /* strength is lllhhhvv table is 6 bits lllhhh - 1 if has 0 strength*/
 /* must correct for special case where 1 strength is hiz (no drive) */
 __new_gateval = __stren_map_tab[__new_gateval];

 /* if no change, nothing to do */
 /* set to T (non 0) if not equal if changed (different) */
 *out_chg = (__new_gateval != __old_gateval);
 return(TRUE);
}
6325
/* index is 3 bit strength component value */
/* normal mos mapping only supply changed to strong */
word32 __mos_stmap[] = { 0, 1, 2, 3, 4, 5, 6, 6 };
/* resistive devices reduce strengths */
word32 __rmos_stmap[] = { 0, 1, 1, 2, 2, 3, 5, 5 };
6330
6331 /* ----
6332 0 => d=0,c=0 4 => d=0,c=1 8 => d=0,c=z 0c=> d=0,c=x -> 0,4,8,0xc
6333 1 => d=1,c=0 5 => d=1,c=1 9 => d=1,c=z 0d=> d=1,c=x -> 1,5,9,0xd
6334 3 => d=x,c=0 7 => d=x,c=1 0b=> d=x,c=z 0f=> d=x,c=x -> 3,7,0xb,0xf
6335 2 => d=z,c=0 6 => d=z,c=1 0a=> d=z,c=z 0e=> d=z,c=x -> 2,6,0xa,0xe
6336 --- */
6337
6338 /*
6339 * evaluate nmos gate
6340 * special format for all mos gates is 3 8 bit values (0-7) data
6341 * (1st input stren val, * 8-9 control value (2nd input) (10-15 unused stren
6342 * ignored), * 16-23 output with strength
6343 *
6344 * this is passed state word32 for instance and set globals __old gateval
6345 * and __new gateval
6346 *
6347 * for r style reduce strength according to table for non resistive only
6348 * changes supply to strong (also uses table)
6349 */
extern void __eval_nmos_gate(word32 gwrd)
{
 register word32 ivec, st0, st1;
 word32 stboth;

 /* state here is 2 8 bit inputs and 1 8 bit strength format output */
 /* 1 word32 per gate */
 __old_gateval = (gwrd >> 16) & 0xffL;
 /* 4 bit case index - bits 0-1 data value, bits 2-3 control value */
 ivec = ((gwrd >> 6) & 0x0c) | (gwrd & 3L);
 /* data input 0 and 1 strength components */
 st0 = (gwrd >> 5) & 7;
 st1 = (gwrd >> 2) & 7;
 /* precompute the mapped pass thru strength pair (st0 high, st1 low) */
 stboth = (__mos_stmap[st0] << 5) | (__mos_stmap[st1] << 2);
 switch ((byte) ivec) {
  /* control 0 or data z - driven to z */
  case 0: case 1: case 2: case 3: case 6: case 10: case 14:
   __new_gateval = 2;
   break;
  /* control 1 - non x/z data passed thru with mapped strengths */
  case 4: __new_gateval = stboth; break;
  case 5: __new_gateval = 1 | stboth; break;
  case 7: case 11: case 15: __new_gateval = 3 | stboth; break;
  /* control x/z - data 0 becomes L (0 strength only) unless hiz */
  case 8: case 12:
   __new_gateval = (st0 == 0) ? 2 : ((__mos_stmap[st0] << 5) | 3);
   break;
  /* control x/z - data 1 becomes H (1 strength only) unless hiz */
  case 9: case 13:
   __new_gateval = (st1 == 0) ? 2 : ((__mos_stmap[st1] << 2) | 3);
   break;
  default: __case_terr(__FILE__, __LINE__);
 }
}
6390
extern void __eval_rnmos_gate(word32 gwrd)
{
 register word32 ivec, st0, st1;
 word32 stboth;

 /* same as nmos eval above but uses resistive strength reduction map */
 /* state here is 2 8 bit inputs and 1 8 bit strength format output */
 __old_gateval = (gwrd >> 16) & 0xffL;
 /* 4 bit case index - bits 0-1 data value, bits 2-3 control value */
 ivec = ((gwrd >> 6) & 0x0c) | (gwrd & 3L);
 st0 = (gwrd >> 5) & 7;
 st1 = (gwrd >> 2) & 7;
 /* precompute the reduced pass thru strength pair */
 stboth = (__rmos_stmap[st0] << 5) | (__rmos_stmap[st1] << 2);
 switch ((byte) ivec) {
  /* control 0 or data z - driven to z */
  case 0: case 1: case 2: case 3: case 6: case 10: case 14:
   __new_gateval = 2;
   break;
  /* control 1 - non x/z data passed thru with reduced strengths */
  case 4: __new_gateval = stboth; break;
  case 5: __new_gateval = 1 | stboth; break;
  case 7: case 11: case 15: __new_gateval = 3 | stboth; break;
  /* control x/z - data 0 becomes L (0 strength only) unless hiz */
  case 8: case 12:
   __new_gateval = (st0 == 0) ? 2 : ((__rmos_stmap[st0] << 5) | 3);
   break;
  /* control x/z - data 1 becomes H (1 strength only) unless hiz */
  case 9: case 13:
   __new_gateval = (st1 == 0) ? 2 : ((__rmos_stmap[st1] << 2) | 3);
   break;
  default: __case_terr(__FILE__, __LINE__);
 }
}
6431
/*
 * evaluate pmos gate from its 1 word32 packed state gwrd
 *
 * gwrd format: data input with strengths [7-0], control input [15-8],
 * old strength form output [23-16]
 * sets globals __old_gateval and __new_gateval for caller
 * pmos passes data thru on control 0 (opposite sense of nmos)
 */
extern void __eval_pmos_gate(word32 gwrd)
{
 register word32 caseval, stren0, stren1;
 word32 hi3, lo3;

 /* previous strength format output value */
 __old_gateval = (gwrd >> 16) & 0xffL;
 /* 4 bit case index (bits 0-1 data value, bits 2-3 control value) */
 caseval = ((gwrd >> 6) & 0x0c) | (gwrd & 3L);
 /* the 2 3 bit strength components of the data input */
 stren0 = (gwrd >> 5) & 7;
 stren1 = (gwrd >> 2) & 7;
 /* mapped strengths positioned for the strength form output byte */
 hi3 = __mos_stmap[stren0] << 5;
 lo3 = __mos_stmap[stren1] << 2;

 switch ((byte) caseval) {
  /* control 0 - non x/z data passed thru */
  case 0: __new_gateval = hi3 | lo3; break;
  case 1: __new_gateval = hi3 | lo3 | 1; break;
  /* data x with ctrl 0, ctrl z, or ctrl x - becomes strength x */
  case 3: case 11: case 15:
   __new_gateval = hi3 | lo3 | 3;
   break;
  /* control 1 or data z - driven to z */
  case 2: case 4: case 5: case 6: case 7: case 10: case 14:
   __new_gateval = 2;
   break;
  /* control x/z with data 0 - L but if H becomes hiz */
  case 8: case 12:
   __new_gateval = (stren0 == 0) ? 2 : (hi3 | 3);
   break;
  /* control x/z with data 1 - H but if L goes to hiz not H */
  case 9: case 13:
   __new_gateval = (stren1 == 0) ? 2 : (lo3 | 3);
   break;
  default: __case_terr(__FILE__, __LINE__);
 }
}
6474
/*
 * evaluate resistive pmos gate from its 1 word32 packed state gwrd
 *
 * gwrd format: data input with strengths [7-0], control input [15-8],
 * old strength form output [23-16]
 * sets globals __old_gateval and __new_gateval for caller
 * same case analysis as pmos but strengths reduced thru __rmos_stmap
 */
extern void __eval_rpmos_gate(word32 gwrd)
{
 register word32 caseval, stren0, stren1;
 word32 hi3, lo3;

 /* previous strength format output value */
 __old_gateval = (gwrd >> 16) & 0xffL;
 /* 4 bit case index (bits 0-1 data value, bits 2-3 control value) */
 caseval = ((gwrd >> 6) & 0x0c) | (gwrd & 3L);
 /* the 2 3 bit strength components of the data input */
 stren0 = (gwrd >> 5) & 7;
 stren1 = (gwrd >> 2) & 7;
 /* reduced strengths positioned for the strength form output byte */
 hi3 = __rmos_stmap[stren0] << 5;
 lo3 = __rmos_stmap[stren1] << 2;

 switch ((byte) caseval) {
  /* control 0 - non x/z data passed thru with reduced strengths */
  case 0: __new_gateval = hi3 | lo3; break;
  case 1: __new_gateval = hi3 | lo3 | 1; break;
  /* data x with ctrl 0, ctrl z, or ctrl x - becomes strength x */
  case 3: case 11: case 15:
   __new_gateval = hi3 | lo3 | 3;
   break;
  /* control 1 or data z - driven to z */
  case 2: case 4: case 5: case 6: case 7: case 10: case 14:
   __new_gateval = 2;
   break;
  /* control x/z with data 0 - L but if H becomes hiz */
  case 8: case 12:
   __new_gateval = (stren0 == 0) ? 2 : (hi3 | 3);
   break;
  /* control x/z with data 1 - H but if L goes to hiz not H */
  case 9: case 13:
   __new_gateval = (stren1 == 0) ? 2 : (lo3 | 3);
   break;
  default: __case_terr(__FILE__, __LINE__);
 }
}
6517
6518 /*
6519 * evaluate cmos style gate that passes thru input strength
6520 *
6521 * special format is 4 8 bit values (0-7) data (1st input stren val,
6522 * 8-9 nmos control value (2nd input) (10-15 unused stren ignored),
6523 * 16-17 pmos control value (3rd input) (18-23 unused stren ignored),
6524 * 24-31 output with strength
6525 *
6526 * this is passed gate record and sets globals __old gateval
6527 * and __new gateval
6528 *
6529 * values only changed if inputs differ
6530 *
 * for r style, reduce strength according to the resistive table; for the
 * non resistive style the table only changes supply strength to strong
6533 * this uses the output port net for strength competition
6534 * scheme builds tree so 2 halves of cmos converted to one drive val here
6535 */
extern void __eval_cmos_gate(struct gate_t *gp)
{
 register word32 gwrd, tmpwrd;
 int32 nchged, pchged;
 word32 wtyp, gid;
 word32 new_nval, new_pval;
 struct expr_t *xp;

 /* per instance 32 bit packed state for this gate */
 gwrd = gp->gstate.wp[__inum];
 /* ---
 gwrd layout: stren combined gate output state [31-24] => always moved
  to bits [23-16] of the 1 word32 mos half gate format,
 3rd input pmos control [23-16] => removed for nmos half eval, moved
  to bits [15-8] for pmos half eval,
 2nd input nmos control [15-8] => kept in place for nmos half eval,
  removed for pmos half eval,
 1st input data state [7-0] = always kept in place
 -- */

 /* LOOKATME - maybe should add bit or use g gone also for this */
 gid = gp->gmsym->el.eprimp->gateid;

 /* nmos half: keep data [7-0] and nmos control [15-8], move old */
 /* output [31-24] down to [23-16] of the mos gate format */
 tmpwrd = (gwrd & 0xffffL) | ((gwrd >> 8) & 0x00ff0000L);
 nchged = TRUE;
 if (gid == G_RCMOS) __eval_rnmos_gate(tmpwrd); else __eval_nmos_gate(tmpwrd);
 if (__new_gateval == __old_gateval) nchged = FALSE;
 /* save nmos half result - next half eval clobbers the globals */
 new_nval = __new_gateval;

 /* pmos half: keep data [7-0], move pmos control [23-16] to [15-8] */
 /* and old output [31-24] to [23-16] */
 tmpwrd = (gwrd & 0xffL) | ((gwrd >> 8) & 0x00ffff00L);
 pchged = TRUE;
 if (gid == G_RCMOS) __eval_rpmos_gate(tmpwrd);
 else __eval_pmos_gate(tmpwrd);
 if (__new_gateval == __old_gateval) pchged = FALSE;
 new_pval = __new_gateval;

 /* now can set old values */
 __old_gateval = gwrd >> 24;

 /* since here old value always old value of cmos, no change means */
 /* no change since if some sort of wired or/and effect will be different */
 if (!nchged && !pchged) return;

 /* know at least one different from old, need tournament */
 /* hard part is need wire type of output */
 xp = gp->gpins[0];
 wtyp = (word32) N_REG;
 switch ((byte) xp->optyp) {
  case ID: case GLBREF: wtyp = xp->lu.sy->el.enp->ntyp; break;
  case LSB: wtyp = xp->lu.x->lu.sy->el.enp->ntyp; break;
  default: __case_terr(__FILE__, __LINE__);
 }
 /* strength competition combines the 2 half drive values into 1 */
 __new_gateval = __comb_1bitsts(wtyp, new_nval, new_pval);
}
6587
6588 /*
 * convert a gate assign expression's value to a string
6590 */
__to_gassign_str(char * s,struct expr_t * xp)6591 extern char *__to_gassign_str(char *s, struct expr_t *xp)
6592 {
6593 struct xstk_t *xsp;
6594 byte *sbp;
6595
6596 if (!xp->x_stren)
6597 {
6598 xsp = __eval_xpr(xp);
6599 __regab_tostr(s, xsp->ap, xsp->bp, xsp->xslen, BHEX, FALSE);
6600 }
6601 else
6602 {
6603 xsp = __ndst_eval_xpr(xp);
6604 sbp = (byte *) xsp->ap;
6605 __st_regab_tostr(s, sbp, xp->szu.xclen);
6606 }
6607 __pop_xstk();
6608 return(s);
6609 }
6610
/* map 2 bit (b<<1)|a value code (0,1,z,x) so both z and x become 2 - */
/* NOTE(review): not referenced in this file section; presumably 4 state */
/* to 3 state (0,1,x) conversion - verify at callers */
word32 __to_uvaltab[] = {0, 1, 2, 2 };
/* map 2 bit (b<<1)|a value code so both z(2) and x(3) become 3 - udp */
/* table indexing below requires x/z inputs to index as 3 */
word32 __to_noztab[] = {0, 1, 3, 3 };
6613
6614 /*
6615 * change input and if different evaluate udp
6616 *
6617 * notice input stored as 2 bit scalars not a and b sections
6618 * sets 2 bit value byte and restores updated gstate
6619 * notice extra input does not exist - include output in index if not comb.
6620 *
6621 * must pass is edge because initialize treats edge as seq.
6622 * expects __cur_udp to point to current udp record
6623 * pi is gstate bit index (starting at 0), i is gate expr. index (from 1)
6624 * gate values never have strength here maybe added when stored
6625 *
6626 * output always stored in high 2 bits of state but 0th gpins pin
6627 */
extern int32 __eval_udp(register struct gate_t *gp, word32 i, int32 *out_chg,
 int32 is_edge)
{
 register int32 pi;
 register word32 uwrd;
 register struct xstk_t *xsp;
 int32 ndx, bi, wi, outbi, tabi;
 word32 *wp;
 extern word32 __pow3tab[];

 /* combinatorial can be sequential or not */
 /* DBG remove --
 if (__debug_flg && __ev_tracing)
  __tr_msg("-- changing udp - old: %s\n",
   __gstate_tostr(__xs, gp, FALSE));
 --- */

 /* evaluate the changed input pin and map its 2 bit a/b value */
 xsp = __eval_xpr(gp->gpins[i]);
 /* normal udp - just using scalar form as index - i.e. x/z must be 3 */
 __new_inputval = __to_noztab[(xsp->ap[0] & 1L) | ((xsp->bp[0] & 1L) << 1)];
 __pop_xstk();

 /* output stored in the 2 bits just above the numins 2 bit inputs */
 outbi = 2*__cur_udp->numins;
 /* pi is 0 based state bit pair index - gpins index i is 1 based */
 pi = i - 1;
 if (!__cur_udp->u_wide)
  {
   /* narrow case - all input/output state fits in 1 per inst. half word */
   uwrd = (word32) gp->gstate.hwp[__inum];
   __old_inputval = (uwrd >> (2*pi)) & 3L;
   /* DBG remove ---
   if (__old_inputval == 2) __misc_terr(__FILE__, __LINE__);
   --- */

   /* unchanged input means no table look up and no event */
   if (__new_inputval == __old_inputval) return(FALSE);
   /* update the state */
   uwrd &= ~(3L << (2*pi));
   uwrd |= ((hword) __new_inputval << (2*pi));
   gp->gstate.hwp[__inum] = (hword) uwrd;
   /* finally compute the index - will include output if sequential */
   /* index since table look up always 3 for x or z */
   ndx = (int32) 2*(uwrd & __masktab[2*__cur_udp->numstates]);
   __old_gateval = (uwrd >> outbi) & 3L;
  }
 /* in wide, case need 2nd running value index word32 */
 else
  {
   /* wide case - wp[0] is packed state, wp[1] is incrementally */
   /* maintained running table index */
   wp = &(gp->gstate.wp[2*__inum]);
   /* know all input 0,1, x (2) only */
   __old_inputval = (wp[0] >> (2*pi)) & 3L;
   /* DBG remove ---
   if (__old_inputval == 2) __misc_terr(__FILE__, __LINE__);
   --- */
   if (__new_inputval == __old_inputval) return(FALSE);

   /* change the input - here x/z is 3 */
   wp[0] &= ~(3L << (2*pi));
   wp[0] |= (__new_inputval << (2*pi));

   /* must correct running index - subtract off contribution of port i */
   /* here x must be 2 not 3 since index digits are base 3 */
   wp[1] -= ((__old_inputval == 3) ? 2 : __old_inputval)*__pow3tab[pi];
   /* add in new contribution of port i */
   wp[1] += ((__new_inputval == 3) ? 2 : __new_inputval)*__pow3tab[pi];
   /* --- RELEASE remove --
   if (__debug_flg && __ev_tracing)
    __tr_msg("## wide udp word0=%x, word1=%x(%d)\n", wp[0], wp[1], wp[1]);
   --- */
   /* notice word32 1 index is bit - times 2 to get 2 bit output val. ind */
   ndx = (int32) 2*wp[1];
   __old_gateval = (wp[0] >> outbi) & 3L;
  }

 /* notice ndx already multiplied by 2 for 2 bit table values */
 wi = get_wofs_(ndx);
 bi = get_bofs_(ndx);
 __new_gateval = (__cur_udp->utab->ludptab[wi] >> bi) & 3L;
 /* RELEASE remove ---
 if (__debug_flg && __ev_tracing)
  __tr_msg(
 "## in=%d, old in=%d, gval=%d, old gval=%d, bi=%d, wi=%d, twrd=%lx\n",
  __new_inputval, __old_inputval, __new_gateval, __old_gateval, bi, wi,
  __cur_udp->utab->ludptab[wi]);
 --- */

 if (__new_gateval == 3 && is_edge)
  {
   /* level table gave x and udp is edge sensitive - retry in the per */
   /* input edge table selected by changed input and its old value */
   /* level sensitive has state but no edge table */
   tabi =3*pi + ((__old_inputval == 3) ? 2 : __old_inputval);
   wp = __cur_udp->utab->eudptabs[tabi];
   __new_gateval = (wp[wi] >> bi) & 3L;
   /* --- RELEASE remove
   if (__debug_flg && __ev_tracing)
    __tr_msg("## eval edge - new gval=%d, tabi=%d, twrd=%lx\n",
     __new_gateval, tabi, wp[wi]);
   --- */
  }
 /* set to T (non 0) if not equal if changed (different) */
 /* know strengths will always be same */
 *out_chg = (__old_gateval != __new_gateval);
 return(TRUE);
}
6728