1 /* Copyright (c) 1991-2007 Pragmatic C Software Corp. */
2 
3 /*
4    This program is free software; you can redistribute it and/or modify it
5    under the terms of the GNU General Public License as published by the
6    Free Software Foundation; either version 2 of the License, or (at your
7    option) any later version.
8 
9    This program is distributed in the hope that it will be useful, but
10    WITHOUT ANY WARRANTY; without even the implied warranty of
11    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
12    General Public License for more details.
13 
14    You should have received a copy of the GNU General Public License along
15    with this program; if not, write to the Free Software Foundation, Inc.,
16    59 Temple Place, Suite 330, Boston, MA, 02111-1307.
17 
18    We are selling our new Verilog compiler that compiles to X86 Linux
19    assembly language.  It is at least two times faster for accurate gate
20    level designs and much faster for procedural designs.  The new
21    commercial compiled Verilog product is called CVC.  For more information
22    on CVC visit our website at www.pragmatic-c.com/cvc.htm or contact
23    Andrew at avanvick@pragmatic-c.com
24 
25  */
26 
27 
28 /*
29  * MODULE TO HANDLE TRAN CHANNELS - PREPARATION AND ASSIGNMENT
30  */
31 
32 #include <stdio.h>
33 #include <stdlib.h>
34 #include <string.h>
35 
36 #ifdef __DBMALLOC__
37 #include "../malloc.h"
38 #endif
39 
40 #include "v.h"
41 #include "cvmacros.h"
42 
43 #include "vpi_user.h"
44 
45 /* local prototypes */
46 static void free_vtx_and_edge_freelsts(void);
47 static void emit_chan_size_table(void);
48 static void bld_trauxs(void);
49 static void alloc_traux(struct mod_t *, struct net_t *);
50 static void bld_trgraph(struct mod_t *, struct traux_t *, struct net_t *);
51 static void init_chanrec(struct chanrec_t *);
52 static struct vbinfo_t *add_vtxtraux(struct net_t *, int32, int32);
53 static void add_1chan_vtxes_and_edges(int32);
54 static struct vtx_t *alloc_vtx(struct net_t *, int32);
55 static void add_vtx_to_future_q(struct vtx_t *, struct itree_t *itp);
56 static void add_edges_from_npps(struct vtx_t *, int32, struct itree_t *);
57 static int32 add1_oside_edge(struct vtx_t *, struct net_pin_t *, struct net_t *,
58  int32, int32, struct itree_t *, struct itree_t *);
59 static word32 cnvt_tobase_ntyp(word32);
60 static void chg_bidchan_to_vtxlist(struct chanrec_t *);
61 static void process_bid_vertices(struct chanrec_t *, struct vtx_t *,
62  struct itree_t *);
63 static void add_vtx_to_bidvtxlist(struct chanrec_t *, struct vtx_t *,
64  struct itree_t *);
65 static struct edge_t *alloc_edge(struct net_pin_t *);
66 static void chkchannel(struct chanrec_t *);
67 static void off_bichan_marks(struct chanrec_t *);
68 static void add_vtx_to_offlist(struct vtx_t *);
69 static void chkchan_edge(struct chanrec_t *, struct edge_t *);
70 static void save_bidandtran_npps(void);
71 static void init_sttranif_chan(struct chanrec_t *);
72 static void init_add_vtx_and_subtree(struct vtx_t *, struct itree_t *itp);
73 static void off_stvtxtab_marks(void);
74 
75 static void stren_schd_bidpthdrvrs(struct net_t *, byte *, byte *);
76 static void schd_bidpthdrvrs(struct net_t *, struct xstk_t *,
77  struct xstk_t *);
78 static int32 schd_1bitpthdrvr(struct net_t *, register int32, i_tev_ndx *);
79 static int32 evtr_schd_1bitpthdrvr(struct net_t *, register int32,
80  i_tev_ndx *);
81 static void eval_assign_bid_chan(struct chanrec_t *);
82 static void eval_assign_stbid_chan(struct chanrec_t *);
83 static void ld_vtx_netbit(word32 *, word32 *, struct net_t *, int32);
84 static void st_vtx_netbit(struct net_t *, int32, word32, word32);
85 static void trmsg_frc_inhibit(struct net_t *, int32);
86 static void transtore_trmsg(struct net_t *, int32, int32, word32, word32);
87 static void stassign_1tranbit(struct net_t *, register int32, register word32);
88 static void assign_1tranbit(struct net_t *, register int32, register word32,
89  register word32);
90 static void eval_update_1w_tranchan(struct vtx_t *);
91 static struct vtxlst_t *add_stchan_chged_vtx(struct vtx_t *, struct itree_t *);
92 static void assign_chged_vtxs(void);
93 static void find_chgvtx_vicinity(struct vtxlst_t *);
94 static void stchan_trif_relax(void);
95 static void add_to_chg_vtx_list(struct vtxlst_t *, int32);
96 static void dmp_perturb_list(void);
97 static char *to_vtx_info(char *, struct vtx_t *, struct itree_t *);
98 static int32 try_reduce_tranif_stren(word32 *, struct gate_t *);
99 static void try_reduce_tran_stren(word32 *, int32);
100 static int32 get_switch_tranif_onoff(struct gate_t *, int32);
101 static int32 get_bidchan_val(struct chanrec_t *, register struct net_t *,
102  int32, int32);
103 static void dmp_vtx_edges(struct vtx_t *, struct itree_t *);
104 static void dmp_vtx_and_out_edges(struct vtx_t *, struct itree_t *);
105 static void prt_edge(struct vtx_t *, struct edge_t *, int32);
106 static struct net_t *xldrvld_to_netbit(struct expr_t *, int32, int32 *,
107  struct itree_t *);
108 static void getv2_itp(struct edge_t *, struct itree_t *, struct itree_t **,
109  struct itree_t **);
110 static char *to_vtx(char *, struct vtx_t *);
111 static struct itree_t *trchan_get_oside_itp(register struct net_pin_t *, int32,
112  int32 *, struct expr_t **, struct itree_t *);
113 static struct expr_t *find_cat_oside_xp(struct expr_t *, int32, int32 *);
114 static void fill_port_npps(struct net_t *, int32, struct itree_t *);
115 static void add_match_vtxs(struct net_t *, struct net_pin_t *, int32);
116 static int32 get_xldl_vtx(struct itree_t *, struct net_t *, int32, int32 *);
117 static int32 add_xldl_vtx(struct itree_t *, struct net_t *, int32, int32);
118 
119 static void do_qc_wire_intran_force(struct net_t *, int32, word32, word32,
120  struct itree_t *);
121 static void do_qc_wire_intran_release(struct net_t *, int32, struct expr_t *,
122  struct itree_t *);
123 static void do_vpi_wire_intran_force(struct net_t *, int32, word32 *, word32 *);
124 static void do_vpi_wire_intran_release(struct net_t *, int32);
125 static void do_putv_wire_intran_softforce(struct net_t *, int32, word32 *, word32 *);
126 
127 /* extern prototypes (maybe defined in this module) */
128 extern void __dmp_modtrans(struct mod_t *);
129 extern struct net_t *__find_tran_conn_np(struct expr_t *);
130 extern void __allocinit_stperival(union pck_u *, int32, struct net_t *, int32);
131 extern void __allocinit_perival(union pck_u *, int32, int32, int32);
132 extern char *__msg2_blditree(char *, struct itree_t *);
133 extern char *__my_realloc(char *, int32, int32);
134 extern void __get_bidnpp_sect(struct net_t *, struct net_pin_t *, int32 *,
135  int32 *);
136 extern struct net_t *__tranx_to_netbit(register struct expr_t *, int32, int32 *,
137  struct itree_t *);
138 extern void __xmrpush_refgrp_to_targ(struct gref_t *);
139 extern int32 __get_const_bselndx(register struct expr_t *);
140 extern void __eval_tran_bits(register struct net_t *);
141 extern void __eval_tran_1bit(register struct net_t *, register int32);
142 extern char *__my_malloc(int32);
143 extern void __my_free(char *, int32);
144 extern struct xstk_t *__stload_mdrwire(struct net_t *);
145 extern struct xstk_t *__load_mdrwire(register struct net_t *);
146 extern void __grow_xstk(void);
147 extern void __chg_xstk_width(struct xstk_t *, int32);
148 extern void __ld_perinst_val(register word32 *, register word32 *, union pck_u,
149  int32);
150 extern void __st_perinst_val(union pck_u, int32, register word32 *,
151  register word32 *);
152 extern char *__st_regab_tostr(char *, byte *, int32);
153 extern char *__regab_tostr(char *, word32 *, word32 *, int32, int32, int32);
154 extern char *__to_wtnam(char *, struct net_t *);
155 extern void __lhsbsel(register word32 *, register int32, word32);
156 extern struct pthdst_t *__get_path_del(struct rngdwir_t *, int32, word64 *);
157 extern void __emit_path_distinform(struct net_t *, struct pthdst_t *,
158  word64 *);
159 extern int32 __em_suppr(int32);
160 extern void __emit_path_pulsewarn(struct pthdst_t *, struct tev_t *, word64 *,
161  word64 *, char *, word32);
162 extern void __emit_path_samewarn(struct net_t *, int32, struct tev_t *,
163  word64 *, char *, word32);
164 extern void __schedule_1wev(struct net_t *, int32, int32, word64, word64,
165  word32, i_tev_ndx *, int32);
166 extern void __reschedule_1wev(i_tev_ndx, word32, word64, word64, i_tev_ndx *);
167 extern void __cancel_1wev(struct tev_t *t);
168 extern char *__to_evtrwnam(char *, struct net_t *, int32, int32,
169  struct itree_t *);
170 extern char *__to_timstr(char *, word64 *);
171 extern char *__to_vnam(char *, word32, word32);
172 extern char *__bld_lineloc(char *, word32, int32);
173 extern int32 __move_to_npprefloc(struct net_pin_t *);
174 extern word32 __comb_1bitsts(word32, register word32, register word32);
175 extern void __eval_1w_nonstren(register word32 *, register word32 *,
176  register word32, register word32, word32);
177 extern void __chg_st_val(struct net_t *, register word32 *, register word32 *);
178 extern void __st_val(struct net_t *, register word32 *, register word32 *);
179 extern void __chg_st_bit(struct net_t *, int32, register word32, register word32);
180 extern void __st_bit(struct net_t *, int32, register word32, register word32);
181 extern char *__to_vvstnam(char *, word32);
182 extern void __dmp_trchan(struct vtx_t *);
183 extern void __dmp_bidchan(struct chanrec_t *);
184 extern void __dmp1_nplstel(struct mod_t *, struct net_t *,
185  struct net_pin_t *);
186 extern void __dmp_bidnet_drvs(struct net_t *, struct mod_t *);
187 extern void __add_dmpv_chglst_el(struct net_t *);
188 extern void __add_nchglst_el(register struct net_t *);
189 extern void __add_select_nchglst_el(register struct net_t *, register int32,
190  register int32);
191 extern void __wakeup_delay_ctrls(register struct net_t *, register int32,
192  register int32);
193 extern char *__msgexpr_tostr(char *, struct expr_t *);
194 extern void __ld_bit(register word32 *, register word32 *,
195  register struct net_t *, int32);
196 extern void __qc_tran_wireforce(struct net_t *, int32, int32, int32,
197  struct itree_t *, struct st_t *);
198 extern void __qc_tran_wirerelease(struct net_t *, int32, int32, struct itree_t *,
199  struct expr_t *);
200 extern void __tran_wire_vpi_force(struct net_t *, word32 *, word32 *, int32);
201 extern void __tran_wire_vpi_release(struct net_t *, int32);
202 extern void __tran_exec_putv_wire_softforce(struct net_t *, word32 *, word32 *,
203  int32);
204 extern void __alloc_qcval(struct net_t *);
205 extern void __bit1_vpi_or_tran_wireforce(struct net_t *, word32 *, word32 *,
206  int32, int32, int32, char *);
207 extern int32 __correct_forced_newwireval(struct net_t *, word32 *, word32 *);
208 extern void __find_call_force_cbs(struct net_t *, int32);
209 extern void __cb_all_rfs(struct net_t *, int32, int32);
210 extern void __find_call_rel_cbs(struct net_t *, int32);
211 extern void __assign_1mdrwire(register struct net_t *);
212 extern int32 __unnormalize_ndx(struct net_t *, int32);
213 extern struct xstk_t *__eval_assign_rhsexpr(register struct expr_t *,
214  register struct expr_t *);
215 
216 extern void __cv_msg(char *, ...);
217 extern void __tr_msg(char *, ...);
218 extern void __dbg_msg(char *, ...);
219 extern void __sgfinform(int32, char *, ...);
220 extern void __vpi_err(int32, int32, char *, ...);
221 extern void __arg_terr(char *, int32);
222 extern void __case_terr(char *, int32);
223 extern void __misc_terr(char *, int32);
224 extern void __vpi_terr(char *, int32);
225 
226 extern word32 __mos_stmap[];
227 extern word32 __rmos_stmap[];
228 extern word32 __cap_to_stren[];
229 
230 /*
231  * ROUTINES TO BUILD TRAN AND UNCOLLAPSED INOUT TRAN CHANNEL CONN. GRAPHS
232  */
233 
234 /*
235  * build the separate inout tran graphs
236  * assumes called before building tran gates so cross instance trans
237  * allow all normal trans to be added to inter instance channels
238  *
239  * must build from both side because channel can go up and and
240  * then down into other instance
241  * FIXME - for now never will see concatenate here
242  */
extern void __bld_bidandtran_graph(void)
{
 register int32 ni, chi;
 register struct net_t *np;
 register struct chanrec_t *chanp;
 int32 si, design_has_trans, start_chanid;
 struct mod_t *mdp;
 struct traux_t *trap;

 if (__switch_verbose)
  {
   __cv_msg("  SWITCH: begin switch channel graph construction.\n");
  }

 /* need to count number of switch (have tranif or different wire types) */
 bld_trauxs();

 /* DBG remove --
 malloc_chain_check(1);
 -- */

 /* alloc channel info table - size increased when needed */
 __nxt_chan_id = 0;
 __chantab = (struct chanrec_t *) __my_malloc(1000*sizeof(struct chanrec_t));
 __chanallocsize = 1000;
 design_has_trans = FALSE;
 /* clear the 7 per-strength vertex table heads used by stren processing */
 for (si = 0; si < 7; si++)
  { __stvtxtab[si] = NULL; __stvtxtabend[si] = NULL; }
 __chg_vtxlst_hdr = __chg_vtxlst_end = NULL;
 __off_vtxlst_hdr = __off_vtxlst_end = NULL;
 __vtx_freelst = NULL;
 __edge_freelst = NULL;
 __vtxlst_freelst = NULL;

 /* pass 1: build a channel graph for every net bit that has a traux */
 for (mdp = __modhdr; mdp != NULL; mdp = mdp->mnxt)
  {
   if (mdp->mnnum == 0) continue;
   /* if no trans and no inouts module will have no tran channels */
   if (!mdp->mod_hasbidtran && !mdp->mod_hastran) continue;
   start_chanid = __nxt_chan_id;
   for (ni = 0, np = &(mdp->mnets[0]); ni < mdp->mnnum; ni++, np++)
    {
     if ((trap = np->ntraux) == NULL) continue;
     bld_trgraph(mdp, trap, np);
     design_has_trans = TRUE;
    }
   /* vtx/edge free lists only reused within one module's build */
   free_vtx_and_edge_freelsts();
   if (__switch_verbose && __nxt_chan_id > start_chanid)
    {
     __cv_msg(
      "  SWITCH: %d channels in module %s (%d insts, %d nets) built.\n",
      __nxt_chan_id - start_chanid,
      mdp->msym->synam, mdp->flatinum, mdp->mnnum);
    }
  }

 /* pass 2: classify channels and count ones needing switch processing */
 __num_switch_chans = 0;
 for (chi = 0; chi < __nxt_chan_id; chi++)
  {
   chanp = &(__chantab[chi]);
   if (chanp->chtyp != TRPROC_TRAN && chanp->chan_diffwtyps)
    {
     if (chanp->chtyp == TRPROC_STBID) chanp->chtyp = TRPROC_STWTYPBID;
     else if (chanp->chtyp == TRPROC_BID)
      {
       /* SJM 04/21/01 - internal error if inout only wired trchan non stren */
       __case_terr(__FILE__, __LINE__);
      }
     else __case_terr(__FILE__, __LINE__);
    }
   if (chanp->chtyp == TRPROC_STWTYPBID || chanp->chtyp == TRPROC_TRAN)
    __num_switch_chans++;

   /* only check tran channels */
   /* DBG remove --- */
   if (__debug_flg)
    {
     if (!chanp->chan_no_vtxs) chkchannel(chanp);
    }
   /* --- */
  }
 if (__switch_verbose) emit_chan_size_table();
 /* done with tran npps (nlds and ndrvs must now only have hard ones) */
 /* move to list in traux for vpi_ local load/driver processing */
 save_bidandtran_npps();
 if (__debug_flg && design_has_trans)
  {
   __dbg_msg("++ dumping all channels after optimization:\n");
/* -- DBG remove --
   for (mdp = __modhdr; mdp != NULL; mdp = mdp->mnxt)
    {
     if (mdp->mnnum == 0) continue;
     if (!mdp->mod_hasbidtran && !mdp->mod_hastran) continue;
     __dmp_modtrans(mdp);
    }
--- */
   /* 04/12/00 - */
   for (chi = 0; chi < __nxt_chan_id; chi++)
    {
     chanp = &(__chantab[chi]);
     if (chanp->chan_no_vtxs) __dmp_bidchan(chanp);
     else
      {
       /* dump from vertex although vtx is just used to get back to chan id */
       __push_itstk(chanp->chvitp);
       __dmp_trchan(chanp->chvxp);
       __pop_itstk();
      }
    }
  }
}
354 
355 /*
356  * free vtx and edge free lists
357  *
358  * because freeing pure bid channels now faster to use free list for
359  * vertices and edges
360  * SJM 07/27/01 - also needed because vtx list els used in building channels
361  */
free_vtx_and_edge_freelsts(void)362 static void free_vtx_and_edge_freelsts(void)
363 {
364  register struct vtx_t *vtxp, *vtxp2;
365  register struct edge_t *ep, *ep2;
366 
367  for (vtxp = __vtx_freelst; vtxp != NULL;)
368   {
369    /* free list linked using edge field */
370    vtxp2 = (struct vtx_t *) vtxp->vedges;
371    __my_free((char *) vtxp, sizeof(struct vtx_t));
372    vtxp = vtxp2;
373   }
374  __vtx_freelst = NULL;
375 
376  for (ep = __edge_freelst; ep != NULL;)
377   {
378    ep2 = ep->enxt;
379    __my_free((char *) ep, sizeof(struct edge_t));
380    ep = ep2;
381   }
382  __edge_freelst = NULL;
383 }
384 
385 /*
386  * routine for switch verbose mode to give table of switch chan sizes
387  */
emit_chan_size_table(void)388 static void emit_chan_size_table(void)
389 {
390  register struct chanrec_t *chanp;
391  register int32 chi;
392  int32 size1_5, size5_10, size10_20, size20_50, size50_100, size100_200;
393  int32 size200_500, size500_1000, more1000, i1;
394  struct net_t *np;
395  struct itree_t *itp;
396  char s1[RECLEN], s2[RECLEN];
397 
398  itp = NULL;
399  size1_5 = size5_10 = size10_20 = size20_50 = size50_100 = size100_200 = 0;
400  size200_500 = size500_1000 = more1000 = 0;
401  for (chi = 0; chi < __nxt_chan_id; chi++)
402   {
403    chanp = &(__chantab[chi]);
404 
405    if (chanp->numvtxs <= 5) size1_5++;
406    else if (chanp->numvtxs <= 10) size5_10++;
407    else if (chanp->numvtxs <= 20) size10_20++;
408    else if (chanp->numvtxs <= 50) size20_50++;
409    else if (chanp->numvtxs <= 100) size50_100++;
410    else if (chanp->numvtxs <= 200) size100_200++;
411    else if (chanp->numvtxs <= 500) size200_500++;
412    else if (chanp->numvtxs <= 1000) size500_1000++;
413    else
414     {
415      more1000++;
416 
417      /* SJM 04/23/01 - now need to access vtx for inout from bid vtxlp list */
418      if (chanp->chtyp == TRPROC_TRAN || chanp->chtyp == TRPROC_STWTYPBID)
419       {
420        strcpy(s1, "tran");
421        np = chanp->chvxp->vnp;
422        i1 = chanp->chvxp->vi1;
423        itp = chanp->chvitp;
424       }
425      else
426       {
427        strcpy(s1, "inout");
428        np = chanp->bid_vtxlp->vnp;
429        i1 = chanp->bid_vtxlp->vi1;
430        itp = chanp->bid_vtxlp->bidvtx_itp;
431       }
432      if (i1 != -1)
433       {
434        sprintf(s2, "%s.%s[%d]", __msg2_blditree(__xs, itp), np->nsym->synam,
435         i1);
436       }
437      else sprintf(s2, "%s.%s", __msg2_blditree(__xs, itp), np->nsym->synam);
438 
439      __cv_msg(
440       "  Large %s switch channel has %d terminals (ID=%d) a vertex: %s\n",
441       s1, chanp->numvtxs, chi, s2);
442     }
443   }
444  __cv_msg("  Total Number of Switch Channels: %d\n", __nxt_chan_id);
445  if (size1_5 != 0)
446   __cv_msg("  %d channels with less than 5 terminals.\n", size1_5);
447  if (size5_10 != 0)
448   __cv_msg("  %d channels with 6 to 10 terminals.\n", size5_10);
449  if (size10_20 != 0)
450   __cv_msg("  %d channels with 11 to 20 terminals.\n", size10_20);
451  if (size20_50 != 0)
452   __cv_msg("  %d channels with 21 to 50 terminals.\n", size20_50);
453  if (size50_100 != 0)
454   __cv_msg("  %d channels with 51 to 100 terminals.\n", size50_100);
455  if (size100_200 != 0)
456   __cv_msg("  %d channels with 101 to 200 terminals.\n", size100_200);
457  if (size200_500 != 0)
458   __cv_msg("  %d channels with 201 to 500 terminals.\n", size200_500);
459  if (size500_1000 != 0)
460   __cv_msg("  %d channels with 501 to 1000 terminals.\n", size500_1000);
461  if (more1000 != 0)
462   __cv_msg("  %d channels with more than 1000 terminals.\n", more1000);
463 }
464 
465 /*
466  * routine to build the net tran aux records for all nets that need them
467  *
468  * pre-allocates all trauxs including per bit forms for xmrs and inouts
469  * net connection type and type of channel element determine is per inst
470  *
471  * after here all trauxes allocated and all per inst. bits set but
472  * do not know channel type (processing type)
473  */
bld_trauxs(void)474 static void bld_trauxs(void)
475 {
476  register struct net_pin_t *npp;
477  register int32 ni, gi, gri;
478  register struct net_t *np;
479  int32 chg;
480  struct gref_t *grp;
481  struct gate_t *gp;
482  struct mod_t *mdp, *refmdp;
483  struct net_t *np1;
484 
485  /* all xmr lhs net targs need per inst. and define per inst. channel */
486  /* even if rooted with only one itree loc. target */
487  /* this will get all xmr def. (target) wires from the ref */
488  /* this will mark tran terminals so can find all per inst. other sides */
489  for (mdp = __modhdr; mdp != NULL; mdp = mdp->mnxt)
490   {
491    for (gri = 0, grp = &(mdp->mgrtab[0]); gri < mdp->mgrnum; gri++, grp++)
492     {
493      /* if ref. point is rhs, channel does not need to be per inst. */
494      if (grp->gr_gone || !grp->gxndp->x_islhs) continue;
495 
496      /* for upward rel. can use 1st for mod type since all same */
497      /* SJM 08/15/00 - using 0th ref loc inst. table entry - always exists */
498      if (grp->upwards_rel)
499       {
500        refmdp = grp->targu.uprel_itps[0]->itip->imsym->el.emdp;
501       }
502      else refmdp = grp->targmdp;
503 
504      np = grp->targsyp->el.enp;
505      /* must do all or will not set all needed mod having tran/bid trans */
506      for (npp = np->ndrvs; npp != NULL; npp = npp->npnxt)
507       {
508        if (npp->npntyp == NP_BIDMDPRT && npp->npntyp == NP_BIDICONN)
509         refmdp->mod_hasbidtran = TRUE;
510        else if (npp->npntyp == NP_TRAN) refmdp->mod_hastran = TRUE;
511        else continue;
512        /* may have lots of xmrs to same target - all must be per bit/inst */
513        if (np->ntraux == NULL) alloc_traux(refmdp, np);
514       }
515     }
516   }
517 
518  /* any inout or inout iconn even if only 1 inst.*/
519  /* need per inst. even if 1 inst in case other side has per inst. */
520  for (mdp = __modhdr; mdp != NULL; mdp = mdp->mnxt)
521   {
522    if (mdp->mnnum == 0) continue;
523    for (ni = 0, np = &(mdp->mnets[0]); ni < mdp->mnnum; ni++, np++)
524     {
525      if (np->ntraux != NULL) continue;
526      for (npp = np->ndrvs; npp != NULL; npp = npp->npnxt)
527       {
528        if (npp->npntyp == NP_TRAN) { mdp->mod_hastran = TRUE; continue; }
529        if (npp->npntyp == NP_BIDMDPRT || npp->npntyp == NP_BIDICONN)
530         {
531          /* must go through all to set has bid tran for module */
532          mdp->mod_hasbidtran = TRUE;
533          /* if only one inst. per inst. does nothing */
534          if (np->ntraux == NULL) alloc_traux(mdp, np);
535         }
536       }
537     }
538   }
539 
540  /* all nets that connect to other side of per. inst trans */
541  /* all inout trans are alloc and marked by here - look only at trans */
542  /* all tran nets that are per inst from xmr have traux by here */
543  /* and all module having tranor bid tran set by here */
544  for (mdp = __modhdr; mdp != NULL; mdp = mdp->mnxt)
545   {
546    if (!mdp->mod_hastran) continue;
547    chg = TRUE;
548    while (chg)
549     {
550      chg = FALSE;
551      for (gi = 0; gi < mdp->mgnum; gi++)
552       {
553        gp = &(mdp->mgates[gi]);
554 
555        if (gp->g_class != GC_TRAN && gp->g_class != GC_TRANIF) continue;
556 
557        /* 04/26/01 - if terminals same, removed so ignore here */
558        if (gp->g_gone) continue;
559 
560        /* if xmr, the net will be per inst already */
561        np = __find_tran_conn_np(gp->gpins[0]);
562        np1 = __find_tran_conn_np(gp->gpins[1]);
563        if (np->ntraux != NULL)
564         {
565          if (np1->ntraux == NULL)
566           { alloc_traux(mdp, np1); chg = TRUE; }
567         }
568        else if (np1->ntraux != NULL)
569         {
570          if (np->ntraux == NULL)
571           { alloc_traux(mdp, np); chg = TRUE; }
572         }
573       }
574     }
575   }
576  /* allocate non per inst - can only be non xmr/inout conn. tran gate */
577  for (mdp = __modhdr; mdp != NULL; mdp = mdp->mnxt)
578   {
579    if (mdp->mnnum == 0) continue;
580    if (!mdp->mod_hastran) continue;
581    for (ni = 0, np = &(mdp->mnets[0]); ni < mdp->mnnum; ni++, np++)
582     {
583      if (np->ntraux != NULL) continue;
584 
585      for (npp = np->ndrvs; npp != NULL; npp = npp->npnxt)
586       {
587        /* if on other side of inout/iconn to inout in chan or xmr tran */
588        /* already allocated */
589        if (npp->npntyp == NP_TRAN) { alloc_traux(mdp, np); break; }
590       }
591     }
592   }
593 }
594 
595 /*
596  * allocate and initialize a traux for a net not yet on any tran channel
597  */
alloc_traux(struct mod_t * mdp,struct net_t * np)598 static void alloc_traux(struct mod_t *mdp, struct net_t *np)
599 {
600  struct traux_t *trap;
601  int32 nbytes;
602 
603  /* DBG remove --
604  __dbg_msg("allocating traux for net %s in module %s\n",
605   np->nsym->synam, mdp->msym->synam);
606  --- */
607 
608  trap = (struct traux_t *) __my_malloc(sizeof(struct traux_t));
609  np->ntraux = trap;
610  /* alloc storage for hard drivers - must be per instance */
611  if (np->n_stren)
612   __allocinit_stperival(&trap->trnva, mdp->flatinum, np, TRUE);
613  else __allocinit_perival(&trap->trnva, mdp->flatinum, np->nwid, TRUE);
614  /* allocate per inst per bit bit chan */
615  nbytes = np->nwid*mdp->flatinum*sizeof(struct vbinfo_t *);
616  /* allocate per bit vtx and chan id */
617  /* BEWARE - assumes can set array of nil ptrs by zeroing */
618  trap->vbitchans = (struct vbinfo_t **) __my_malloc(nbytes);
619  memset(trap->vbitchans, 0, nbytes);
620  trap->tran_npps = NULL;
621 }
622 
623 /*
624  * routine to build tran channel graph for 1 for net
625  * must fill channel for each instance
626  * for type tran channel np insts will be 1 even though maybe more
627  *
628  * SJM - 08/26/00 - changed so storing dest. itree loc (if different) in
629  * edge - so tracing edge just pushes new itree loc
630  */
static void bld_trgraph(struct mod_t *mdp, struct traux_t *trap,
 struct net_t *np)
{
 register int32 ii, bi2;
 int32 bi, insts, osize;
 struct vbinfo_t *vbip;
 struct chanrec_t *chanp;

 /* know all wires bits in per-inst channels, mark per inst by here */
 insts = mdp->flatinum;

 /* build tran channel for each bit of net */
 for (bi2 = np->nwid - 1; bi2 >= 0; bi2--)
  {
   /* DBG remove --
   __cv_msg("++ malloc chain check before processing %s[%d] tran:\n",
    np->nsym->synam, (!np->n_isavec ? -1 : bi2));
   malloc_chain_check(1);
   -- */

   /* scalar nets use bit index -1 */
   bi = (!np->n_isavec) ? -1 : bi2;
   /* DBG remove ---
   if (__debug_flg)
    {
     __dbg_msg("++ processing %s[%d] tran:\n", np->nsym->synam,
      (!np->n_isavec ? -1 : bi));
    }
   --- */
   for (ii = 0; ii < insts; ii++)
    {
     /* notice know that ii and itinum same */
     __push_itstk(mdp->moditps[ii]);
     /* bit already reached from some earlier net's channel - skip it */
     if ((vbip = trap->vbitchans[np->nwid*ii + bi2]) != NULL)
      {
       if (__debug_flg)
        {
         sprintf(__xs2, ", itp=%s", __msg2_blditree(__xs, __inst_ptr));

         /* DBG remove -- */
         if (__debug_flg)
          {
           __dbg_msg(
            "-- vertex %s (id=%d%s) already in channel\n",
            to_vtx(__xs, vbip->vivxp), vbip->chan_id, __xs2);
          }
         /* --- */
        }
       __pop_itstk();
       continue;
      }
     /* add the new vertex, bld net to vtx conn and maybe alloc vb tab */
     vbip = add_vtxtraux(np, bi, __nxt_chan_id++);
     trap->vbitchans[np->nwid*ii + bi2] = vbip;

     /* grow global channel table by roughly 2.5x when it fills */
     if (__nxt_chan_id >= __chanallocsize)
      {
       osize = __chanallocsize;
       __chanallocsize += (3*osize)/2;
       __chantab = (struct chanrec_t *)
        __my_realloc((char *) __chantab, osize*sizeof(struct chanrec_t),
         (int32) (__chanallocsize*sizeof(struct chanrec_t)));
      }
     chanp = &(__chantab[__nxt_chan_id - 1]);
     init_chanrec(chanp);
     chanp->chvxp = vbip->vivxp;
     chanp->chvitp = __inst_ptr;
     chanp->numvtxs = 1;
     /* SJM 09/18/00 - since no longer checking channels need right first */
     /* channel type for 1 element (unc.) channels */
     if (np->n_stren) chanp->chtyp = TRPROC_STBID;
     else chanp->chtyp = TRPROC_BID;

     /* SJM 07/27/01 - change algorithm to use priority queue */
     /* new algorithm saves oside vtxes on vtx list and adds edges from cur */
     /* vertex (not both edges at once as before) before moving to next */
     /* DBG add -- sav_mem_use = __mem_use; */
     add_vtx_to_future_q(vbip->vivxp, __inst_ptr);
     add_1chan_vtxes_and_edges(__nxt_chan_id - 1);

     /* st bid and simple bid, convert to simple vtx list and free chan */
     if ((chanp->chtyp == TRPROC_STBID || chanp->chtyp ==TRPROC_BID)
      && !chanp->chan_diffwtyps)
      {
       chg_bidchan_to_vtxlist(chanp);
      }
     /* DBG add --
     else
      {
       if (__switch_verbose)
        {
         __cv_msg("tran channel with %d vertices requires %d bytes.\n",
          chanp->numvtxs, __mem_use - sav_mem_use);
        }
      }
     --- */

     if (__switch_verbose && ((__nxt_chan_id % 5000) == 0))
      {
       __cv_msg(
        "  SWITCH: %d switch/inout channel graphs constructed.\n",
        __nxt_chan_id);
      }
     __pop_itstk();
    }
  }
}
737 
738 /*
739  * initialize a record for a new channel
740  */
init_chanrec(struct chanrec_t * chanp)741 static void init_chanrec(struct chanrec_t *chanp)
742 {
743  chanp->chtyp = TRPROC_UNKN;
744  chanp->chan_diffwtyps = FALSE;
745  chanp->chan_no_vtxs = FALSE;
746  chanp->chvxp = NULL;
747  chanp->chvitp = NULL;
748  chanp->bid_vtxlp = NULL;
749  chanp->numvtxs = 0;
750 }
751 
752 /*
753  * add a new vertex and add net to link vertex
754  * vertices are net and edges are gates or cross port assigns (npps)
755  */
static struct vbinfo_t *add_vtxtraux(struct net_t *np, int32 bi, int32 chanid)
{
 register struct vbinfo_t *vbip;

 /* record ties the new wire-bit vertex to its channel id */
 vbip = (struct vbinfo_t *) __my_malloc(sizeof(struct vbinfo_t));
 vbip->chan_id = chanid;
 vbip->vivxp = alloc_vtx(np, bi);
 return(vbip);
}
768 
769 /*
770  * add all rest of vertices and all edges or rest of channel
771  *
772  * LOOKATME - using chg vtxlst list to build tran graph and same list
773  * used later to build perturb list
774  */
static void add_1chan_vtxes_and_edges(int32 chanid)
{
 register struct vtxlst_t *vlp;

 /* breadth first scan - add_edges_from_npps appends newly reached */
 /* vertices at the tail, so the traversal also covers them */
 for (vlp = __chg_vtxlst_hdr; vlp != NULL; vlp = vlp->vtxnxt)
  {
   /* mark means every edge out of this vertex already added */
   if (vlp->vtxp->vtx_mark) continue;

   add_edges_from_npps(vlp->vtxp, chanid, vlp->vtx_itp);
   vlp->vtxp->vtx_mark = TRUE;
  }

 /* clear the processed marks */
 for (vlp = __chg_vtxlst_hdr; vlp != NULL; vlp = vlp->vtxnxt)
  vlp->vtxp->vtx_mark = FALSE;

 /* splice whole work list onto the vtx list free list for reuse */
 __chg_vtxlst_end->vtxnxt = __vtxlst_freelst;
 __vtxlst_freelst = __chg_vtxlst_hdr;
 __chg_vtxlst_hdr = __chg_vtxlst_end = NULL;
}
803 
804 /*
805  * allocate a tran graph vertex (wire-bit)
806  */
static struct vtx_t *alloc_vtx(struct net_t *np, int32 bi)
{
 struct vtx_t *vtxp;

 /* fresh allocation only when the free list is empty */
 if (__vtx_freelst == NULL)
  vtxp = (struct vtx_t *) __my_malloc(sizeof(struct vtx_t));
 else
  {
   vtxp = __vtx_freelst;
   /* free list is threaded through the vedges field */
   __vtx_freelst = (struct vtx_t *) __vtx_freelst->vedges;
  }

 /* both channel values start as hiz until relaxation runs */
 vtxp->new_vtxval = ST_HIZ;
 vtxp->old_vtxval = ST_HIZ;
 /* all per vertex flags start off */
 vtxp->vtx_chged = FALSE;
 vtxp->vtx_in_vicinity = FALSE;
 vtxp->vtx_forced = FALSE;
 vtxp->vtx_supply = FALSE;
 vtxp->vtx_mark = FALSE;
 vtxp->vtx_mark2 = FALSE;
 /* vertex identity is the net plus normalized bit index (-1 scalar) */
 vtxp->vnp = np;
 vtxp->vi1 = bi;
 vtxp->vedges = NULL;
 return(vtxp);
}
831 
832 /*
833  * add vertex to future add vtx from edges fifo queue
834  */
add_vtx_to_future_q(struct vtx_t * vtxp,struct itree_t * itp)835 static void add_vtx_to_future_q(struct vtx_t *vtxp, struct itree_t *itp)
836 {
837  register struct vtxlst_t *vtxlp;
838 
839  /* alloc and add to list */
840  if (__vtxlst_freelst == NULL)
841   vtxlp = (struct vtxlst_t *) __my_malloc(sizeof(struct vtxlst_t));
842  else
843   {
844    vtxlp = __vtxlst_freelst;
845    __vtxlst_freelst = __vtxlst_freelst->vtxnxt;
846   }
847  vtxlp->vtxp = vtxp;
848  vtxlp->vtx_itp = itp;
849  vtxlp->vtxnxt = NULL;
850  if (__chg_vtxlst_hdr == NULL) __chg_vtxlst_hdr = __chg_vtxlst_end = vtxlp;
851  else
852   {
853    __chg_vtxlst_end->vtxnxt = vtxlp;
854    __chg_vtxlst_end = vtxlp;
855   }
856 }
857 
/*
 * process all edges from current vtx list el and add one from this
 * vtx edge for every npp and add all vertices not in a switch channel
 *
 * 05/21/01 SJM - now passing vertex itree loc (for edge it is vtx 1)
 * 07/27/01 SJM - now add all vertices breadth first
 */
static void add_edges_from_npps(struct vtx_t *vtxp, int32 chanid,
 struct itree_t *vt1_itp)
{
 register struct net_pin_t *npp;
 int32 ri1, ri2, bi, bi2, osbi, fromr_bi, catel_bi;
 int32 cat_oside_bi, oside_vtxndx;
 struct net_t *np, *osnp;
 struct expr_t *xp;
 struct itree_t *vt2_itp, *oside_itp;

 np = vtxp->vnp;
 bi = vtxp->vi1;
 /* bi2 is edge start bit of vectored net (scalar vi1 of -1 maps to 0) */
 bi2 = (bi == -1) ? 0 : bi;
 for (npp = np->ndrvs; npp != NULL; npp = npp->npnxt)
  {
   /* ignore hard (channel input) drivers - only switch npps make edges */
   if (npp->npntyp != NP_BIDICONN && npp->npntyp != NP_BIDMDPRT
    && npp->npntyp != NP_TRAN) continue;

   /* if npp only applies to one inst, filter if wrong inst */
   /* this is only for rooted */
   if (npp->npproctyp == NP_PROC_FILT && npp->npaux->npu.filtitp != vt1_itp)
    continue;

   /* any IS bit select forms in tran chans removed from splitting */
   __get_bidnpp_sect(np, npp, &ri1, &ri2);
   /* unconnected - vertex bit lies outside npp's [ri1:ri2] select range */
   if (ri1 != -1 && (bi2 > ri1 || bi2 < ri2)) continue;

   /* get oside vtx, if same as this side vt2 itp nil */
   vt2_itp = trchan_get_oside_itp(npp, bi2, &catel_bi, &xp, vt1_itp);
   /* if other side unconnected (empty port expr), nothing to add */
   if (xp->optyp == OPEMPTY) continue;

   oside_itp = (vt2_itp != NULL) ? vt2_itp : vt1_itp;
   cat_oside_bi = -1;
   /* this looks ahead to other side but not used until bottom */
   /* no itree loc. change */
   if (npp->npntyp == NP_TRAN)
    {
     osnp = __tranx_to_netbit(xp, 0, &osbi, oside_itp);
     /* -2 means other side bit out of range - no edge to add */
     if (osbi == -2) continue;

     /* flags are not per inst. - use near side bit index */
     /* for now no special routine for channels with tranifs */
     __chantab[chanid].chtyp = TRPROC_TRAN;
     /* FIXME - set tranif processing mode separate value here when used */
    }
   else if (npp->npntyp == NP_BIDICONN || npp->npntyp == NP_BIDMDPRT)
    {
     /* set index offset to match other side from this side npp */
     if (npp->npaux == NULL) fromr_bi = bi2;
     else
      {
       if (npp->npaux->nbi1 == -1) fromr_bi = bi2;
       /* ??? LOOKATME - why is npp low psel bit subtracted off */
       else fromr_bi = bi2 - npp->npaux->nbi2.i;

       /* in case this side expr in concat need low of where in concat */
       /* so can add to otherside index to get matching oside bit */
       if (npp->npaux->lcbi1 != -1) cat_oside_bi = npp->npaux->lcbi2;
      }

     /* set type of channel if changes - stren and non stren bid chans */
     /* are processed differently and must never mix */
     if (__chantab[chanid].chtyp != TRPROC_TRAN)
      {
       if (np->n_stren)
        {
         /* DBG remove --- */
         if (__chantab[chanid].chtyp == TRPROC_BID)
          __misc_terr(__FILE__, __LINE__);
         /* --- */
         __chantab[chanid].chtyp = TRPROC_STBID;
        }
       else
        {
         /* DBG remove --- */
         if (__chantab[chanid].chtyp == TRPROC_STBID)
          __misc_terr(__FILE__, __LINE__);
         /* --- */
         __chantab[chanid].chtyp = TRPROC_BID;
        }
      }
     /* concat element offset (catel_bi) takes priority over range offset */
     osnp = __tranx_to_netbit(xp, ((catel_bi == -1) ? fromr_bi : catel_bi),
      &osbi, oside_itp);
     if (osbi == -2) continue;
    }
   else { __case_terr(__FILE__, __LINE__); continue; }

   /* compute other side vertex index */
   /* osbi is other side connecting expr object index (maybe -1) */
   /* cat oside bi is offset of this side npp in other side cat (none -1) */
   if (osbi == -1)
    {
     if (cat_oside_bi != -1) oside_vtxndx = cat_oside_bi;
     else oside_vtxndx = -1;
    }
   else
    {
     if (cat_oside_bi == -1) oside_vtxndx = osbi;
     else oside_vtxndx = osbi + cat_oside_bi;
    }

   /* alloc oside vertex if needed and add one edge to it */
   if (!add1_oside_edge(vtxp, npp, osnp, oside_vtxndx, chanid, vt1_itp,
    vt2_itp)) continue;

   /* DBG remove --
   if (__debug_flg)
    {
     int32 ei;
     struct edge_t *ep;

     -* this must run in itree loc. of this side edge *-
     __push_itstk(vt1_itp);
     sprintf(__xs2, ", itp=%s", __msg2_blditree(__xs, __inst_ptr));

     __dbg_msg(">< vertex %s(id=%d%s) added, edges:\n",
      to_vtx(__xs, vtxp), chanid, __xs2);
     for (ei = 1, ep = vtxp->vedges; ep != NULL; ep = ep->enxt, ei++)
      prt_edge(vtxp, ep, ei);
     __pop_itstk();
    }
   --- */
  }
}
992 
993 /*
994  * convert npp into range - uses current instance
995  */
extern void __get_bidnpp_sect(struct net_t *np, struct net_pin_t *npp,
 int32 *bi1, int32 *bi2)
{
 struct npaux_t *auxp;

 auxp = npp->npaux;
 if (auxp != NULL && auxp->nbi1 != -1)
  {
   /* DBG remove -- */
   if (auxp->nbi1 == -2) __arg_terr(__FILE__, __LINE__);
   /* --- */
   /* explicit select range recorded in the aux record */
   *bi1 = auxp->nbi1;
   *bi2 = auxp->nbi2.i;
   return;
  }
 /* no select - whole wire: internal indices normalized to [h:0] */
 if (np->n_isavec) { *bi1 = np->nwid - 1; *bi2 = 0; }
 else *bi1 = *bi2 = -1;
}
1014 
1015 /*
1016  * move from trchan npp to other side or 2nd vtx itree loc.
1017  * returns other side itree loc if different else nil
1018  *
1019  * does not use net (col. to in vtx), so for col. to chges only itp
1020  * LOOKATME - think this code may be wrong?
1021  */
static struct itree_t *trchan_get_oside_itp(register struct net_pin_t *npp,
 int32 bi2, int32 *catel_bi, struct expr_t **xp2, struct itree_t *vt1_itp)
{
 register struct itree_t *vt2_itp, *itp2;
 register struct expr_t *xp;
 register struct mod_pin_t *mpp;
 register struct expr_t *idndp;
 struct gate_t *gp;
 struct npaux_t *auxp;
 struct mod_t *vt2_mdp;
 int32 netbi;

 *catel_bi = -1;
 vt2_itp = NULL;
 switch ((byte) npp->npntyp) {
  case NP_TRAN:
   /* tran gate - other side expr is the opposite gate terminal */
   gp = npp->elnpp.egp;
   if (npp->obnum == 0) xp = gp->gpins[1]; else xp = gp->gpins[0];
   break;
  case NP_BIDMDPRT:
   /* inout mod. port - other side is the connecting iconn in the parent */
   xp = vt1_itp->itip->ipins[npp->obnum];
   vt2_itp = vt1_itp->up_it;
   break;
  case NP_BIDICONN:
   if (npp->np_xmrtyp != XNP_LOC)
    {
     /* SJM 10/05/99 - for down relative may be at top or at bottom */
     /* correct by detecting up */
     if (npp->np_xmrtyp == XNP_RTXMR)
      {
       /* always moved to ref. point even if at top - since rooted works */
       __push_itstk(vt1_itp);
       /* SJM 04/17/03 - this can't fail */
       __move_to_npprefloc(npp);
       itp2 = __inst_ptr;
       /* pop both the ref. loc. pushed by move and the vt1 push */
       __pop_itstk();
       __pop_itstk();
      }
     else
      {
       /* for non rooted, if at ref. point, do not move */
       if (vt1_itp->itip->imsym->el.emdp == npp->npaux->npu.npgrp->gin_mdp)
        itp2 = vt1_itp;
       else
        {
         /* if F, this instance not xmr, if T pushes ref loc on it stk */
         __push_itstk(vt1_itp);
         /* SJM 04/17/03 - if not matching XMR path, must not move */
         if (!__move_to_npprefloc(npp)) __misc_terr(__FILE__, __LINE__);
         itp2 = __inst_ptr;
         __pop_itstk();
         __pop_itstk();
        }
      }
    }
   /* local non xmr - set itree loc for use below */
   else itp2 = vt1_itp;

   /* DBG remove -- */
   if (itp2->in_its == NULL) __misc_terr(__FILE__, __LINE__);
   if (itp2->itip->imsym->el.emdp->minum <= npp->elnpp.eii)
    __misc_terr(__FILE__, __LINE__);
   /* --- */
   /* descend into the child instance whose iconn this npp records */
   vt2_itp = &(itp2->in_its[npp->elnpp.eii]);
   vt2_mdp = vt2_itp->itip->imsym->el.emdp;
   /* DBG remove -- */
   if (npp->obnum >= vt2_mdp->mpnum) __misc_terr(__FILE__, __LINE__);
   /* --- */
   /* other side expr is the inside-the-module port reference */
   mpp = &(vt2_mdp->mpins[npp->obnum]);
   xp = mpp->mpref;
   break;
  default:  __case_terr(__FILE__, __LINE__); xp = NULL;
 }

 /* need to find channel */
 if (xp->optyp == ID || xp->optyp == GLBREF) idndp = xp;
 else if (xp->optyp == LSB || xp->optyp == PARTSEL) idndp = xp->lu.x;
 else if (xp->optyp == OPEMPTY) { *xp2 = xp; return(vt2_itp); }
 else if (xp->optyp == LCB)
  {
   /* LOOKATME - for entire wire no lcb offset */
   if ((auxp = npp->npaux) == NULL || auxp->lcbi1 == -1) netbi = bi2;
   else netbi = bi2 + auxp->lcbi1;

   /* this does not need itree loc */
   *xp2 = find_cat_oside_xp(xp, netbi, catel_bi);
   return(vt2_itp);
  }
 else { __case_terr(__FILE__, __LINE__); idndp = NULL; }

 if (idndp->optyp == GLBREF)
  {
   /* SJM 07/23/01 - for other side of mod port xmr, need to push oside itp */
   /* not entering down side - can't happen for ICONN since oside never xmr */
   /* notice if not mod port will be tran where vt2 itp not set */
   if (npp->npntyp == NP_BIDMDPRT) __push_itstk(vt2_itp);
   else __push_itstk(vt1_itp);

   __xmrpush_refgrp_to_targ(idndp->ru.grp);
   vt2_itp = __inst_ptr;
   __pop_itstk();
   __pop_itstk();
  }
 *xp2 = xp;
 return(vt2_itp);
}
1128 
1129 /*
1130  * routine to find non concat lhs xp from concat and bit index in concat
1131  *
1132  * returns bit index in xp net if bit or part select or vectored net
1133  * returns -1 for scalar
1134  */
static struct expr_t *find_cat_oside_xp(struct expr_t *xp, int32 catbi,
 int32 *catel_bi)
{
 register struct expr_t *catxp;
 register int32 elwid, lowbi;

 /* walk concat elements from high (left) to low (right) */
 for (catxp = xp->ru.x; catxp != NULL; catxp = catxp->ru.x)
  {
   elwid = catxp->lu.x->szu.xclen;
   /* lowbi is this element's low bit position within the concat */
   lowbi = catxp->szu.xclen - elwid;

   /* first element whose low bit is at or below catbi contains it */
   if (catbi < lowbi) continue;

   /* offset inside the element - 0 for low bit or scalar */
   *catel_bi = catbi - lowbi;
   return(catxp->lu.x);
  }
 /* index must fall inside some concat element */
 __arg_terr(__FILE__, __LINE__);
 return(NULL);
}
1176 
1177 /*
1178  * given a tran channel lhs non concatenate expression get net and bit
1179  *
1180  * sets bi to -2 for other side out of this side range
1181  * uses other side offset from low (right) to determine index
1182  * this uses itree loc to get module for constant table
1183  */
extern struct net_t *__tranx_to_netbit(register struct expr_t *xp,
 int32 fromr_bi, int32 *bi, struct itree_t *oside_itp)
{
 register struct net_t *np;
 int32 psel_hi, psel_lo;

 np = __find_tran_conn_np(xp);
 switch ((byte) xp->optyp) {
  case LSB:
   /* constant bit select - const table lookup needs the itree loc. */
   __push_itstk(oside_itp);
   *bi = __get_const_bselndx(xp);
   __pop_itstk();
   /* one bit wide - any positive from-right offset is out of range */
   if (fromr_bi > 0) *bi = -2;
   break;
  case PARTSEL:
   /* part select bounds come from the constant table */
   psel_hi = __contab[xp->ru.x->lu.x->ru.xvi];
   psel_lo = __contab[xp->ru.x->ru.x->ru.xvi];
   if (fromr_bi == -1) *bi = psel_lo;
   else
    {
     *bi = psel_lo + fromr_bi;
     /* past high bound of the select - out of range */
     if (*bi > psel_hi) *bi = -2;
    }
   break;
  default:
   /* whole wire reference */
   if (!np->n_isavec)
    {
     /* this side is 1 bit scalar - index -1, out of range past bit 0 */
     *bi = (fromr_bi > 0) ? -2 : -1;
    }
   else *bi = (fromr_bi >= np->nwid) ? -2 : fromr_bi;
   break;
 }
 return(np);
}
1216 
/*
 * for every npp, add other vertex if needed and connect edge from vtxp
 *
 * SJM 07/27/01 - new algorithm that uses breadth first building
 * here if edge never seen in channel, alloc and add vbip and put on
 * future processing fifo
 *
 * npp is one that other side of need to have edges added
 * vt1 itp always set, vt2 itp itree loc for other edge only if different
 */
static int32 add1_oside_edge(struct vtx_t *vtxp, struct net_pin_t *npp,
 struct net_t *osnp, int32 osbi, int32 chanid, struct itree_t *vt1_itp,
 struct itree_t *vt2_itp)
{
 register struct edge_t *ep1;
 register struct vbinfo_t *osvbip;
 int32 osbi2;
 struct vtx_t *osvtxp;
 struct traux_t *ostrap;
 struct itree_t *itp;

 ostrap = osnp->ntraux;
 /* scalar other side (-1) stored at bit slot 0 */
 osbi2 = (osbi == -1) ? 0 : osbi;
 /* for non per inst. itinum will be 0 */
 /* not in channel - add it */
 if (vt2_itp == NULL) itp = vt1_itp; else itp = vt2_itp;
 if ((osvbip = ostrap->vbitchans[osnp->nwid*itp->itinum + osbi2])
  == NULL)
  {
   /* add the new vertex, bld net to vtx conn */
   osvbip = add_vtxtraux(osnp, osbi, chanid);
   ostrap->vbitchans[osnp->nwid*itp->itinum + osbi2] = osvbip;
   osvtxp = osvbip->vivxp;
   __chantab[chanid].numvtxs++;

   /* SJM 09/11/00 - at edge add set diff wtypes if edge nets ntyp differ */
   /* SJM 04/23/01 - need to handle wire type synonyms (tri=wire say) */
   if (cnvt_tobase_ntyp(osnp->ntyp) != cnvt_tobase_ntyp(vtxp->vnp->ntyp))
    __chantab[chanid].chan_diffwtyps = TRUE;

   /* finally, add to future add from edges fifo list */
   add_vtx_to_future_q(osvtxp, itp);
  }
 else osvtxp = osvbip->vivxp;

 /* need to add 2 opposite direct edges */
 ep1 = alloc_edge(npp);
 /* SJM 08/26/00 - if vt2 itp nil, then no itree change on edges */
 /* notice for cross inout ports always have itree loc change */
 if (vt2_itp != NULL) ep1->edgoside_itp = vt2_itp;
 ep1->ev2 = osvtxp;
 /* push the new edge on front of this vertex's edge list */
 ep1->enxt = vtxp->vedges;
 vtxp->vedges = ep1;

 /* DBG remove --
 if (__debug_flg)
  {
   __push_itstk(vt1_itp);
   __dbg_msg(
    "!!! adding other side vertex %s and forward edge from %s:\n",
    to_vtx(__xs, osvtxp), to_vtx(__xs2, vtxp));
   -* --
   prt_edge(vtxp, ep1, -1);
   -- *-
   __pop_itstk();
  }
 --- */
 /* always succeeds - F return would mean skip this edge */
 return(TRUE);
}
1286 
1287 /*
1288  * routine to convert wire types to base
1289  *
1290  * used for finding mixed wire type chans that require relax
1291  */
static word32 cnvt_tobase_ntyp(word32 ntyp)
{
 /* tri/triand/trior are synonyms of wire/wand/wor for channel typing */
 if (ntyp == N_TRI) return(N_WIRE);
 if (ntyp == N_TRIAND) return(N_WA);
 if (ntyp == N_TRIOR) return(N_WO);
 /* everything else is already a base wire type */
 return(ntyp);
}
1302 
1303 /*
1304  * given a tran or tranif connecting port, find the net
1305  *
1306  * will never see concatenate here because xp is component of concat
1307  */
__find_tran_conn_np(struct expr_t * xp)1308 extern struct net_t *__find_tran_conn_np(struct expr_t *xp)
1309 {
1310  struct net_t *np;
1311 
1312  switch ((byte) xp->optyp) {
1313   case ID: case GLBREF: np = xp->lu.sy->el.enp; return(np);
1314   case LSB: case PARTSEL: np = xp->lu.x->lu.sy->el.enp; return(np);
1315   /* for opempty will be no npp */
1316   default: __case_terr(__FILE__, __LINE__);
1317  }
1318  return(NULL);
1319 }
1320 
1321 /*
1322  * routine to convert bid only same wire type channel to simple
1323  *
1324  * SJM 04/23/01 - can process bid only channels by just going through
1325  * list (key is that vtx list element contains itree loc for each one)
1326  */
static void chg_bidchan_to_vtxlist(struct chanrec_t *chanp)
{
 register struct edge_t *ep, *ep2;
 /* DBG remove - int32 sav_mem_use; */
 struct vtx_t *vtxp;
 struct vtxlst_t *vtxlp;

 /* free every vertex and its edge list */
 /* build list of all vertices */
 vtxp = chanp->chvxp;
 /* need to pass itree context since itstk may overflow, use call stack */
 process_bid_vertices(chanp, vtxp, chanp->chvitp);

 /* build the off list - use this to free */
 add_vtx_to_offlist(vtxp);

 for (vtxlp = __off_vtxlst_hdr; vtxlp != NULL; vtxlp = vtxlp->vtxnxt)
  {
   /* first free edge list */
   for (ep = vtxlp->vtxp->vedges; ep != NULL;)
    {
     ep2 = ep->enxt;
     __my_free((char *) ep, sizeof(struct edge_t));
     ep = ep2;
    }
   /* SJM 08/08/01 - can't free vertices still used by iout channels */
   /* ??? __my_free((char *) vtxlp->vtxp, sizeof(struct vtx_t)); */
  }

 /* DBG remove --
 if (__switch_verbose)
  {
   register struct bidvtxlst_t *bvtxlp;
   int32 chanid;

   chanid = chanp - __chantab;
   __cv_msg("-- chan %d (%d vertices): conversion freed %d bytes\n",
    chanid, chanp->numvtxs, sav_mem_use - __mem_use);
  }
 --- */

 /* chan now stored as bid vtx list - graph root and itree loc. cleared */
 chanp->chvxp = NULL;
 chanp->chvitp = NULL;
 chanp->chan_no_vtxs = TRUE;

 /* then add entire off vtx list to end of vtx list free list */
 if (__off_vtxlst_hdr != NULL)
  {
   __off_vtxlst_end->vtxnxt =  __vtxlst_freelst;
   __vtxlst_freelst = __off_vtxlst_hdr;
  }
 __off_vtxlst_hdr = __off_vtxlst_end = NULL;
}
1381 
1382 /*
1383  * add vertex and all connected to linear vertex list
1384  */
process_bid_vertices(struct chanrec_t * chanp,struct vtx_t * vtxp,struct itree_t * vt1_itp)1385 static void process_bid_vertices(struct chanrec_t *chanp, struct vtx_t *vtxp,
1386  struct itree_t *vt1_itp)
1387 {
1388  register struct edge_t *ep;
1389  struct itree_t *oside_itp;
1390 
1391  /* mark vertex visited and and add to list */
1392  vtxp->vtx_mark = TRUE;
1393  add_vtx_to_bidvtxlist(chanp, vtxp, vt1_itp);
1394 
1395  for (ep = vtxp->vedges; ep != NULL; ep = ep->enxt)
1396   {
1397    if (ep->ev2->vtx_mark) continue;
1398 
1399    /* SJM 04/23/01 - this moves to right other side edge itree loc */
1400    if (ep->edgoside_itp != NULL) oside_itp = ep->edgoside_itp;
1401    else oside_itp = vt1_itp;
1402 
1403    process_bid_vertices(chanp, ep->ev2, oside_itp);
1404   }
1405 }
1406 
1407 /*
1408  * add a vertex to bid chan linear list
1409  */
static void add_vtx_to_bidvtxlist(struct chanrec_t *chanp, struct vtx_t *vtxp,
 struct itree_t *vt1_itp)
{
 register struct bidvtxlst_t *bidvtxlp;

 /* bid vtx list cells are never recycled - always fresh allocation */
 bidvtxlp = (struct bidvtxlst_t *) __my_malloc(sizeof(struct bidvtxlst_t));

 /* copy the vertex identity (net, bit index) plus its itree loc. */
 bidvtxlp->vnp = vtxp->vnp;
 bidvtxlp->vi1 = vtxp->vi1;
 bidvtxlp->bidvtx_itp = vt1_itp;
 bidvtxlp->bidvtxnxt = NULL;

 /* must always put on end */
 /* NOTE(review): the comment above says put on end but this code pushes
    on the FRONT of chanp->bid_vtxlp, reversing traversal order relative
    to process_bid_vertices - confirm which behavior is intended */
 if (chanp->bid_vtxlp != NULL)
  { bidvtxlp->bidvtxnxt = chanp->bid_vtxlp; chanp->bid_vtxlp = bidvtxlp; }
 else chanp->bid_vtxlp = bidvtxlp;
}
1427 
1428 /*
1429  * allocate a tran graph edge (npp tran gate)
1430  */
alloc_edge(struct net_pin_t * npp)1431 static struct edge_t *alloc_edge(struct net_pin_t *npp)
1432 {
1433  struct edge_t *ep;
1434 
1435  if (__edge_freelst != NULL)
1436   { ep = __edge_freelst; __edge_freelst = __edge_freelst->enxt; }
1437  else ep = (struct edge_t *) __my_malloc(sizeof(struct edge_t));
1438 
1439  ep->enpp = npp;
1440  ep->edgoside_itp = NULL;
1441  ep->ev2 = NULL;
1442  ep->enxt = NULL;
1443  return(ep);
1444 }
1445 
/*
 * consistency check a bid (inout only) channel - errors unless all
 * vertices have the same strength and, unless the mixed wire type flag
 * is set, the same wire type (routine returns nothing - checks trap)
 */
static void chkchannel(struct chanrec_t *chanp)
{
 register struct edge_t *ep;
 int32 bi, chanid, base;
 struct vtx_t *vtxp;
 struct traux_t *trap;
 struct net_t *np;

 vtxp = chanp->chvxp;
 /* push channel itree loc. - __inum below reads the current instance */
 __push_itstk(chanp->chvitp);
 np = vtxp->vnp;
 /* DBG remove -- */
 if ((trap = np->ntraux) == NULL) __misc_terr(__FILE__, __LINE__);
 /* --- */
 /* scalar vertex (-1) stored at bit slot 0 */
 bi = vtxp->vi1;
 if (bi == -1) bi = 0;
 /* vbitchans is per instance - base indexes this instance's section */
 base = __inum*np->nwid;
 chanid = trap->vbitchans[base + bi]->chan_id;
 /* DBG remove -- */
 if (&(__chantab[chanid]) != chanp) __misc_terr(__FILE__, __LINE__);
 /* --- */
 vtxp->vtx_mark = TRUE;

 /* depth first check of every edge reachable from the root vertex */
 for (ep = vtxp->vedges; ep != NULL; ep = ep->enxt)
  {
   if (ep->ev2->vtx_mark) continue;
   ep->ev2->vtx_mark = TRUE;
   chkchan_edge(chanp, ep);
  }
 /* SJM 09/11/00 - not set ch typ even if this not called */
 __pop_itstk();

 /* off vertex marks, to be ready for next traversal */
 off_bichan_marks(chanp);
}
1484 
/*
 * routine to turn off all vertex marks for a bid channel
 */
off_bichan_marks(struct chanrec_t * chanp)1488 static void off_bichan_marks(struct chanrec_t *chanp)
1489 {
1490  register struct vtxlst_t *vtxlp;
1491 
1492  add_vtx_to_offlist(chanp->chvxp);
1493 
1494  /* turn of marks in all vertices on list */
1495  for (vtxlp = __off_vtxlst_hdr; vtxlp != NULL; vtxlp = vtxlp->vtxnxt)
1496   {
1497    vtxlp->vtxp->vtx_mark = FALSE;
1498    vtxlp->vtxp->vtx_mark2 = FALSE;
1499   }
1500 
1501  /* then add entire list to end of free list */
1502  if (__off_vtxlst_hdr != NULL)
1503   {
1504    __off_vtxlst_end->vtxnxt =  __vtxlst_freelst;
1505    __vtxlst_freelst = __off_vtxlst_hdr;
1506   }
1507  __off_vtxlst_hdr = __off_vtxlst_end = NULL;
1508 }
1509 
/*
 * add all vertices to list using vtx mark2 for cutting cycles
 * also used as first step in freeing channel
 */
add_vtx_to_offlist(struct vtx_t * vtxp)1514 static void add_vtx_to_offlist(struct vtx_t *vtxp)
1515 {
1516  register struct vtxlst_t *vtxlp;
1517  register struct edge_t *ep;
1518 
1519  /* alloc and add to list */
1520  if (__vtxlst_freelst == NULL)
1521   vtxlp = (struct vtxlst_t *) __my_malloc(sizeof(struct vtxlst_t));
1522  else
1523   {
1524    vtxlp = __vtxlst_freelst;
1525    __vtxlst_freelst = __vtxlst_freelst->vtxnxt;
1526   }
1527  /* add it */
1528  vtxlp->vtxp = vtxp;
1529  vtxlp->vtxnxt = NULL;
1530  vtxp->vtx_mark2 = TRUE;
1531 
1532  /* put on end */
1533  if (__off_vtxlst_end != NULL)
1534   { __off_vtxlst_end->vtxnxt = vtxlp; __off_vtxlst_end = vtxlp; }
1535  else __off_vtxlst_end = __off_vtxlst_hdr = vtxlp;
1536 
1537  /* all all other side vertices of this one's edges */
1538  for (ep = vtxp->vedges; ep != NULL; ep = ep->enxt)
1539   {
1540    if (ep->ev2->vtx_mark2) continue;
1541 
1542    add_vtx_to_offlist(ep->ev2);
1543   }
1544 }
1545 
1546 /*
1547  * check edge and vertex on other side and update chanp fields
1548  * 09/11/00 - SJM - DBG now only called if debug flag on
1549  */
chkchan_edge(struct chanrec_t * chanp,struct edge_t * ep)1550 static void chkchan_edge(struct chanrec_t *chanp, struct edge_t *ep)
1551 {
1552  struct vtx_t *vtxp;
1553  struct net_t *np;
1554  struct edge_t *ep2;
1555 
1556  vtxp = ep->ev2;
1557  np = vtxp->vnp;
1558  /* DBG remove -- */
1559  if (np->ntraux == NULL) __misc_terr(__FILE__, __LINE__);
1560  if (chanp->chvxp->vnp->n_stren != np->n_stren)
1561   __misc_terr(__FILE__, __LINE__);
1562  /* --- */
1563  if (chanp->chvxp->vnp->ntyp != np->ntyp)
1564   {
1565    if (chanp->chan_diffwtyps != TRUE) __misc_terr(__FILE__, __LINE__);
1566   }
1567 
1568  /* depth first process vertices edges */
1569  /* back edge vertex already part of processed tree */
1570  for (ep2 = vtxp->vedges; ep2 != NULL; ep2 = ep2->enxt)
1571   {
1572    if (ep2->ev2->vtx_mark) continue;
1573    ep2->ev2->vtx_mark = TRUE;
1574    chkchan_edge(chanp, ep2);
1575   }
1576 }
1577 
1578 /*
1579  * finished with tran soft drivers remove from in tran channel
1580  * np list in traux
1581  *
1582  * know one traux per net with any bits in tran channel
1583  * SJM 05/15/99 - move tran channel npps to traux - needed for PLI
1584  */
static void save_bidandtran_npps(void)
{
 register struct net_pin_t *npp;
 register int32 ni;
 register struct mod_t *mdp;
 register struct net_t *np;
 struct net_pin_t *last_npp, *npp2;

 /* scan every net of every module for switch channel drivers */
 for (mdp = __modhdr; mdp != NULL; mdp = mdp->mnxt)
  {
   if (mdp->mnnum == 0) continue;
   for (ni = 0, np = &(mdp->mnets[0]); ni < mdp->mnnum; ni++, np++)
    {
     for (last_npp = NULL, npp = np->ndrvs; npp != NULL;)
      {
       /* save next field - npnxt is overwritten when npp is moved */
       npp2 = npp->npnxt;
       if (npp->npntyp == NP_BIDMDPRT || npp->npntyp == NP_BIDICONN
        || npp->npntyp == NP_TRAN)
        {
         /* link out but pointed to by graph edges so cannot free */
         if (last_npp == NULL) np->ndrvs = npp->npnxt;
         else last_npp->npnxt = npp->npnxt;

         /* and link onto front of tran channel npp list in traux */
         /* for first one, sets nil then it migrates to end */
         npp->npnxt = np->ntraux->tran_npps;
         np->ntraux->tran_npps = npp;
        }
       /* non switch driver stays - becomes predecessor for next unlink */
       else last_npp = npp;
       npp = npp2;
      }
    }
  }
}
1620 
1621 /*
1622  * ROUTINES TO EVALUATE AND STORE TRAN CHANNEL AND INOUT WIRE GRAPHS
1623  */
1624 
1625 /*
1626  * evaluate stren tran channels including inouts during initialization
1627  *
1628  * non stren wires do not need to be evaluated since z unless driver
1629  * causes change but when initializing driver will cause re-eval
1630  * for rtran since everything z to start attenuation can't go weaker than z
1631  *
1632  * also since any driver change will cause trnva change, can use initialized
1633  * values do not need to reevaluate drivers
1634  *
1635  * must do for every instance of modules
1636  * notice evaluating each channel (all vertices) only once
1637  * LOOKATME - for now initializing all channels, but think only some needed
1638  * maybe not since need all trans for sure here and if inout only has
1639  * pull on it will never be evaluated from gate evaluation
1640  */
extern void __init_all_trchans(void)
{
 register int32 ii;
 int32 tot_tran_vtxs, tot_bid_vtxs, num_tran_done, num_bid_done;
 struct chanrec_t *chanp;
 struct vtxlst_t *vtxlp, *vtxlp2;

 if (__switch_verbose && __nxt_chan_id > 0)
  {
   __cv_msg("  SWITCH: begin switch channel initialization (%d channels).\n",
    __nxt_chan_id);
  }

 /* if no tran in design nothing to do here */
 if (__nxt_chan_id == 0) return;

 tot_tran_vtxs = tot_bid_vtxs = 0;
 num_tran_done = num_bid_done = 0;
 /* first initialize all tran(if) chans using relaxation of entire chan */
 for (ii = 0; ii < __nxt_chan_id; ii++)
  {
   chanp = &(__chantab[ii]);
   if (chanp->chtyp == TRPROC_STWTYPBID || chanp->chtyp == TRPROC_TRAN)
    {
     /* tran or stren mixed wire type channel - full relaxation */
     init_sttranif_chan(chanp);
     tot_tran_vtxs += chanp->numvtxs;
     num_tran_done++;
    }
   else
    {
     /* inout only channel - stren and non stren forms differ */
     if (chanp->chtyp == TRPROC_BID) eval_assign_bid_chan(chanp);
     else eval_assign_stbid_chan(chanp);
     tot_bid_vtxs += chanp->numvtxs;
     num_bid_done++;
    }

   /* SJM only emit message if switch verbose and some channels inited */
   if (__switch_verbose && ((num_tran_done + num_bid_done) % 5000) == 0)
    {
     double d1, d2;

     d1 = 100.0*(((double) num_tran_done)/((double) __num_switch_chans));
     /* NOTE(review): if every channel is a switch channel the divisor
        below is 0.0 - verbose-only percentage becomes inf; confirm */
     d2 = 100.0*(((double) num_bid_done)
      / ((double) __nxt_chan_id - __num_switch_chans));
     __cv_msg(
     "  SWITCH: %d (%.2f%%) tran and %d (%.2f%%) inout channels completed.\n",
     num_tran_done, d1, num_bid_done, d2);
    }
  }
 if (__switch_verbose)
  {
   if (__num_switch_chans == 0)
    { __cv_msg("  SWITCH: design contains no tran(if) switch channels.\n"); }
   else
    {
     __cv_msg(
      "  SWITCH: tran channels initialized (%d containing %d nodes in design).\n",
      __num_switch_chans, tot_tran_vtxs);
    }
   if (__num_switch_chans == __nxt_chan_id)
    { __cv_msg("  SWITCH: design contains no inout switch channels.\n"); }
   else
    {
     __cv_msg(
      "  SWITCH: inout channels initialized (%d containing %d nodes in design).\n",
      __nxt_chan_id - __num_switch_chans, tot_bid_vtxs);
    }
  }

 /* when done with initialization full relax, reclaim storage */
 /* since lists will be much shorter from now on */
 for (vtxlp = __vtxlst_freelst; vtxlp != NULL;)
  {
   vtxlp2 = vtxlp->vtxnxt;
   __my_free((char *) vtxlp, sizeof(struct vtxlst_t));
   vtxlp = vtxlp2;
  }
 __vtxlst_freelst = NULL;
}
1720 
1721 /*
1722  * initialize entire tran(if) channel at once (relax the whole thing)
1723  */
static void init_sttranif_chan(struct chanrec_t *chanp)
{
 /* add all vertices to st vtx tab lists indexed by low stren */
 /* starts with the one random chan vertex (since digraph any works) */
 /* push the channel's distinguished itree loc. so recursion starts scoped */
 __push_itstk(chanp->chvitp);
 init_add_vtx_and_subtree(chanp->chvxp, chanp->chvitp);
 __pop_itstk();

 /* now that all vertices in st vtx tab, turn off marks so can be used again */
 /* if needed - mostly for dumping channels */
 /* works in st vtx tab - know all vertices in it */
 off_stvtxtab_marks();

 /* relax the entire channel to a fixed point */
 stchan_trif_relax();

 /* final step, assign all changed vertices */
 if (__chg_vtxlst_hdr != NULL) assign_chged_vtxs();
}
1742 
1743 /*
1744  * routine to add all vertices to st vtx tab lists indexed by low stren
1745  * also set internal tran vertex value from net(bit) driving value
1746  * works because force release not possible before here
1747  *
1748  * NOTICE - this must be called after all normal initialization
1749  * LOOKATME - think it pushing does not work for inout port edges
1750  */
init_add_vtx_and_subtree(struct vtx_t * vtxp,struct itree_t * vt1_itp)1751 static void init_add_vtx_and_subtree(struct vtx_t *vtxp,
1752  struct itree_t *vt1_itp)
1753 {
1754  register struct edge_t *ep;
1755  int32 bi, inum;
1756  word32 vtxval, lowst, st0, st1;
1757  struct vtxlst_t *vtxlp;
1758  struct net_t *np;
1759  struct itree_t *oside_itp;
1760 
1761  np = vtxp->vnp;
1762  bi = vtxp->vi1;
1763  bi = (bi == -1) ? 0 : bi;
1764 
1765  inum = vt1_itp->itinum;
1766  /* save current value of net as old vtx value */
1767  vtxp->old_vtxval = np->nva.bp[inum*np->nwid + bi];
1768 
1769  /* get switch contribution from hard driver's value */
1770  vtxval = (word32) np->ntraux->trnva.bp[inum*np->nwid + bi];
1771 
1772  /* and use as first new channel value guess */
1773  vtxp->new_vtxval = (byte) vtxval;
1774  if (vtxval == ST_SUPPLY0 || vtxval == ST_SUPPLY1) vtxp->vtx_supply = TRUE;
1775 
1776  /* get low strength */
1777  st1 = (vtxval >> 2) & 0x7;
1778  st0 = (vtxval >> 5) & 0x7;
1779  lowst = (int32) ((st1 < st0) ? st1 : st0);
1780 
1781  if (__vtxlst_freelst == NULL)
1782   vtxlp = (struct vtxlst_t *) __my_malloc(sizeof(struct vtxlst_t));
1783  else
1784   {
1785    vtxlp = __vtxlst_freelst;
1786    __vtxlst_freelst = __vtxlst_freelst->vtxnxt;
1787   }
1788  vtxlp->vtxp = vtxp;
1789  vtxlp->vtx_itp = vt1_itp;
1790  vtxlp->vtxnxt = NULL;
1791  if (__stvtxtabend[lowst] != NULL)
1792   { __stvtxtabend[lowst]->vtxnxt = vtxlp; __stvtxtabend[lowst] = vtxlp; }
1793  else __stvtxtab[lowst] = __stvtxtabend[lowst] = vtxlp;
1794 
1795  vtxp->vtx_mark = TRUE;
1796  __num_switch_vtxs_processed++;
1797 
1798  /* DBG remove -- */
1799  if (__debug_flg && __ev_tracing)
1800   {
1801    __dbg_msg("** vertex %s.%s level %d added to perturb list\n",
1802     __msg2_blditree(__xs, vtxlp->vtx_itp), to_vtx(__xs2, vtxp), lowst);
1803   }
1804 
1805  for (ep = vtxp->vedges; ep != NULL; ep = ep->enxt)
1806   {
1807    if (ep->ev2->vtx_mark) continue;
1808 
1809    /* SJM - 05/21/01 - compute oside edge and use call stack since itstk */
1810    /* not deep enough */
1811    if (ep->edgoside_itp != NULL) oside_itp = ep->edgoside_itp;
1812    else oside_itp = vt1_itp;
1813 
1814    init_add_vtx_and_subtree(ep->ev2, oside_itp);
1815   }
1816 }
1817 
1818 /*
1819  * routine to turn off all mark in vertices in st vtx tab
1820  *
1821  * needed to save marking memory (8 byte per vtx) and now uses just 2 bits
1822  * but need to build and free list of all vertices in channel when done
1823  *
 * done after all vertices are added to tab for initial relax - since marks
1825  * not needed in relax can turn off using full st vtx tab
1826  */
off_stvtxtab_marks(void)1827 static void off_stvtxtab_marks(void)
1828 {
1829  register int32 si;
1830  register struct vtxlst_t *vtxlp;
1831 
1832  for (si = 7; si >= 0; si--)
1833   {
1834    for (vtxlp = __stvtxtab[si]; vtxlp != NULL; vtxlp = vtxlp->vtxnxt)
1835     {
1836      vtxlp->vtxp->vtx_mark = FALSE;
1837      vtxlp->vtxp->vtx_mark2 = FALSE;
1838     }
1839   }
1840 }
1841 
1842 /*
1843  * TOP LEVEL ROUTINES TO EVAL TRAN CHANNEL
1844  */
1845 
1846 /*
1847  * evaluate tran channels bit by bit - process for exactly one itree loc.
1848  *
1849  * bits may be part of empty tran channel (no trans or inouts on chan)
1850  * in which case just store/schedule the trnva hard driver value
1851  *
1852  * for tran or inout, some hard driver (bid. tran chan in) must have
1853  * changed to cause evaluation of channel, except for initialization,
1854  * any fi>1 eval. goes through here if wire in tran channel
1855  */
__eval_tran_bits(register struct net_t * np)1856 extern void __eval_tran_bits(register struct net_t *np)
1857 {
1858  register int32 bi;
1859  int32 base;
1860  word32 nav, nbv;
1861  struct traux_t *trap;
1862  struct vbinfo_t *vbip;
1863  struct chanrec_t *chanp;
1864 
1865  /* SJM 04/11/01 - can't do incremental relaxation until wire init done */
1866  if (__wire_init) return;
1867 
1868  trap = np->ntraux;
1869  /* SJM 04/23/01 - all channels now per inst, no extra cost for 1 inst mod */
1870  base = __inum*np->nwid;
1871 
1872  /* all others are per bit */
1873  for (bi = np->nwid - 1; bi >= 0; bi--)
1874   {
1875    /* bit of wire is not really in any tran channel - treated as 1 wire */
1876    /* tran channel - just access stored internal hard trnva value */
1877    if ((vbip = trap->vbitchans[base + bi]) == NULL)
1878     {
1879      /* 03/15/00 - SJM - if forced getting drivers ok since st vtx inhibits */
1880      /* actual assign */
1881      if (np->n_stren)
1882       {
1883        nav = (word32) trap->trnva.bp[__inum*np->nwid + bi];
1884        /* really unused but passing it */
1885        nbv = 0;
1886       }
1887      else ld_vtx_netbit(&nav, &nbv, np, bi);
1888      /* notice if strength, nbv not used */
1889      st_vtx_netbit(np, bi, nav, nbv);
1890     }
1891    else
1892     {
1893      chanp = &(__chantab[vbip->chan_id]);
1894      /* BEWARE when this bit on, vb info vtx field invalid (can't use) */
1895      if (chanp->chan_no_vtxs)
1896       {
1897        if (chanp->chtyp == TRPROC_BID) eval_assign_bid_chan(chanp);
1898        else eval_assign_stbid_chan(chanp);
1899       }
1900      /* real tran switch chan - use change vertex to perturb and relax chan */
1901      else eval_update_1w_tranchan(vbip->vivxp);
1902     }
1903   }
1904 }
1905 
1906 /*
1907  * evaluate one tran channel bit - version for path dest. event
1908  * so know always per inst form
1909  * just one bit part of all bits routine
1910  */
extern void __eval_tran_1bit(register struct net_t *np, register int32 bi)
{
 int32 base;
 word32 nav, nbv;
 struct chanrec_t *chanp;
 struct traux_t *trap;
 struct vbinfo_t *vbip;

 /* SJM 04/11/01 - can't do incremental relaxation until wire init done */
 if (__wire_init) return;

 trap = np->ntraux;
 /* SJM - 12/19/00 - new using 1 bit form in many place so must set */
 /* per instance form because if all in one inst. not per inst*/
 base = __inum*np->nwid;

 /* bit of wire is not really in any tran channel - treated as 1 wire */
 /* tran channel - just access stored internal hard trnva value */
 if ((vbip = trap->vbitchans[base + bi]) == NULL)
  {
   /* 03/15/00 - SJM - if forced getting drivers ok since st vtx inhibits */
   /* actual assign */
   if (np->n_stren)
    {
     /* strength net - hard driver value is the stored 8 bit stren byte */
     nav = (word32) trap->trnva.bp[__inum*np->nwid + bi];
     /* really unused but passing it */
     nbv = 0;
    }
   else ld_vtx_netbit(&nav, &nbv, np, bi);
   /* notice if strength, nbv not used */
   st_vtx_netbit(np, bi, nav, nbv);
  }
 else
  {
   chanp = &(__chantab[vbip->chan_id]);
   /* BEWARE when this bit on, vb info vtx field invalid (can't use) */
   /* bid only chan with no tran vertices - combine and assign directly */
   if (chanp->chan_no_vtxs)
    {
     if (chanp->chtyp == TRPROC_BID) eval_assign_bid_chan(chanp);
     else eval_assign_stbid_chan(chanp);
    }
   /* real tran switch chan - use change vertex to perturb and relax chan */
   else eval_update_1w_tranchan(vbip->vivxp);
  }
}
1956 
1957 /*
1958  * update tran and/or inout channel changed wire hard drivers (chan ins)
1959  * returns T if hard drivers of wire changed, F if unchanged
1960  *
1961  * called from target (definition) changed instance
1962  * for special wires (stren only) such as supply0 mdr load use wire stren
1963  * this only changes entire nets trnva per instance hard drivers
1964  * update the hard driver for this net-instance
1965  *
1966  * know driver of np changed, but no other drivers therefore
1967  * only need to update hard drivers for the one net but entire channel
1968  * anywhere in it can change - must update all of wire
1969  * if no hard driver change, returns F, channel cannot change
1970  * if only conducting state of tranif change no need to eval. driver
1971  * but must always re-eval all chan wires
1972  *
1973  * notice for rare bits of wire that are not part of tran channel
1974  * they are part of empty tran channel and can just store the saved
1975  * trnva value - since must eval. entire wire drivers will have those bits
1976  * i.e. this is not bit by bit
1977  */
extern int32 __update_tran_harddrvs(struct net_t *np)
{
 register byte *sbp, *sbp2;
 register struct xstk_t *xsp, *xsp2;

 sbp = NULL;
 if (np->n_stren)
  {
   /* this will make chan wire section of input drivers have wire type */
   /* i.e. supply will probably override its drivers */
   xsp = __stload_mdrwire(np);
   sbp = (byte *) xsp->ap;
   /* SJM - 03/15/01 - even if forced still need to update hard drvrs */
   /* in case ever released - just do not use them when force in effect */
   sbp2 = &(np->ntraux->trnva.bp[__inum*np->nwid]);

   /* if this is path dest. and part of inout tran channel, must sched */
   /* change to internal stored hard driver values */
   /* some bits may need immed. assign - routine handles updating tran chan */
   if (np->iotyp == IO_BID && np->n_isapthdst && !__wire_init)
    {
     stren_schd_bidpthdrvrs(np, sbp, sbp2);
     __pop_xstk();
     /* for any real changes this re-evals 1 bit of channel */
     /* therefore return F to stop another chan re-eval and store vtx */
     return(FALSE);
    }
   /* SJM 11/24/00 - for path destinations can't eliminate schedule using */
   /* current switch channel state because there may be pending event that */
   /* requires inertial rescheduling */

   /* SJM 12/13/00 malloc lib fails when bcmp of 1 byte scalar */
   /* NOTE(review): unlike the memcmp path below, the scalar case goes to */
   /* chg (returns T) even when the byte is unchanged - looks conservative */
   /* (extra chan re-eval) rather than wrong, but confirm before changing */
   if (np->nwid == 1)
    {
     if (sbp[0] != sbp2[0]) sbp2[0] = sbp[0];
     goto chg;
   }

   /* if all changed drivers (tran channel input) same, nothing to do */
   if (memcmp(sbp, sbp2, np->nwid) == 0) goto no_chg;

   /* update this wire drvrs (chan in sect), copy 2nd arg is dest. */
   memcpy(sbp2, sbp, np->nwid);
   /* now done with sbp and xsp */
   goto chg;
  }

 xsp = __load_mdrwire(np);
 /* but must update driving value from tran channel of xmr from inst */
 push_xstk_(xsp2, np->nwid);
 __ld_perinst_val(xsp2->ap, xsp2->bp, np->ntraux->trnva, np->nwid);

 /* some bit changed update or schedule path dest. per bit channels */
 if (np->iotyp == IO_BID && np->n_isapthdst && !__wire_init)
  {
   schd_bidpthdrvrs(np, xsp, xsp2);
   __pop_xstk();
   __pop_xstk();
   /* for any real changes this re-evals 1 bit of channel */
   /* therefore return F to stop another chan re-eval and store vtx */
   return(FALSE);
  }

 /* SJM 11/24/00 - also for non stren case, must always use inertial */
 /* rescheduling on inout paths */
 if (np->nwid <= WBITS)
  {
   if (xsp->ap[0] == xsp2->ap[0] && xsp->bp[0] == xsp2->bp[0]) goto no_chg;
  }
 else
  {
   if (cmp_vval_(xsp->ap, xsp2->ap, np->nwid) == 0
    && cmp_vval_(xsp->bp, xsp2->bp, np->nwid) == 0) goto no_chg;
  }

 /* update ins (drvs) for this tran chan wire section */
 __st_perinst_val(np->ntraux->trnva, np->nwid, xsp->ap, xsp->bp);
chg:
 /* changed exit - pop the stack temps pushed above, report T (changed) */
 if (__ev_tracing)
  {
   char s1[RECLEN], s2[RECLEN], s3[RECLEN];

   if (np->n_stren)
    { strcpy(s1, " strength"); __st_regab_tostr(s2, sbp, np->nwid); }
   else
    {
     strcpy(s1, "");
     __regab_tostr(s2, xsp->ap, xsp->bp, xsp->xslen, BHEX, FALSE);
    }
   __tr_msg(
    "-- driver (hard tran input) of%s channel %s %s changed new value %s\n",
    s1, __to_wtnam(s3, np), np->nsym->synam, s2);
  }
 __pop_xstk();
 if (!np->n_stren) __pop_xstk();
 return(TRUE);

no_chg:
 /* unchanged exit - same stack pops, report F (channel cannot change) */
 if (__ev_tracing)
  {
   char s1[RECLEN], s2[RECLEN], s3[RECLEN];

   if (np->n_stren)
    { strcpy(s1, " strength"); __st_regab_tostr(s2, sbp, np->nwid); }
   else
    {
     strcpy(s1, "");
     __regab_tostr(s2, xsp->ap, xsp->bp, xsp->xslen, BHEX, FALSE);
    }
   __tr_msg(
    "-- a driver (tran channel input) of%s channel %s %s changed but value %s unchanged\n",
    s1, __to_wtnam(s3, np), np->nsym->synam, s2);
  }
 __pop_xstk();
 if (!np->n_stren) __pop_xstk();
 return(FALSE);
}
2095 
2096 /*
2097  * schedule stren inout channel path destination hard drivers and channel
2098  * some bits may not have changed or need immediate assign and chan update
2099  */
static void stren_schd_bidpthdrvrs(struct net_t *np, byte *drv_sbp,
 byte *trnva_sbp)
{
 register int32 biti;
 int32 needs_chan_upd;
 i_tev_ndx *itevpi;
 struct rngdwir_t *dwirp;

 dwirp = np->nu.rngdwir;
 /* per bit pending event table section for this instance of the wire */
 itevpi = &(dwirp->wschd_pbtevs[__inum*np->nwid]);
 for (biti = 0; biti < np->nwid; biti++)
  {
   /* communicate old/new strength bytes through the gate value globals */
   __new_gateval = (word32) drv_sbp[biti];
   __old_gateval = (word32) trnva_sbp[biti];

   needs_chan_upd = (__ev_tracing)
    ? evtr_schd_1bitpthdrvr(np, biti, itevpi)
    : schd_1bitpthdrvr(np, biti, itevpi);

   /* immediate assign case - update stren tran driver then re-eval and */
   /* store the channel's new value for this bit */
   if (needs_chan_upd)
    {
     trnva_sbp[biti] = (byte) __new_gateval;
     __eval_tran_1bit(np, biti);
    }
  }
}
2126 
2127 /*
2128  * schedule non stren inout channel path dest. hard drivers and channel
2129  * some bits may not have changed or need immediate assign and chan update
2130  */
static void schd_bidpthdrvrs(struct net_t *np, struct xstk_t *drv_xsp,
 struct xstk_t *trnva_xsp)
{
 register int32 bi;
 register word32 tmp;
 int32 nd_chan_upd;
 i_tev_ndx *itevpi;
 struct rngdwir_t *dwirp;

 dwirp = np->nu.rngdwir;
 /* per bit pending event table section for this instance of the wire */
 itevpi = &(dwirp->wschd_pbtevs[__inum*np->nwid]);
 for (bi = 0; bi < np->nwid; bi++)
  {
   /* pack the b (high) and a (low) parts into 2 bit gate value globals */
   tmp = rhsbsel_(drv_xsp->bp, bi);
   __new_gateval = (tmp << 1) | (rhsbsel_(drv_xsp->ap, bi));
   tmp = rhsbsel_(trnva_xsp->bp, bi);
   __old_gateval = (tmp << 1) | (rhsbsel_(trnva_xsp->ap, bi));

   if (__ev_tracing) nd_chan_upd = evtr_schd_1bitpthdrvr(np, bi, itevpi);
   else nd_chan_upd = schd_1bitpthdrvr(np, bi, itevpi);

   if (nd_chan_upd)
    {
     /* update non stren tran driver and re-eval and store channel new value */
     __lhsbsel(trnva_xsp->ap, bi, (__new_gateval & 1L));
     __lhsbsel(trnva_xsp->bp, bi, ((__new_gateval >> 1) & 1L));
     __st_perinst_val(np->ntraux->trnva, np->nwid, trnva_xsp->ap,
      trnva_xsp->bp);
     __eval_tran_1bit(np, bi);
    }
  }
}
2163 
2164 /*
2165  * schedule 1 bit path bidirect driver
2166  *
2167  * show cancel e analysis including non inout path distributed delay
2168  *
2169  * this works for both strength 8 bit nval and oval and non strength
2170  * if scalar bi must be 0 (i.e. biti can not be -1)
2171  * nval is new value to schedule change to, old value is current wire value
2172  *
2173  * old and new gate values in globals - maybe changed since caller saves
2174  */
static int32 schd_1bitpthdrvr(struct net_t *np, register int32 biti,
 i_tev_ndx *itevpi)
{
 register word32 nval, oval;
 word32 is_stren;
 word64 schtim;
 i_tev_ndx tevpi;
 struct rngdwir_t *dwirp;
 struct pthdst_t *pdp;
 struct tev_t *tevp;

 dwirp = np->nu.rngdwir;
 tevpi = itevpi[biti];

 /* DBG remove ---
 if (tevpi != -1 && __tevtab[tevpi].tetyp != TE_BIDPATH)
  __misc_terr(__FILE__, __LINE__);
 --- */

 nval = __new_gateval;
 oval = __old_gateval;

 /* since always use last changed value, if last same as current */
 /* because gate style glitch nothing to do since already right value */
 if (tevpi == -1 && nval == oval)
  return(FALSE);

 is_stren = np->n_stren;

 /* possible for some bits to not be path destinations */
 /* T return means caller must immediate assign and re-eval the chan bit */
 if (oval != nval)
  {
   if ((pdp = __get_path_del(dwirp, biti, &schtim)) == NULL)
    return(TRUE);
  }
 else { pdp = NULL; schtim = 0ULL; }

 /* special case 0 - distributed delay longer - immediate assign */
 /* normal cause is path (probably from multiple input final driving gate) */
 /* that has not path delay on it - this may be ok */
 if (pdp != NULL && schtim <= __simtime)
  {
   /* problem with modeling - distributed delay longer than path */
   if (!__no_informs) __emit_path_distinform(np, pdp, &__pdmindel);

   /* modeling anomaly style spike possible - know immed. assign earlier */
   if (tevpi != -1)
    {
     /* calls here take ptr not index */
     tevp = &(__tevtab[tevpi]);
     if (__warn_cancel_e && !__no_warns && !__em_suppr(592))
      {
       __emit_path_pulsewarn(pdp, tevp, &__simtime, &(tevp->etime),
        "distributed longer or no path delay", is_stren);
      }
     /* always cancel pending */
     __cancel_1wev(tevp);
     itevpi[biti] = -1;

     /* this is same for on detect and on event */
     if (__show_cancel_e)
      {
       /* this is special case where immediate assign must be to x */
       /* and cancel future event that can be scheduled for now */
set_on_detect_x:
       /* set global causes use in tran channel re-eval */
       if (is_stren) __new_gateval = (word32) ST_STRONGX;
       else __new_gateval = (word32) 3;
       return(TRUE);
      }
     /* if no show canceled e, just assign later */
    }
   /* no schedule, distributed longer - global new gateval right and used */
   return(TRUE);
  }

 /* no pending event */
 /* SJM 11/24/00 - know if no pending event will have path */
 if (tevpi == -1)
  {
   /* because no pending event must be different */
   __schedule_1wev(np, biti, TE_BIDPATH, __pdmindel, schtim, nval,
    itevpi, FALSE);
   return(FALSE);
  }
 /* pending event */
 tevp = &(__tevtab[tevpi]);
 /* new and old same but know scheduled different - classic pulse/glitch */
 if (nval == oval)
  {
   /* have delay to use to select path */
   if (__warn_cancel_e && !__no_warns && !__em_suppr(592))
    {
     __emit_path_samewarn(np, biti, tevp, &(tevp->etime), "pulse",
      is_stren);
    }

   /* if spike, suppress future but schedule to x at currently scheduled */
   if (__show_cancel_e)
    {
     /* on event - leave event scheduled but force its output to x */
     if (__showe_onevent)
      { tevp->outv = (is_stren) ? ST_STRONGX : 3; return(FALSE); }

     /* on detect - cancel and immediate assign x via shared label above */
     __cancel_1wev(tevp);
     itevpi[biti] = -1;
     goto set_on_detect_x;
    }
   /* remove pulse */
   __cancel_1wev(tevp);
   itevpi[biti] = -1;
   return(FALSE);
  }

 /* now know pdp set */

 /* new schedule to same value case */
 /* here delay can be different because different path selected */
 /* and maybe other reasons */
 /* done silently here - trace message only below */
 if (tevp->outv == (byte) nval) return(FALSE);

 /* inertial reschedule */
 if (__warn_cancel_e && !__no_warns && !__em_suppr(592))
  __emit_path_pulsewarn(pdp, tevp, &(tevp->etime), &schtim,
   "inout unstable", is_stren);

 /* easy show cancel (set to x case) - no new event may or may not switch */
 if (__show_cancel_e)
  {
   /* LOOKATME - maybe need to check old tevp and new schd time and if 2nd */
   /* input change results in earlier edge cancel and schedule earlier */
   if (__showe_onevent)
    { tevp->outv = (is_stren) ? ST_STRONGX : 3; return(FALSE); }

   __cancel_1wev(tevp);
   itevpi[biti] = -1;
   goto set_on_detect_x;
  }
 /* inertial reschedule, this handles cancel if needed */
 __reschedule_1wev(tevpi, nval, __pdmindel, schtim, itevpi);
 return(FALSE);
}
2317 
2318 /*
2319  * tracing version of schedule stren 1 bit inout channel path dest drivers
2320  *
2321  * show cancel e analysis including non inout path distributed delay
2322  *
2323  * this works for both strength 8 bit nval and oval and non strength
2324  * if scalar bi must be 0 (i.e. biti can not be -1)
2325  * nval is new value to schedule change to, old value is current wire value
2326  *
2327  * old and new gate values in globals - maybe changed since caller saves
2328  */
static int32 evtr_schd_1bitpthdrvr(struct net_t *np, register int32 biti,
 i_tev_ndx *itevpi)
{
 register word32 nval, oval;
 i_tev_ndx tevpi;
 word32 is_stren;
 word32 outval;
 word64 schtim, distdel, tevptim;
 struct rngdwir_t *dwirp;
 struct pthdst_t *pdp;
 struct spcpth_t *pthp;
 struct tev_t *tevp;
 char s1[RECLEN], s2[RECLEN], vs1[10], vs2[10], vs3[10];

 is_stren = np->n_stren;
 dwirp = np->nu.rngdwir;
 tevpi = itevpi[biti];
 nval = __new_gateval;
 oval = __old_gateval;

 if (tevpi != -1)
  {
   /* DBG remove --- */
   if (__tevtab[tevpi].tetyp != TE_BIDPATH) __misc_terr(__FILE__, __LINE__);
   /* --- */
   strcpy(s1, " (pending event)");
  }
 else strcpy(s1, "");

 /* if no change and do not need schedule time for cancel, done */
 __tr_msg("-- path delay inout destination %s driver change%s now %s:\n",
  __to_evtrwnam(__xs, np, biti, biti, __inst_ptr), s1,
  __to_timstr(__xs2, &__simtime));

 /* since always use last changed value, if last same as current */
 /* because gate style glitch nothing to do since already right value */
 if (tevpi == -1 && nval == oval)
  {
   __tr_msg(" PATHDEL, NOCHG <OV=%s> at %s\n", __to_vnam(vs1, is_stren, nval),
    __to_timstr(__xs, &__simtime));
   return(FALSE);
  }

 /* possible for some bits to not be path destinations - just immed assign */
 if (nval != oval)
  {
   if ((pdp = __get_path_del(dwirp, biti, &schtim)) == NULL)
    {
     __tr_msg(" BIT %d NOT PATH DEST: IMMED ASSIGN <OV=%s, NV=%s>\n",
      biti,__to_vnam(vs1, is_stren, oval), __to_vnam(vs2, is_stren, nval));
     return(TRUE);
    }
   pthp = pdp->pstchgp->chgu.chgpthp;
   __tr_msg(" PATH (at line %s) SRC CHG TIME %s\n",
    __bld_lineloc(s1, pthp->pthsym->syfnam_ind, pthp->pthsym->sylin_cnt),
    __to_timstr(__xs, &__pdlatechgtim));
  }
 else { pdp = NULL; schtim = 0ULL; }

 /* special case 0 - distributed delay longer - immediate assign */
 /* SJM FIX - must also require pdp set to match non tracing version */
 /* (schd_1bitpthdrvr) - without the pdp test, the pdp nil case (pulse */
 /* with pending event, schtim 0) always fell in here so the pending */
 /* pulse code below was unreachable and tracing changed sim behavior */
 if (pdp != NULL && schtim <= __simtime)
  {
   /* problem with modeling - distributed delay longer than path */
   /* or changed path has no path delay */
   if (!__no_informs && pdp != NULL)
     __emit_path_distinform(np, pdp, &__pdmindel);

   /* modeling anomaly style spike possible - know immed. assign earlier */
   if (tevpi != -1)
    {
     /* most routines here need ptr to event not index */
     tevp = &(__tevtab[tevpi]);
     if (__warn_cancel_e && !__no_warns && !__em_suppr(592) && pdp != NULL)
      __emit_path_pulsewarn(pdp, tevp, &__simtime, &(tevp->etime),
       "distributed longer or no path delay", is_stren);

     /* save fields needed for messages before cancelling the event */
     outval = (word32) tevp->outv;
     tevptim = tevp->etime;
     /* always cancel pending */
     __cancel_1wev(tevp);
     itevpi[biti] = -1;

     /* this is same for on detect and on event since immed. assign */
     if (__show_cancel_e)
      {
       /* this is special case where immediate assign must be to x */
       /* and cancel future event that can be scheduled for now */
       __tr_msg(
        " INOUT PATH, DIST DELAY PULSE <OV=%s, OSV=%s at %s NV=%s SHOWING X FROM NOW>\n",
        __to_vnam(vs1, is_stren, oval), __to_vnam(vs2, is_stren, outval),
        __to_timstr(s1, &tevptim), __to_vnam(vs3, is_stren, nval));
set_on_detect_x:
       if (is_stren) __new_gateval = (word32) ST_STRONGX;
       else __new_gateval = (word32) 3;
       return(TRUE);
      }
     __tr_msg(
      " INOUT PATH, DIST DELAY PULSE <OV=%s, OSV=%s at %s - NV=%s ASSIGN AND CANCEL>\n",
      __to_vnam(vs1, is_stren, oval), __to_vnam(vs2, is_stren,
      (word32) tevp->outv), __to_timstr(s1, &(tevp->etime)),
      __to_vnam(vs3, is_stren, nval));
     /* no schedule, distributed delay longer - new gate val used for chan. */
     return(TRUE);
    }

   /* know if no pending event, pdp not nil */
   /* no pending event store - know must be different */
   distdel = __simtime - __pdlatechgtim;
   __tr_msg(
    " DIST DELAY %s LONGER THAN INOUT PATH %s: IMMED ASSIGN <OV=%s, NV=%s>\n",
    __to_timstr(__xs2, &distdel), __to_timstr(s1, &__pdmindel),
    __to_vnam(vs1, is_stren, oval), __to_vnam(vs2, is_stren, nval));
   /* use new gateval in tran channel eval */
   return(TRUE);
  }

 /* real path delay */
 /* case 1: no pending event */
 /* know if no pending event, pdp not nil */
 if (tevpi == -1)
  {
   /* because no pending event must be different */
   __tr_msg(" PATH DEL, SCHD AT %s <OV=%s, NSV=%s>\n",
    __to_timstr(s1, &schtim), __to_vnam(vs1, is_stren, oval),
    __to_vnam(vs2, is_stren, nval));
   __schedule_1wev(np, biti, TE_BIDPATH, __pdmindel, schtim, nval,
    itevpi, FALSE);
   return(FALSE);
  }

 /* pending event */
 tevp = &(__tevtab[tevpi]);
 /* new and old same but know scheduled different - classic pulse/glitch */
 if (nval == oval)
  {
   /* perform show cancel e analysis, know scheduled different - tell user */
   /* this is classical spike analysis */
   if (__warn_cancel_e && !__no_warns && !__em_suppr(592))
    {
     __emit_path_samewarn(np, biti, tevp, &(tevp->etime), "pulse",
      is_stren);
    }

   /* if spike, suppress future but schedule to x at currently scheduled */
   if (__show_cancel_e)
    {
     if (__showe_onevent) sprintf(s1, "%s (on event)", __to_timstr(__xs,
      &(tevp->etime)));
     else sprintf(s1, "%s (on detect)", __to_timstr(__xs, &__simtime));

     /* LOOKATME - think on event pulse should use schedule if earlier? */
     __tr_msg(
      " INOUT PATH DEL, PEND AT %s, PULSE <OV=NSV=%s, OSV=%s SHOWING X FROM %s>\n",
      __to_timstr(__xs, &(tevp->etime)), __to_vnam(vs1, is_stren, oval),
      __to_vnam(vs2, is_stren, (word32) tevp->outv), s1);

     if (__showe_onevent)
      { tevp->outv = (is_stren) ? ST_STRONGX : 3; return(FALSE); }

     __cancel_1wev(tevp);
     itevpi[biti] = -1;
     goto set_on_detect_x;
    }
   /* remove pulse */
   __tr_msg(
    " INOUT PATH DEL, PEND, PULSE, INERTIAL CANCEL AT %s <OV=%s, OSV=%s>\n",
    __to_timstr(s1, &(tevp->etime)), __to_vnam(vs1, is_stren, oval),
    __to_vnam(vs2, is_stren, (word32) tevp->outv));
   /* no spike, but newly scheduled to same so no event */
   __cancel_1wev(tevp);
   itevpi[biti] = -1;
   return(FALSE);
  }

 /* from here on know pdp set */
 /* new schedule to same value case */
 /* know that delay same and later so just discard new event */
 /* done silently here - trace message only */
 if (tevp->outv == (byte) nval)
  {
   __tr_msg(
    " INOUT PATH DEL, MODEL ANOMALLY IGNORE SCHED TO SAME <OSV=NSV=%s> OLD AT %s NEW %s\n",
    __to_vnam(vs1, is_stren, nval), __to_timstr(s1, &(tevp->etime)),
    __to_timstr(s2, &schtim));
   return(FALSE);
  }

 /* inertial reschedule */
 if (__warn_cancel_e && !__no_warns && !__em_suppr(592))
  __emit_path_pulsewarn(pdp, tevp, &(tevp->etime), &schtim, "inout unstable",
   is_stren);

 /* easy show cancel (set to x case) - no new event may or may not switch */
 if (__show_cancel_e)
  {
   if (__showe_onevent) sprintf(s2, "%s (on event)", __to_timstr(__xs,
    &(tevp->etime)));
   else sprintf(s2, "%s (on detect)", __to_timstr(__xs, &__simtime));

   __tr_msg(
    " INOUT PATH DEL, PEND AT %s, UNSTABLE <OV=%s, OSV=%s, NSV=%s SHOWING X FROM %s>\n",
    __to_timstr(s1, &(tevp->etime)), __to_vnam(vs1, is_stren, oval),
    __to_vnam(vs2, is_stren, (word32) tevp->outv), __to_vnam(vs3, is_stren,
    nval), s2);
   if (__showe_onevent)
    { tevp->outv = (is_stren) ? ST_STRONGX : 3; return(FALSE); }

   __cancel_1wev(tevp);
   itevpi[biti] = -1;
   goto set_on_detect_x;
  }

 /* inertial reschedule, this handles cancel if needed */
 __tr_msg(
  " INOUT PATH DEL, PEND, UNSTABLE, INERTIAL RESCHD <OV=%s, OSV=%s AT %s, NSV=%s AT %s>\n",
  __to_vnam(vs1, is_stren, oval), __to_vnam(vs2, is_stren, (word32) tevp->outv),
  __to_timstr(s1, &(tevp->etime)), __to_vnam(vs3, is_stren, nval),
  __to_timstr(s2, &schtim));

 __reschedule_1wev(tevpi, nval, __pdmindel, schtim, itevpi);
 return(FALSE);
}
2552 
2553 /*
2554  * ROUTINES FOR BID ONLY ALL SAME WIRE TYPE CHANNELS
2555  */
2556 
2557 /*
2558  * for stren bid only per bit same wire type tran channel
2559  * combine all nodes into one stren value using chan rec list
2560  *
2561  * this accumlates combined channel strength values in __acum_sb
2562  *
2563  * SJM 04/23/01 - changed so always eval from one chan distinguished vtx
2564  * works since no stren reduction or wired nets in channel always
2565  * need to combine all nodes into one value that is then stored everywhere
2566  * this allows removing back edges (i.e. no longer digraph)
2567  */
eval_assign_stbid_chan(struct chanrec_t * chanp)2568 static void eval_assign_stbid_chan(struct chanrec_t *chanp)
2569 {
2570  register struct bidvtxlst_t *bidvtxlp;
2571  register word32 sb2;
2572  register struct net_t *np;
2573  register int32 bi;
2574 
2575  /* first eval all contributors to the one universal new value */
2576  __acum_sb = ST_HIZ;
2577  bidvtxlp = chanp->bid_vtxlp;
2578  for (; bidvtxlp != NULL; bidvtxlp = bidvtxlp->bidvtxnxt)
2579   {
2580    __push_itstk(bidvtxlp->bidvtx_itp);
2581 
2582    np = bidvtxlp->vnp;
2583    bi = bidvtxlp->vi1;
2584    bi = (bi == -1) ? 0 : bi;
2585    /* SJM - 03/15/00 - if wire is forced, use its values as "driving" val */
2586    if (np->frc_assgn_allocated
2587     && np->nu2.qcval[__inum*np->nwid + bi].qc_active)
2588     {
2589      sb2 = np->nva.bp[__inum*np->nwid + bi];
2590     }
2591    else sb2 = np->ntraux->trnva.bp[__inum*np->nwid + bi];
2592    __acum_sb = (word32) __comb_1bitsts(np->ntyp, __acum_sb, sb2);
2593    __pop_itstk();
2594   }
2595 
2596  /* then assign it to every vertex */
2597  bidvtxlp = chanp->bid_vtxlp;
2598  for (; bidvtxlp != NULL; bidvtxlp = bidvtxlp->bidvtxnxt)
2599   {
2600    __push_itstk(bidvtxlp->bidvtx_itp);
2601 
2602    np = bidvtxlp->vnp;
2603    bi = bidvtxlp->vi1;
2604    stassign_1tranbit(np, bi, __acum_sb);
2605    __pop_itstk();
2606   }
2607 }
2608 
2609 /*
 * for non stren bid only per bit same wire type tran channel
2611  * combine all nodes into one stren value using chan rec list
2612  *
2613  * SJM 04/23/01 - changed so always eval from one chan distinguished vtx
2614  * works since no stren reduction or wired nets in channel always
2615  * need to combine all nodes into one value that is then stored everywhere
2616  * this allows removing back edges (i.e. no longer digraph)
2617  */
eval_assign_bid_chan(struct chanrec_t * chanp)2618 static void eval_assign_bid_chan(struct chanrec_t *chanp)
2619 {
2620  register struct bidvtxlst_t *bidvtxlp;
2621  register struct net_t *np;
2622  word32 nav, nbv;
2623  int32 bi, bi2;
2624 
2625  /* initialize to 2 (hiz) */
2626  __acum_a = 0;
2627  __acum_b = 1;
2628 
2629  /* first eval all contributors to the one universal new value */
2630  /* uses global accum - in gcc global access faster */
2631  bidvtxlp = chanp->bid_vtxlp;
2632  for (; bidvtxlp != NULL; bidvtxlp = bidvtxlp->bidvtxnxt)
2633   {
2634    __push_itstk(bidvtxlp->bidvtx_itp);
2635    np = bidvtxlp->vnp;
2636    bi = bidvtxlp->vi1;
2637    bi2 = (bi == -1) ? 0 : bi;
2638    if (np->frc_assgn_allocated
2639     && np->nu2.qcval[__inum*np->nwid + bi2].qc_active)
2640     {
2641      if (bi == -1) ld_scalval_(&nav, &nbv, np->nva.bp);
2642      else __ld_bit(&nav, &nbv, np, bi);
2643     }
2644    else ld_vtx_netbit(&nav, &nbv, np, bi);
2645    __eval_1w_nonstren(&__acum_a, &__acum_b, nav, nbv, np->ntyp);
2646    __pop_itstk();
2647   }
2648 
2649  /* then assign it to every vertex */
2650  bidvtxlp = chanp->bid_vtxlp;
2651  for (; bidvtxlp != NULL; bidvtxlp = bidvtxlp->bidvtxnxt)
2652   {
2653    __push_itstk(bidvtxlp->bidvtx_itp);
2654    np = bidvtxlp->vnp;
2655    bi = bidvtxlp->vi1;
2656    assign_1tranbit(np, bi, __acum_a, __acum_b);
2657    __pop_itstk();
2658   }
2659 }
2660 
2661 /*
2662  * load the bit value of one non strength vertex
2663  * called from itree location of wire
2664  */
static void ld_vtx_netbit(word32 *ap, word32 *bp, struct net_t *np, int32 bi)
{
 struct xstk_t *xsp;

 /* scalar: load directly from the per-inst tran aux value */
 if (!np->n_isavec)
  {
   ld_scalval_(ap, bp, np->ntraux->trnva.bp);
   return;
  }
 /* vector: bi known not -1 - load the whole wire then select the bit */
 push_xstk_(xsp, np->nwid);
 __ld_perinst_val(xsp->ap, xsp->bp, np->ntraux->trnva, np->nwid);
 ap[0] = rhsbsel_(xsp->ap, bi);
 bp[0] = rhsbsel_(xsp->bp, bi);
 __pop_xstk();
}
2680 
2681 /*
2682  * routine to assign 1 non stren bid only channel tran bit
2683  */
static void assign_1tranbit(struct net_t *np, int32 bi, word32 nav, word32 nbv)
{
 /* non stren case - a scalar (bi == -1) is stored at bit 0 */
 if (bi == -1) bi = 0;

 /* tran channel elements in P1364 can not have wire delays - no meaning */
 /* since drivers of all in channel and wires must be same */
 /* SJM 06/02/99 - will have DWIR and no path if all 0 paths removed */
 /* to optimize invalidating internal consistency check */
 /* for path dest., need immediate assign - internal hard driver value */
 /* change is delayed */

 /* if wire bit forced (qcval inst non nil), must not assign to wire */
 if (np->frc_assgn_allocated
  && np->nu2.qcval[__inum*np->nwid + bi].qc_active)
  {
   /* need not pass bi -1 since checks separately for scalar net */
   if (__debug_flg && __ev_tracing) trmsg_frc_inhibit(np, bi);
   return;
  }

 /* store the new value into the net */
 if (np->n_isavec) __chg_st_bit(np, bi, nav, nbv);
 else __chg_st_val(np, &nav, &nbv);

 if (__debug_flg && __ev_tracing)
  transtore_trmsg(np, bi, __lhs_changed, nav, nbv);

 /* SJM 08/24/03 - since record reset must test to see if chged */
 if (__lhs_changed) record_sel_nchg_(np, bi, bi);
}
2716 
2717 /*
2718  * store one vertex net bit from passed a and b values
2719  * called from itree location of np definition
2720  */
static void st_vtx_netbit(struct net_t *np, int32 bi, word32 nav, word32 nbv)
{
 /* this does not use lhs changed */
 /* strength wires store the packed strength byte, others store a/b pair */
 if (np->n_stren)
  {
   stassign_1tranbit(np, bi, nav);
   return;
  }
 assign_1tranbit(np, bi, nav, nbv);
}
2727 
2728 /*
2729  * emit a tran channel force inhibit of tran channel store
2730  */
static void trmsg_frc_inhibit(struct net_t *np, int32 bi)
{
 char netnam[RECLEN];

 /* build the net name - with bit select for vectors */
 if (np->n_isavec) sprintf(netnam, "%s[%d]", np->nsym->synam, bi);
 else strcpy(netnam, np->nsym->synam);
 __tr_msg("## tran/inout channel store of%s %s inhibited - active force\n",
  (np->n_stren) ? " strength" : "", netnam);
}
2741 
2742 /*
2743  * emit a tran channel store trace message
2744  */
static void transtore_trmsg(struct net_t *np, int32 bi, int32 chg, word32 nav,
 word32 nbv)
{
 char stren_lbl[RECLEN], valstr[RECLEN], netnam[RECLEN], wtnam[RECLEN];
 byte sval;

 /* format the value - strength byte form or a/b hex form */
 if (np->n_stren)
  {
   strcpy(stren_lbl, " strength");
   sval = (byte) nav;
   __st_regab_tostr(valstr, &sval, 1);
  }
 else
  {
   strcpy(stren_lbl, "");
   __regab_tostr(valstr, &nav, &nbv, 1, BHEX, FALSE);
  }
 /* net name with bit select for vectors */
 if (np->n_isavec) sprintf(netnam, "%s[%d]", np->nsym->synam, bi);
 else strcpy(netnam, np->nsym->synam);

 if (chg)
  __tr_msg("## tran/inout channel store of%s %s %s NV=%s\n", stren_lbl,
   __to_wtnam(wtnam, np), netnam, valstr);
 else
  __tr_msg("## tran/inout channel no change of%s %s %s OV=%s\n",
   stren_lbl, __to_wtnam(wtnam, np), netnam, valstr);
}
2771 
2772 /*
2773  * assign one strength tran channel element (wire-bit)
2774  * bi is -1 for scalar else bit index
2775  * handles trireg and any forced assign inhibition
2776  * called from itree loc. of np
2777  *
2778  * routine does not traverse edge graph
2779  */
static void stassign_1tranbit(struct net_t *np, register int32 bi,
 register word32 sbv)
{
 register byte *sbp2;
 register int32 bind;

 /* scalars (bi == -1) are stored at index 0 */
 bind = (bi == -1) ? 0 : bi;

 /* get strength wire address */
 get_stwire_addr_(sbp2, np);

 /* tran channel elements in P1364 can not have wire delays */
 /* no meaning since drivers of all in channel and wires must be same */
 /* SJM 06/02/99 - will have DWIR and no path if all 0 paths removed */
 /* to optimize invalidating internal consistency check */

 /* stren non delay wire */
 /* if bit forced, must not assign */
 if (np->frc_assgn_allocated
  && np->nu2.qcval[__inum*np->nwid + bind].qc_active)
   {
    if (__debug_flg && __ev_tracing) trmsg_frc_inhibit(np, bi);
    return;
   }

 /* this may change sbv from tran channel new value to trireg value */
 /* SJM 10/16/00 - this was commented out but that caused Samsung trireg */
 /* tests to fail - must have had a reason to comment out but why? */
 /* --- */
 if (np->ntyp == N_TRIREG)
  {
   /* during wire init force the capacitive x value; after init a hiz */
   /* result decays to the old 2-bit value at the trireg's cap strength */
   if (__wire_init) sbv = (byte) (3 | __cap_to_stren[np->n_capsiz]);
   else
    {
     if (sbv == ST_HIZ)
      sbv = (byte) ((sbp2[bind] & 3) | __cap_to_stren[np->n_capsiz]);
    }
  }
 /* --- */
 if (__debug_flg && __ev_tracing)
  {
   int32 chg;

   if (sbv != (word32) sbp2[bind]) chg = TRUE; else chg = FALSE;
   transtore_trmsg(np, bi, chg, sbv, sbv);
  }
 /* store only on change so net change recording is not done needlessly */
 if (sbv != (word32) sbp2[bind])
  {
   sbp2[bind] = (byte) sbv;
   /* notice needs to be bi since need -1 if scalar */
   /* always record since immediate assign - no setting of lhs changed */
   /* SJM 08/24/03 - and no need to now since this resets anyway */
   record_sel_nchg_(np, bi, bi);
  }
}
2835 
2836 /*
2837  * ROUTINES FOR BID TRAN STREN CHANNEL (ALSO BID DIFFERENT WIRE TYPES)
2838  */
2839 
2840 /*
2841  * assign or schedule channel when hard driver of transistor enable adjacent
2842  * to this vertex changed
2843  *
2844  * normally pass only argument but when tranif enable changes pass both
2845  * this allows doing only one relaxation
2846  *
2847  * this must handle assign inhibition from forcing and trireg decays
2848  * build the vincincity lists and relax only those vertices
2849  * uses Bryant algorithm
2850  *
2851  * called from itree loc. of start vertex
2852  */
2853 
2854 /*
 * eval and assign locally changed part of tran always stren channel
2856  *
 * itree context on itree stack
2858  * has trans or bid strength and different wire types
2859  * add this vertex to relaxation list
2860  */
eval_update_1w_tranchan(struct vtx_t * vtxp)2861 static void eval_update_1w_tranchan(struct vtx_t *vtxp)
2862 {
2863  struct vtxlst_t *vtxlp;
2864 
2865  vtxlp = add_stchan_chged_vtx(vtxp, __inst_ptr);
2866  vtxp->vtx_in_vicinity = TRUE;
2867  find_chgvtx_vicinity(vtxlp);
2868 
2869  /* do the relaxation only on perturbed in vicinity vertices */
2870  stchan_trif_relax();
2871 
2872  /* final step, assign (maybe schedule) all changed vertices */
2873  if (__chg_vtxlst_hdr != NULL) assign_chged_vtxs();
2874 }
2875 
2876 /*
2877  * add a changed vertex to st vtx tab in preparation for relaxation
2878  * passed itree context of vertex
2879  *
2880  * this adds the vertex that has changed because hard driver changed, or
2881  * it was forced/released or it is tranif terminal and enable changed
2882  */
static struct vtxlst_t *add_stchan_chged_vtx(struct vtx_t *vtxp,
 struct itree_t *vt1_itp)
{
 int32 bi, inum;
 word32 vtxval, st0, st1, lowst;
 struct net_t *np;
 struct vtxlst_t *vtxlp;

 np = vtxp->vnp;
 /* scalars (vi1 == -1) are stored at index 0 */
 bi = (vtxp->vi1 == -1) ? 0 : vtxp->vi1;
 inum = vt1_itp->itinum;

 /* always move new vtx value to old */
 /* for trireg and net's with delay (when supported?) net val may differ */
 /* NO - only update old value when store new value ---
 vtxp->old_vtxval = vtxp->new_vtxval;
 --- */

 /* add vtx to change list and solve channel using relaxation */
 /* first guess at new (current) is hard driver value unless forced */
 /* NOTE(review): force test indexes qcval with __inum (current itree ctx) */
 /* while the value reads below use inum from vt1_itp - presumably the */
 /* same instance here; confirm for calls made without pushing vt1_itp */
 if (np->frc_assgn_allocated
  && np->nu2.qcval[__inum*np->nwid + bi].qc_active)
  {
   vtxp->vtx_forced = TRUE;
   vtxval = (word32) np->nva.bp[inum*np->nwid + bi];
   vtxp->new_vtxval = vtxval;
  }
 else
  {
   /* when released tran channel relaxed again - this turns off force */
   vtxval = (word32) np->ntraux->trnva.bp[inum*np->nwid + bi];
   /* 04/25/01 - SJM - must turn off forcing each time since some releases */
   /* may have happened but release not connectd to tran vertices */
   vtxp->vtx_forced = FALSE;
  }
 vtxp->new_vtxval = (byte) vtxval;
 if (vtxval == ST_SUPPLY0 || vtxval == ST_SUPPLY1) vtxp->vtx_supply = TRUE;
 /* think needed since vpi can drive to supply then remove */
 else vtxp->vtx_supply = FALSE;

 /* SJM 04/09/01 - for stren ranges using low - think that will work */
 /* extract the 3-bit strength-1 (bits 2-4) and strength-0 (bits 5-7) */
 st1 = (vtxval >> 2) & 0x7;
 st0 = (vtxval >> 5) & 0x7;
 lowst = (st1 < st0) ? st1 : st0;

 /* alloc and link on changed one vertex */
 /* because will be large at start then tiny better to malloc/free */
 if (__vtxlst_freelst == NULL)
  vtxlp = (struct vtxlst_t *) __my_malloc(sizeof(struct vtxlst_t));
 else
  {
   vtxlp = __vtxlst_freelst;
   __vtxlst_freelst = __vtxlst_freelst->vtxnxt;
  }
 vtxlp->vtxp = vtxp;
 vtxlp->vtx_itp = vt1_itp;
 vtxlp->vtxnxt = NULL;

 /* append to the per-strength-level perturb list at level lowst */
 /* must always put on end */
 if (__stvtxtabend[lowst] != NULL)
  { __stvtxtabend[lowst]->vtxnxt = vtxlp; __stvtxtabend[lowst] = vtxlp; }
 else __stvtxtab[lowst] = __stvtxtabend[lowst] = vtxlp;

 __num_switch_vtxs_processed++;

 /* DBG remove -- */
 if (__debug_flg && __ev_tracing)
  {
   __dbg_msg("** vertex %s.%s level %d added to incremental perturb list\n",
    __msg2_blditree(__xs, vtxlp->vtx_itp), to_vtx(__xs2, vtxp), lowst);
  }
 return(vtxlp);
}
2956 
2957 /*
2958  * routine to store into nets all changed vertices
2959  */
static void assign_chged_vtxs(void)
{
 register struct vtxlst_t *vtxlp;
 register int32 bi, bi2;
 register struct vtx_t *vtxp;
 word32 sbv;
 byte *sbp2;
 struct net_t *np;

 for (vtxlp = __chg_vtxlst_hdr; vtxlp != NULL; vtxlp = vtxlp->vtxnxt)
  {
   __push_itstk(vtxlp->vtx_itp);

   vtxp = vtxlp->vtxp;
   np = vtxp->vnp;
   bi = vtxp->vi1;
   /* scalars (bi == -1) are stored at index 0 */
   bi2 = (bi == -1) ? 0 : bi;
   /* sbp2 of index is addr to store new net value into */
   get_stwire_addr_(sbp2, np);
   /* sbv is new value to store */
   sbv = (word32) vtxp->new_vtxval;
   /* for next relax old value is current node val, old val no longer need */
   vtxp->old_vtxval = vtxp->new_vtxval;

   /* know if vertex forced will never be on change list */

   /* this may change sbv from tran channel new value to trireg value */
   /* SJM 10/16/00 - this was commented out but that caused Samsung trireg */
   /* tests to fail - must have had a reason to comment out but why? */
   if (np->ntyp == N_TRIREG)
    {
     /* during wire init force cap x value; after init hiz decays to the */
     /* old 2-bit value at the trireg's capacitive strength */
     if (__wire_init) sbv = (byte) (3 | __cap_to_stren[np->n_capsiz]);
     else
      {
       if (sbv == ST_HIZ)
        sbv = (byte) ((sbp2[bi2] & 3) | __cap_to_stren[np->n_capsiz]);
      }
    }
   /* DBG remove --- */
   if (__debug_flg && __ev_tracing)
    {
     int32 chg;

     if (sbv != (word32) sbp2[bi2]) chg = TRUE; else chg = FALSE;
     transtore_trmsg(np, bi, chg, sbv, sbv);
    }
   /* store only on change - avoids needless net change recording */
   if (sbv != (word32) sbp2[bi2])
    {
     sbp2[bi2] = (byte) sbv;

     /* notice needs to be bi since need -1 if scalar */
     /* always record since this assign does not set lhs changed */
     if (bi == -1) record_nchg_(np);
     else record_sel_nchg_(np, bi, bi);
    }
   vtxlp->vtxp->vtx_chged = FALSE;

   __pop_itstk();
  }
 /* add entire list to end of free list */
 if (__chg_vtxlst_hdr != NULL)
  {
   __chg_vtxlst_end->vtxnxt =  __vtxlst_freelst;
   __vtxlst_freelst = __chg_vtxlst_hdr;
  }
 __chg_vtxlst_hdr = __chg_vtxlst_end = NULL;
}
3027 
3028 /*
3029  * find all nodes in vicinity of a changed node and add to change list
3030  *
3031  * any node on other side of x/1 conducting edge with lower stren is
3032  * in vicincity (usually undriven nodes) - forced/assigned never in this set
3033  * this gets itree context from passed vtx list
3034  */
find_chgvtx_vicinity(struct vtxlst_t * vtxlp)3035 static void find_chgvtx_vicinity(struct vtxlst_t *vtxlp)
3036 {
3037  register struct edge_t *ep;
3038  int32 bi, bi2, gid;
3039  word32 conducting;
3040  struct vtx_t *vtxp, *vtxp2;
3041  struct vtxlst_t *vtxlp2;
3042  struct gate_t *gp;
3043  struct itree_t *oside_itp;
3044 
3045  vtxp = vtxlp->vtxp;
3046  bi = vtxp->vi1;
3047  if (bi == -1) bi = 0;
3048 
3049  for (ep = vtxp->vedges; ep != NULL; ep = ep->enxt)
3050   {
3051    vtxp2 = ep->ev2;
3052    if (vtxp2->vtx_in_vicinity) continue;
3053 
3054    bi2 = vtxp2->vi1;
3055    if (bi2 == -1) bi2 = 0;
3056 
3057    /* if vicinty node forced - can never be in vicinity, can't change */
3058    if (vtxp2->vtx_forced) continue;
3059 
3060    /* supply can only change if opposite supply across inout */
3061    if (vtxp2->vtx_supply)
3062     {
3063      /* if other side not supply, never can change */
3064      if (!vtxp->vtx_supply) continue;
3065 
3066      /* if edge tran (not port) reducing so can never change */
3067      if (ep->enpp->npntyp != NP_BIDMDPRT && ep->enpp->npntyp != NP_BIDICONN)
3068       continue;
3069      /* if both supplies same (values never changes so good here) can't chg */
3070      if (vtxp->new_vtxval == vtxp2->new_vtxval) continue;
3071     }
3072 
3073    /* SJM 08/26/00 - all xmr/inout edges per inst. so store oside itp */
3074    if (ep->edgoside_itp != NULL) oside_itp = ep->edgoside_itp;
3075    else oside_itp = vtxlp->vtx_itp;
3076 
3077    if (ep->enpp->npntyp == NP_TRAN)
3078     {
3079      gp = ep->enpp->elnpp.egp;
3080      if (gp->g_class == GC_TRANIF)
3081       {
3082        gid = gp->gmsym->el.eprimp->gateid;
3083 
3084        __push_itstk(oside_itp);
3085        conducting = get_switch_tranif_onoff(gp, gid);
3086        __pop_itstk();
3087 
3088        if (__debug_flg && __ev_tracing)
3089         {
3090          char s1[RECLEN];
3091 
3092          if (conducting == 0) strcpy(s1, "*OFF*");
3093          else if (conducting == 1) strcpy(s1, "*ON*");
3094          else strcpy(s1, "*UNKNOWN*");
3095          __dbg_msg( "-- tranif vicinity switch %s at %s conducting %s\n",
3096           gp->gsym->synam, __bld_lineloc(__xs, gp->gsym->syfnam_ind,
3097           gp->gsym->sylin_cnt), s1);
3098         }
3099        /* if off, no contribution - if x, conducting stronger */
3100        if (conducting == 0) continue;
3101       }
3102     }
3103    else gp = NULL;
3104    vtxlp2 = add_stchan_chged_vtx(vtxp2, oside_itp);
3105 
3106    vtxp2->vtx_in_vicinity = TRUE;
3107    /* find vicinity of this node */
3108    find_chgvtx_vicinity(vtxlp2);
3109   }
3110 }
3111 
3112 /*
3113  * routine to solve tran(if) channel by relaxation
3114  *
3115  * st vtxtab filled with changed nodes (hard drivers/stren tranif enable chged)
3116  * works from highest to lowest stren - key is that values can only lessen
3117  * this uses Bryant algorithm
3118  *
3119  * this does not call routines which pushes itree stk can't grow too big
3120  * and gets vtx itree loc from vtx list
3121  */
static void stchan_trif_relax(void)
{
 register int32 si;
 register struct edge_t *ep;
 word32 cur_vtxval, oside_val, st0, st1, lowst, conducting;
 int32 bi, bi2, nd_itpop, chged, gid, stable;
 struct vtx_t *vtxp, *vtxp2;
 struct vtxlst_t *vtxlp, *vtxlp2, *last_vtxlp;
 struct gate_t *gp;
 struct net_t *np;

 /* DBG remove --
 if (__debug_flg && __ev_tracing)
  {
   __dbg_msg("=== starting channel relaxtion ===\n");
  }
 --- */

 /* outer loop: repeat full top-down passes until a pass changes nothing */
 for (;;)
  {
   /* process all level from top down */
   for (stable = TRUE, si = 7; si >= 0; si--)
    {
     /* DBG remove --
     if (__debug_flg && __ev_tracing) dmp_perturb_list();
     -- */

     last_vtxlp = NULL;
     /* relax all vertices at level si */
     for (vtxlp = __stvtxtab[si]; vtxlp != NULL;)
      {
       /* always push and start at vtx itree loc */
       __push_itstk(vtxlp->vtx_itp);

       /* find new val of this vtx by stren competition with all neighbors */
       vtxp = vtxlp->vtxp;
       np = vtxp->vnp;
       vtxp->vtx_in_vicinity = FALSE;
       bi = vtxp->vi1;
       if (bi == -1) bi = 0;
       /* DBG remove -- */
       if (__debug_flg && __ev_tracing)
        {
         __dbg_msg( "<> relaxing vertex %s.%s at level %d\n",
          __msg2_blditree(__xs, __inst_ptr), to_vtx(__xs2, vtxp), si);
        }
       /* --- */

       /* if this changed node is forced, always wins */
       /* but need to eval supply in case inout port opposite also supply */
       if (vtxp->vtx_forced)
        {
         __pop_itstk();
         last_vtxlp = vtxlp;
         vtxlp = vtxlp->vtxnxt;
         continue;
        }

       /* first quess is hard driver val for current changed vtx */
       cur_vtxval = (word32) vtxp->new_vtxval;
       /* add in all contributing edges - only exit is fall thru bottom */
       for (chged = FALSE, ep = vtxp->vedges; ep != NULL; ep = ep->enxt)
        {
         vtxp2 = ep->ev2;

         /* value for competition is most recent other side vtx value */
         oside_val = vtxp2->new_vtxval;
         /* if value is hiz, skip since know will always lose */
         if (oside_val == ST_HIZ) continue;

         bi2 = vtxp2->vi1;
         if (bi2 == -1) bi2 = 0;

         if (ep->enpp->npntyp == NP_TRAN)
          {
           gp = ep->enpp->elnpp.egp;
           gid = gp->gmsym->el.eprimp->gateid;

           if (gp->g_class == GC_TRANIF)
            {
             /* move to itree loc of edge's other side vtx to eval on/off */
             /* SJM 08/26/00 - all  edges per inst. so store oside itp*/
             if (ep->edgoside_itp != NULL)
              { __push_itstk(ep->edgoside_itp); nd_itpop = TRUE; }
             else nd_itpop = FALSE;

             /* compute conducting state if tranif and reduce if tran/tranif */
             /* if conducting x/z, other val stren H/L */
             conducting = try_reduce_tranif_stren(&oside_val, gp);

             if (nd_itpop) __pop_itstk();

             /* if tranif not conducting,  no contribution for other side */
             if (conducting == 0) continue;
            }
           /* tran just reduces stren, always on - itree cntxt not needed */
           else try_reduce_tran_stren(&oside_val, gid);
          }
         /* if inout, oside value is unchanged */

         /* if cur val exactly the same as oside no need for stren */
         /* competition since know result will be same */
         if (cur_vtxval != oside_val)
          {
           /* do stren competition with vtx being determined and oside value */
           /* oside value may have been corrected if edge stren reducing */
           cur_vtxval = (word32) __comb_1bitsts(np->ntyp, cur_vtxval, oside_val);

           /* if cur (new) value changed (oside at last partially won), */
           /* need to combine to produce actual cur (new) val */
           if (cur_vtxval != vtxp->new_vtxval)
            {
             /* combine the cur (latest) val with last relax latest */
             /* this handles stren ranges */
             cur_vtxval = (word32) __comb_1bitsts(np->ntyp, cur_vtxval,
              vtxp->new_vtxval);
             /* use cur (new) as vtx val from now on, rest can all lose */
             vtxp->new_vtxval = cur_vtxval;

             /* chg cur to new imoproved val that was assigned to vtx val */
             /* LOOKATME - can this happen - think yes but rare */
             if (cur_vtxval == ST_SUPPLY0 || cur_vtxval == ST_SUPPLY1)
              vtxp->vtx_supply = TRUE;

             /* relax value changes so must relax again */
             chged = TRUE;
            }
          }
         /* if oside same, cur vtx val does not change */
        }
       /* now have new relaxed value - record and move if needed */

       /* SJM 04/23/01 - during wire init do not record - must store */
       /* all internal tran chan new values into nets */
       /* old may not be same as net's if net pdst or del so scheduled */
       if (cur_vtxval != vtxp->old_vtxval && !vtxp->vtx_chged)
        {
         add_to_chg_vtx_list(vtxlp, si);
        }

       /* independent of whether new changed value differs from old relax */
       /* entry value, if changed must move to new correct stren tab index */
       if (chged)
        {
         stable = FALSE;
         /* DBG remove -- */
         if (__debug_flg && __ev_tracing)
          {
           char s1[RECLEN];

           __dbg_msg("<> vertex %s.%s at level %d changed to %s\n",
            __msg2_blditree(__xs, vtxlp->vtx_itp), to_vtx(__xs2, vtxp), si,
            __to_vvstnam(s1, cur_vtxval));
          }
         /* --- */

         /* put on new higher (probably) st list - need to relax list again */
         /* can be lower if tranif disabled */
         /* extract 3-bit strength-1 (bits 2-4) and strength-0 (bits 5-7) */
         st1 = (cur_vtxval >> 2) & 0x7;
         st0 = (cur_vtxval >> 5) & 0x7;
         lowst = (st1 < st0) ? st1 : st0;

         /* this can be same stren as previous but stren range or new val */
         /* low st will usually be higher but if tranif off can be lower */
         if (lowst != si)
          {
           vtxlp2 = vtxlp->vtxnxt;

           /* link out and put on end of other list */
           if (last_vtxlp == NULL) __stvtxtab[si] = vtxlp->vtxnxt;
           else last_vtxlp->vtxnxt = vtxlp->vtxnxt;
           if (vtxlp == __stvtxtabend[si]) __stvtxtabend[si] = last_vtxlp;

           /* linked out is now end of higher list */
           vtxlp->vtxnxt = NULL;

           /* since vtxlp has been removed from prev list do not need new */
           /* alloc, can just link onto end of higher stren list */
           if (__stvtxtabend[lowst] != NULL)
            {
             __stvtxtabend[lowst]->vtxnxt = vtxlp;
             __stvtxtabend[lowst] = vtxlp;
            }
           else __stvtxtab[lowst] = __stvtxtabend[lowst] = vtxlp;

           /* this will make move to right next one work - no chg of last */
           __pop_itstk();
           vtxlp = vtxlp2;
           continue;
          }
         /* if stren same leave in list */
        }
       __pop_itstk();
       last_vtxlp = vtxlp;
       vtxlp = vtxlp->vtxnxt;
      }
    }
   if (stable) break;
  }
 /* final step free all st vtx tab level lists */
 for (si = 7; si >=0; si--)
  {
   /* finished with last level can free entire perturbed vtx list */
   /* i.e. all nodes of higher strength now known */
   if (__stvtxtab[si] != NULL)
    {
     __stvtxtabend[si]->vtxnxt = __vtxlst_freelst;
     __vtxlst_freelst = __stvtxtab[si];
     __stvtxtab[si] = __stvtxtabend[si] = NULL;
    }
  }
 /* DBG remove --
 if (__debug_flg && __ev_tracing)
  {
   __dbg_msg("=== relaxation completed - channel stable ===\n");
  }
 --- */
}
3340 
3341 /*
 * add vertex to chg vtx list
3343  * put on store when done list - only called if not already on list
3344  * does not need itree context
3345  */
static void add_to_chg_vtx_list(struct vtxlst_t *vtxlp, int32 si)
{
 struct vtxlst_t *newlp;

 /* take a record from the free list, or malloc a fresh one if empty */
 if (__vtxlst_freelst != NULL)
  {
   newlp = __vtxlst_freelst;
   __vtxlst_freelst = __vtxlst_freelst->vtxnxt;
  }
 else newlp = (struct vtxlst_t *) __my_malloc(sizeof(struct vtxlst_t));

 /* copy the vertex record and append it to the store-when-done list */
 *newlp = *vtxlp;
 newlp->vtxnxt = NULL;
 if (__chg_vtxlst_end == NULL) __chg_vtxlst_hdr = __chg_vtxlst_end = newlp;
 else
  {
   __chg_vtxlst_end->vtxnxt = newlp;
   __chg_vtxlst_end = newlp;
  }
 /* mark so the vertex is not added twice during relaxation */
 vtxlp->vtxp->vtx_chged = TRUE;

 /* DBG remove -- */
 if (__debug_flg && __ev_tracing)
  {
   __dbg_msg("++ vertex %s.%s level %d added to store net change list\n",
    __msg2_blditree(__xs, vtxlp->vtx_itp), to_vtx(__xs2, vtxlp->vtxp), si);
  }
 /* --- */
}
3377 
3378 /*
3379  * dump perturbed queue (element for each stren level)
3380  */
dmp_perturb_list(void)3381 static void dmp_perturb_list(void)
3382 {
3383  register int32 si;
3384  register struct vtxlst_t *vtxlp;
3385  char s1[RECLEN];
3386 
3387  for (si = 7; si >= 0; si--)
3388   {
3389    if (__stvtxtab[si] == NULL) __tr_msg(" Level %d **empty**\n", si);
3390    else
3391     {
3392      __dbg_msg(" Level %d:\n", si);
3393      for (vtxlp = __stvtxtab[si]; vtxlp != NULL; vtxlp = vtxlp->vtxnxt)
3394       {
3395        __dbg_msg("  %s\n", to_vtx_info(s1, vtxlp->vtxp, vtxlp->vtx_itp));
3396        /* DBG remove -- */
3397        if (vtxlp->vtxnxt == NULL)
3398         {
3399          if (vtxlp != __stvtxtabend[si]) __misc_terr(__FILE__, __LINE__);
3400         }
3401        /* --- */
3402       }
3403     }
3404   }
3405 }
3406 
/*
 * format the info string for one vertex of a switch channel
 */
to_vtx_info(char * s,struct vtx_t * vtxp,struct itree_t * itp)3410 static char *to_vtx_info(char *s, struct vtx_t *vtxp, struct itree_t *itp)
3411 {
3412  char s1[RECLEN], s2[RECLEN];
3413 
3414  sprintf(s,
3415   "  vertex %s.%s new=%s old=%s chg=%d, vicinity=%d, frc=%d, sup=%d",
3416   __msg2_blditree(__xs, itp), to_vtx(__xs2, vtxp),
3417   __to_vvstnam(s1, vtxp->new_vtxval), __to_vvstnam(s2, vtxp->old_vtxval),
3418   vtxp->vtx_chged, vtxp->vtx_in_vicinity, vtxp->vtx_forced, vtxp->vtx_supply);
3419  return(s);
3420 }
3421 
3422 /*
3423  * compute conducting state and oside value for tranif gate
3424  *
3425  * returns conducting value if not 0 sets oside_val to reduced val
3426  * SJM 04/23/01 - also if tranif and conducting x/z (3), value is H or L
3427  */
static int32 try_reduce_tranif_stren(word32 *oside_val, struct gate_t *gp)
{
 int32 conducting, is_resist, gid;
 word32 sb2, sb3, st1, st0;

 gid = gp->gmsym->el.eprimp->gateid;
 conducting = get_switch_tranif_onoff(gp, gid);
 if (__debug_flg && __ev_tracing)
  {
   char s1[RECLEN];

   if (conducting == 0) strcpy(s1, "*OFF*");
   else if (conducting == 1) strcpy(s1, "*ON*");
   else strcpy(s1, "*UNKNOWN*");
   __dbg_msg("-- tranif switch %s at %s conducting %s\n",
    gp->gsym->synam, __bld_lineloc(__xs, gp->gsym->syfnam_ind,
    gp->gsym->sylin_cnt), s1);
  }
 /* if off, no contribution */
 if (conducting == 0) return(0);

 /* LOOKATME - maybe should use g_gone or other bit - but adds whole word32 */
 if (gid == G_RTRANIF0 || gid == G_RTRANIF1) is_resist = TRUE;
 else is_resist = FALSE;

 sb2 = *oside_val;
 /* reduce stren for tran and rtran - if conducting off, won't get here */
 /* map each 3-bit strength through the (r)mos reduction table */
 if (is_resist)
  {
   /* SJM 04/23/01 - had stren backward st0 is high bit 5-7, st1 2-4 */
   st0 = __rmos_stmap[(sb2 >> 5) & 0x7];
   st1 = __rmos_stmap[(sb2 >> 2) & 0x7];
  }
 else
  {
   st0 = __mos_stmap[(sb2 >> 5) & 0x7];
   st1 = __mos_stmap[(sb2 >> 2) & 0x7];
  }

 /* SJM 12/04/00 - fixed typo where 0 stren was mixed with 1 stren */
 /* so reduction was wrong */
 /* sb3 is the 2-bit value part; strengths are re-packed around it */
 sb3 = sb2 & 3;
 if (conducting == 3)
  {
   /* unknown conducting state - 0 becomes L, 1 becomes H */
   /* L */
   /* SJM 04/23/01 - had stren backward st0 is high bit 5-7, st1 2-4 */
   if (sb3 == 0) *oside_val = (st0 << 5) | 3;
   /* H */
   else if (sb3 == 1) *oside_val = (st1 << 2) | 3;
   /* LOOKATME - think is z won't get here so only possible is x */
   else *oside_val = sb3 | (st1 << 2) | (st0 << 5);
   return(3);
  }
 *oside_val = sb3 | (st1 << 2) | (st0 << 5);
 return(conducting);
}
3484 
/*
 * compute reduced output side value for tran/rtran gate
 * here conducting state always implied 1 (tran has no control input)
 * notice this does not need itree context
 *
 * nothing returned - oside_val is strength reduced in place
 * (only rtran actually reduces, through the resistive map)
 */
static void try_reduce_tran_stren(word32 *oside_val, int32 gid)
{
 int32 resistive;
 word32 val, st0_red, st1_red;

 /* LOOKATME - maybe should use g_gone or other bit - but adds whole word32 */
 /* only rtran reduces through the resistive strength map */
 resistive = (gid == G_RTRAN);

 val = *oside_val;
 /* st0 strength field is bits 5-7, st1 strength field is bits 2-4 */
 if (resistive)
  {
   st0_red = __rmos_stmap[(val >> 5) & 0x7];
   st1_red = __rmos_stmap[(val >> 2) & 0x7];
  }
 else
  {
   st0_red = __mos_stmap[(val >> 5) & 0x7];
   st1_red = __mos_stmap[(val >> 2) & 0x7];
  }
 /* keep the 2 low value bits, replace both strength fields */
 *oside_val = (val & 3) | (st0_red << 5) | (st1_red << 2);
}
3516 
3517 /*
3518  * routine to access tranif on/off during switch channel evaluation
3519  * called with itree context of tranif gate
3520  *
3521  * since vertex changes of tran switch channels not made and propagated until
3522  * channel completed, if channel in any switch channel must use vtx value
3523  *
3524  * LOOKATME - could use state unless in same switch channel
3525  */
static int32 get_switch_tranif_onoff(struct gate_t *gp, int32 gid)
{
 register struct expr_t *termxp;
 register struct net_t *np2;
 register int32 val, bi;
 int32 ibi;
 struct vbinfo_t *vbip;
 struct chanrec_t *chanp;

 /* 3rd gate terminal is the tranif control (enable) input */
 termxp = gp->gpins[2];
 if (termxp->optyp == ID || termxp->optyp == GLBREF)
  np2 = termxp->lu.sy->el.enp;
 else if (termxp->optyp == LSB) np2 = termxp->lu.x->lu.sy->el.enp;
 else goto no_traux;

 /* control net not in any tran channel - use stored gate state */
 if (np2->ntraux == NULL) goto no_traux;

 switch ((byte) termxp->optyp) {
  case ID:
   /* ibi is base index of this inst's section of per bit chan table */
   ibi = np2->nwid*__inum;
   if ((vbip = np2->ntraux->vbitchans[ibi]) == NULL) goto no_traux;
   chanp = &(__chantab[vbip->chan_id]);
   /* SJM 10/29/01 - need to access var value for inout chans, no vtx state */
   if (chanp->chan_no_vtxs) val = get_bidchan_val(chanp, np2, ibi, 0);
   else val = (int32) vbip->vivxp->new_vtxval;
   break;
  case GLBREF:
   /* xmr control - move itree loc. to target, then same as ID case */
   __xmrpush_refgrp_to_targ(termxp->ru.grp);
   ibi = np2->nwid*__inum;
   if ((vbip = np2->ntraux->vbitchans[ibi]) == NULL)
    { __pop_itstk(); goto no_traux; }

   chanp = &(__chantab[vbip->chan_id]);
   if (chanp->chan_no_vtxs) val = get_bidchan_val(chanp, np2, ibi, 0);
   else val = (int32) vbip->vivxp->new_vtxval;
   __pop_itstk();
   break;
  case LSB:
   /* BEWARE - this assume constant select expr folded */
   if (termxp->ru.x->optyp != NUMBER) goto no_traux;
   bi = __get_const_bselndx(termxp);
   if (termxp->lu.x->optyp == ID)
    {
     /* local bit select control */
     ibi = np2->nwid*__inum;
     if ((vbip = np2->ntraux->vbitchans[ibi + bi]) == NULL) goto no_traux;
     chanp = &(__chantab[vbip->chan_id]);
     if (chanp->chan_no_vtxs) val = get_bidchan_val(chanp, np2, ibi, bi);
     else val = (int32) vbip->vivxp->new_vtxval;
     break;
    }
   /* xmr bit select control - move to target itree loc. first */
   __xmrpush_refgrp_to_targ(termxp->lu.x->ru.grp);
   ibi = np2->nwid*__inum;
   if ((vbip = np2->ntraux->vbitchans[ibi + bi]) == NULL)
    { __pop_itstk(); goto no_traux; }
   chanp = &(__chantab[vbip->chan_id]);
   if (chanp->chan_no_vtxs) val = get_bidchan_val(chanp, np2, ibi, bi);
   else val = (int32) vbip->vivxp->new_vtxval;
   __pop_itstk();
   break;
  default: goto no_traux;
 }
 /* reduce to 2 bit value - x (2) or z (3) control means unknown (3) */
 val &= 3;
 if (val == 2 || val == 3) return(3);
 /* SJM 04/20/01 - for tranif0 gates, conducting 1 is vtx value 0 */
 if (gid == G_RTRANIF0 || gid == G_TRANIF0)
  {
   val = (val == 0) ? 1 : 0;
  }
 return(val);

no_traux:
 /* notice state is stored as conducting after correct for tranif0 gates */
 return((int32) get_tranif_onoff_(gp));
}
3600 
3601 /*
3602  * get bid non vertex channel value - i.e. the net value
3603  * since that determines on off state
3604  *
3605  * SJM 10/29/01 - need to access var value for inout chans, no vtx state
3606  */
get_bidchan_val(struct chanrec_t * chanp,register struct net_t * np,int32 ibi,int32 bi)3607 static int32 get_bidchan_val(struct chanrec_t *chanp, register struct net_t *np,
3608  int32 ibi, int32 bi)
3609 {
3610  register int32 val, chtyp;
3611  word32 nav, nbv;
3612 
3613  if ((chtyp = chanp->chtyp) == TRPROC_STBID) val = np->nva.bp[ibi + bi];
3614  else if (chtyp == TRPROC_BID)
3615   {
3616    /* LOOKATME - can this be low bit of vector - think yes */
3617    if (!np->n_isavec) ld_scalval_(&nav, &nbv, np->nva.bp);
3618    else __ld_bit(&nav, &nbv, np, bi);
3619    val = nav | (nbv << 1);
3620   }
3621  else { val = 3; __case_terr(__FILE__, __LINE__); }
3622  return(val);
3623 }
3624 
3625 /*
3626  * evaluate a tranif channel when 3rd control input changes
3627  * called from itree ref. location of gate
3628  *
3629  * since no hard driver change, can ignore non tran channel bits
3630  * because they only change when hard drivers change
3631  *
3632  * SJM 04/11/01 - put back to immediately eval switch channel when enable chgs
3633  * LOOKATME - think should change so only called when from to 0 (off)
3634  * think to/from 1/x do not change channel
3635  */
__immed_eval_trifchan(struct gate_t * gp)3636 extern void __immed_eval_trifchan(struct gate_t *gp)
3637 {
3638  int32 nd_itpop, bi;
3639  struct net_t *np;
3640  struct traux_t *trap;
3641  struct expr_t *xp, *idndp;
3642  struct vbinfo_t *vbip;
3643  struct vtxlst_t *vtxlp;
3644 
3645  /* SJM 04/23/01 - remove non peri channels since only used for 1 inst case */
3646  /* but only one graph for that in any case */
3647  /* need to find tranif transistor first terminal and perturb */
3648  xp = gp->gpins[0];
3649  if (xp->optyp == ID || xp->optyp == GLBREF) idndp = xp;
3650  else if (xp->optyp == LSB) idndp = xp->lu.x;
3651  else { __case_terr(__FILE__, __LINE__); idndp = NULL; }
3652 
3653  if (idndp->optyp == GLBREF)
3654   { __xmrpush_refgrp_to_targ(idndp->ru.grp); nd_itpop = TRUE; }
3655  else nd_itpop = FALSE;
3656 
3657  np = idndp->lu.sy->el.enp;
3658  bi = -1;
3659  if (xp->optyp == LSB) bi = __get_const_bselndx(xp);
3660  if (bi == -1) bi = 0;
3661  trap = np->ntraux;
3662  vbip = trap->vbitchans[np->nwid*__inum + bi];
3663 
3664  /* add this vertex to relaxation list */
3665  vtxlp = add_stchan_chged_vtx(vbip->vivxp, __inst_ptr);
3666  vbip->vivxp->vtx_in_vicinity = TRUE;
3667  find_chgvtx_vicinity(vtxlp);
3668 
3669  if (nd_itpop) __pop_itstk();
3670 
3671  /* also find and perturb 2nd terminal */
3672  xp = gp->gpins[1];
3673  if (xp->optyp == ID || xp->optyp == GLBREF) idndp = xp;
3674  else if (xp->optyp == LSB) idndp = xp->lu.x;
3675  else { __case_terr(__FILE__, __LINE__); idndp = NULL; }
3676 
3677  if (idndp->optyp == GLBREF)
3678   { __xmrpush_refgrp_to_targ(idndp->ru.grp); nd_itpop = TRUE; }
3679  else nd_itpop = FALSE;
3680 
3681  np = idndp->lu.sy->el.enp;
3682  bi = -1;
3683  if (xp->optyp == LSB) bi = __get_const_bselndx(xp);
3684  if (bi == -1) bi = 0;
3685  trap = np->ntraux;
3686  vbip = trap->vbitchans[np->nwid*__inum + bi];
3687 
3688  /* add this vertex to relaxation list */
3689  if (!vbip->vivxp->vtx_in_vicinity)
3690   {
3691    vtxlp = add_stchan_chged_vtx(vbip->vivxp, __inst_ptr);
3692    vbip->vivxp->vtx_in_vicinity = TRUE;
3693    find_chgvtx_vicinity(vtxlp);
3694   }
3695 
3696  if (nd_itpop) __pop_itstk();
3697 
3698  /* do the relaxation only on perturbed in vicinity vertices */
3699  stchan_trif_relax();
3700  /* final step, assign (maybe schedule) all changed vertices */
3701  if (__chg_vtxlst_hdr != NULL) assign_chged_vtxs();
3702 }
3703 
3704 /*
3705  * ROUTINES TO DUMP TRAN CHANNELS
3706  */
3707 
3708 /*
3709  * dump all trans in a module
3710  */
__dmp_modtrans(struct mod_t * mdp)3711 extern void __dmp_modtrans(struct mod_t *mdp)
3712 {
3713  register int32 ni, ii;
3714  register struct net_t *np;
3715  int32 insts, bi2;
3716  struct traux_t *trap;
3717  struct vbinfo_t *vbip;
3718  struct vtx_t *vtxp;
3719  char s1[RECLEN], s2[RECLEN];
3720 
3721  for (ni = 0, np = &(mdp->mnets[0]); ni < mdp->mnnum; ni++, np++)
3722   {
3723    if ((trap = np->ntraux) == NULL)
3724     continue;
3725 
3726    insts = mdp->flatinum;
3727    for (ii = 0; ii < insts; ii++)
3728     {
3729      __push_itstk(mdp->moditps[ii]);
3730      for (bi2 = np->nwid - 1; bi2 >= 0; bi2--)
3731       {
3732        if ((vbip = trap->vbitchans[__inum*np->nwid + bi2])
3733         == NULL)
3734         {
3735          sprintf(s1, "%s.", __msg2_blditree(__xs, __inst_ptr));
3736          if (!np->n_isavec) strcpy(s2, ""); else sprintf(s2, "[%d]", bi2);
3737          __dbg_msg("-- net %s%s%s not in any tran channel\n", s1,
3738           np->nsym->synam, s2);
3739         }
3740        else
3741         {
3742          if (__chantab[vbip->chan_id].chan_no_vtxs)
3743           __dmp_bidchan(&(__chantab[vbip->chan_id]));
3744          else { vtxp = vbip->vivxp; __dmp_trchan(vtxp); }
3745         }
3746       }
3747      __pop_itstk();
3748     }
3749   }
3750 }
3751 
3752 /*
3753  * dump a chan. channel - requires set itree loc. context to work
3754  */
__dmp_trchan(struct vtx_t * vtxp)3755 extern void __dmp_trchan(struct vtx_t *vtxp)
3756 {
3757  register struct edge_t *ep;
3758  int32 bi, chanid, base, ei;
3759  struct net_t *np;
3760  struct chanrec_t *chanp;
3761 
3762  np = vtxp->vnp;
3763  bi = (vtxp->vi1 == -1) ? 0 : vtxp->vi1;
3764  base = __inum*np->nwid;
3765  chanid = vtxp->vnp->ntraux->vbitchans[base + bi]->chan_id;
3766  chanp = &(__chantab[chanid]);
3767 
3768  /* __dbg_msg("<<> marking %s\n", to_vtx(__xs2, vtxp)); */
3769  vtxp->vtx_mark = TRUE;
3770 
3771  /* dump the distinguished vertex */
3772  __dbg_msg(
3773   "-** vertex %s.%s(id=%d, type=%d, mixed wires=%d, mark=%u) edges:\n",
3774    __msg2_blditree(__xs, __inst_ptr), to_vtx(__xs2, vtxp), chanid,
3775    chanp->chtyp, chanp->chan_diffwtyps, vtxp->vtx_mark);
3776   for (ei = 1, ep = vtxp->vedges; ep != NULL; ep = ep->enxt, ei++)
3777    { prt_edge(vtxp, ep, ei); }
3778 
3779  dmp_vtx_edges(vtxp, __inst_ptr);
3780 
3781  /* turn off marks, to be ready for next change */
3782  off_bichan_marks(chanp);
3783 
3784  __dbg_msg("=== end of channel ===\n");
3785 }
3786 
3787 /*
3788  * dump inout channel - simple list and each node has own itree context
3789  */
__dmp_bidchan(struct chanrec_t * chanp)3790 extern void __dmp_bidchan(struct chanrec_t *chanp)
3791 {
3792  register struct bidvtxlst_t *bidvtxlp;
3793  int32 chanid;
3794  char s1[RECLEN];
3795 
3796  chanid = chanp - __chantab;
3797  __dbg_msg(" *** inout channel (id=%d, type=%d, mixed_wires=%d):\n",
3798   chanid, chanp->chtyp, chanp->chan_diffwtyps);
3799 
3800  bidvtxlp = chanp->bid_vtxlp;
3801  for (; bidvtxlp != NULL; bidvtxlp = bidvtxlp->bidvtxnxt)
3802   {
3803    if (bidvtxlp->vi1 == -1) strcpy(s1, bidvtxlp->vnp->nsym->synam);
3804    else sprintf(s1, "%s[%d]", bidvtxlp->vnp->nsym->synam, bidvtxlp->vi1);
3805    __dbg_msg(" -- vertex %s.%s\n", __msg2_blditree(__xs,
3806     bidvtxlp->bidvtx_itp), s1);
3807   }
3808  __dbg_msg("=== end of channel ===\n");
3809 }
3810 
3811 /*
3812  * dump vertices on other side of edges of a vtx
3813  * passed vtx itree loc since itree stack probably not high enough
3814  */
dmp_vtx_edges(struct vtx_t * vtxp,struct itree_t * vt1_itp)3815 static void dmp_vtx_edges(struct vtx_t *vtxp, struct itree_t *vt1_itp)
3816 {
3817  register struct edge_t *ep;
3818  struct itree_t *oside_itp;
3819 
3820  /* mark the root vertex */
3821  for (ep = vtxp->vedges; ep != NULL; ep = ep->enxt)
3822   {
3823    /* DBG remove -- */
3824    if (ep->ev2 == NULL) __misc_terr(__FILE__, __LINE__);
3825    /* --- */
3826 
3827    if (ep->ev2->vtx_mark)
3828     {
3829      /* __dbg_msg("<<> vertex %s already marked\n", to_vtx(__xs2, ep->ev2)); */
3830      continue;
3831     }
3832    /* __dbg_msg("<<> vertex %s not marked\n", to_vtx(__xs2, ep->ev2)); */
3833 
3834    /* for mdprt will always be local */
3835    /* DBG remove --- */
3836    if (ep->enpp->npntyp == NP_BIDMDPRT
3837     && ep->enpp->npproctyp != NP_PROC_INMOD)
3838     __misc_terr(__FILE__, __LINE__);
3839    /* --- */
3840 
3841    /* SJM - 05/21/01 - compute oside edge and use call stack since itstk */
3842    /* not deep enough */
3843    if (ep->edgoside_itp != NULL) oside_itp = ep->edgoside_itp;
3844    else oside_itp = vt1_itp;
3845 
3846    dmp_vtx_and_out_edges(ep->ev2, oside_itp);
3847 
3848    /* __dbg_msg("<<> marking %s\n", to_vtx(__xs2, ep->ev2)); */
3849    ep->ev2->vtx_mark = TRUE;
3850 
3851    dmp_vtx_edges(ep->ev2, oside_itp);
3852   }
3853 }
3854 
3855 /*
3856  * dump a vertex and all of its out edges
3857  */
dmp_vtx_and_out_edges(struct vtx_t * vtxp,struct itree_t * oside_itp)3858 static void dmp_vtx_and_out_edges(struct vtx_t *vtxp,
3859  struct itree_t *oside_itp)
3860 {
3861  register int32 ei;
3862  register struct edge_t *ep;
3863 
3864  if (vtxp->vtx_mark)
3865   {
3866    /* ---
3867    __dbg_msg("<<> %s marked - out edges not printed\n",
3868     to_vtx(__xs2, vtxp));
3869    --- */
3870    return;
3871   }
3872  /* else __dbg_msg("<<> %s not marked\n", to_vtx(__xs2, vtxp)); */
3873 
3874  __push_itstk(oside_itp);
3875  __dbg_msg(
3876   "=== vertex %s.%s edges:\n", __msg2_blditree(__xs, __inst_ptr),
3877   to_vtx(__xs2, vtxp));
3878  for (ei = 1, ep = vtxp->vedges; ep != NULL; ep = ep->enxt, ei++)
3879   { prt_edge(vtxp, ep, ei); }
3880  __pop_itstk();
3881 }
3882 
3883 /*
3884  * print edge - itree version (expects start to be on itree stack)
3885  */
static void prt_edge(struct vtx_t *vtxp, struct edge_t *ep, int32 ei)
{
 struct itree_t *eitp, *v2itp;
 char ndxstr[RECLEN], v1str[RECLEN], v2str[RECLEN];

 /* -1 edge index means caller wants no numbering */
 if (ei == -1) strcpy(ndxstr, "");
 else sprintf(ndxstr, " %d:", ei);
 getv2_itp(ep, __inst_ptr, &eitp, &v2itp);
 __dbg_msg(" -- edge%s %s.%s->%s.%s: npp:\n", ndxstr,
  __msg2_blditree(__xs, __inst_ptr), to_vtx(v1str, vtxp),
  __msg2_blditree(__xs2, v2itp), to_vtx(v2str, ep->ev2));
 /* LOOKATME - shouldn't test be internal error */
 if (vtxp != NULL) __dmp1_nplstel(__inst_mod, vtxp->vnp, ep->enpp);
}
3899 
3900 /*
3901  * build an vertex identifying string
3902  */
to_vtx(char * s,struct vtx_t * vp)3903 static char *to_vtx(char *s, struct vtx_t *vp)
3904 {
3905  if (vp == NULL) strcpy(s, "*NONE*");
3906  if (vp->vi1 == -1) strcpy(s, vp->vnp->nsym->synam);
3907  else sprintf(s, "%s[%d]", vp->vnp->nsym->synam, vp->vi1);
3908  return(s);
3909 }
3910 
3911 /*
3912  * called with itree loc. of first edge in vitp - 2nd edge and 2nd vtx itp
3913  */
static void getv2_itp(struct edge_t *ep, struct itree_t *vitp,
 struct itree_t **eitp, struct itree_t **v2itp)
{
 int32 num_itpops;

 if (ep->ev2 == NULL) __arg_terr(__FILE__, __LINE__);
 /* move forward - vitp is first */
 num_itpops = 1;
 __push_itstk(vitp);
 /* for in module npp, edge itree loc. is same as 1st vertex loc. */
 if (ep->enpp->npproctyp == NP_PROC_INMOD) *eitp = vitp;
 else
  {
   /* SJM 04/17/03 - if XMR does not match - do not change itree loc */
   if (__move_to_npprefloc(ep->enpp))
    {
     *eitp = __inst_ptr;
     num_itpops++;
    }
   else __misc_terr(__FILE__, __LINE__);
  }
 /* LOOKATME - bit for concat must be index that is from vtx */
 /* SJM 08/26/00 - because all xmr/inout edges per inst. can store new itree */
 if (ep->edgoside_itp != NULL)
  {
   __push_itstk(ep->edgoside_itp);
   num_itpops++;
  }
 /* 2nd vertex loc. is now top of itree stack */
 *v2itp = __inst_ptr;
 /* restore itree stack to its state at entry (balanced pops) */
 while (num_itpops-- > 0) __pop_itstk();
}
3944 
3945 /*
3946  * build drivers on a net
3947  *
3948  * entire net drivers are not net bit
3949  * nothing on itree stack here
3950  */
__dmp_bidnet_drvs(struct net_t * np,struct mod_t * mdp)3951 extern void __dmp_bidnet_drvs(struct net_t *np, struct mod_t *mdp)
3952 {
3953  register struct edge_t *ep;
3954  register int32 ei;
3955  struct traux_t *trap;
3956  struct vbinfo_t *vbip;
3957  struct vtx_t *vtxp;
3958  char s1[RECLEN], s2[RECLEN], s3[RECLEN];
3959 
3960  if (np->ntraux == NULL) return;
3961  if ((trap = np->ntraux) == NULL) return;
3962  if ((vbip = trap->vbitchans[0]) == NULL) return;
3963  vtxp = vbip->vivxp;
3964  if ((ep = vtxp->vedges) == NULL) return;
3965 
3966  for (ei = 0; ep != NULL; ep = ep->enxt, ei++)
3967   {
3968    if (ei == -1) strcpy(s1, ""); else sprintf(s1, " %d:", ei);
3969    __dbg_msg(" -- edge%s %s->%s: npp:\n", s1,
3970     to_vtx(s2, vtxp), to_vtx(s3, ep->ev2));
3971    if (vtxp != NULL) __dmp1_nplstel(mdp, vtxp->vnp, ep->enpp);
3972   }
3973 }
3974 
3975 /*
3976  * ROUTINES TO BUILD XL STYLE CONNECTED LOAD/DRIVER NET/BIT TABLES
3977  */
3978 
3979 /*
3980  * build net/bit vertex table for all net/bits that contribute xl style
3981  * loads to acc_ or vpi_ iterator
3982  *
3983  * LOOKATME - trying to mimic xl style flattening for loads here
3984  */
extern int32 __bld_xl_drvld_vtxtab(struct net_t *np, int32 bi,
 struct itree_t *itp, int32 is_load)
{
 register struct net_pin_t *npp;
 int32 ix_insert, vi, osbi, fromr_bi, catel_bi;
 struct xldlnpp_t *xldlp, *xldlp2;
 struct itree_t *ositp;
 struct mod_t *osmdp;
 struct mod_pin_t *mpp;
 struct net_t *osnp;
 struct expr_t *xp, *catxp;

 /* work list of port connecting npp's still needing flattening */
 __xldl_hdr = __last_xldl = NULL;

 /* know always at least one vertex - needed for actual lds/drvrs in iter */
 /* this sets ix insert loc. */
 vi = get_xldl_vtx(itp, np, bi, &ix_insert);
 /* DBG remove --- */
 if (vi != -1) __vpi_terr(__FILE__, __LINE__);
 /* -- */
 /* add first net/bit vertex */
 vi = add_xldl_vtx(itp, np, bi, ix_insert);

 /* add any iconn or mod port 1 bit npp's to a list for more processing */
 fill_port_npps(np, bi, itp);
 /* common case of xl style and local ld/driver iterator same */
 if (__xldl_hdr == NULL) return(__num_xldlvtxs);

 /* transitive expansion - process work list head until list empty */
 for (; __xldl_hdr != NULL;)
  {
   /* get first unprocessed npp */
   xldlp = __xldl_hdr;
   npp = xldlp->xlnpp;
   __push_itstk(xldlp->xlitp);

   /* move to other side */
   switch ((byte) npp->npntyp) {
    /* SJM 09/20/02 - never see PB forms for inouts */
    case NP_ICONN: case NP_BIDICONN:
     /* other side is mdprt */
     ositp = &(__inst_ptr->in_its[npp->elnpp.eii]);
     osmdp = ositp->itip->imsym->el.emdp;
     mpp = &(osmdp->mpins[npp->obnum]);
     /* loads only follow input (down) ports, drivers only outputs */
     if (is_load)
      {
       if (npp->npntyp != NP_BIDICONN && mpp->mptyp != IO_IN)
        goto nxt_xlld_npp;
      }
     else
      {
       if (npp->npntyp != NP_BIDICONN && mpp->mptyp != IO_OUT)
        goto nxt_xlld_npp;
      }
     xp = mpp->mpref;
     break;
    case NP_MDPRT: case NP_BIDMDPRT:
     /* other side is iconn */
     /* access port before mosing up */
     mpp = &(__inst_mod->mpins[npp->obnum]);

     /* DBG remove -- */
     if (__inst_ptr->up_it == NULL) __vpi_terr(__FILE__, __LINE__);
     /* --- */
     ositp = __inst_ptr->up_it;
     osmdp = ositp->itip->imsym->el.emdp;
     /* loads only follow output (up) ports, drivers only inputs */
     if (is_load)
      {
       if (npp->npntyp != NP_BIDMDPRT && mpp->mptyp != IO_OUT)
        goto nxt_xlld_npp;
      }
     else
      {
       if (npp->npntyp != NP_BIDMDPRT && mpp->mptyp != IO_IN)
        goto nxt_xlld_npp;
      }
     xp = __inst_ptr->itip->ipins[npp->obnum];
     break;
    default:
     goto nxt_xlld_npp;
   }
   /* add other side npp to end of list - expr, net, bit set */
   catel_bi = -1;
   if (npp->npaux == NULL) fromr_bi = bi;
   else
    {
     if (npp->npaux->nbi1 == -1) fromr_bi = bi;
     /* ??? LOOKATME - why is npp low psel bit subtracted off */
     else fromr_bi = bi - npp->npaux->nbi2.i;

     /* in case this side expr in concat need low of where in concat */
     /* so can add to otherside index to get matching oside bit */
     if (npp->npaux->lcbi1 != -1)
      {
       fromr_bi = bi + npp->npaux->lcbi1;
       catxp = xp;
       xp = find_cat_oside_xp(catxp, fromr_bi, &(catel_bi));
      }
    }
   osnp = xldrvld_to_netbit(xp, ((catel_bi == -1) ? fromr_bi : catel_bi),
    &(osbi), ositp);
   /* osbi -2 means out of this side's range or non constant index */
   if (osbi == -2) goto nxt_xlld_npp;

   /* try to add net vertex - added unless already in table */
   if ((vi = get_xldl_vtx(ositp, osnp, osbi, &ix_insert)) != -1)
    {
     if (__debug_flg)
      {
       __dbg_msg(
        " -- net %s bit %d in %s already in xl style load/drive list\n",
        osnp->nsym->synam, osbi, __msg2_blditree(__xs, ositp));
      }
     goto nxt_xlld_npp;
    }

   /* add this in sorted position in table */
   add_xldl_vtx(ositp, osnp, osbi, ix_insert);
   /* also add all its connecting npp's */
   fill_port_npps(osnp, osbi, ositp);

nxt_xlld_npp:
   __pop_itstk();
   /* done with current that is on front so free and update header */
   xldlp2 = __xldl_hdr->xlnxt;
   __my_free((char *) __xldl_hdr, sizeof(struct xldlnpp_t));
   __xldl_hdr = xldlp2;
  }
 return(__num_xldlvtxs);
}
4113 
4114 /*
4115  * get net and bit from expr for buildin
4116  * given an driv tran channel lhs non concatenate expression get net and bit
4117  *
4118  * almost same as tranx to net bit
4119  * sets bi to -2 for other side out of this side range or not constant ndx
4120  */
static struct net_t *xldrvld_to_netbit(register struct expr_t *xp,
 int32 fromr_bi, int32 *bi, struct itree_t *oside_itp)
{
 register struct net_t *np;
 int32 r1, r2;

 np = __find_tran_conn_np(xp);
 switch ((byte) xp->optyp) {
  case LSB:
   /* const index needed - eval in other side itree context */
   __push_itstk(oside_itp);
   if (xp->ru.x->optyp != NUMBER && xp->ru.x->optyp != ISNUMBER) *bi = -2;
   else
    {
     *bi = __get_const_bselndx(xp);
     /* 1 bit select - any non zero from bit is out of range */
     if (fromr_bi > 0) *bi = -2;
    }
   __pop_itstk();
   break;
  case PARTSEL:
   r1 = __contab[xp->ru.x->lu.x->ru.xvi];
   r2 = __contab[xp->ru.x->ru.x->ru.xvi];
   if (fromr_bi == -1) *bi = r2;
   else
    {
     /* offset from psel low - past high end is out of range */
     *bi = r2 + fromr_bi;
     if (*bi > r1) *bi = -2;
    }
   break;
  default:
   /* this side is entire wire */
   if (!np->n_isavec)
    {
     *bi = -1;
     if (fromr_bi > 0) *bi = -2;
    }
   else *bi = (fromr_bi >= np->nwid) ? -2 : fromr_bi;
   break;
 }
 return(np);
}
4157 
4158 /*
4159  * add mod port and iconn edges (npps) connecting to net or net bit to list
4160  *
4161  * this include all npp's that are one bit and match bit if bit select
4162  * filtering out other type (non load or non driver) done elsewhere
4163  */
static void fill_port_npps(struct net_t *np, int32 bi, struct itree_t *itp)
{
 register struct net_pin_t *npp;

 __push_itstk(itp);
 /* pass 1: load npps that connect through ports */
 for (npp = np->nlds; npp != NULL; npp = npp->npnxt)
  {
   if (npp->npntyp == NP_ICONN || npp->npntyp == NP_MDPRT)
    add_match_vtxs(np, npp, bi);
  }
 /* regs have no I/O drivers and never connect to a tran channel */
 if (np->ntyp >= NONWIRE_ST)
  {
   /* DBG remove --- */
   if (np->ndrvs != NULL || np->ntraux != NULL) __vpi_terr(__FILE__, __LINE__);
   /* --- */
   __pop_itstk();
   return;
  }
 /* pass 2: driver npps that connect through ports */
 for (npp = np->ndrvs; npp != NULL; npp = npp->npnxt)
  {
   if (npp->npntyp == NP_ICONN || npp->npntyp == NP_MDPRT)
    add_match_vtxs(np, npp, bi);
  }
 /* pass 3: tran channel connections (both loads and drivers) if any */
 if (np->ntraux != NULL)
  {
   for (npp = np->ntraux->tran_npps; npp != NULL; npp = npp->npnxt)
    {
     if (npp->npntyp == NP_BIDICONN || npp->npntyp == NP_BIDMDPRT)
      add_match_vtxs(np, npp, bi);
    }
  }
 __pop_itstk();
}
4207 
4208 /*
4209  * add right inst and right bit npps to port vertex list
4210  */
static void add_match_vtxs(struct net_t *np, struct net_pin_t *npp, int32 bi)
{
 int32 r1, r2;
 struct xldlnpp_t *newlp;

 /* per inst. npp for some other instance never matches */
 if (npp->npproctyp == NP_PROC_FILT
  && npp->npaux->npu.filtitp != __inst_ptr) return;

 /* LOOKATME - what happens with xmr iconns - for now not in xl iters */
 if (npp->npproctyp == NP_PROC_GREF) return;

 /* this must run with right itree loc. (for vector r1, r2 h:0 forms) */
 __get_bidnpp_sect(np, npp, &r1, &r2);
 if (r1 == -1)
  {
   /* scalar section always matches */
   /* DBG remove --- */
   if (bi != -1) __vpi_terr(__FILE__, __LINE__);
   /* --- */
  }
 /* SJM 01/28/05 - must match bit against npp section range as usual */
 else if (bi > r1 || bi < r2) return;

 newlp = (struct xldlnpp_t *) __my_malloc(sizeof(struct xldlnpp_t));
 newlp->xlnpp = npp;
 newlp->xlitp = __inst_ptr;
 newlp->xlnxt = NULL;

 /* link new element on end of work list */
 if (__last_xldl == NULL) __xldl_hdr = newlp;
 else __last_xldl->xlnxt = newlp;
 __last_xldl = newlp;
}
4248 
4249 /*
4250  * search sorted index into xl drive/load vertex table for matching vertex
4251  *
4252  * sets ix_insert index if not found
4253  * binary search better even for small table
4254  */
get_xldl_vtx(struct itree_t * itp,struct net_t * np,int32 bi,int32 * ix_insert)4255 static int32 get_xldl_vtx(struct itree_t *itp, struct net_t *np, int32 bi,
4256  int32 *ix_insert)
4257 {
4258  register int32 m, cv;
4259  int32 l, h;
4260  register struct xldlvtx_t *xldlvp;
4261 
4262  if (__num_xldlvtxs <= 0) { *ix_insert = 0; return(-1); }
4263  l = 0; h = __num_xldlvtxs - 1;
4264  for (;;)
4265   {
4266    m = (l + h)/2;
4267    xldlvp = __xldlvtxind[m];
4268    /* first sort by module name */
4269    if ((cv = strcmp(xldlvp->dlitp->itip->imsym->synam,
4270     itp->itip->imsym->synam)) == 0)
4271     {
4272      /* then by instance of module */
4273      if ((cv = xldlvp->dlitp->itinum - itp->itinum) == 0) return(m);
4274      /* then by net name */
4275      if ((cv = strcmp(xldlvp->dlnp->nsym->synam, np->nsym->synam)) == 0)
4276       {
4277        /* finally by bit index */
4278        if ((cv = xldlvp->dlbi - bi) == 0) return(m);
4279       }
4280     }
4281    if (cv < 0) l = m + 1; else h = m - 1;
4282    if (h < l) { *ix_insert = l; break; }
4283   }
4284  return(-1);
4285 }
4286 
4287 /*
4288  * add an a new xl drv/ld vertex (net/bit)
4289  *
4290  * know now in table and ix_insert set to place to insert or not called
4291  * must call get xldl vtx before calling this to set ix_insert
4292  */
add_xldl_vtx(struct itree_t * itp,struct net_t * np,int32 bi,int32 ix_insert)4293 static int32 add_xldl_vtx(struct itree_t *itp, struct net_t *np, int32 bi,
4294  int32 ix_insert)
4295 {
4296  register int32 ki;
4297  int32 osize, nsize;
4298  struct xldlvtx_t *xldlvp;
4299 
4300  if (++__num_xldlvtxs > __siz_xldlvtxtab)
4301   {
4302    /* first grow table because few new keywords, fibronaci growth */
4303    if (__siz_xldlvtxtab == 0)
4304     {
4305      nsize = 100*sizeof(struct xldlvtx_t *);
4306      __xldlvtxind = (struct xldlvtx_t **) __my_malloc(nsize);
4307      __siz_xldlvtxtab = 100;
4308     }
4309    else
4310     {
4311      osize = __siz_xldlvtxtab*sizeof(struct xldlvtx_t *);
4312      nsize = 2*osize;
4313      __siz_xldlvtxtab *= 2;
4314      __xldlvtxind = (struct xldlvtx_t **) __my_realloc((char *) __xldlvtxind,
4315       osize, nsize);
4316    }
4317   }
4318  /* allocate and fill new record */
4319  xldlvp = (struct xldlvtx_t *) __my_malloc(sizeof(struct xldlvtx_t));
4320  xldlvp->dlnp = np;
4321  xldlvp->dlbi = bi;
4322  xldlvp->dlitp = itp;
4323 
4324  /* copy downward from end making room at ix_insert */
4325  for (ki = __num_xldlvtxs - 1; ki > ix_insert; ki--)
4326   __xldlvtxind[ki] = __xldlvtxind[ki - 1];
4327  __xldlvtxind[ix_insert] = xldlvp;
4328 
4329  return(ix_insert);
4330 }
4331 
4332 /*
4333  * ROUTINES TO FORCE/RELEASE ALL WIRES IN SWITCH CHANNEL
4334  */
4335 
4336 /*
4337  * qc force wrapper than decomposes vector net object into bit selects
4338  * result is that for every bit a switch channel is forced
4339  *
4340  * know biti down to bitj where values is wid -1 to 0 for wire or psel rng
4341  * rhsbi is rhs value bit matching low bit j
4342  * this is called in stmt itree context but if lhs xmr, itp passed
4343  * and returns also in stmt itree context - called proc handle push/pop
4344  */
extern void __qc_tran_wireforce(struct net_t *np, int32 biti, int32 bitj,
 int32 rhsbi, struct itree_t *itp, struct st_t *qcfstp)
{
 register int32 bi;
 word32 av, bv;
 struct xstk_t *xsp;

 /* eval rhs in stmt itree context - converts and sizes to lhs width */
 xsp = __eval_assign_rhsexpr(qcfstp->st.sqca->qcrhsx, qcfstp->st.sqca->qclhsx);
 /* rhs bit select done here, so -1 (scalar) becomes low bit 0 */
 if (rhsbi == -1) rhsbi = 0;

 if (np->n_isavec)
  {
   /* force every bit of the range - one switch channel per bit */
   for (bi = biti; bi >= bitj; bi--)
    {
     av = rhsbsel_(xsp->ap, rhsbi + (bi - bitj));
     bv = rhsbsel_(xsp->bp, rhsbi + (bi - bitj));
     /* SJM 04/15/01 - must pass any xmr itree context */
     do_qc_wire_intran_force(np, bi, av, bv, itp);
    }
  }
 else
  {
   /* for scalar biti and bitj will be 0 */
   /* DBG remove -- */
   if (biti != 0 || bitj != 0) __misc_terr(__FILE__, __LINE__);
   /* -- */
   av = rhsbsel_(xsp->ap, rhsbi);
   bv = rhsbsel_(xsp->bp, rhsbi);
   /* SJM 03/15/01 - just force the one wire - caller re-evals channel */
   /* SJM 04/15/01 - must pass any xmr itree context */
   do_qc_wire_intran_force(np, -1, av, bv, itp);
  }
 __pop_xstk();
}
4385 
/*
 * do force for one net-bit or scalar in tran channel
 *
 * biti is the normalized bit index, or -1 for a scalar net
 * aval/bval are the already bit-selected a/b value words (low bit used)
 * lhs_itp, when non NULL, is the lhs xmr itree context to run under
 */
static void do_qc_wire_intran_force(struct net_t *np, int32 biti, word32 aval,
 word32 bval, struct itree_t *lhs_itp)
{
 register int32 ibase;
 int32 nd_itpop;

 /* debug message must use statement not lhs itree context */
 if (__debug_flg && __ev_tracing)
  {
   char s1[RECLEN], s2[RECLEN];

   if (np->n_isavec) sprintf(s1, "%s[%d]", np->nsym->synam, biti);
   else strcpy(s1, np->nsym->synam);
   __tr_msg(
    ":: quasi-continuous force of switch channel wire %s in %s from force at %s now %s\n",
    s1, __msg2_blditree(__xs2, __inst_ptr),
     __bld_lineloc(__xs, (word32) __sfnam_ind, __slin_cnt),
    __to_timstr(s2, &__simtime));
  }

 /* switch to the lhs xmr itree context if one was passed */
 nd_itpop = FALSE;
 if (lhs_itp != NULL) { __push_itstk(lhs_itp); nd_itpop = TRUE; }

 /* this must run in itree context of lhs (maybe an xmr) */
 ibase = __inum*np->nwid;
 if (!np->n_isavec)
  {
   /* scalar: bit index 0 - note &aval/&bval are the per-call copies */
   __bit1_vpi_or_tran_wireforce(np, &aval, &bval, ibase, 0, 0,
    "switch channel");

   /* DBG remove -- */
   if (biti != -1) __misc_terr(__FILE__, __LINE__);
   /* --- */
  }
 else
  {
   /* DBG remove -- */
   if (biti == -1) __misc_terr(__FILE__, __LINE__);
   /* --- */
   /* because right bit value already selected into a/b low bit, rhsbi is 0 */
   __bit1_vpi_or_tran_wireforce(np, &aval, &bval, ibase, biti, 0,
    "switch channel");
  }

 /* notice can have both many wire specific and many all cbs */
 /* call backs also need to match itree loc for lhs xmr */
 if (__num_vpi_force_cbs > 0) __find_call_force_cbs(np, biti);
 if (__vpi_force_cb_always) __cb_all_rfs(np, biti, TRUE);

 /* restore caller's (statement) itree context */
 if (nd_itpop) __pop_itstk();
}
4440 
4441 /*
4442  * qc release wrapper than decomposes vector net object into bit selects
4443  * result is that for every bit a switch channel is released
4444  *
4445  * SJM 11/24/00 - because no need to get original force value this
4446  * can use lhs expr and net/bit
4447  * SJM 04/15/01 - need to pass any lhs itree context if lhs xmr
4448  */
__qc_tran_wirerelease(struct net_t * np,int32 biti,int32 bitj,struct itree_t * itp,struct expr_t * lhsx)4449 extern void __qc_tran_wirerelease(struct net_t *np, int32 biti, int32 bitj,
4450  struct itree_t *itp, struct expr_t *lhsx)
4451 {
4452  register int32 bi;
4453 
4454  if (!np->n_isavec)
4455   {
4456    /* DBG remove -- */
4457    if (biti != 0 || bitj != 0) __misc_terr(__FILE__, __LINE__);
4458    /* -- */
4459    do_qc_wire_intran_release(np, -1, lhsx, itp);
4460   }
4461  else
4462   {
4463    for (bi = biti; bi >= bitj; bi--)
4464     { do_qc_wire_intran_release(np, bi, lhsx, itp); }
4465   }
4466 }
4467 
/*
 * do release for one net-bit or scalar in tran channel
 *
 * biti is the normalized bit index, or -1 for a scalar net
 * lhsx is the lhs expr (messages only); itp is any lhs xmr itree context
 *
 * this must push lhs xmr itree loc that it is passed if needed
 * this handles release PLI callbacks if needed
 */
static void do_qc_wire_intran_release(struct net_t *np, int32 biti,
 struct expr_t *lhsx, struct itree_t *itp)
{
 register int32 bi, ibase;
 int32 bit_forced, nd_itpop;
 struct qcval_t *frc_qcp;
 char s1[RECLEN];

 ibase = __inum*np->nwid;
 /* messages must use stmt itree loc */
 /* scan every bit of the wire - if none forced, inform and do nothing */
 for (bi = 0; bi < np->nwid; bi++)
  {
   if (np->nu2.qcval[ibase + bi].qc_active) goto some_bit_forced;
  }
 strcpy(s1, " - no bits forced");
 __sgfinform(465, "attempted release of %s in instance %s failed%s",
  __msgexpr_tostr(__xs2, lhsx), __msg2_blditree(__xs, __inst_ptr), s1);
 /* SJM - 04/15/01 - if nothing forced must not exec any call backs */
 return;

some_bit_forced:
 if (__debug_flg && __ev_tracing)
  {
   char s2[RECLEN];

   if (np->n_isavec) sprintf(s1, "%s[%d]", np->nsym->synam, biti);
   else strcpy(s1, np->nsym->synam);

   __tr_msg(
    ":: quasi-continuous release of switch channel wire %s in %s from release at %s now %s\n",
    s1, __msg2_blditree(__xs, __inst_ptr),
    __bld_lineloc(__xs2, (word32) __sfnam_ind, __slin_cnt),
    __to_timstr(s2, &__simtime));
  }

 /* switch to the lhs xmr itree context if one was passed */
 nd_itpop = FALSE;
 if (itp != NULL) { nd_itpop = TRUE; __push_itstk(itp); }

 /* know some forced or will not get here */
 /* but the specific bit biti may itself not be forced */
 bit_forced = TRUE;
 bi = (biti == -1) ? 0 : biti;
 frc_qcp = &(np->nu2.qcval[ibase + bi]);
 if (!frc_qcp->qc_active) bit_forced = FALSE;
 else frc_qcp->qc_active = FALSE;

 /* now that released, assign value from current drivers */
 __assign_1mdrwire(np);

 if (!bit_forced)
  {
   /* pop back to stmt itree context so the inform message location is */
   /* right, then re-push so the final pop below stays balanced */
   if (nd_itpop) __pop_itstk();

   strcpy(s1, " - some bits forced");
   __sgfinform(465, "attempted release of %s in instance %s failed%s",
    __msgexpr_tostr(__xs2, lhsx), __msg2_blditree(__xs, __inst_ptr), s1);

   if (nd_itpop) __push_itstk(itp);
  }

 /* FIXME - why not calling for every bit in range - only 1 bit possible? */
 /* notice can have both many wire specific and many all cbs */
 if (__num_vpi_rel_cbs > 0) __find_call_rel_cbs(np, biti);
 if (__vpi_rel_cb_always) __cb_all_rfs(np, biti, FALSE);

 if (nd_itpop) __pop_itstk();
}
4540 
4541 /*
4542  * VPI ROUTINES TO FORCE/RELEASE ALL WIRES IN SWITCH CHANNEL
4543  */
4544 
4545 /*
4546  * vpi force wrapper than decomposes vector net object into bit selects
4547  * result is that for every bit a switch channel is forced
4548  * here since comes from vpi passed right force itree context
4549  */
__tran_wire_vpi_force(struct net_t * np,word32 * ap,word32 * bp,int32 ndx)4550 extern void __tran_wire_vpi_force(struct net_t *np, word32 *ap, word32 *bp,
4551  int32 ndx)
4552 {
4553  register int32 bi;
4554 
4555  if (ndx == -1 && np->n_isavec)
4556   {
4557    for (bi = np->nwid - 1; bi >= 0; bi--)
4558     { do_vpi_wire_intran_force(np, bi, ap, bp); }
4559   }
4560  else do_vpi_wire_intran_force(np, ndx, ap, bp);
4561 }
4562 
4563 /*
4564  * do vpi force for one net-bit or scalar in tran channel
4565  *
4566  * know itree change already made if needed (no different lhs context)
4567  * never a need for separate rhs vpi force since user sets and passes value
4568  * also do not need dces for these
4569  */
do_vpi_wire_intran_force(struct net_t * np,int32 ndx,word32 * ap,word32 * bp)4570 static void do_vpi_wire_intran_force(struct net_t *np, int32 ndx,
4571  word32 *ap, word32 *bp)
4572 {
4573  register int32 ibase;
4574  char s3[RECLEN];
4575 
4576  /* make sure assign/force table exists */
4577  /* AIV 03/09/05 - force inhibition requires allocated bit set */
4578  if (!np->frc_assgn_allocated) __alloc_qcval(np);
4579 
4580  if (__debug_flg && __ev_tracing)
4581   {
4582    if (ndx == -1) strcpy(__wrks1, np->nsym->synam);
4583    else sprintf(__wrks1, "%s[%d]", np->nsym->synam,
4584     __unnormalize_ndx(np, ndx));
4585    __tr_msg(":: vpi_put_value force of wire %s in %s now %s\n", __wrks1,
4586     __msg2_blditree(__wrks2, __inst_ptr), __to_timstr(s3, &__simtime));
4587   }
4588 
4589  ibase = __inum*np->nwid;
4590  if (!np->n_isavec)
4591   {
4592    /* SJM 02/23/05 - since scalar do not need ndx check */
4593    /* SJM 11/14/00 - must also check cbs for scalar case */
4594    __bit1_vpi_or_tran_wireforce(np, ap, bp, ibase, 0, 0,
4595     "switch channel vpi_put_value");
4596    ndx = -1;
4597    goto chk_cbs;
4598   }
4599  /* SJM 02/23/05 - it is legal to force bit selects too */
4600  __bit1_vpi_or_tran_wireforce(np, ap, bp, ibase, ndx, ndx,
4601   "switch channel vpi_put_value");
4602 
4603 chk_cbs:
4604  /* notice can have both many wire specific and many all cbs */
4605  if (__num_vpi_force_cbs > 0) __find_call_force_cbs(np, ndx);
4606  if (__vpi_force_cb_always) __cb_all_rfs(np, ndx, TRUE);
4607 }
4608 
4609 /*
4610  * vpi release wrapper than decomposes vector net object into bit selects
4611  *
4612  * result is that for every bit a switch channel released
4613  * never a need for separate rhs vpi force since user sets and passes value
4614  * here since comes from vpi passed right force itree context
4615  */
__tran_wire_vpi_release(struct net_t * np,int32 ndx)4616 extern void __tran_wire_vpi_release(struct net_t *np, int32 ndx)
4617 {
4618  register int32 bi;
4619 
4620  if (ndx == -1 && np->n_isavec)
4621   {
4622    for (bi = np->nwid - 1; bi >= 0; bi--)
4623     {
4624      /* SJM 03/15/01 - now release entire wire but nothing else in chan */
4625      do_vpi_wire_intran_release(np, bi);
4626      /* after release must re-compute tran channel */
4627      __eval_tran_1bit(np, bi);
4628     }
4629   }
4630  else
4631   {
4632    /* SJM 03/15/01 - now just release one wire */
4633    do_vpi_wire_intran_release(np, ndx);
4634    /* since this is scalar eval entire wire */
4635    __eval_tran_bits(np);
4636   }
4637 }
4638 
/*
 * do vpi release for one net-bit or scalar in tran channel
 * know itree change already made if needed
 *
 * ndx is the normalized bit index, or -1 for the whole scalar wire
 */
static void do_vpi_wire_intran_release(struct net_t *np, int32 ndx)
{
 register int32 bi, ibase;
 int32 bit_forced;
 struct qcval_t *frc_qcp;
 char s3[RECLEN];

 ibase = __inum*np->nwid;
 /* scalar (-1) maps to per instance qcval slot 0 */
 if (ndx == -1) bi = 0; else bi = ndx;
 frc_qcp = &(np->nu2.qcval[ibase + bi]);
 if (!frc_qcp->qc_active)
  {
no_force:
   /* nothing forced - emit vpi notice and still run any release cbs */
   strcpy(__wrks1, " - not forced");
   if (ndx == -1) strcpy(__wrks2, np->nsym->synam);
   else sprintf(__wrks2, "%s[%d]", np->nsym->synam,
    __unnormalize_ndx(np, ndx));
   __vpi_err(2102, vpiNotice,
    "attempted vpi_put_value release of wire %s in %s failed%s", __wrks2,
    __msg2_blditree(s3, __inst_ptr), __wrks1);
   goto done;
  }

 if (__debug_flg && __ev_tracing)
  {
   if (ndx == -1) strcpy(__wrks2, np->nsym->synam);
   else sprintf(__wrks2, "%s[%d]", np->nsym->synam,
    __unnormalize_ndx(np, ndx));
   __tr_msg(":: vpi_put_value release of wire %s in %s now %s\n", __wrks2,
    __msg2_blditree(__wrks1, __inst_ptr), __to_timstr(s3, &__simtime));
  }

 /* know some forced or will not get here */
 bit_forced = TRUE;
 if (!frc_qcp->qc_active) bit_forced = FALSE;
 else frc_qcp->qc_active = FALSE;

 /* release is immediate assign even if wire has delay no schedule */
 /* LOOKATME - could this be 1 bit eval? */
 __assign_1mdrwire(np);
 /* NOTE(review): qc_active was verified TRUE on entry and is not */
 /* modified before the re-check above, so this backward goto looks */
 /* unreachable - confirm before relying on the no_force path here */
 if (!bit_forced && ndx == -1) goto no_force;

done:
 /* notice can have both many wire specific and many all cbs */
 if (__num_vpi_rel_cbs > 0) __find_call_rel_cbs(np, ndx);
 if (__vpi_rel_cb_always) __cb_all_rfs(np, ndx, FALSE);
}
4690 
4691 /*
4692  * vpi force wrapper than decomposes vector net object into bit selects
4693  * result is that for every bit a switch channel is forced
4694  */
__tran_exec_putv_wire_softforce(struct net_t * np,word32 * ap,word32 * bp,int32 ndx)4695 extern void __tran_exec_putv_wire_softforce(struct net_t *np, word32 *ap,
4696  word32 *bp, int32 ndx)
4697 {
4698  register int32 bi;
4699 
4700  if (ndx == -1 && np->n_isavec)
4701   {
4702    for (bi = np->nwid - 1; bi >= 0; bi--)
4703     { do_putv_wire_intran_softforce(np, bi, ap, bp); }
4704   }
4705  else do_putv_wire_intran_softforce(np, ndx, ap, bp);
4706 }
4707 
4708 /*
4709  * do vpi force for one net-bit or scalar in tran channel
4710  * know itree change already made if needed
4711  */
do_putv_wire_intran_softforce(struct net_t * np,int32 ndx,word32 * ap,word32 * bp)4712 static void do_putv_wire_intran_softforce(struct net_t *np, int32 ndx,
4713  word32 *ap, word32 *bp)
4714 {
4715  struct qcval_t *frc_qcp;
4716  /* case 1: scalar */
4717  if (!np->n_isavec)
4718   {
4719    /* DBG remove -- */
4720    if (ndx != -1) __misc_terr(__FILE__, __LINE__);
4721    /* -- */
4722 
4723    /* this add the changed wire to nchglst if needed */
4724    if (np->nu2.qcval != NULL)
4725     {
4726      /* return F if all of wire forced, nothing to do */
4727      /* if T, this will correct bits in ap and bp so actual assign is right */
4728      if (!__correct_forced_newwireval(np, ap, bp)) return;
4729     }
4730    if (np->nchg_nd_chgstore) __chg_st_val(np, ap, bp);
4731    else __st_val(np, ap, bp);
4732    return;
4733   }
4734 
4735  /* DBG remove -- */
4736  if (ndx == -1) __misc_terr(__FILE__, __LINE__);
4737  /* -- */
4738 
4739  /* case 2: wire bit select */
4740  /* if the 1 bit is really forced nothing to do */
4741  /* SJM 12/23/02 - if not allocated can't be active */
4742  if (!np->frc_assgn_allocated) return;
4743  frc_qcp = &(np->nu2.qcval[np->nwid*__inum + ndx]);
4744  if (frc_qcp->qc_active) return;
4745 
4746  /* else simple bit assign */
4747  if (np->nchg_nd_chgstore) __chg_st_bit(np, ndx, ap[0], bp[0]);
4748  else __st_bit(np, ndx, ap[0], bp[0]);
4749 }
4750