/*
 * Copyright (c) 2001-2020 Stephen Williams (steve@icarus.com)
 *
 *    This source code is free software; you can redistribute it
 *    and/or modify it in source code form under the terms of the GNU
 *    General Public License as published by the Free Software
 *    Foundation; either version 2 of the License, or (at your option)
 *    any later version.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *    GNU General Public License for more details.
 *
 *    You should have received a copy of the GNU General Public License
 *    along with this program; if not, write to the Free Software
 *    Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

# include "config.h"

# include  <cstdlib>
# include  <climits>
# include  <cmath>      // ceil()/log2() used by make_mult_expr()
# include  "netlist.h"
# include  "netparray.h"
# include  "netvector.h"
# include  "netmisc.h"
# include  "PExpr.h"
# include  "pform_types.h"
# include  "compiler.h"
# include  "ivl_assert.h"

NetNet* sub_net_from(Design*des, NetScope*scope, long val, NetNet*sig)
{
      netvector_t*zero_vec = new netvector_t(sig->data_type(),
                                             sig->vector_width()-1, 0);
      NetNet*zero_net = new NetNet(scope, scope->local_symbol(),
                                   NetNet::WIRE, zero_vec);
      zero_net->set_line(*sig);
      zero_net->local_flag(true);

      if (sig->data_type() == IVL_VT_REAL) {
            verireal zero (val);
            NetLiteral*zero_obj = new NetLiteral(scope, scope->local_symbol(), zero);
            zero_obj->set_line(*sig);
            des->add_node(zero_obj);

            connect(zero_net->pin(0), zero_obj->pin(0));

      } else {
            verinum zero ((int64_t)val);
            zero = cast_to_width(zero, sig->vector_width());
            zero.has_sign(sig->get_signed());
            NetConst*zero_obj = new NetConst(scope, scope->local_symbol(), zero);
            zero_obj->set_line(*sig);
            des->add_node(zero_obj);

            connect(zero_net->pin(0), zero_obj->pin(0));
      }

      NetAddSub*adder = new NetAddSub(scope, scope->local_symbol(), sig->vector_width());
      adder->set_line(*sig);
      des->add_node(adder);
      adder->attribute(perm_string::literal("LPM_Direction"), verinum("SUB"));

      connect(zero_net->pin(0), adder->pin_DataA());
      connect(adder->pin_DataB(), sig->pin(0));

      netvector_t*tmp_vec = new netvector_t(sig->data_type(),
                                            sig->vector_width()-1, 0);
      NetNet*tmp = new NetNet(scope, scope->local_symbol(),
                              NetNet::WIRE, tmp_vec);
      tmp->set_line(*sig);
      tmp->local_flag(true);

      connect(adder->pin_Result(), tmp->pin(0));

      return tmp;
}
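
/*
 * Illustrative note (not from the original source): sub_net_from()
 * synthesizes the value (val - sig). For example, a hypothetical call
 * sub_net_from(des, scope, 0, sig) builds a NetAddSub configured as a
 * subtractor with the constant 0 on DataA and sig on DataB, so the
 * returned temporary net carries the arithmetic negation of sig.
 */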

NetNet* cast_to_int2(Design*des, NetScope*scope, NetNet*src, unsigned wid)
{
      if (src->data_type() == IVL_VT_BOOL)
            return src;

      netvector_t*tmp_vec = new netvector_t(IVL_VT_BOOL, wid-1, 0,
                                            src->get_signed());
      NetNet*tmp = new NetNet(scope, scope->local_symbol(), NetNet::WIRE, tmp_vec);
      tmp->set_line(*src);
      tmp->local_flag(true);

      NetCastInt2*cast = new NetCastInt2(scope, scope->local_symbol(), wid);
      cast->set_line(*src);
      des->add_node(cast);

      connect(cast->pin(0), tmp->pin(0));
      connect(cast->pin(1), src->pin(0));

      return tmp;
}

NetNet* cast_to_int4(Design*des, NetScope*scope, NetNet*src, unsigned wid)
{
      if (src->data_type() != IVL_VT_REAL)
            return src;

      netvector_t*tmp_vec = new netvector_t(IVL_VT_LOGIC, wid-1, 0);
      NetNet*tmp = new NetNet(scope, scope->local_symbol(), NetNet::WIRE, tmp_vec);
      tmp->set_line(*src);
      tmp->local_flag(true);

      NetCastInt4*cast = new NetCastInt4(scope, scope->local_symbol(), wid);
      cast->set_line(*src);
      des->add_node(cast);

      connect(cast->pin(0), tmp->pin(0));
      connect(cast->pin(1), src->pin(0));

      return tmp;
}

NetNet* cast_to_real(Design*des, NetScope*scope, NetNet*src)
{
      if (src->data_type() == IVL_VT_REAL)
            return src;

      netvector_t*tmp_vec = new netvector_t(IVL_VT_REAL);
      NetNet*tmp = new NetNet(scope, scope->local_symbol(), NetNet::WIRE, tmp_vec);
      tmp->set_line(*src);
      tmp->local_flag(true);

      NetCastReal*cast = new NetCastReal(scope, scope->local_symbol(), src->get_signed());
      cast->set_line(*src);
      des->add_node(cast);

      connect(cast->pin(0), tmp->pin(0));
      connect(cast->pin(1), src->pin(0));

      return tmp;
}

NetExpr* cast_to_int2(NetExpr*expr, unsigned width)
{
        // Special case: The expression is already BOOL
      if (expr->expr_type() == IVL_VT_BOOL)
            return expr;

      if (debug_elaborate)
            cerr << expr->get_fileline() << ": debug: "
                 << "Cast expression to int2, width=" << width << "." << endl;

      NetECast*cast = new NetECast('2', expr, width, expr->has_sign());
      cast->set_line(*expr);
      return cast;
}

NetExpr* cast_to_int4(NetExpr*expr, unsigned width)
{
        // Special case: The expression is already LOGIC or BOOL
      if (expr->expr_type() == IVL_VT_LOGIC || expr->expr_type() == IVL_VT_BOOL)
            return expr;

      if (debug_elaborate)
            cerr << expr->get_fileline() << ": debug: "
                 << "Cast expression to int4, width=" << width << "." << endl;

      NetECast*cast = new NetECast('v', expr, width, expr->has_sign());
      cast->set_line(*expr);
      return cast;
}

NetExpr* cast_to_real(NetExpr*expr)
{
      if (expr->expr_type() == IVL_VT_REAL)
            return expr;

      if (debug_elaborate)
            cerr << expr->get_fileline() << ": debug: "
                 << "Cast expression to real." << endl;

      NetECast*cast = new NetECast('r', expr, 1, true);
      cast->set_line(*expr);
      return cast;
}

/*
 * Add a signed constant to an existing expression. Generate a new
 * NetEBAdd node that has the input expression and an expression made
 * from the constant value.
 */
static NetExpr* make_add_expr(NetExpr*expr, long val)
{
      if (val == 0)
            return expr;

        // If the value to be added is <0, then instead generate a
        // SUBTRACT node and turn the value positive.
      char add_op = '+';
      if (val < 0) {
            add_op = '-';
            val = -val;
      }

      verinum val_v (val, expr->expr_width());
      val_v.has_sign(expr->has_sign());

      NetEConst*val_c = new NetEConst(val_v);
      val_c->set_line(*expr);

      NetEBAdd*res = new NetEBAdd(add_op, expr, val_c, expr->expr_width(),
                                  expr->has_sign());
      res->set_line(*expr);

      return res;
}
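
/*
 * For example (illustrative only): make_add_expr(e, -3) on an 8-bit
 * expression e produces the expression (e - 8'd3), since a negative
 * constant is folded into a subtraction with a positive literal.
 */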

static NetExpr* make_add_expr(const LineInfo*loc, NetExpr*expr1, NetExpr*expr2)
{
      bool use_signed = expr1->has_sign() && expr2->has_sign();
      unsigned use_wid = expr1->expr_width();

      if (expr2->expr_width() > use_wid)
            use_wid = expr2->expr_width();

      expr1 = pad_to_width(expr1, use_wid, *loc);
      expr2 = pad_to_width(expr2, use_wid, *loc);

      NetEBAdd*tmp = new NetEBAdd('+', expr1, expr2, use_wid, use_signed);
      return tmp;
}

/*
 * Subtract an existing expression from a signed constant.
 */
static NetExpr* make_sub_expr(long val, NetExpr*expr)
{
      verinum val_v (val, expr->expr_width());
      val_v.has_sign(expr->has_sign());

      NetEConst*val_c = new NetEConst(val_v);
      val_c->set_line(*expr);

      NetEBAdd*res = new NetEBAdd('-', val_c, expr, expr->expr_width(),
                                  expr->has_sign());
      res->set_line(*expr);

      return res;
}

/*
 * Subtract a signed constant from an existing expression.
 */
static NetExpr* make_sub_expr(NetExpr*expr, long val)
{
      verinum val_v (val, expr->expr_width());
      val_v.has_sign(expr->has_sign());

      NetEConst*val_c = new NetEConst(val_v);
      val_c->set_line(*expr);

      NetEBAdd*res = new NetEBAdd('-', expr, val_c, expr->expr_width(),
                                  expr->has_sign());
      res->set_line(*expr);

      return res;
}

/*
 * Multiply an existing expression by a signed positive number.
 * This does a lossless multiply, so the arguments will need to be
 * sized to match the output size.
 */
static NetExpr* make_mult_expr(NetExpr*expr, unsigned long val)
{
      const unsigned val_wid = ceil(log2((double)val));
      unsigned use_wid = expr->expr_width() + val_wid;
      verinum val_v (val, use_wid);
      val_v.has_sign(expr->has_sign());

      NetEConst*val_c = new NetEConst(val_v);
      val_c->set_line(*expr);

        // We know by definition that the expr argument needs to be
        // padded to the right argument width for this lossless multiply.
      expr = pad_to_width(expr, use_wid, *expr);

      NetEBMult*res = new NetEBMult('*', expr, val_c, use_wid, expr->has_sign());
      res->set_line(*expr);

      return res;
}
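
/*
 * Illustrative width calculation (not from the original source): for
 * a 6-bit expression multiplied by val=10, val_wid = ceil(log2(10)) = 4,
 * so the operands and result are sized to 6+4 = 10 bits, which is
 * enough to hold the worst case 63*10 = 630 < 1024 without overflow.
 */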

/*
 * This routine is used to calculate the number of bits needed to
 * contain the given number.
 */
static unsigned num_bits(long arg)
{
      unsigned res = 0;

        /* For a negative value we have room for one extra value, but
         * we have a signed result so we need an extra bit for this. */
      if (arg < 0) {
            arg = -arg - 1;
            res += 1;
      }

        /* Calculate the number of bits needed here. */
      while (arg) {
            res += 1;
            arg >>= 1;
      }

      return res;
}
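
/*
 * Worked examples (illustrative only):
 *   num_bits(0)  -> 0
 *   num_bits(3)  -> 2    (binary 11)
 *   num_bits(4)  -> 3    (binary 100)
 *   num_bits(-1) -> 1    (arg becomes 0; one extra bit for the sign)
 *   num_bits(-4) -> 3    (arg becomes 3 -> 2 bits, plus the sign bit)
 */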

/*
 * This routine generates the normalization expression needed for a variable
 * bit select or a variable base expression for an indexed part
 * select. This function doesn't actually look at the variable
 * dimensions; it just does the final calculation using the msb/lsb of
 * the last slice and the offset (soff) of the slice in the variable.
 */
NetExpr *normalize_variable_base(NetExpr *base, long msb, long lsb,
                                 unsigned long wid, bool is_up, long soff)
{
      long offset = lsb;

      if (msb < lsb) {
              /* Correct the offset if needed. */
            if (is_up) offset -= wid - 1;
              /* Calculate the space needed for the offset. */
            unsigned min_wid = num_bits(offset);
            if (num_bits(soff) > min_wid)
                  min_wid = num_bits(soff);
              /* We need enough space for the larger of the offset or the
               * base expression. */
            if (min_wid < base->expr_width()) min_wid = base->expr_width();
              /* Now that we have the minimum needed width, increase it
               * by two to make room for the normalization calculation. */
            min_wid += 2;
              /* Pad the base expression to the correct width. */
            base = pad_to_width(base, min_wid, *base);
              /* If the base expression is unsigned, and either the lsb
               * is negative or it does not fill the width of the base
               * expression, then we could generate negative normalized
               * values, so cast the expression to signed to get the
               * math correct. */
            if ((lsb < 0 || num_bits(lsb+1) <= base->expr_width()) &&
                ! base->has_sign()) {
                    /* We need this extra select to hide the signed
                     * property from the padding above. It will be
                     * removed automatically during code generation. */
                  NetESelect *tmp = new NetESelect(base, 0, min_wid);
                  tmp->set_line(*base);
                  tmp->cast_signed(true);
                  base = tmp;
            }
              /* Normalize the expression. */
            base = make_sub_expr(offset+soff, base);
      } else {
              /* Correct the offset if needed. */
            if (!is_up) offset += wid - 1;
              /* If the offset is zero then just return the base (index)
               * expression. */
            if ((soff-offset) == 0) return base;
              /* Calculate the space needed for the offset. */
            unsigned min_wid = num_bits(-offset);
            if (num_bits(soff) > min_wid)
                  min_wid = num_bits(soff);
              /* We need enough space for the larger of the offset or the
               * base expression. */
            if (min_wid < base->expr_width()) min_wid = base->expr_width();
              /* Now that we have the minimum needed width, increase it
               * by two to make room for the normalization calculation. */
            min_wid += 2;
              /* Pad the base expression to the correct width. */
            base = pad_to_width(base, min_wid, *base);
              /* If the offset is greater than zero then we need to do
               * signed math to get the location value correct. */
            if (offset > 0 && ! base->has_sign()) {
                    /* We need this extra select to hide the signed
                     * property from the padding above. It will be
                     * removed automatically during code generation. */
                  NetESelect *tmp = new NetESelect(base, 0, min_wid);
                  tmp->set_line(*base);
                  tmp->cast_signed(true);
                  base = tmp;
            }
              /* Normalize the expression. */
            base = make_add_expr(base, soff-offset);
      }

      return base;
}
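
/*
 * Worked example (illustrative only): for "reg [0:7] r;" (msb < lsb)
 * a bit select r[base] with wid=1 and soff=0 takes the first branch;
 * offset stays 7 and the normalized index becomes (7 - base), mapping
 * r[0] to canonical bit 7 and r[7] to canonical bit 0. For
 * "reg [7:0] r;" the second branch applies and, with soff-offset == 0,
 * the base expression is returned unchanged.
 */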

/*
 * This method is how indices should work except that the base should
 * be a vector of expressions that matches the size of the dims list,
 * so that we can generate an expression based on the entire packed
 * vector. For now, we assert that there is only one set of dimensions.
 */
NetExpr *normalize_variable_base(NetExpr *base,
                                 const list<netrange_t>&dims,
                                 unsigned long wid, bool is_up)
{
      ivl_assert(*base, dims.size() == 1);
      const netrange_t&rng = dims.back();
      return normalize_variable_base(base, rng.get_msb(), rng.get_lsb(), wid, is_up);
}

NetExpr *normalize_variable_bit_base(const list<long>&indices, NetExpr*base,
                                     const NetNet*reg)
{
      const vector<netrange_t>&packed_dims = reg->packed_dims();
      ivl_assert(*base, indices.size()+1 == packed_dims.size());

        // Get the canonical offset of the slice within which we are
        // addressing. We need that address as a slice offset to
        // calculate the proper complete address.
      const netrange_t&rng = packed_dims.back();
      long slice_off = reg->sb_to_idx(indices, rng.get_lsb());

      return normalize_variable_base(base, rng.get_msb(), rng.get_lsb(), 1, true, slice_off);
}

NetExpr *normalize_variable_part_base(const list<long>&indices, NetExpr*base,
                                      const NetNet*reg,
                                      unsigned long wid, bool is_up)
{
      const vector<netrange_t>&packed_dims = reg->packed_dims();
      ivl_assert(*base, indices.size()+1 == packed_dims.size());

        // Get the canonical offset of the slice within which we are
        // addressing. We need that address as a slice offset to
        // calculate the proper complete address.
      const netrange_t&rng = packed_dims.back();
      long slice_off = reg->sb_to_idx(indices, rng.get_lsb());

      return normalize_variable_base(base, rng.get_msb(), rng.get_lsb(), wid, is_up, slice_off);
}

NetExpr *normalize_variable_slice_base(const list<long>&indices, NetExpr*base,
                                       const NetNet*reg, unsigned long&lwid)
{
      const vector<netrange_t>&packed_dims = reg->packed_dims();
      ivl_assert(*base, indices.size() < packed_dims.size());

      vector<netrange_t>::const_iterator pcur = packed_dims.end();
      for (size_t idx = indices.size() ; idx < packed_dims.size(); idx += 1) {
            -- pcur;
      }

      long sb = min(pcur->get_lsb(), pcur->get_msb());
      long loff;
      reg->sb_to_slice(indices, sb, loff, lwid);

      unsigned min_wid = base->expr_width();
      if ((sb < 0) && !base->has_sign()) min_wid += 1;
      if (min_wid < num_bits(pcur->get_lsb())) min_wid = num_bits(pcur->get_lsb());
      if (min_wid < num_bits(pcur->get_msb())) min_wid = num_bits(pcur->get_msb());
      base = pad_to_width(base, min_wid, *base);
      if ((sb < 0) && !base->has_sign()) {
            NetESelect *tmp = new NetESelect(base, 0, min_wid);
            tmp->set_line(*base);
            tmp->cast_signed(true);
            base = tmp;
      }

      if (pcur->get_msb() >= pcur->get_lsb()) {
            if (pcur->get_lsb() != 0)
                  base = make_sub_expr(base, pcur->get_lsb());
            base = make_mult_expr(base, lwid);
            min_wid = base->expr_width();
            if (min_wid < num_bits(loff)) min_wid = num_bits(loff);
            if (loff != 0) min_wid += 1;
            base = pad_to_width(base, min_wid, *base);
            base = make_add_expr(base, loff);
      } else {
            if (pcur->get_msb() != 0)
                  base = make_sub_expr(base, pcur->get_msb());
            base = make_mult_expr(base, lwid);
            min_wid = base->expr_width();
            if (min_wid < num_bits(loff)) min_wid = num_bits(loff);
            if (loff != 0) min_wid += 1;
            base = pad_to_width(base, min_wid, *base);
            base = make_sub_expr(loff, base);
      }
      return base;
}

ostream& operator << (ostream&o, __IndicesManip<long> val)
{
      for (list<long>::const_iterator cur = val.val.begin()
                 ; cur != val.val.end() ; ++cur) {
            o << "[" << *cur << "]";
      }
      return o;
}

ostream& operator << (ostream&o, __IndicesManip<NetExpr*> val)
{
      for (list<NetExpr*>::const_iterator cur = val.val.begin()
                 ; cur != val.val.end() ; ++cur) {
            o << "[" << *(*cur) << "]";
      }
      return o;
}

/*
 * The src is the input index expression list from the expression, and
 * the count is the number that are to be elaborated into the indices
 * list. At the same time, create an indices_const list that contains
 * the evaluated values for the expressions, if they can be evaluated.
 */
void indices_to_expressions(Design*des, NetScope*scope,
                              // loc is for error messages.
                            const LineInfo*loc,
                              // src is the index list, and count is
                              // the number of items in the list to use.
                            const list<index_component_t>&src, unsigned count,
                              // True if the expression MUST be constant.
                            bool need_const,
                              // These are the outputs.
                            indices_flags&flags,
                            list<NetExpr*>&indices, list<long>&indices_const)
{
      ivl_assert(*loc, count <= src.size());

      flags.invalid   = false;
      flags.variable  = false;
      flags.undefined = false;
      for (list<index_component_t>::const_iterator cur = src.begin()
                 ; count > 0 ;  ++cur, --count) {
            ivl_assert(*loc, cur->sel != index_component_t::SEL_NONE);

            if (cur->sel != index_component_t::SEL_BIT) {
                  cerr << loc->get_fileline() << ": error: "
                       << "Array cannot be indexed by a range." << endl;
                  des->errors += 1;
            }
            ivl_assert(*loc, cur->msb);

            NetExpr*word_index = elab_and_eval_lossless(des, scope, cur->msb, -2, need_const);

            if (word_index == 0)
                  flags.invalid = true;

              // Track if we detect any non-constant expressions
              // here. This may allow for a special case.
            NetEConst*word_const = dynamic_cast<NetEConst*> (word_index);
            if (word_const == 0)
                  flags.variable = true;
            else if (!word_const->value().is_defined())
                  flags.undefined = true;
            else if (!flags.variable && !flags.undefined)
                  indices_const.push_back(word_const->value().as_long());

            indices.push_back(word_index);
      }
}

static void make_strides(const vector<netrange_t>&dims,
                         vector<long>&stride)
{
      stride[dims.size()-1] = 1;
      for (size_t idx = stride.size()-1 ; idx > 0 ; --idx) {
            long tmp = dims[idx].width();
            if (idx < stride.size())
                  tmp *= stride[idx];
            stride[idx-1] = tmp;
      }
}
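
/*
 * Illustrative stride example (not from the original source): for
 * unpacked dimensions [0:1][0:2][0:3] (widths 2, 3, 4) the strides
 * come out as {12, 4, 1}, so the canonical address of element
 * [i][j][k] is i*12 + j*4 + k.
 */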

/*
 * Take in a vector of constant indices and convert them to a single
 * number that is the canonical address (zero based, 1-d) of the
 * word. If any of the indices are out of bounds, return nil instead
 * of an expression.
 */
static NetExpr* normalize_variable_unpacked(const vector<netrange_t>&dims, list<long>&indices)
{
        // Make strides for each index. The stride is the distance (in
        // words) to the next element in the canonical array.
      vector<long> stride (dims.size());
      make_strides(dims, stride);

      int64_t canonical_addr = 0;

      int idx = 0;
      for (list<long>::const_iterator cur = indices.begin()
                 ; cur != indices.end() ; ++cur, ++idx) {
            long tmp = *cur;

            if (dims[idx].get_lsb() <= dims[idx].get_msb())
                  tmp -= dims[idx].get_lsb();
            else
                  tmp -= dims[idx].get_msb();

              // Notice if this index is out of range.
            if (tmp < 0 || tmp >= (long)dims[idx].width()) {
                  return 0;
            }

            canonical_addr += tmp * stride[idx];
      }

      NetEConst*canonical_expr = new NetEConst(verinum(canonical_addr));
      return canonical_expr;
}
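
/*
 * Worked example (illustrative only): with dimensions [0:1][0:2] and
 * constant indices (1,2), the strides are {3,1} and the canonical
 * address is 1*3 + 2 = 5. An index outside its dimension, e.g. (2,0),
 * returns nil instead of an expression.
 */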

NetExpr* normalize_variable_unpacked(const NetNet*net, list<long>&indices)
{
      const vector<netrange_t>&dims = net->unpacked_dims();
      return normalize_variable_unpacked(dims, indices);
}

NetExpr* normalize_variable_unpacked(const netsarray_t*stype, list<long>&indices)
{
      const vector<netrange_t>&dims = stype->static_dimensions();
      return normalize_variable_unpacked(dims, indices);
}

NetExpr* normalize_variable_unpacked(const LineInfo&loc, const vector<netrange_t>&dims, list<NetExpr*>&indices)
{
        // Make strides for each index. The stride is the distance (in
        // words) to the next element in the canonical array.
      vector<long> stride (dims.size());
      make_strides(dims, stride);

      NetExpr*canonical_expr = 0;

      int idx = 0;
      for (list<NetExpr*>::const_iterator cur = indices.begin()
                 ; cur != indices.end() ; ++cur, ++idx) {
            NetExpr*tmp = *cur;
              // If the expression elaboration generated errors, then
              // give up. Presumably, the error during expression
              // elaboration already generated the error message.
            if (tmp == 0)
                  return 0;

            int64_t use_base;
            if (! dims[idx].defined())
                  use_base = 0;
            else if (dims[idx].get_lsb() <= dims[idx].get_msb())
                  use_base = dims[idx].get_lsb();
            else
                  use_base = dims[idx].get_msb();

            int64_t use_stride = stride[idx];

              // Account for the fact that we are doing arithmetic and
              // need a width wide enough to avoid any losses, so
              // calculate a min_wid width.
            unsigned tmp_wid;
            unsigned min_wid = tmp->expr_width();
            if (use_base != 0 && ((tmp_wid = num_bits(use_base)) >= min_wid))
                  min_wid = tmp_wid + 1;
            if ((tmp_wid = num_bits(dims[idx].width()+1)) >= min_wid)
                  min_wid = tmp_wid + 1;
            if (use_stride != 1)
                  min_wid += num_bits(use_stride);

            tmp = pad_to_width(tmp, min_wid, loc);

              // Now generate the math to calculate the canonical address.
            NetExpr*tmp_scaled = 0;
            if (NetEConst*tmp_const = dynamic_cast<NetEConst*> (tmp)) {
                    // Special case: the index is constant, so this
                    // iteration can be replaced with a constant
                    // expression.
                  int64_t val = tmp_const->value().as_long();
                  val -= use_base;
                  val *= use_stride;
                    // Very special case: the scaled term is zero, so
                    // we can skip this iteration.
                  if (val == 0)
                        continue;
                  tmp_scaled = new NetEConst(verinum(val));

            } else {
                  tmp_scaled = tmp;
                  if (use_base != 0)
                        tmp_scaled = make_add_expr(tmp_scaled, -use_base);
                  if (use_stride != 1)
                        tmp_scaled = make_mult_expr(tmp_scaled, use_stride);
            }

            if (canonical_expr == 0) {
                  canonical_expr = tmp_scaled;
            } else {
                  bool expr_has_sign = canonical_expr->has_sign() &&
                                       tmp_scaled->has_sign();
                  canonical_expr = new NetEBAdd('+', canonical_expr, tmp_scaled,
                                                canonical_expr->expr_width()+1,
                                                expr_has_sign);
            }
      }

        // If we don't have an expression at this point, all the indices
        // were constant zero. But this variant of
        // normalize_variable_unpacked() is only used when at least one
        // index is not a constant.
      ivl_assert(loc, canonical_expr);

      return canonical_expr;
}

NetExpr* normalize_variable_unpacked(const NetNet*net, list<NetExpr*>&indices)
{
      const vector<netrange_t>&dims = net->unpacked_dims();
      return normalize_variable_unpacked(*net, dims, indices);
}

NetExpr* normalize_variable_unpacked(const LineInfo&loc, const netsarray_t*stype, list<NetExpr*>&indices)
{
      const vector<netrange_t>&dims = stype->static_dimensions();
      return normalize_variable_unpacked(loc, dims, indices);
}

NetExpr* make_canonical_index(Design*des, NetScope*scope,
                              const LineInfo*loc,
                              const std::list<index_component_t>&src,
                              const netsarray_t*stype,
                              bool need_const)
{
      NetExpr*canon_index = 0;

      list<long> indices_const;
      list<NetExpr*> indices_expr;
      indices_flags flags;
      indices_to_expressions(des, scope, loc,
                             src, src.size(),
                             need_const,
                             flags,
                             indices_expr, indices_const);

      if (flags.undefined) {
            cerr << loc->get_fileline() << ": warning: "
                 << "ignoring undefined value array access." << endl;

      } else if (flags.variable) {
            canon_index = normalize_variable_unpacked(*loc, stype, indices_expr);

      } else {
            canon_index = normalize_variable_unpacked(stype, indices_const);
      }

      return canon_index;
}

NetEConst* make_const_x(unsigned long wid)
{
      verinum xxx (verinum::Vx, wid);
      NetEConst*resx = new NetEConst(xxx);
      return resx;
}

NetEConst* make_const_0(unsigned long wid)
{
      verinum zero (verinum::V0, wid);
      NetEConst*res0 = new NetEConst(zero);
      return res0;
}

NetEConst* make_const_val(unsigned long value)
{
      verinum tmp (value, integer_width);
      NetEConst*res = new NetEConst(tmp);
      return res;
}

NetEConst* make_const_val_s(long value)
{
      verinum tmp (value, integer_width);
      tmp.has_sign(true);
      NetEConst*res = new NetEConst(tmp);
      return res;
}

NetNet* make_const_x(Design*des, NetScope*scope, unsigned long wid)
{
      verinum xxx (verinum::Vx, wid);
      NetConst*res = new NetConst(scope, scope->local_symbol(), xxx);
      des->add_node(res);

      netvector_t*sig_vec = new netvector_t(IVL_VT_LOGIC, wid-1, 0);
      NetNet*sig = new NetNet(scope, scope->local_symbol(), NetNet::WIRE, sig_vec);
      sig->local_flag(true);

      connect(sig->pin(0), res->pin(0));
      return sig;
}

NetNet* make_const_z(Design*des, NetScope*scope, unsigned long wid)
{
      verinum zzz (verinum::Vz, wid);
      NetConst*res = new NetConst(scope, scope->local_symbol(), zzz);
      des->add_node(res);

      netvector_t*sig_vec = new netvector_t(IVL_VT_LOGIC, wid-1, 0);
      NetNet*sig = new NetNet(scope, scope->local_symbol(), NetNet::WIRE, sig_vec);
      sig->local_flag(true);

      connect(sig->pin(0), res->pin(0));
      return sig;
}

NetExpr* condition_reduce(NetExpr*expr)
{
      if (expr->expr_type() == IVL_VT_REAL) {
            if (NetECReal *tmp = dynamic_cast<NetECReal*>(expr)) {
                  verinum::V res;
                  if (tmp->value().as_double() == 0.0) res = verinum::V0;
                  else res = verinum::V1;
                  verinum vres (res, 1, true);
                  NetExpr *rtn = new NetEConst(vres);
                  rtn->set_line(*expr);
                  delete expr;
                  return rtn;
            }

            NetExpr *rtn = new NetEBComp('n', expr,
                                         new NetECReal(verireal(0.0)));
            rtn->set_line(*expr);
            return rtn;
      }

      if (expr->expr_width() == 1)
            return expr;

      verinum zero (verinum::V0, expr->expr_width());
      zero.has_sign(expr->has_sign());

      NetEConst*ezero = new NetEConst(zero);
      ezero->set_line(*expr);

      NetEBComp*cmp = new NetEBComp('n', expr, ezero);
      cmp->set_line(*expr);
      cmp->cast_signed(false);

      return cmp;
}
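
/*
 * For example (illustrative only): a 4-bit condition e is rewritten
 * as the comparison (e != 4'b0000), while a 1-bit condition is
 * returned unchanged. A real-valued constant folds directly to 1'b0
 * or 1'b1; a non-constant real becomes a comparison against 0.0.
 */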

static NetExpr* do_elab_and_eval(Design*des, NetScope*scope, PExpr*pe,
                                 int context_width, bool need_const,
                                 bool annotatable, bool force_expand,
                                 ivl_variable_type_t cast_type,
                                 bool force_unsigned)
{
      PExpr::width_mode_t mode = PExpr::SIZED;
      if ((context_width == -2) && !gn_strict_expr_width_flag)
            mode = PExpr::EXPAND;
      if (force_expand)
            mode = PExpr::EXPAND;

      pe->test_width(des, scope, mode);

        // Get the final expression width. If the expression is unsized,
        // this may be different from the value returned by test_width().
      unsigned expr_width = pe->expr_width();

        // If context_width is positive, this is the RHS of an assignment,
        // so the LHS width must also be included in the width calculation.
      unsigned pos_context_width = context_width > 0 ? context_width : 0;
      if ((pe->expr_type() != IVL_VT_REAL) && (expr_width < pos_context_width))
            expr_width = pos_context_width;

        // If this is the RHS of a compressed assignment, the LHS also
        // affects the expression type (signed/unsigned).
      if (force_unsigned)
            pe->cast_signed(false);

      if (debug_elaborate) {
            cerr << pe->get_fileline() << ": elab_and_eval: test_width of "
                 << *pe << endl;
            cerr << pe->get_fileline() << ":              : "
                 << "returns type=" << pe->expr_type()
                 << ", context_width=" << context_width
                 << ", signed=" << pe->has_sign()
                 << ", force_expand=" << force_expand
                 << ", expr_width=" << expr_width
                 << ", mode=" << PExpr::width_mode_name(mode) << endl;
            cerr << pe->get_fileline() << ":              : "
                 << "cast_type=" << cast_type << endl;
      }

        // If we can get the same result using a smaller expression
        // width, do so.
      unsigned min_width = pe->min_width();
      if ((min_width != UINT_MAX) && (pe->expr_type() != IVL_VT_REAL)
          && (pos_context_width > 0) && (expr_width > pos_context_width)) {
            expr_width = max(min_width, pos_context_width);

            if (debug_elaborate) {
                  cerr << pe->get_fileline() << ":              : "
                       << "pruned to width=" << expr_width << endl;
            }
      }

      if ((mode >= PExpr::LOSSLESS) && (expr_width > width_cap)
          && (expr_width > pos_context_width)) {
            cerr << pe->get_fileline() << ": warning: excessive unsized "
                 << "expression width detected." << endl;
            cerr << pe->get_fileline() << ":        : The expression width "
                 << "is capped at " << width_cap << " bits." << endl;
            expr_width = width_cap;
      }

      unsigned flags = PExpr::NO_FLAGS;
      if (need_const)
            flags |= PExpr::NEED_CONST;
      if (annotatable)
            flags |= PExpr::ANNOTATABLE;

      if (debug_elaborate) {
            cerr << pe->get_fileline() << ": elab_and_eval: "
                 << "Calculated width is " << expr_width << "." << endl;
      }

      NetExpr*tmp = pe->elaborate_expr(des, scope, expr_width, flags);
      if (tmp == 0) return 0;

      if ((cast_type != IVL_VT_NO_TYPE) && (cast_type != tmp->expr_type())) {
            switch (tmp->expr_type()) {
                case IVL_VT_BOOL:
                case IVL_VT_LOGIC:
                case IVL_VT_REAL:
                  break;
                default:
                  cerr << tmp->get_fileline() << ": error: "
                          "The expression '" << *pe << "' cannot be implicitly "
                          "cast to the target type." << endl;
                  des->errors += 1;
                  delete tmp;
                  return 0;
            }
            switch (cast_type) {
                case IVL_VT_REAL:
                  tmp = cast_to_real(tmp);
                  break;
                case IVL_VT_BOOL:
                  tmp = cast_to_int2(tmp, pos_context_width);
                  break;
                case IVL_VT_LOGIC:
                  tmp = cast_to_int4(tmp, pos_context_width);
                  break;
                default:
                  break;
            }
      }

        // If the context_width passed in is actually the minimum width,
        // then raise the context_width to be big enough for the
        // lossless expression.
      if (force_expand && context_width > 0) {
            context_width = max(context_width, (int)expr_width);
      }

      eval_expr(tmp, context_width);

      if (NetEConst*ce = dynamic_cast<NetEConst*>(tmp)) {
            if ((mode >= PExpr::LOSSLESS) && (context_width < 0))
                  ce->trim();
      }

      return tmp;
}

NetExpr* elab_and_eval(Design*des, NetScope*scope, PExpr*pe,
                       int context_width, bool need_const, bool annotatable,
                       ivl_variable_type_t cast_type, bool force_unsigned)
{
      return do_elab_and_eval(des, scope, pe, context_width,
                              need_const, annotatable, false,
                              cast_type, force_unsigned);
}

/*
 * This variant of elab_and_eval elaborates the expression losslessly,
 * regardless of the generation of Verilog being compiled. This is in
 * support of certain special contexts, notably index expressions.
 */
NetExpr* elab_and_eval_lossless(Design*des, NetScope*scope, PExpr*pe,
                                int context_width, bool need_const, bool annotatable,
                                ivl_variable_type_t cast_type)
{
      return do_elab_and_eval(des, scope, pe, context_width,
                              need_const, annotatable, true,
                              cast_type, false);
}

NetExpr* elab_and_eval(Design*des, NetScope*scope, PExpr*pe,
                       ivl_type_t lv_net_type, bool need_const)
{
      if (debug_elaborate) {
            cerr << pe->get_fileline() << ": elab_and_eval: "
                 << "pe=" << *pe
                 << ", lv_net_type=" << *lv_net_type << endl;
      }

        // Elaborate the expression using the more general
        // elaborate_expr method.
      unsigned flags = PExpr::NO_FLAGS;
      if (need_const)
            flags |= PExpr::NEED_CONST;

      NetExpr*tmp = pe->elaborate_expr(des, scope, lv_net_type, flags);
      if (tmp == 0) return 0;

      ivl_variable_type_t cast_type = ivl_type_base(lv_net_type);
      if ((cast_type != IVL_VT_NO_TYPE) && (cast_type != tmp->expr_type())) {
              // Catch some special cases.
            switch (cast_type) {
                case IVL_VT_DARRAY:
                case IVL_VT_QUEUE:
                  if (NetESignal*net = dynamic_cast<NetESignal*>(tmp)) {
                        ivl_variable_type_t type = net->expr_type();
                        if ((type == IVL_VT_DARRAY) || (type == IVL_VT_QUEUE))
                              return tmp;
                  }
                  if (dynamic_cast<PEAssignPattern*>(pe))
                        return tmp;
                  // fall through
                case IVL_VT_STRING:
                  if (dynamic_cast<PEConcat*>(pe))
                        return tmp;
                  break;
                case IVL_VT_CLASS:
                  if (dynamic_cast<PENull*>(pe))
                        return tmp;
                  break;
                default:
                  break;
            }

            cerr << tmp->get_fileline() << ": error: "
                    "The expression '" << *pe << "' cannot be implicitly "
                    "cast to the target type." << endl;
            des->errors += 1;
            delete tmp;
            return 0;
      }

      return tmp;
}

NetExpr* elab_sys_task_arg(Design*des, NetScope*scope, perm_string name,
                           unsigned arg_idx, PExpr*pe, bool need_const)
{
      PExpr::width_mode_t mode = PExpr::SIZED;
      pe->test_width(des, scope, mode);

      if (debug_elaborate) {
            cerr << pe->get_fileline() << ": debug: test_width of "
                 << name << " argument " << (arg_idx+1) << " " << *pe << endl;
            cerr << pe->get_fileline() << ":        "
                 << "returns type=" << pe->expr_type()
                 << ", width=" << pe->expr_width()
                 << ", signed=" << pe->has_sign()
                 << ", mode=" << PExpr::width_mode_name(mode) << endl;
      }

      unsigned flags = PExpr::SYS_TASK_ARG;
      if (need_const)
            flags |= PExpr::NEED_CONST;

      NetExpr*tmp = pe->elaborate_expr(des, scope, pe->expr_width(), flags);
      if (tmp == 0) return 0;

      eval_expr(tmp, -1);

      if (NetEConst*ce = dynamic_cast<NetEConst*>(tmp)) {
              // For lossless/unsized constant expressions, we can now
              // determine the exact width required to hold the result.
              // But leave literal numbers exactly as the user supplied
              // them.
            if ((mode >= PExpr::LOSSLESS) && !dynamic_cast<PENumber*>(pe)
                && tmp->expr_width() > 32)
                  ce->trim();
      }

      return tmp;
}

bool evaluate_range(Design*des, NetScope*scope, const LineInfo*li,
                    const pform_range_t&range, long&index_l, long&index_r)
{
      bool dimension_ok = true;

        // Unsized and queue dimensions should be handled before calling
        // this function. If we find them here, we are in a context where
        // they are not allowed.
      if (range.first == 0) {
            cerr << li->get_fileline() << ": error: "
                    "An unsized dimension is not allowed here." << endl;
            dimension_ok = false;
            des->errors += 1;
      } else if (dynamic_cast<PENull*>(range.first)) {
            cerr << li->get_fileline() << ": error: "
                    "A queue dimension is not allowed here." << endl;
            dimension_ok = false;
            des->errors += 1;
      } else {
            NetExpr*texpr = elab_and_eval(des, scope, range.first, -1, true);
            if (! eval_as_long(index_l, texpr)) {
                  cerr << range.first->get_fileline() << ": error: "
                          "Dimensions must be constant." << endl;
                  cerr << range.first->get_fileline() << "       : "
                       << (range.second ? "This MSB" : "This size")
                       << " expression violates the rule: "
                       << *range.first << endl;
                  dimension_ok = false;
                  des->errors += 1;
            }
            delete texpr;

            if (range.second == 0) {
                    // This is a SystemVerilog [size] dimension. The IEEE
                    // standard does not allow this in a packed dimension,
                    // but we do. At least one commercial simulator does too.
                  if (!dimension_ok) {
                        // bail out
                  } else if (index_l > 0) {
                        index_l = index_l - 1;
                        index_r = 0;
                  } else {
                        cerr << range.first->get_fileline() << ": error: "
                                "Dimension size must be greater than zero." << endl;
                        cerr << range.first->get_fileline() << "       : "
                                "This size expression violates the rule: "
                             << *range.first << endl;
                        dimension_ok = false;
                        des->errors += 1;
                  }
            } else {
                  texpr = elab_and_eval(des, scope, range.second, -1, true);
                  if (! eval_as_long(index_r, texpr)) {
                        cerr << range.second->get_fileline() << ": error: "
                                "Dimensions must be constant." << endl;
                        cerr << range.second->get_fileline() << "       : "
                                "This LSB expression violates the rule: "
                             << *range.second << endl;
                        dimension_ok = false;
                        des->errors += 1;
                  }
                  delete texpr;
            }
      }

        /* Error recovery */
      if (!dimension_ok) {
            index_l = 0;
            index_r = 0;
      }

      return dimension_ok;
}
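
/*
 * Illustrative examples (not from the original source): the range
 * [7:0] evaluates to index_l=7, index_r=0. A SystemVerilog size
 * dimension [8] (range.second == 0) is rewritten as [7:0]. A size of
 * zero or less is reported as an error, and error recovery collapses
 * both indices to 0.
 */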

bool evaluate_ranges(Design*des, NetScope*scope, const LineInfo*li,
                     vector<netrange_t>&llist,
                     const list<pform_range_t>&rlist)
{
      bool dimensions_ok = true;

      for (list<pform_range_t>::const_iterator cur = rlist.begin()
                 ; cur != rlist.end() ; ++cur) {
            long index_l, index_r;
            dimensions_ok &= evaluate_range(des, scope, li, *cur, index_l, index_r);
            llist.push_back(netrange_t(index_l, index_r));
      }

      return dimensions_ok;
}

void eval_expr(NetExpr*&expr, int context_width)
{
      assert(expr);
      if (dynamic_cast<NetECReal*>(expr)) return;

      NetExpr*tmp = expr->eval_tree();
      if (tmp != 0) {
            tmp->set_line(*expr);
            delete expr;
            expr = tmp;
      }

      if (context_width <= 0) return;

      NetEConst *ce = dynamic_cast<NetEConst*>(expr);
      if (ce == 0) return;

        // The expression is a constant, so resize it if needed.
      if (ce->expr_width() < (unsigned)context_width) {
            expr = pad_to_width(expr, context_width, *expr);
      } else if (ce->expr_width() > (unsigned)context_width) {
            verinum value(ce->value(), context_width);
            ce = new NetEConst(value);
            ce->set_line(*expr);
            delete expr;
            expr = ce;
      }
}

bool eval_as_long(long&value, const NetExpr*expr)
{
      if (const NetEConst*tmp = dynamic_cast<const NetEConst*>(expr)) {
            value = tmp->value().as_long();
            return true;
      }

      if (const NetECReal*rtmp = dynamic_cast<const NetECReal*>(expr)) {
            value = rtmp->value().as_long();
            return true;
      }

      return false;
}

bool eval_as_double(double&value, NetExpr*expr)
{
      if (NetEConst*tmp = dynamic_cast<NetEConst*>(expr)) {
            value = tmp->value().as_double();
            return true;
      }

      if (NetECReal*rtmp = dynamic_cast<NetECReal*>(expr)) {
            value = rtmp->value().as_double();
            return true;
      }

      return false;
}

/*
 * At the parser level, a name component is a name with a collection
 * of expressions. For example, foo[N] is the name "foo" and the index
 * expression "N". This function takes as input the name component and
 * returns the path component name. It will evaluate the index
 * expression if it is present.
 */
hname_t eval_path_component(Design*des, NetScope*scope,
                            const name_component_t&comp,
                            bool&error_flag)
{
        // No index expression, so the path component is an undecorated
        // name, for example "foo".
      if (comp.index.empty())
            return hname_t(comp.name);

      vector<int> index_values;

      for (list<index_component_t>::const_iterator cur = comp.index.begin()
                 ; cur != comp.index.end() ; ++cur) {
            const index_component_t&index = *cur;

            if (index.sel != index_component_t::SEL_BIT) {
                  cerr << index.msb->get_fileline() << ": error: "
                       << "Part select is not valid for this kind of object." << endl;
                  des->errors += 1;
                  return hname_t(comp.name, 0);
            }

              // The parser will ensure that path components have only
              // bit select index expressions. For example, "foo[n]" is OK,
              // but "foo[n:m]" is not.
            assert(index.sel == index_component_t::SEL_BIT);

              // Evaluate the bit select to get a number.
            NetExpr*tmp = elab_and_eval(des, scope, index.msb, -1);
            ivl_assert(*index.msb, tmp);

            if (NetEConst*ctmp = dynamic_cast<NetEConst*>(tmp)) {
                  index_values.push_back(ctmp->value().as_long());
                  delete ctmp;
                  continue;
            }
#if 1
              // Darn, the expression doesn't evaluate to a constant. That's
              // an error to be reported. And make up a fake index value to
              // return to the caller.
            cerr << index.msb->get_fileline() << ": error: "
                 << "Scope index expression is not constant: "
                 << *index.msb << endl;
            des->errors += 1;
#endif
            error_flag = true;

            delete tmp;
      }

      return hname_t(comp.name, index_values);
}

std::list<hname_t> eval_scope_path(Design*des, NetScope*scope,
                                   const pform_name_t&path)
{
      bool path_error_flag = false;
      list<hname_t> res;

      typedef pform_name_t::const_iterator pform_path_it;

      for (pform_path_it cur = path.begin() ; cur != path.end(); ++ cur ) {
            const name_component_t&comp = *cur;
            res.push_back( eval_path_component(des,scope,comp,path_error_flag) );
      }
#if 0
      if (path_error_flag) {
            cerr << "XXXXX: Errors evaluating path " << path << endl;
      }
#endif
      return res;
}

/*
 * Human readable version of op. Used in elaboration error messages.
 */
const char *human_readable_op(const char op, bool unary)
{
      const char *type;
      switch (op) {
          case '~': type = "~";  break;  // Bitwise negation

          case '+': type = "+";  break;
          case '-': type = "-";  break;
          case '*': type = "*";  break;
          case '/': type = "/";  break;
          case '%': type = "%";  break;

          case '<': type = "<";  break;
          case '>': type = ">";  break;
          case 'L': type = "<="; break;
          case 'G': type = ">="; break;

          case '^': type = "^";  break;  // XOR
          case 'X': type = "~^"; break;  // XNOR
          case '&': type = "&";  break;  // Bitwise AND
          case 'A': type = "~&"; break;  // NAND (~&)
          case '|': type = "|";  break;  // Bitwise OR
          case 'O': type = "~|"; break;  // NOR

          case '!': type = "!"; break;    // Logical NOT
          case 'a': type = "&&"; break;   // Logical AND
          case 'o': type = "||"; break;   // Logical OR
          case 'q': type = "->"; break;   // Logical implication
          case 'Q': type = "<->"; break;  // Logical equivalence

          case 'e': type = "==";  break;
          case 'n': type = "!=";  break;
          case 'E': type = "==="; break;  // Case equality
          case 'N':
            if (unary) type = "~|";       // NOR
            else type = "!==";            // Case inequality
            break;
          case 'w': type = "==?"; break;  // Wild equality
          case 'W': type = "!=?"; break;  // Wild inequality

          case 'l': type = "<<(<)"; break;  // Left shifts
          case 'r': type = ">>";    break;  // Logical right shift
          case 'R': type = ">>>";   break;  // Arithmetic right shift

          case 'p': type = "**"; break;  // Power

          case 'i':
          case 'I': type = "++"; break;  /* increment */
          case 'd':
          case 'D': type = "--"; break;  /* decrement */

          default:
            type = "???";
            assert(0);
      }
      return type;
}

const_bool const_logical(const NetExpr*expr)
{
      switch (expr->expr_type()) {
          case IVL_VT_REAL: {
            const NetECReal*val = dynamic_cast<const NetECReal*> (expr);
            if (val == 0) return C_NON;
            if (val->value().as_double() == 0.0) return C_0;
            else return C_1;
          }

          case IVL_VT_BOOL:
          case IVL_VT_LOGIC: {
            const NetEConst*val = dynamic_cast<const NetEConst*> (expr);
            if (val == 0) return C_NON;
            verinum cval = val->value();
            const_bool res = C_0;
            for (unsigned idx = 0; idx < cval.len(); idx += 1) {
                  switch (cval.get(idx)) {
                      case verinum::V1:
                        return C_1;
                        break;

                      case verinum::V0:
                        break;

                      default:
                        if (res == C_0) res = C_X;
                        break;
                  }
            }
            return res;
          }

          default:
            break;
      }

      return C_NON;
}

uint64_t get_scaled_time_from_real(Design*des, NetScope*scope, NetECReal*val)
{
      verireal fn = val->value();

      int shift = scope->time_unit() - scope->time_precision();
      assert(shift >= 0);
      int64_t delay = fn.as_long64(shift);

      shift = scope->time_precision() - des->get_precision();
      assert(shift >= 0);
      for (int lp = 0; lp < shift; lp += 1) delay *= 10;

      return delay;
}
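
/*
 * Worked example (illustrative only): with `timescale 1ns/1ps and a
 * design precision of 1ps, the first shift is -9 - (-12) = 3, so a
 * real delay of 1.5 becomes 1500 scope-precision units; the loop then
 * scales by any remaining power of ten between the scope precision
 * and the design precision (here, none).
 */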

/*
 * This function looks at the NetNet signal to see if there are any
 * NetPartSelect::PV nodes driving this signal. If so, see if they can
 * be collapsed into a single concatenation.
 */
void collapse_partselect_pv_to_concat(Design*des, NetNet*sig)
{
      NetScope*scope = sig->scope();
      vector<NetPartSelect*> ps_map (sig->vector_width());

      Nexus*nex = sig->pin(0).nexus();

      for (Link*cur = nex->first_nlink(); cur ; cur = cur->next_nlink()) {
            NetPins*obj;
            unsigned obj_pin;
            cur->cur_link(obj, obj_pin);

              // Look for NetPartSelect devices, where this signal is
              // connected to pin 1 of a NetPartSelect::PV.
            NetPartSelect*ps_obj = dynamic_cast<NetPartSelect*> (obj);
            if (ps_obj == 0)
                  continue;
            if (ps_obj->dir() != NetPartSelect::PV)
                  continue;
            if (obj_pin != 1)
                  continue;

              // Don't support overrun selects here.
            if (ps_obj->base()+ps_obj->width() > ps_map.size())
                  continue;

            ivl_assert(*ps_obj, ps_obj->base() < ps_map.size());
            ps_map[ps_obj->base()] = ps_obj;
      }

        // Check the collected NetPartSelect::PV objects to see if
        // they cover the vector.
      unsigned idx = 0;
      unsigned device_count = 0;
      while (idx < ps_map.size()) {
            NetPartSelect*ps_obj = ps_map[idx];
            if (ps_obj == 0)
                  return;

            idx += ps_obj->width();
            device_count += 1;
      }

      ivl_assert(*sig, idx == ps_map.size());

        /* The vlog95 and possibly other code generators do not want
         * to have a group of part selects turned into a transparent
         * concatenation. */
      if (disable_concatz_generation) {
// HERE: If the part selects have matching strengths then we can use
//       a normal concat with a buf-Z after if the strengths are not
//       both strong. We would ideally delete any buf-Z driving the
//       concat, but that is not required for the vlog95 generator.
            return;
      }

        // Ah HAH! The NetPartSelect::PV objects exactly cover the
        // target signal. We can replace all of them with a single
        // concatenation.

      if (debug_elaborate) {
            cerr << sig->get_fileline() << ": debug: "
                 << "Collapse " << device_count
                 << " NetPartSelect::PV devices into a concatenation." << endl;
      }

      NetConcat*cat = new NetConcat(scope, scope->local_symbol(),
                                    ps_map.size(), device_count,
                                    true);
      des->add_node(cat);
      cat->set_line(*sig);

      connect(cat->pin(0), sig->pin(0));

      idx = 0;
      unsigned concat_position = 1;
      while (idx < ps_map.size()) {
            assert(ps_map[idx]);
            NetPartSelect*ps_obj = ps_map[idx];
            connect(cat->pin(concat_position), ps_obj->pin(0));
            concat_position += 1;
            idx += ps_obj->width();
            delete ps_obj;
      }
}

/*
 * Evaluate the prefix indices. All but the final index in a chain of
 * indices must be single values that evaluate to constants at compile
 * time. For example:
 *    [x]          - OK
 *    [1][2][x]    - OK
 *    [1][x:y]     - OK
 *    [2:0][x]     - BAD
 *    [y][x]       - BAD
 * Leave the last index for special handling.
 */
bool evaluate_index_prefix(Design*des, NetScope*scope,
			   list<long>&prefix_indices,
			   const list<index_component_t>&indices)
{
      list<index_component_t>::const_iterator icur = indices.begin();
      for (size_t idx = 0 ; (idx+1) < indices.size() ; idx += 1, ++icur) {
	    assert(icur != indices.end());
	    if (icur->sel != index_component_t::SEL_BIT) {
		  cerr << icur->msb->get_fileline() << ": error: "
			"All but the final index in a chain of indices must be "
			"a single value, not a range." << endl;
		  des->errors += 1;
		  return false;
	    }
	    NetExpr*texpr = elab_and_eval(des, scope, icur->msb, -1, true);

	    long tmp;
	    if (texpr == 0 || !eval_as_long(tmp, texpr)) {
		  cerr << icur->msb->get_fileline() << ": error: "
			"Array index expressions must be constant here." << endl;
		  des->errors += 1;
		  return false;
	    }

	    prefix_indices.push_back(tmp);
	    delete texpr;
      }

      return true;
}

/*
 * Evaluate the indices. The chain of indices is applied to the packed
 * dimensions of a NetNet to generate a canonical expression that
 * replaces the original index expressions.
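 *
 * For example (a hypothetical declaration), given
 *
 *    wire [3:0][7:0] w;
 *
 * the reference w[a][b] collapses to roughly a*8 + b: each index is
 * normalized against its dimension's msb/lsb, then scaled by the
 * width of the slice it selects.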
 */
NetExpr*collapse_array_exprs(Design*des, NetScope*scope,
			     const LineInfo*loc, NetNet*net,
			     const list<index_component_t>&indices)
{
	// First elaborate all the expressions as far as possible.
      list<NetExpr*> exprs;
      list<long> exprs_const;
      indices_flags flags;
      indices_to_expressions(des, scope, loc, indices,
                             net->packed_dimensions(),
                             false, flags, exprs, exprs_const);
      ivl_assert(*loc, exprs.size() == net->packed_dimensions());

	// Special case: there is only 1 packed dimension, so the
	// single expression should already be naturally canonical.
      if (net->slice_width(1) == 1) {
	    return *exprs.begin();
      }

      const std::vector<netrange_t>&pdims = net->packed_dims();
      std::vector<netrange_t>::const_iterator pcur = pdims.begin();

      list<NetExpr*>::iterator ecur = exprs.begin();
      NetExpr* base = 0;
      for (size_t idx = 0 ; idx < net->packed_dimensions() ; idx += 1, ++pcur, ++ecur) {
	    unsigned cur_slice_width = net->slice_width(idx+1);
	    long lsb = pcur->get_lsb();
	    long msb = pcur->get_msb();
	      // This normalizes the expression of this index based on
	      // the msb/lsb values.
	    NetExpr*tmp = normalize_variable_base(*ecur, msb, lsb,
						  cur_slice_width, msb > lsb);

	      // If this slice is wider than one bit, scale the index
	      // by the slice width.
	    if (net->slice_width(idx+1) != 1) {
		  unsigned min_wid = tmp->expr_width();
		  if (num_bits(cur_slice_width) >= min_wid) {
			min_wid = num_bits(cur_slice_width)+1;
			tmp = pad_to_width(tmp, min_wid, *loc);
		  }

		  tmp = make_mult_expr(tmp, cur_slice_width);
	    }

	      // Now add it to the position we've accumulated so far.
	    if (base) {
		  base = make_add_expr(loc, base, tmp);
	    } else {
		  base = tmp;
	    }
      }

      return base;
}

/*
 * Given a list of indices, treat them as packed indices and convert
 * them to an expression that normalizes the list to a single index
 * expression over a canonical equivalent 1-dimensional array.
 */
NetExpr*collapse_array_indices(Design*des, NetScope*scope, NetNet*net,
			       const list<index_component_t>&indices)
{
      list<long>prefix_indices;
      bool rc = evaluate_index_prefix(des, scope, prefix_indices, indices);
      assert(rc);

      const index_component_t&back_index = indices.back();
      assert(back_index.sel == index_component_t::SEL_BIT);
      assert(back_index.msb && !back_index.lsb);

      NetExpr*base = elab_and_eval(des, scope, back_index.msb, -1, true);

      NetExpr*res = normalize_variable_bit_base(prefix_indices, base, net);

      eval_expr(res, -1);
      return res;
}

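/*
 * Connect each element of an unpacked array lval to the matching
 * element of the rval through a BUFZ device. For example (a
 * hypothetical assignment), given
 *
 *    wire [7:0] foo [0:3];
 *    assign foo = bar;
 *
 * four NetBUFZ devices are created, one per array element, each
 * driving one pin of foo from the matching pin of bar.
 */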
void assign_unpacked_with_bufz(Design*des, NetScope*scope,
			       const LineInfo*loc,
			       NetNet*lval, NetNet*rval)
{
      ivl_assert(*loc, lval->pin_count()==rval->pin_count());

      for (unsigned idx = 0 ; idx < lval->pin_count() ; idx += 1) {
	    NetBUFZ*driver = new NetBUFZ(scope, scope->local_symbol(),
					 lval->vector_width(), false);
	    driver->set_line(*loc);
	    des->add_node(driver);

	    connect(lval->pin(idx), driver->pin(0));
	    connect(driver->pin(1), rval->pin(idx));
      }
}

/*
 * Synthesis sometimes needs to unpack an assignment to a part
 * select. That looks like this:
 *
 *    foo[N] <= <expr> ;
 *
 * The NetAssignBase::synth_async() method will turn that into a
 * netlist like this:
 *
 *   NetPartSelect(PV) --> base()==<N>
 *    (0)      (1)
 *     |        |
 *     v        v
 *   <expr>    foo
 *
 * This search will return a pointer to the NetPartSelect(PV) object,
 * but only if it matches this pattern.
 */
NetPartSelect* detect_partselect_lval(Link&pin)
{
      NetPartSelect*found_ps = 0;

      Nexus*nex = pin.nexus();
      for (Link*cur = nex->first_nlink() ; cur ; cur = cur->next_nlink()) {
	    NetPins*obj;
	    unsigned obj_pin;
	    cur->cur_link(obj, obj_pin);

	      // Skip NexusSet objects.
	    if (obj == 0)
		  continue;

	      // NetNet pins have no effect on this search.
	    if (dynamic_cast<NetNet*> (obj))
		  continue;

	    if (NetPartSelect*ps = dynamic_cast<NetPartSelect*> (obj)) {

		    // If this is the input side of a NetPartSelect, skip.
		  if (ps->pin(obj_pin).get_dir()==Link::INPUT)
			continue;

		    // Oops, driven by the wrong side of a
		    // NetPartSelect, so this is not going to work out.
		  if (ps->dir()==NetPartSelect::VP)
			return 0;

		    // So now we know this is a NetPartSelect::PV. It
		    // is a candidate for our part-select assign. If
		    // we already have a candidate, then give up.
		  if (found_ps)
			return 0;

		    // This is our candidate. Carry on.
		  found_ps = ps;
		  continue;

	    }

	      // If this is a driver to the Nexus that is not a
	      // NetPartSelect device, then this cannot be a part
	      // selected lval net, so quit now.
	    if (obj->pin(obj_pin).get_dir() == Link::OUTPUT)
		  return 0;

      }

      return found_ps;
}

const netclass_t* find_class_containing_scope(const LineInfo&loc, const NetScope*scope)
{
      while (scope && scope->type() != NetScope::CLASS)
	    scope = scope->parent();

      if (scope == 0)
	    return 0;

      const netclass_t*found_in = scope->class_def();
      ivl_assert(loc, found_in);
      return found_in;
}

/*
 * Find the method scope that contains this scope. Look for the scope
 * whose PARENT is the scope for a class; that scope is going to be a
 * method of the class.
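 *
 * For example (a hypothetical scope chain): starting from a begin
 * block inside task t of class C, the loop walks upward until the
 * parent is the class scope, and returns the scope of C::t.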
 */
NetScope* find_method_containing_scope(const LineInfo&, NetScope*scope)
{
      NetScope*up = scope->parent();

      while (up && up->type() != NetScope::CLASS) {
	    scope = up;
	    up = up->parent();
      }

      if (up == 0) return 0;

	// Should I check if this scope is a TASK or FUNC?

      return scope;
}

/*
 * Print a warning if we find a mixture of default and explicit
 * timescale-based delays in the design, since this is likely an
 * error.
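 *
 * For example (hypothetical modules): if one module declares
 * `timescale 1ns/1ns and uses #5 while another module with no
 * `timescale also uses #5, the two delays almost certainly do not
 * mean the same thing, so this warning is printed (once).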
 */
void check_for_inconsistent_delays(NetScope*scope)
{
      static bool used_implicit_timescale = false;
      static bool used_explicit_timescale = false;
      static bool display_ts_dly_warning = true;

      if (scope->time_from_timescale())
	    used_explicit_timescale = true;
      else
	    used_implicit_timescale = true;

      if (display_ts_dly_warning &&
	  used_explicit_timescale &&
	  used_implicit_timescale) {
	    if (gn_system_verilog()) {
		  cerr << "warning: Found both default and explicit "
			  "timescale based delays. Use" << endl;
		  cerr << "       : -Wtimescale to find the design "
			  "element(s) with no explicit" << endl;
		  cerr << "       : timescale." << endl;
	    } else {
		  cerr << "warning: Found both default and "
			  "`timescale based delays. Use" << endl;
		  cerr << "       : -Wtimescale to find the "
			  "module(s) with no `timescale." << endl;
	    }
	    display_ts_dly_warning = false;
      }
}