1 /*++
2 Copyright (c) 2006 Microsoft Corporation
3
4 Module Name:
5
6 bit_blaster_tpl_def.h
7
8 Abstract:
9
10 Template for bit-blaster operations
11
12 Author:
13
14 Leonardo de Moura (leonardo) 2011-05-02.
15
16 Revision History:
17
18 --*/
19 #include "util/rational.h"
20 #include "util/common_msgs.h"
21 #include "ast/rewriter/bit_blaster/bit_blaster_tpl.h"
22 #include "ast/ast_pp.h"
23 #include "ast/rewriter/rewriter_types.h"
24
25
26 template<typename Cfg>
checkpoint()27 void bit_blaster_tpl<Cfg>::checkpoint() {
28 if (memory::get_allocation_size() > m_max_memory)
29 throw rewriter_exception(Z3_MAX_MEMORY_MSG);
30 if (!m().inc())
31 throw rewriter_exception(m().limit().get_cancel_msg());
32 }
33
34 /**
35 \brief Return true if all bits are true or false.
36 */
37 template<typename Cfg>
is_numeral(unsigned sz,expr * const * bits)38 bool bit_blaster_tpl<Cfg>::is_numeral(unsigned sz, expr * const * bits) const {
39 for (unsigned i = 0; i < sz; i++)
40 if (!is_bool_const(bits[i]))
41 return false;
42 return true;
43 }
44
45 /**
46 \brief Return true if all bits are true or false, and store the number represent by
47 these bits in r.
48 */
49 template<typename Cfg>
is_numeral(unsigned sz,expr * const * bits,numeral & r)50 bool bit_blaster_tpl<Cfg>::is_numeral(unsigned sz, expr * const * bits, numeral & r) const {
51 r.reset();
52 for (unsigned i = 0; i < sz; i++) {
53 if (m().is_true(bits[i]))
54 r += power(i);
55 else if (!m().is_false(bits[i]))
56 return false;
57 }
58 return true;
59 }
60
61 /**
62 \brief Return true if all bits are true.
63 */
64 template<typename Cfg>
is_minus_one(unsigned sz,expr * const * bits)65 bool bit_blaster_tpl<Cfg>::is_minus_one(unsigned sz, expr * const * bits) const {
66 for (unsigned i = 0; i < sz; i++)
67 if (!m().is_true(bits[i]))
68 return false;
69 return true;
70 }
71
72 // hack to avoid GCC compilation error.
_num2bits(ast_manager & m,rational const & v,unsigned sz,expr_ref_vector & out_bits)73 static void _num2bits(ast_manager & m, rational const & v, unsigned sz, expr_ref_vector & out_bits) {
74 SASSERT(v.is_nonneg());
75 rational aux = v;
76 rational two(2), base32(1ull << 32ull, rational::ui64());
77 for (unsigned i = 0; i < sz; i++) {
78 if (i + 32 < sz) {
79 unsigned u = (aux % base32).get_unsigned();
80 for (unsigned j = 0; j < 32; ++j) {
81 if (0 != (u & (1 << j))) {
82 out_bits.push_back(m.mk_true());
83 }
84 else {
85 out_bits.push_back(m.mk_false());
86 }
87 }
88 aux = div(aux, base32);
89 i += 31;
90 continue;
91 }
92 else if ((aux % two).is_zero())
93 out_bits.push_back(m.mk_false());
94 else
95 out_bits.push_back(m.mk_true());
96 aux = div(aux, two);
97 }
98 }
99
100
/**
   \brief Convert the non-negative numeral v into sz bits (LSB first) and
   append them to out_bits. Delegates to the file-local helper _num2bits.
*/
template<typename Cfg>
void bit_blaster_tpl<Cfg>::num2bits(numeral const & v, unsigned sz, expr_ref_vector & out_bits) const {
    _num2bits(m(), v, sz, out_bits);
}
105
/**
   \brief Half-adder: out := a xor b, cout := a and b.
*/
template<typename Cfg>
void bit_blaster_tpl<Cfg>::mk_half_adder(expr * a, expr * b, expr_ref & out, expr_ref & cout) {
    mk_xor(a, b, out);
    mk_and(a, b, cout);
}
111
/**
   \brief Full-adder: out := a xor b xor cin, cout := carry of a + b + cin.
*/
template<typename Cfg>
void bit_blaster_tpl<Cfg>::mk_full_adder(expr * a, expr * b, expr * cin, expr_ref & out, expr_ref & cout) {
    mk_xor3(a, b, cin, out);
    mk_carry(a, b, cin, cout);
}
117
118 template<typename Cfg>
mk_neg(unsigned sz,expr * const * a_bits,expr_ref_vector & out_bits)119 void bit_blaster_tpl<Cfg>::mk_neg(unsigned sz, expr * const * a_bits, expr_ref_vector & out_bits) {
120 SASSERT(sz > 0);
121 expr_ref cin(m()), cout(m()), out(m());
122 cin = m().mk_true();
123 for (unsigned idx = 0; idx < sz; idx++) {
124 expr_ref not_a(m());
125 mk_not(a_bits[idx], not_a);
126 if (idx < sz - 1)
127 mk_half_adder(not_a, cin, out, cout);
128 else
129 mk_xor(not_a, cin, out);
130 out_bits.push_back(out);
131 cin = cout;
132 }
133 }
134
135 template<typename Cfg>
mk_adder(unsigned sz,expr * const * a_bits,expr * const * b_bits,expr_ref_vector & out_bits)136 void bit_blaster_tpl<Cfg>::mk_adder(unsigned sz, expr * const * a_bits, expr * const * b_bits, expr_ref_vector & out_bits) {
137 SASSERT(sz > 0);
138 expr_ref cin(m()), cout(m()), out(m());
139 cin = m().mk_false();
140 for (unsigned idx = 0; idx < sz; idx++) {
141 if (idx < sz - 1)
142 mk_full_adder(a_bits[idx], b_bits[idx], cin, out, cout);
143 else
144 mk_xor3(a_bits[idx], b_bits[idx], cin, out);
145 out_bits.push_back(out);
146 cin = cout;
147 }
148
149 #if 0
150 static unsigned counter = 0;
151 counter++;
152 verbose_stream() << "MK_ADDER: " << counter << std::endl;
153 #endif
154 }
155
156 template<typename Cfg>
mk_subtracter(unsigned sz,expr * const * a_bits,expr * const * b_bits,expr_ref_vector & out_bits,expr_ref & cout)157 void bit_blaster_tpl<Cfg>::mk_subtracter(unsigned sz, expr * const * a_bits, expr * const * b_bits, expr_ref_vector & out_bits, expr_ref & cout) {
158 SASSERT(sz > 0);
159 expr_ref cin(m()), out(m());
160 cin = m().mk_true();
161 for (unsigned j = 0; j < sz; j++) {
162 expr_ref not_b(m());
163 mk_not(b_bits[j], not_b);
164 mk_full_adder(a_bits[j], not_b, cin, out, cout);
165 out_bits.push_back(out);
166 cin = cout;
167 }
168 #if 0
169 for (unsigned j = 0; j < sz; ++j) {
170 std::cout << j << "\n";
171 std::cout << mk_pp(a_bits[j], m()) << "\n";
172 std::cout << mk_pp(b_bits[j], m()) << "\n";
173 std::cout << mk_pp(out_bits.get(j), m()) << "\n";
174 }
175 #endif
176 SASSERT(out_bits.size() == sz);
177 }
178
/**
   \brief out_bits := a_bits * b_bits (modulo 2^sz), LSB first.

   Constant and minus-one operands are folded before the generic
   carry-save circuit (see the diagram below) is generated.
*/
template<typename Cfg>
void bit_blaster_tpl<Cfg>::mk_multiplier(unsigned sz, expr * const * a_bits, expr * const * b_bits, expr_ref_vector & out_bits) {
    SASSERT(sz > 0);
    numeral n_a, n_b;
    out_bits.reset();
    // Normalize: if a is constant, swap so the constant operand is b.
    if (is_numeral(sz, a_bits, n_b))
        std::swap(a_bits, b_bits);
    // a * -1 == -a
    if (is_minus_one(sz, b_bits)) {
        mk_neg(sz, a_bits, out_bits);
        SASSERT(sz == out_bits.size());
        return;
    }
    // Both operands constant: multiply the numerals directly.
    if (is_numeral(sz, a_bits, n_a)) {
        n_a *= n_b;
        num2bits(n_a, sz, out_bits);
        SASSERT(sz == out_bits.size());
        return;
    }

    if (mk_const_case_multiplier(sz, a_bits, b_bits, out_bits)) {
        SASSERT(sz == out_bits.size());
        return;
    }
    out_bits.reset();
#if 0
    static unsigned counter = 0;
    counter++;
    verbose_stream() << "MK_MULTIPLIER: " << counter << std::endl;
#endif

    expr_ref_vector cins(m()), couts(m());
    expr_ref out(m()), cout(m());

    // out[0] = a[0] & b[0]
    mk_and(a_bits[0], b_bits[0], out);
    out_bits.push_back(out);

    /*
      out = a*b is encoded using the following circuit.

      a[0]&b[0]    a[0]&b[1]      a[0]&b[2]      a[0]&b[3]  ...
          |            |              |              |
          |  a[1]&b[0] - HA  a[1]&b[1] - HA  a[1]&b[2] - HA
          |            |   \          |   \          |   \
          |            |    ----------|----  --------|--- ...
          |            |              |    \         |
          |            |    a[2]&b[0] - FA  a[2]&b[1] - FA
          |            |              |   \          |   \
          |            |              |    ----------|--- ...
          |            |              |              |
          |            |              |    a[3]&b[0] - FA
          |            |              |              |   \
          |            |              |              |    -- ....
         ...          ...            ...            ...
        out[0]       out[1]         out[2]         out[3]

        HA denotes a half-adder.
        FA denotes a full-adder.
    */

    // Column i sums the partial products a[j] & b[i-j] together with the
    // carries (cins) produced by column i-1.
    for (unsigned i = 1; i < sz; i++) {
        checkpoint();
        couts.reset();
        expr_ref i1(m()), i2(m());
        mk_and(a_bits[0], b_bits[i], i1);
        mk_and(a_bits[1], b_bits[i - 1], i2);
        if (i < sz - 1) {
            mk_half_adder(i1, i2, out, cout);
            couts.push_back(cout);
            for (unsigned j = 2; j <= i; j++) {
                expr_ref prev_out(m());
                prev_out = out;
                expr_ref i3(m());
                mk_and(a_bits[j], b_bits[i - j], i3);
                mk_full_adder(i3, prev_out, cins.get(j - 2), out, cout);
                couts.push_back(cout);
            }
            out_bits.push_back(out);
            // carries of this column feed the adders of the next column
            cins.swap(couts);
        }
        else {
            // last step --> I don't need to generate/store couts.
            mk_xor(i1, i2, out);
            for (unsigned j = 2; j <= i; j++) {
                expr_ref i3(m());
                mk_and(a_bits[j], b_bits[i - j], i3);
                mk_xor3(i3, out, cins.get(j - 2), out);
            }
            out_bits.push_back(out);
        }
    }

}
271
272
/**
   \brief result := true iff a_bits * b_bits does not overflow in unsigned
   sz-bit arithmetic (i.e. the product fits in sz bits).
*/
template<typename Cfg>
void bit_blaster_tpl<Cfg>::mk_umul_no_overflow(unsigned sz, expr* const* a_bits, expr* const* b_bits, expr_ref& result) {
    SASSERT(sz > 0);
    expr_ref zero(m());
    zero = m().mk_false();
    // Zero-extend both operands by one bit.
    ptr_buffer<expr, 128> ext_a_bits;
    ptr_buffer<expr, 128> ext_b_bits;
    ext_a_bits.append(sz, a_bits);
    ext_b_bits.append(sz, b_bits);
    ext_a_bits.push_back(zero);
    ext_b_bits.push_back(zero);
    SASSERT(ext_a_bits.size() == 1 + sz);
    SASSERT(ext_b_bits.size() == 1 + sz);
    expr_ref_vector mult_cout(m());
    //
    // mk_multiplier will simplify output taking into account that
    // the most significant bits of ext_a_bits and ext_b_bits are zero.
    //
    mk_multiplier(1 + sz, ext_a_bits.data(), ext_b_bits.data(), mult_cout);
    expr_ref overflow1(m()), overflow2(m()), overflow(m());
    //
    // ignore bits [0, sz-1] of mult_cout
    //
    // overflow1: bit sz of the (sz+1)-bit product is set.
    overflow1 = mult_cout[sz].get();

    // overflow2: ovf accumulates a[sz-1] or ... or a[sz-i]; the product
    // overflows whenever some b[i] is set together with a bit of a at a
    // position >= sz - i.
    expr_ref ovf(m()), v(m()), tmp(m());
    ovf = m().mk_false();
    v = m().mk_false();
    for (unsigned i = 1; i < sz; ++i) {
        mk_or(ovf, a_bits[sz-i], ovf);
        mk_and(ovf, b_bits[i], tmp);
        mk_or(tmp, v, v);
    }

    overflow2 = v;
    mk_or(overflow1, overflow2, overflow);
    mk_not(overflow, result);
}
311
/**
   \brief Common circuit for the signed multiplication overflow/underflow
   checks. result := true iff a_bits * b_bits does not overflow
   (is_overflow) resp. does not underflow (!is_overflow) in signed
   sz-bit arithmetic.

   Both operands are sign-extended by one bit and multiplied; the check
   combines a disagreement of the two most significant product bits with
   the coarse condition computed below, gated on the operand sign bits
   (overflow requires equal signs, underflow different signs).
*/
template<typename Cfg>
void bit_blaster_tpl<Cfg>::mk_smul_no_overflow_core(unsigned sz, expr * const * a_bits, expr * const * b_bits, bool is_overflow, expr_ref & result) {
    SASSERT(sz > 0);
    expr_ref zero(m());
    zero = m().mk_false();
    // Sign-extend both operands by one bit.
    ptr_buffer<expr,128> ext_a_bits;
    ptr_buffer<expr,128> ext_b_bits;
    ext_a_bits.append(sz, a_bits);
    ext_b_bits.append(sz, b_bits);
    ext_a_bits.push_back(a_bits[sz-1]);
    ext_b_bits.push_back(b_bits[sz-1]);
    SASSERT(ext_a_bits.size() == 1 + sz);
    SASSERT(ext_b_bits.size() == 1 + sz);
    expr_ref_vector mult_cout(m());
    mk_multiplier(1 + sz, ext_a_bits.data(), ext_b_bits.data(), mult_cout);
    expr_ref overflow1(m()), overflow2(m()), overflow(m());

    //
    // The two most significant bits are different.
    //
    mk_xor(mult_cout[sz].get(), mult_cout[sz-1].get(), overflow1);

    //
    // let
    //  a_i = a[sz-1] xor a[i]
    //  b_i = b[sz-1] xor b[i]
    //  a_acc_i = a_{sz-2} or ... or a_{sz-1-i}
    //  b = (a_acc_1 and b_1) or (a_acc_2 and b_2) or ... or (a_acc_{n-2} and b_{n-2})
    //
    expr_ref v(m()), tmp(m()), a(m()), b(m()), a_acc(m()), sign(m());
    a_acc = m().mk_false();
    v = m().mk_false();
    for (unsigned i = 1; i + 1 < sz; ++i) {
        mk_xor(b_bits[sz-1], b_bits[i], b);
        mk_xor(a_bits[sz-1], a_bits[sz-1-i], a);
        mk_or(a, a_acc, a_acc);
        mk_and(a_acc, b, tmp);
        mk_or(tmp, v, v);
    }

    overflow2 = v;
    mk_or(overflow1, overflow2, overflow);

    if (is_overflow) {
        // check for proper overflow
        // can only happen when the sign bits are the same.
        mk_iff(a_bits[sz-1], b_bits[sz-1], sign);
    }
    else {
        // check for proper underflow
        // can only happen when the sign bits are different.
        mk_xor(a_bits[sz-1], b_bits[sz-1], sign);
    }
    mk_and(sign, overflow, overflow);
    mk_not(overflow, result);
}
368
/**
   \brief result := true iff a_bits * b_bits does not overflow in signed
   sz-bit arithmetic. See mk_smul_no_overflow_core.
*/
template<typename Cfg>
void bit_blaster_tpl<Cfg>::mk_smul_no_overflow(unsigned sz, expr * const * a_bits, expr * const * b_bits, expr_ref & result) {
    mk_smul_no_overflow_core(sz, a_bits, b_bits, true, result);
}
373
/**
   \brief result := true iff a_bits * b_bits does not underflow in signed
   sz-bit arithmetic. See mk_smul_no_overflow_core.
*/
template<typename Cfg>
void bit_blaster_tpl<Cfg>::mk_smul_no_underflow(unsigned sz, expr * const * a_bits, expr * const * b_bits, expr_ref & result) {
    mk_smul_no_overflow_core(sz, a_bits, b_bits, false, result);
}
378
/**
   \brief Restoring (long) division: compute quotient q_bits and remainder
   r_bits for the unsigned sz-bit division of a_bits by b_bits.

   The bits of a are processed from the most significant one down. At each
   stage the candidate quotient bit is the carry-out of p - b, which is
   true iff p >= b (no borrow).
*/
template<typename Cfg>
void bit_blaster_tpl<Cfg>::mk_udiv_urem(unsigned sz, expr * const * a_bits, expr * const * b_bits, expr_ref_vector & q_bits, expr_ref_vector & r_bits) {
    SASSERT(sz > 0);

    // p is the residual of each stage of the division.
    expr_ref_vector & p = r_bits;

    // t is an auxiliary vector used to store the result of a subtraction
    expr_ref_vector t(m());

    // init p with the most significant bit of a
    p.push_back(a_bits[sz-1]);
    for (unsigned i = 1; i < sz; i++)
        p.push_back(m().mk_false());

    q_bits.resize(sz);

    for (unsigned i = 0; i < sz; i++) {
        checkpoint();
        // generate p - b; q (carry-out) is true iff p >= b
        expr_ref q(m());
        t.reset();
        mk_subtracter(sz, p.data(), b_bits, t, q);
        // quotient bits are produced MSB first
        q_bits.set(sz - i - 1, q);

        // update p
        if (i < sz - 1) {
            // keep the difference when q holds, shift left, and bring in
            // the next bit of a
            for (unsigned j = sz - 1; j > 0; j--) {
                expr_ref ie(m());
                mk_ite(q, t.get(j-1), p.get(j-1), ie);
                p.set(j, ie);
            }
            p.set(0, a_bits[sz - i - 2]);
        }
        else {
            // last step: p contains the remainder
            for (unsigned j = 0; j < sz; j++) {
                expr_ref ie(m());
                mk_ite(q, t.get(j), p.get(j), ie);
                p.set(j, ie);
            }
        }
    }
    DEBUG_CODE({
        for (unsigned i = 0; i < sz; i++) {
            SASSERT(q_bits.get(i) != 0);
        }});
    TRACE("bit_blaster",
          tout << "a: ";
          for (unsigned i = 0; i < sz; ++i) tout << mk_pp(a_bits[i], m()) << " ";
          tout << "\nb: ";
          for (unsigned i = 0; i < sz; ++i) tout << mk_pp(b_bits[i], m()) << " ";
          tout << "\nq: ";
          for (unsigned i = 0; i < sz; ++i) tout << mk_pp(q_bits[i].get(), m()) << " ";
          tout << "\nr: ";
          for (unsigned i = 0; i < sz; ++i) tout << mk_pp(r_bits[i].get(), m()) << " ";
          tout << "\n";
          );
}
438
/**
   \brief Unsigned division: run the combined divider, discard the remainder.
*/
template<typename Cfg>
void bit_blaster_tpl<Cfg>::mk_udiv(unsigned sz, expr * const * a_bits, expr * const * b_bits, expr_ref_vector & q_bits) {
    expr_ref_vector aux(m());
    mk_udiv_urem(sz, a_bits, b_bits, q_bits, aux);
}
444
/**
   \brief Unsigned remainder: run the combined divider, discard the quotient.
*/
template<typename Cfg>
void bit_blaster_tpl<Cfg>::mk_urem(unsigned sz, expr * const * a_bits, expr * const * b_bits, expr_ref_vector & r_bits) {
    expr_ref_vector aux(m());
    mk_udiv_urem(sz, a_bits, b_bits, aux, r_bits);
}
450
451 template<typename Cfg>
mk_multiplexer(expr * c,unsigned sz,expr * const * t_bits,expr * const * e_bits,expr_ref_vector & out_bits)452 void bit_blaster_tpl<Cfg>::mk_multiplexer(expr * c, unsigned sz, expr * const * t_bits, expr * const * e_bits, expr_ref_vector & out_bits) {
453 for (unsigned i = 0; i < sz; i++) {
454 expr_ref t(m());
455 mk_ite(c, t_bits[i], e_bits[i], t);
456 out_bits.push_back(t);
457 }
458 }
459
460 template<typename Cfg>
mk_abs(unsigned sz,expr * const * a_bits,expr_ref_vector & out_bits)461 void bit_blaster_tpl<Cfg>::mk_abs(unsigned sz, expr * const * a_bits, expr_ref_vector & out_bits) {
462 expr * a_msb = a_bits[sz - 1];
463 if (m().is_false(a_msb)) {
464 out_bits.append(sz, a_bits);
465 }
466 else if (m().is_true(a_msb)) {
467 mk_neg(sz, a_bits, out_bits);
468 }
469 else {
470 expr_ref_vector neg_a_bits(m());
471 mk_neg(sz, a_bits, neg_a_bits);
472 mk_multiplexer(a_msb, sz, neg_a_bits.data(), a_bits, out_bits);
473 }
474 }
475
476 #define SDIV 0
477 #define SREM 1
478 #define SMOD 2
479
/**
   \brief Signed division/remainder/modulus, selected by the template
   parameter k (SDIV, SREM or SMOD), built from unsigned dividers over
   the four sign combinations of a and b, combined with multiplexers
   over the sign bits.
*/
template<typename Cfg>
template<unsigned k>
void bit_blaster_tpl<Cfg>::mk_sdiv_srem_smod(unsigned sz, expr * const * a_bits, expr * const * b_bits, expr_ref_vector & out_bits) {
    // This definition is only good when the most significant bits are set.
    // Otherwise, it will create 4 copies of the expensive sdiv/srem/smod
    SASSERT(k == SDIV || k == SREM || k == SMOD);

    expr * a_msb = a_bits[sz - 1];
    expr * b_msb = b_bits[sz - 1];

    expr_ref_vector neg_a_bits(m());
    expr_ref_vector neg_b_bits(m());

    mk_neg(sz, a_bits, neg_a_bits);
    mk_neg(sz, b_bits, neg_b_bits);

    // Quotient/remainder for each sign combination:
    //   pp: a >= 0, b >= 0    pn: a >= 0, b < 0
    //   np: a < 0,  b >= 0    nn: a < 0,  b < 0
    // A divider is generated only when the sign bits do not rule the
    // combination out; otherwise placeholder zeros are used.
    expr_ref_vector pp_q(m()), pp_r(m()), pn_q(m()), pn_r(m()), np_q(m()), np_r(m()), nn_q(m()), nn_r(m());

    if (!m().is_true(a_msb) && !m().is_true(b_msb)) {
        mk_udiv_urem(sz, a_bits, b_bits, pp_q, pp_r);
    }
    else {
        pp_q.resize(sz, m().mk_false());
        pp_r.resize(sz, m().mk_false());
    }

    if (!m().is_false(a_msb) && !m().is_true(b_msb)) {
        mk_udiv_urem(sz, neg_a_bits.data(), b_bits, np_q, np_r);
    }
    else {
        np_q.resize(sz, m().mk_false());
        np_r.resize(sz, m().mk_false());
    }

    if (!m().is_true(a_msb) && !m().is_false(b_msb)) {
        mk_udiv_urem(sz, a_bits, neg_b_bits.data(), pn_q, pn_r);
    }
    else {
        pn_q.resize(sz, m().mk_false());
        pn_r.resize(sz, m().mk_false());
    }

    if (!m().is_false(a_msb) && !m().is_false(b_msb)) {
        mk_udiv_urem(sz, neg_a_bits.data(), neg_b_bits.data(), nn_q, nn_r);
    }
    else {
        nn_q.resize(sz, m().mk_false());
        nn_r.resize(sz, m().mk_false());
    }

    expr_ref_vector ite1(m()), ite2(m());
    if (k == SDIV) {
        // sdiv: negate the quotient when exactly one operand is negative.
        expr_ref_vector & pp_out = pp_q;
        expr_ref_vector np_out(m());
        expr_ref_vector pn_out(m());
        expr_ref_vector & nn_out = nn_q;

        if (!m().is_false(a_msb) && !m().is_true(b_msb))
            mk_neg(sz, np_q.data(), np_out);
        else
            np_out.resize(sz, m().mk_false());

        if (!m().is_true(a_msb) && !m().is_false(b_msb))
            mk_neg(sz, pn_q.data(), pn_out);
        else
            pn_out.resize(sz, m().mk_false());

// Select the result by the sign bit of b (inner) and of a (outer).
#define MK_MULTIPLEXER() \
        mk_multiplexer(b_msb, sz, nn_out.data(), np_out.data(), ite1); \
        mk_multiplexer(b_msb, sz, pn_out.data(), pp_out.data(), ite2); \
        mk_multiplexer(a_msb, sz, ite1.data(), ite2.data(), out_bits)

        MK_MULTIPLEXER();
    }
    else if (k == SREM) {
        // srem: the remainder follows the sign of the dividend a.
        expr_ref_vector & pp_out = pp_r;
        expr_ref_vector np_out(m());
        expr_ref_vector & pn_out = pn_r;
        expr_ref_vector nn_out(m());

        if (!m().is_false(a_msb) && !m().is_true(b_msb))
            mk_neg(sz, np_r.data(), np_out);
        else
            np_out.resize(sz, m().mk_false());

        if (!m().is_false(a_msb) && !m().is_false(b_msb))
            mk_neg(sz, nn_r.data(), nn_out);
        else
            nn_out.resize(sz, m().mk_false());
        MK_MULTIPLEXER();
    }
    else {
        SASSERT(k == SMOD);
        // smod: the result follows the sign of the divisor b.
        expr_ref_vector & pp_out = pp_r;
        expr_ref_vector np_out(m());
        expr_ref_vector pn_out(m());
        expr_ref_vector nn_out(m());

        if (!m().is_false(a_msb) && !m().is_true(b_msb)) {
            expr_ref cout(m());
            mk_subtracter(sz, b_bits, np_r.data(), np_out, cout);
        }
        else
            np_out.resize(sz, m().mk_false());

        if (!m().is_true(a_msb) && !m().is_false(b_msb))
            mk_adder(sz, b_bits, pn_r.data(), pn_out);
        else
            pn_out.resize(sz, m().mk_false());

        if (!m().is_false(a_msb) && !m().is_false(b_msb))
            mk_neg(sz, nn_r.data(), nn_out);
        else
            nn_out.resize(sz, m().mk_false());

        MK_MULTIPLEXER();
    }
}
598
/**
   \brief out_bits := a_bits sdiv b_bits (signed division).

   When the operand signs are syntactically known, the operation reduces
   to a single unsigned division (negating operands/result as needed);
   otherwise one unsigned division over the absolute values is generated
   and the result's sign is fixed with a multiplexer.
*/
template<typename Cfg>
void bit_blaster_tpl<Cfg>::mk_sdiv(unsigned sz, expr * const * a_bits, expr * const * b_bits, expr_ref_vector & out_bits) {
    expr * a_msb = a_bits[sz - 1];
    expr * b_msb = b_bits[sz - 1];
    if (m().is_false(a_msb) && m().is_false(b_msb)) {
        // a >= 0, b >= 0
        mk_udiv(sz, a_bits, b_bits, out_bits);
    }
    else if (m().is_false(a_msb) && m().is_true(b_msb)) {
        // a >= 0, b < 0: a sdiv b == -(a udiv -b)
        expr_ref_vector neg_b_bits(m());
        mk_neg(sz, b_bits, neg_b_bits);
        expr_ref_vector tmp(m());
        mk_udiv(sz, a_bits, neg_b_bits.data(), tmp);
        mk_neg(sz, tmp.data(), out_bits);
    }
    else if (m().is_true(a_msb) && m().is_false(b_msb)) {
        // a < 0, b >= 0: a sdiv b == -(-a udiv b)
        expr_ref_vector neg_a_bits(m());
        mk_neg(sz, a_bits, neg_a_bits);
        expr_ref_vector tmp(m());
        mk_udiv(sz, neg_a_bits.data(), b_bits, tmp);
        mk_neg(sz, tmp.data(), out_bits);
    }
    else if (m().is_true(a_msb) && m().is_true(b_msb)) {
        // a < 0, b < 0: a sdiv b == -a udiv -b
        expr_ref_vector neg_a_bits(m());
        mk_neg(sz, a_bits, neg_a_bits);
        expr_ref_vector neg_b_bits(m());
        mk_neg(sz, b_bits, neg_b_bits);
        mk_udiv(sz, neg_a_bits.data(), neg_b_bits.data(), out_bits);
    }
    else {
#if 0
        // creates 4 dividers
        mk_sdiv_srem_smod<SDIV>(sz, a_bits, b_bits, out_bits);
#else
        // creates only 1
        expr_ref_vector abs_a_bits(m());
        expr_ref_vector abs_b_bits(m());
        mk_abs(sz, a_bits, abs_a_bits);
        mk_abs(sz, b_bits, abs_b_bits);
        expr_ref_vector udiv_bits(m());
        mk_udiv(sz, abs_a_bits.data(), abs_b_bits.data(), udiv_bits);
        expr_ref_vector neg_udiv_bits(m());
        mk_neg(sz, udiv_bits.data(), neg_udiv_bits);
        // the quotient is non-negative iff the signs of a and b agree
        expr_ref c(m());
        mk_iff(a_msb, b_msb, c);
        mk_multiplexer(c, sz, udiv_bits.data(), neg_udiv_bits.data(), out_bits);
#endif
    }
}
647
/**
   \brief out_bits := a_bits srem b_bits (signed remainder; the result
   follows the sign of the dividend a).
*/
template<typename Cfg>
void bit_blaster_tpl<Cfg>::mk_srem(unsigned sz, expr * const * a_bits, expr * const * b_bits, expr_ref_vector & out_bits) {
    expr * a_msb = a_bits[sz - 1];
    expr * b_msb = b_bits[sz - 1];
    if (m().is_false(a_msb) && m().is_false(b_msb)) {
        // a >= 0, b >= 0
        mk_urem(sz, a_bits, b_bits, out_bits);
    }
    else if (m().is_false(a_msb) && m().is_true(b_msb)) {
        // a >= 0, b < 0: the result keeps the (non-negative) sign of a
        expr_ref_vector neg_b_bits(m());
        mk_neg(sz, b_bits, neg_b_bits);
        mk_urem(sz, a_bits, neg_b_bits.data(), out_bits);
    }
    else if (m().is_true(a_msb) && m().is_false(b_msb)) {
        // a < 0, b >= 0: negate the unsigned remainder
        expr_ref_vector neg_a_bits(m());
        mk_neg(sz, a_bits, neg_a_bits);
        expr_ref_vector tmp(m());
        mk_urem(sz, neg_a_bits.data(), b_bits, tmp);
        mk_neg(sz, tmp.data(), out_bits);
    }
    else if (m().is_true(a_msb) && m().is_true(b_msb)) {
        // a < 0, b < 0: negate the unsigned remainder
        expr_ref_vector neg_a_bits(m());
        mk_neg(sz, a_bits, neg_a_bits);
        expr_ref_vector neg_b_bits(m());
        mk_neg(sz, b_bits, neg_b_bits);
        expr_ref_vector tmp(m());
        mk_urem(sz, neg_a_bits.data(), neg_b_bits.data(), tmp);
        mk_neg(sz, tmp.data(), out_bits);
    }
    else {
#if 0
        // creates 4 urem
        mk_sdiv_srem_smod<SREM>(sz, a_bits, b_bits, out_bits);
#else
        // creates only 1
        expr_ref_vector abs_a_bits(m());
        expr_ref_vector abs_b_bits(m());
        mk_abs(sz, a_bits, abs_a_bits);
        mk_abs(sz, b_bits, abs_b_bits);
        expr_ref_vector urem_bits(m());
        numeral n_b;
        unsigned shift;
        // a urem 2^n -> a & ((2^n)-1)
        if (is_numeral(sz, abs_b_bits.data(), n_b) && n_b.is_power_of_two(shift)) {
            mk_zero_extend(shift, abs_a_bits.data(), sz - shift, urem_bits);
        } else {
            mk_urem(sz, abs_a_bits.data(), abs_b_bits.data(), urem_bits);
        }
        expr_ref_vector neg_urem_bits(m());
        mk_neg(sz, urem_bits.data(), neg_urem_bits);
        // the sign of the result follows the sign of a
        mk_multiplexer(a_msb, sz, neg_urem_bits.data(), urem_bits.data(), out_bits);
#endif
    }
}
701
702 /**
703 \brief Generate circuit for signed mod.
704
705 This function implements the semantics of bvsmod given below for two bits:
706
707 (define-fun bvsmod_def ((s (_ BitVec 2)) (t (_ BitVec 2))) (_ BitVec 2)
708 (let ((msb_s ((_ extract 1 1) s))
709 (msb_t ((_ extract 1 1) t)))
710 (let ((abs_s (ite (= msb_s #b0) s (bvneg s)))
711 (abs_t (ite (= msb_t #b0) t (bvneg t))))
712 (let ((u (bvurem abs_s abs_t)))
713 (ite (= u (_ bv0 2))
714 u
715 (ite (and (= msb_s #b0) (= msb_t #b0))
716 u
717 (ite (and (= msb_s #b1) (= msb_t #b0))
718 (bvadd (bvneg u) t)
719 (ite (and (= msb_s #b0) (= msb_t #b1))
720 (bvadd u t)
721 (bvneg u)))))))))
722
723 Note: The semantics is sensitive to the order of these tests.
724 It is unsound to test first for whether the most significant
725 bits of s and t are known and use the cases for those. If
726 u is 0 then the result is 0.
727 */
728
/**
   \brief out_bits := a_bits bvsmod b_bits, following the bvsmod semantics
   in the comment above. Each sign combination is expressed in terms of
   u = |a| urem |b|; a zero remainder is returned as zero regardless of
   the operand signs.
*/
template<typename Cfg>
void bit_blaster_tpl<Cfg>::mk_smod(unsigned sz, expr * const * a_bits, expr * const * b_bits, expr_ref_vector & out_bits) {
    expr * a_msb = a_bits[sz - 1];
    expr * b_msb = b_bits[sz - 1];

    expr_ref_vector abs_a_bits(m());
    expr_ref_vector abs_b_bits(m());
    mk_abs(sz, a_bits, abs_a_bits);
    mk_abs(sz, b_bits, abs_b_bits);
    // u := |a| urem |b|
    expr_ref_vector u_bits(m());
    mk_urem(sz, abs_a_bits.data(), abs_b_bits.data(), u_bits);
    expr_ref_vector neg_u_bits(m());
    mk_neg(sz, u_bits.data(), neg_u_bits);
    expr_ref_vector neg_u_add_b(m());
    mk_adder(sz, neg_u_bits.data(), b_bits, neg_u_add_b);
    expr_ref_vector u_add_b(m());
    mk_adder(sz, u_bits.data(), b_bits, u_add_b);
    expr_ref_vector zero(m());
    num2bits(numeral(0), sz, zero);
    // u == 0 is tested first (outermost multiplexer below): in that case
    // the result is 0 regardless of the signs; see the note above.
    expr_ref u_eq_0(m());
    mk_eq(sz, u_bits.data(), zero.data(), u_eq_0);

    expr_ref_vector & pp_bits = u_bits;       // pos & pos case
    expr_ref_vector & pn_bits = u_add_b;      // pos & neg case
    expr_ref_vector & np_bits = neg_u_add_b;  // neg & pos case
    expr_ref_vector & nn_bits = neg_u_bits;   // neg & neg case

    expr_ref_vector ite1(m());
    expr_ref_vector ite2(m());
    expr_ref_vector body(m());
    mk_multiplexer(b_msb, sz, nn_bits.data(), np_bits.data(), ite1);
    mk_multiplexer(b_msb, sz, pn_bits.data(), pp_bits.data(), ite2);
    mk_multiplexer(a_msb, sz, ite1.data(), ite2.data(), body);
    mk_multiplexer(u_eq_0, sz, u_bits.data(), body.data(), out_bits);

}
765
766 template<typename Cfg>
mk_eq(unsigned sz,expr * const * a_bits,expr * const * b_bits,expr_ref & out)767 void bit_blaster_tpl<Cfg>::mk_eq(unsigned sz, expr * const * a_bits, expr * const * b_bits, expr_ref & out) {
768 expr_ref_vector out_bits(m());
769 for (unsigned i = 0; i < sz; i++) {
770 mk_iff(a_bits[i], b_bits[i], out);
771 out_bits.push_back(out);
772 }
773 mk_and(out_bits.size(), out_bits.data(), out);
774 }
775
776 template<typename Cfg>
mk_rotate_left(unsigned sz,expr * const * a_bits,unsigned n,expr_ref_vector & out_bits)777 void bit_blaster_tpl<Cfg>::mk_rotate_left(unsigned sz, expr * const * a_bits, unsigned n, expr_ref_vector & out_bits) {
778 TRACE("bit_blaster", tout << n << ": " << sz << " ";
779 for (unsigned i = 0; i < sz; ++i) {
780 tout << mk_pp(a_bits[i], m()) << " ";
781 }
782 tout << "\n";
783 );
784 n = n % sz;
785 for (unsigned i = sz - n; i < sz; i++)
786 out_bits.push_back(a_bits[i]);
787 for (unsigned i = 0 ; i < sz - n; i++)
788 out_bits.push_back(a_bits[i]);
789 }
790
/**
   \brief Rotate a_bits right by n positions: a right rotation by n is a
   left rotation by sz - n (n is reduced modulo sz first).
*/
template<typename Cfg>
void bit_blaster_tpl<Cfg>::mk_rotate_right(unsigned sz, expr * const * a_bits, unsigned n, expr_ref_vector & out_bits) {
    n = n % sz;
    mk_rotate_left(sz, a_bits, sz - n, out_bits);
}
796
797 template<typename Cfg>
mk_sign_extend(unsigned sz,expr * const * a_bits,unsigned n,expr_ref_vector & out_bits)798 void bit_blaster_tpl<Cfg>::mk_sign_extend(unsigned sz, expr * const * a_bits, unsigned n, expr_ref_vector & out_bits) {
799 for (unsigned i = 0; i < sz; i++)
800 out_bits.push_back(a_bits[i]);
801 expr * high_bit = a_bits[sz - 1];
802 for (unsigned i = sz; i < sz + n; i++)
803 out_bits.push_back(high_bit);
804 }
805
806 template<typename Cfg>
mk_zero_extend(unsigned sz,expr * const * a_bits,unsigned n,expr_ref_vector & out_bits)807 void bit_blaster_tpl<Cfg>::mk_zero_extend(unsigned sz, expr * const * a_bits, unsigned n, expr_ref_vector & out_bits) {
808 for (unsigned i = 0; i < sz; i++)
809 out_bits.push_back(a_bits[i]);
810 expr * high_bit = m().mk_false();
811 for (unsigned i = sz; i < sz + n; i++)
812 out_bits.push_back(high_bit);
813 }
814
815 /**
816 \brief Return an expression that is true iff a_bits represents the number n.
817 */
818 template<typename Cfg>
mk_is_eq(unsigned sz,expr * const * a_bits,unsigned n,expr_ref & out)819 void bit_blaster_tpl<Cfg>::mk_is_eq(unsigned sz, expr * const * a_bits, unsigned n, expr_ref & out) {
820 numeral two(2);
821 expr_ref_vector out_bits(m());
822 for (unsigned i = 0; i < sz; i++) {
823 if (n % 2 == 0) {
824 expr_ref not_a(m());
825 mk_not(a_bits[i], not_a);
826 out_bits.push_back(not_a);
827 }
828 else {
829 out_bits.push_back(a_bits[i]);
830 }
831 n = n / 2;
832 }
833 mk_and(out_bits.size(), out_bits.data(), out);
834 }
835
836 /**
837 \brief Store in eqs the equalities a_bits = 0, a_bits = 1, ..., a_bits = sz -1.
838 */
839 template<typename Cfg>
mk_eqs(unsigned sz,expr * const * a_bits,expr_ref_vector & eqs)840 void bit_blaster_tpl<Cfg>::mk_eqs(unsigned sz, expr * const * a_bits, expr_ref_vector & eqs) {
841 for (unsigned i = 0; i < sz; i++) {
842 expr_ref eq(m());
843 mk_is_eq(sz, a_bits, i, eq);
844 eqs.push_back(eq);
845 }
846 }
847
848 template<typename Cfg>
mk_shl(unsigned sz,expr * const * a_bits,expr * const * b_bits,expr_ref_vector & out_bits)849 void bit_blaster_tpl<Cfg>::mk_shl(unsigned sz, expr * const * a_bits, expr * const * b_bits, expr_ref_vector & out_bits) {
850 numeral k;
851 if (is_numeral(sz, b_bits, k)) {
852 if (k > numeral(sz)) k = numeral(sz);
853 unsigned n = static_cast<unsigned>(k.get_int64());
854 if (n >= sz) n = sz;
855 unsigned pos;
856 for (pos = 0; pos < n; pos++)
857 out_bits.push_back(m().mk_false());
858 for (unsigned i = 0; pos < sz; pos++, i++)
859 out_bits.push_back(a_bits[i]);
860 }
861 else {
862 out_bits.append(sz, a_bits);
863
864 unsigned i = 0;
865 expr_ref_vector new_out_bits(m());
866 for (; i < sz; ++i) {
867 checkpoint();
868 unsigned shift_i = 1 << i;
869 if (shift_i >= sz) break;
870 for (unsigned j = 0; j < sz; ++j) {
871 expr_ref new_out(m());
872 expr* a_j = m().mk_false();
873 if (shift_i <= j) a_j = out_bits[j-shift_i].get();
874 mk_ite(b_bits[i], a_j, out_bits[j].get(), new_out);
875 new_out_bits.push_back(new_out);
876 }
877 out_bits.reset();
878 out_bits.append(new_out_bits);
879 new_out_bits.reset();
880 }
881 expr_ref is_large(m());
882 is_large = m().mk_false();
883 for (; i < sz; ++i) {
884 mk_or(is_large, b_bits[i], is_large);
885 }
886 for (unsigned j = 0; j < sz; ++j) {
887 expr_ref new_out(m());
888 mk_ite(is_large, m().mk_false(), out_bits[j].get(), new_out);
889 out_bits[j] = new_out;
890 }
891 }
892 }
893
894 template<typename Cfg>
mk_lshr(unsigned sz,expr * const * a_bits,expr * const * b_bits,expr_ref_vector & out_bits)895 void bit_blaster_tpl<Cfg>::mk_lshr(unsigned sz, expr * const * a_bits, expr * const * b_bits, expr_ref_vector & out_bits) {
896 numeral k;
897 if (is_numeral(sz, b_bits, k)) {
898 if (k > numeral(sz)) k = numeral(sz);
899 unsigned n = static_cast<unsigned>(k.get_int64());
900 unsigned pos = 0;
901 for (unsigned i = n; i < sz; pos++, i++)
902 out_bits.push_back(a_bits[i]);
903 for (; pos < sz; pos++)
904 out_bits.push_back(m().mk_false());
905 }
906 else {
907 out_bits.append(sz, a_bits);
908 unsigned i = 0;
909 for (; i < sz; ++i) {
910 checkpoint();
911 expr_ref_vector new_out_bits(m());
912 unsigned shift_i = 1 << i;
913 if (shift_i >= sz) break;
914 for (unsigned j = 0; j < sz; ++j) {
915 expr_ref new_out(m());
916 expr* a_j = m().mk_false();
917 if (shift_i + j < sz) a_j = out_bits[j+shift_i].get();
918 mk_ite(b_bits[i], a_j, out_bits[j].get(), new_out);
919 new_out_bits.push_back(new_out);
920 }
921 out_bits.reset();
922 out_bits.append(new_out_bits);
923 }
924 expr_ref is_large(m());
925 is_large = m().mk_false();
926 for (; i < sz; ++i) {
927 mk_or(is_large, b_bits[i], is_large);
928 }
929 for (unsigned j = 0; j < sz; ++j) {
930 expr_ref new_out(m());
931 mk_ite(is_large, m().mk_false(), out_bits[j].get(), new_out);
932 out_bits[j] = new_out;
933 }
934 }
935 }
936
937 template<typename Cfg>
mk_ashr(unsigned sz,expr * const * a_bits,expr * const * b_bits,expr_ref_vector & out_bits)938 void bit_blaster_tpl<Cfg>::mk_ashr(unsigned sz, expr * const * a_bits, expr * const * b_bits, expr_ref_vector & out_bits) {
939 numeral k;
940 if (is_numeral(sz, b_bits, k)) {
941 if (k > numeral(sz)) k = numeral(sz);
942 unsigned n = static_cast<unsigned>(k.get_int64());
943 unsigned pos = 0;
944 for (unsigned i = n; i < sz; pos++, i++)
945 out_bits.push_back(a_bits[i]);
946 for (; pos < sz; pos++)
947 out_bits.push_back(a_bits[sz-1]);
948 }
949 else {
950 out_bits.append(sz, a_bits);
951 unsigned i = 0;
952 for (; i < sz; ++i) {
953 checkpoint();
954 expr_ref_vector new_out_bits(m());
955 unsigned shift_i = 1 << i;
956 if (shift_i >= sz) break;
957 for (unsigned j = 0; j < sz; ++j) {
958 expr_ref new_out(m());
959 expr* a_j = a_bits[sz-1];
960 if (shift_i + j < sz) a_j = out_bits[j+shift_i].get();
961 mk_ite(b_bits[i], a_j, out_bits[j].get(), new_out);
962 new_out_bits.push_back(new_out);
963 }
964 out_bits.reset();
965 out_bits.append(new_out_bits);
966 }
967 expr_ref is_large(m());
968 is_large = m().mk_false();
969 for (; i < sz; ++i) {
970 mk_or(is_large, b_bits[i], is_large);
971 }
972 for (unsigned j = 0; j < sz; ++j) {
973 expr_ref new_out(m());
974 mk_ite(is_large, a_bits[sz-1], out_bits[j].get(), new_out);
975 out_bits[j] = new_out;
976 }
977 }
978 }
979
980 template<typename Cfg>
981 template<bool Left>
mk_ext_rotate_left_right(unsigned sz,expr * const * a_bits,expr * const * b_bits,expr_ref_vector & out_bits)982 void bit_blaster_tpl<Cfg>::mk_ext_rotate_left_right(unsigned sz, expr * const * a_bits, expr * const * b_bits, expr_ref_vector & out_bits) {
983 numeral k;
984 if (is_numeral(sz, b_bits, k) && k.is_unsigned()) {
985 if (Left)
986 mk_rotate_left(sz, a_bits, static_cast<unsigned>(k.get_uint64()), out_bits);
987 else
988 mk_rotate_right(sz, a_bits, static_cast<unsigned>(k.get_uint64()), out_bits);
989 }
990 else {
991 //
992 // Review: a better tuned implementation is possible by using shifts by power of two.
993 // e.g., looping over the bits of b_bits, then rotate by a power of two depending
994 // on the bit-position. This would get rid of the mk_urem.
995 //
996 expr_ref_vector sz_bits(m());
997 expr_ref_vector masked_b_bits(m());
998 expr_ref_vector eqs(m());
999 numeral sz_numeral(sz);
1000 num2bits(sz_numeral, sz, sz_bits);
1001 mk_urem(sz, b_bits, sz_bits.data(), masked_b_bits);
1002 mk_eqs(sz, masked_b_bits.data(), eqs);
1003 for (unsigned i = 0; i < sz; i++) {
1004 checkpoint();
1005 expr_ref out(m());
1006 out = a_bits[i];
1007 for (unsigned j = 1; j < sz; j++) {
1008 expr_ref new_out(m());
1009 unsigned src = (Left ? (sz + i - j) : (i + j)) % sz;
1010 mk_ite(eqs.get(j), a_bits[src], out, new_out);
1011 out = new_out;
1012 }
1013 out_bits.push_back(out);
1014 }
1015 }
1016 }
1017
1018 template<typename Cfg>
mk_ext_rotate_left(unsigned sz,expr * const * a_bits,expr * const * b_bits,expr_ref_vector & out_bits)1019 void bit_blaster_tpl<Cfg>::mk_ext_rotate_left(unsigned sz, expr * const * a_bits, expr * const * b_bits, expr_ref_vector & out_bits) {
1020 mk_ext_rotate_left_right<true>(sz, a_bits, b_bits, out_bits);
1021 }
1022
1023 template<typename Cfg>
mk_ext_rotate_right(unsigned sz,expr * const * a_bits,expr * const * b_bits,expr_ref_vector & out_bits)1024 void bit_blaster_tpl<Cfg>::mk_ext_rotate_right(unsigned sz, expr * const * a_bits, expr * const * b_bits, expr_ref_vector & out_bits) {
1025 mk_ext_rotate_left_right<false>(sz, a_bits, b_bits, out_bits);
1026 }
1027
1028 template<typename Cfg>
1029 template<bool Signed>
mk_le(unsigned sz,expr * const * a_bits,expr * const * b_bits,expr_ref & out)1030 void bit_blaster_tpl<Cfg>::mk_le(unsigned sz, expr * const * a_bits, expr * const * b_bits, expr_ref & out) {
1031 SASSERT(sz > 0);
1032 expr_ref not_a(m());
1033 mk_not(a_bits[0], not_a);
1034 mk_or(not_a, b_bits[0], out);
1035 for (unsigned idx = 1; idx < (Signed ? sz - 1 : sz); idx++) {
1036 mk_not(a_bits[idx], not_a);
1037 mk_ge2(not_a, b_bits[idx], out, out);
1038 }
1039 if (Signed) {
1040 expr_ref not_b(m());
1041 mk_not(b_bits[sz-1], not_b);
1042 mk_ge2(not_b, a_bits[sz-1], out, out);
1043 }
1044 }
1045
1046 template<typename Cfg>
mk_sle(unsigned sz,expr * const * a_bits,expr * const * b_bits,expr_ref & out)1047 void bit_blaster_tpl<Cfg>::mk_sle(unsigned sz, expr * const * a_bits, expr * const * b_bits, expr_ref & out) {
1048 mk_le<true>(sz, a_bits, b_bits, out);
1049 }
1050
1051 template<typename Cfg>
mk_ule(unsigned sz,expr * const * a_bits,expr * const * b_bits,expr_ref & out)1052 void bit_blaster_tpl<Cfg>::mk_ule(unsigned sz, expr * const * a_bits, expr * const * b_bits, expr_ref & out) {
1053 mk_le<false>(sz, a_bits, b_bits, out);
1054 }
1055
1056 template<typename Cfg>
mk_not(unsigned sz,expr * const * a_bits,expr_ref_vector & out_bits)1057 void bit_blaster_tpl<Cfg>::mk_not(unsigned sz, expr * const * a_bits, expr_ref_vector & out_bits) {
1058 for (unsigned i = 0; i < sz; i++) {
1059 expr_ref t(m());
1060 mk_not(a_bits[i], t);
1061 out_bits.push_back(t);
1062 }
1063 }
1064
1065 #define MK_BINARY(NAME, OP) \
1066 template<typename Cfg> \
1067 void bit_blaster_tpl<Cfg>::NAME(unsigned sz, expr * const * a_bits, expr * const * b_bits, expr_ref_vector & out_bits) { \
1068 for (unsigned i = 0; i < sz; i++) { \
1069 expr_ref t(m()); \
1070 OP(a_bits[i], b_bits[i], t); \
1071 out_bits.push_back(t); \
1072 } \
1073 }
1074
1075 MK_BINARY(mk_and, mk_and);
1076 MK_BINARY(mk_or, mk_or);
1077 MK_BINARY(mk_xor, mk_xor);
1078 MK_BINARY(mk_xnor, mk_iff);
1079 MK_BINARY(mk_nand, mk_nand);
1080 MK_BINARY(mk_nor, mk_nor);
1081
1082 template<typename Cfg>
mk_redand(unsigned sz,expr * const * a_bits,expr_ref_vector & out_bits)1083 void bit_blaster_tpl<Cfg>::mk_redand(unsigned sz, expr * const * a_bits, expr_ref_vector & out_bits) {
1084 expr_ref tmp(m());
1085 mk_and(sz, a_bits, tmp);
1086 out_bits.push_back(tmp);
1087 }
1088
1089 template<typename Cfg>
mk_redor(unsigned sz,expr * const * a_bits,expr_ref_vector & out_bits)1090 void bit_blaster_tpl<Cfg>::mk_redor(unsigned sz, expr * const * a_bits, expr_ref_vector & out_bits) {
1091 expr_ref tmp(m());
1092 mk_or(sz, a_bits, tmp);
1093 out_bits.push_back(tmp);
1094 }
1095
1096 template<typename Cfg>
mk_comp(unsigned sz,expr * const * a_bits,expr * const * b_bits,expr_ref_vector & out_bits)1097 void bit_blaster_tpl<Cfg>::mk_comp(unsigned sz, expr * const * a_bits, expr * const * b_bits, expr_ref_vector & out_bits) {
1098 expr_ref tmp(m());
1099 mk_eq(sz, a_bits, b_bits, tmp);
1100 out_bits.push_back(tmp);
1101 }
1102
1103 template<typename Cfg>
mk_carry_save_adder(unsigned sz,expr * const * a_bits,expr * const * b_bits,expr * const * c_bits,expr_ref_vector & sum_bits,expr_ref_vector & carry_bits)1104 void bit_blaster_tpl<Cfg>::mk_carry_save_adder(unsigned sz, expr * const * a_bits, expr * const * b_bits, expr * const * c_bits, expr_ref_vector & sum_bits, expr_ref_vector & carry_bits) {
1105 expr_ref t(m());
1106 for (unsigned i = 0; i < sz; i++) {
1107 mk_xor3(a_bits[i], b_bits[i], c_bits[i], t);
1108 sum_bits.push_back(t);
1109 mk_carry(a_bits[i], b_bits[i], c_bits[i], t);
1110 carry_bits.push_back(t);
1111 }
1112 }
1113
1114 template<typename Cfg>
mk_const_case_multiplier(unsigned sz,expr * const * a_bits,expr * const * b_bits,expr_ref_vector & out_bits)1115 bool bit_blaster_tpl<Cfg>::mk_const_case_multiplier(unsigned sz, expr * const * a_bits, expr * const * b_bits, expr_ref_vector & out_bits) {
1116 unsigned case_size = 1;
1117 unsigned circuit_size = sz*sz*5;
1118 for (unsigned i = 0; case_size < circuit_size && i < sz; ++i) {
1119 if (!is_bool_const(a_bits[i]))
1120 case_size *= 2;
1121 if (!is_bool_const(b_bits[i]))
1122 case_size *= 2;
1123 }
1124 if (case_size >= circuit_size)
1125 return false;
1126
1127 SASSERT(out_bits.empty());
1128 ptr_buffer<expr, 128> na_bits;
1129 na_bits.append(sz, a_bits);
1130 ptr_buffer<expr, 128> nb_bits;
1131 nb_bits.append(sz, b_bits);
1132 mk_const_case_multiplier(true, 0, sz, na_bits, nb_bits, out_bits);
1133 return true;
1134 }
1135
1136 template<typename Cfg>
mk_const_case_multiplier(bool is_a,unsigned i,unsigned sz,ptr_buffer<expr,128> & a_bits,ptr_buffer<expr,128> & b_bits,expr_ref_vector & out_bits)1137 void bit_blaster_tpl<Cfg>::mk_const_case_multiplier(bool is_a, unsigned i, unsigned sz, ptr_buffer<expr, 128>& a_bits, ptr_buffer<expr, 128>& b_bits, expr_ref_vector & out_bits) {
1138 while (is_a && i < sz && is_bool_const(a_bits[i])) ++i;
1139 if (is_a && i == sz) {
1140 is_a = false;
1141 i = 0;
1142 }
1143 while (!is_a && i < sz && is_bool_const(b_bits[i]))
1144 ++i;
1145 if (i < sz) {
1146 expr_ref_vector out1(m()), out2(m());
1147 expr_ref x(m());
1148 x = is_a?a_bits[i]:b_bits[i];
1149 if (is_a) a_bits[i] = m().mk_true(); else b_bits[i] = m().mk_true();
1150 mk_const_case_multiplier(is_a, i+1, sz, a_bits, b_bits, out1);
1151 if (is_a) a_bits[i] = m().mk_false(); else b_bits[i] = m().mk_false();
1152 mk_const_case_multiplier(is_a, i+1, sz, a_bits, b_bits, out2);
1153 if (is_a) a_bits[i] = x; else b_bits[i] = x;
1154 SASSERT(out_bits.empty());
1155 expr_ref bit(m());
1156 for (unsigned j = 0; j < sz; ++j) {
1157 mk_ite(x, out1.get(j), out2.get(j), bit);
1158 out_bits.push_back(bit);
1159 }
1160 }
1161 else {
1162 numeral n_a, n_b;
1163 SASSERT(i == sz && !is_a);
1164 VERIFY(is_numeral(sz, a_bits.data(), n_a));
1165 VERIFY(is_numeral(sz, b_bits.data(), n_b));
1166 n_a *= n_b;
1167 num2bits(n_a, sz, out_bits);
1168 }
1169 SASSERT(out_bits.size() == sz);
1170 }
1171