/*
 * Copyright (c) 2005 - 2010, Nils R. Weller
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include "reg.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <limits.h>
#include "scope.h"
#include "decl.h"
#include "type.h"
#include "functions.h"
#include "control.h"
#include "debug.h"
#include "token.h"
#include "error.h"
#include "functions.h"
#include "icode.h"
#include "stack.h"
#include "zalloc.h"
#include "reg.h"
#include "backend.h"
#include "subexpr.h"
#include "cc1_main.h"
#include "features.h"
#include "n_libc.h"

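/*
 * Returns 1 if r and all of the sub-registers it is composed of are
 * unused, 0 as soon as any part of it is in use
 */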
int
reg_unused(struct reg *r) {
	struct reg	**comp;
	int		i;

	if (r->used) return 0;

	comp = r->composed_of;
	if (comp == NULL) {
		return 1;
	}
	for (i = 0; comp[i] != NULL; ++i) {
		if (!reg_unused(comp[i])) {
			return 0;
		}
	}
	return 1;
}

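/*
 * Returns 1 if r may be handed out by the register allocator. A
 * composite register is only allocatable if every component that is
 * currently in use is itself allocatable (the 8-byte case only checks
 * the first component - AMD64)
 */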
int
reg_allocatable(struct reg *r) {
	struct reg	**comp;
	int		i;

	if (!r->allocatable) return 0;
	comp = r->composed_of;
	if (comp == NULL) {
		return 1;
	}
	if (r->size == 8) {
		/* AMD64 */
		return reg_allocatable(r->composed_of[0]);
	}

	for (i = 0; comp[i] != NULL; ++i) {
		if (!reg_unused(comp[i])
			&& !reg_allocatable(comp[i])) {
			return 0;
		}
	}
	return 1;
}


void
reg_set_allocatable(struct reg *r) {
	/*
	 * XXX for some reason gpr0 always becomes allocatable
	 * somewhere :-(, and stuff like   stw foo, 0(0) breaks.
	 * Here's a kludge to fix this for now.
	 * To debug this, avoid calling free_preg() on gpr0 in
	 * change_preg_size(), and then throw in an abort() in
	 * free_preg() if the operand is ever gpr0
	 *
	 *          ---> To hell with PowerPC!!! <---
	 */
	if (r == &power_gprs[0]) {
		return;
	}
#ifdef DEBUG6
	debug_log_regstuff(r, r->vreg, DEBUG_LOG_ALLOCATABLE);
#endif
	r->allocatable = 1;
}

void
reg_set_unallocatable(struct reg *r) {
#ifdef DEBUG6
	debug_log_regstuff(r, r->vreg, DEBUG_LOG_UNALLOCATABLE);
#endif
	r->allocatable = 0;
}

/*
 * 11/26/08: Mark register as dedicated to avoid it ever being used for
 * register allocation and saving (e.g. for stack pointer/frame pointer
 * registers)
 */
void
reg_set_dedicated(struct reg *r) {
	reg_set_unallocatable(r);
	r->used = 0;
	r->dedicated = 1;
}

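/*
 * Returns 1 if the vreg holds a floating point value that has to live
 * on the x87 register stack: any floating point type on x86 (on OSX
 * only long double), and long double on AMD64
 */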
int
is_x87_trash(struct vreg *vr) {
	if (sysflag == OS_OSX) {
		if (vr->type->code != TY_LDOUBLE) {
			return 0;
		}
	}
	if (is_floating_type(vr->type)
		&& (backend->arch == ARCH_X86
			|| (vr->type->code == TY_LDOUBLE
				&& backend->arch == ARCH_AMD64))) {
		return 1;
	}
	return 0;
}

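/*
 * Mark the physical register(s) currently backing vr - and, if vr is
 * accessed through a pointer or as a struct member, the register
 * holding that pointer - unallocatable. vreg_set_allocatable() below
 * is the inverse operation
 */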
void
vreg_set_unallocatable(struct vreg *vr) {
	if (vr->pregs[0] && vr->pregs[0]->vreg == vr) {
		reg_set_unallocatable(vr->pregs[0]);
		if (vr->is_multi_reg_obj) {
			reg_set_unallocatable(vr->pregs[1]);
		}
	}
	if (vr->from_ptr != NULL
		&& vr->from_ptr->pregs[0] != NULL
		&& vr->from_ptr->pregs[0]->vreg == vr->from_ptr) {
		reg_set_unallocatable(vr->from_ptr->pregs[0]);
	} else if (vr->parent) {
		struct vreg	*vr2 = get_parent_struct(vr);

		if (vr2->from_ptr
			&& vr2->from_ptr->pregs[0]
			&& vr2->from_ptr->pregs[0]->vreg == vr2->from_ptr) {
			reg_set_unallocatable(vr2->from_ptr->pregs[0]);
		}
	}
}

void
vreg_set_allocatable(struct vreg *vr) {
	if (vr->pregs[0] && vr->pregs[0]->vreg == vr) {
		reg_set_allocatable(vr->pregs[0]);
		if (vr->is_multi_reg_obj) {
			reg_set_allocatable(vr->pregs[1]);
		}
	}
	if (vr->from_ptr != NULL
		&& vr->from_ptr->pregs[0] != NULL
		&& vr->from_ptr->pregs[0]->vreg == vr->from_ptr) {
		reg_set_allocatable(vr->from_ptr->pregs[0]);
	} else if (vr->parent) {
		struct vreg	*vr2 = get_parent_struct(vr);

		if (vr2->from_ptr
			&& vr2->from_ptr->pregs[0]
			&& vr2->from_ptr->pregs[0]->vreg == vr2->from_ptr) {
			reg_set_allocatable(vr2->from_ptr->pregs[0]);
		}
	}
}

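/*
 * Allocate a zero-initialized vreg, either from the zone allocator or
 * through malloc(), and give it a sequence number for debugging if
 * VREG_SEQNO is enabled
 */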
static struct vreg *
alloc_vreg(void) {
	struct vreg	*res;

#if USE_ZONE_ALLOCATOR
	res = zalloc_buf(Z_VREG);
#else
	struct vreg	*ret = n_xmalloc(sizeof *ret);
	static struct vreg	nullvreg;
	*ret = nullvreg;
	res = ret;
#endif

#if VREG_SEQNO
	{
		static int	seqno;
		res->seqno = ++seqno;
	}
#endif
	return res;
}

struct vreg *
dup_vreg(struct vreg *vr) {
	struct vreg	*ret = alloc_vreg();
#if VREG_SEQNO
	int		saved_seqno = vr->seqno;
#endif
	*ret = *vr;
#if VREG_SEQNO
	ret->seqno = saved_seqno;
#endif

	return ret;
}


struct vreg *poi;


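/*
 * Create a new virtual register. At most one of the arguments is
 * normally non-null: the vreg is backed by a declared variable (dec),
 * by a constant token (constant), by indirection through a pointer
 * vreg (from_ptr), or merely given a type (ty0). If all are null, an
 * anonymous vreg is created. Arrays are given pointer size, immediate
 * VLA types size 0
 */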
struct vreg *
vreg_alloc(struct decl *dec, struct token *constant, struct vreg *from_ptr,
	struct type *ty0) {

	struct vreg	*vreg;
	struct type	*ty;

	vreg = alloc_vreg();

	if (dec != NULL) {
		/* Variable-backed */
		vreg->var_backed = dec;
		/*
		 * 05/26/11: Don't set size member to 0 for pointers to VLAs
		 * (int (*p)[N]) because those do have a known size - the
		 * standard pointer size
		 */
		if (IS_VLA(dec->dtype->flags)
			&& is_immediate_vla_type(dec->dtype)) {
			vreg->size = 0;
		} else {
			/* 07/09/10: Array has ptr size as far as vregs are
			 * concerned */
			if (dec->dtype->tlist != NULL) {
				vreg->size = backend->get_ptr_size();
			} else {
				vreg->size = backend->get_sizeof_type(dec->dtype, NULL);
			}
		}
		vreg->type = dec->dtype;
	} else if (from_ptr != NULL) {
		vreg->from_ptr = from_ptr;

		/* Get size of what pointer points to */
		ty = n_xmemdup(from_ptr->type, sizeof *ty);
		copy_tlist(&ty->tlist, ty->tlist->next);
		vreg->type = ty;

		if (IS_VLA(ty->flags) && is_immediate_vla_type(ty)) {
			vreg->size = 0;
		} else {
			if (ty->tlist != NULL) {
				vreg->size = backend->get_ptr_size();
			} else {
				vreg->size = backend->get_sizeof_type(ty, NULL);
			}
		}
	} else if (constant != NULL) {
		/* Const-backed */
		vreg->from_const = constant;
		vreg->size = backend->get_sizeof_const(constant);
		vreg->type = n_xmalloc(sizeof *vreg->type);
		if (constant->type == TOK_STRING_LITERAL) {
			struct ty_string	*ts = constant->data;
			copy_type(vreg->type, ts->ty, 0);
		} else {
			struct type	*ty =
				make_basic_type(constant->type);

			copy_type(vreg->type, ty, 0);
			if (vreg->type->code == TY_INT
				|| vreg->type->code == TY_LONG
				|| vreg->type->code == TY_LLONG) {
				vreg->type->sign = TOK_KEY_SIGNED;
			} else if (vreg->type->code == TY_UINT
				|| vreg->type->code == TY_ULONG
				|| vreg->type->code == TY_ULLONG) {
				vreg->type->sign = TOK_KEY_UNSIGNED;
			}
			vreg->is_nullptr_const = is_nullptr_const(constant, ty);
		}
	} else if (ty0 != NULL) {
		vreg->type = ty0;
		if (ty0->code != TY_VOID || ty0->tlist != NULL) {
			if (IS_VLA(ty0->flags) && is_immediate_vla_type(ty0)) {
				vreg->size = 0;
			} else if (ty0->tlist != NULL) {
				/*
				 * 07/09/10: This was missing. When we are
				 * dealing with arrays, we probably want the
				 * pointer size instead of the array size. See
				 * do_struct_member()
				 */
				vreg->size = backend->get_ptr_size();
			} else {
				vreg->size = backend->get_sizeof_type(ty0, NULL);
			}
		} else {
			vreg->size = 0;
		}
	} else {
		/*
		 * Anonymous register. Will be saved on stack if it
		 * gets ``faulted in'', is modified, and then needs
		 * to be saved again
		 */
		;
		vreg->size = backend->get_ptr_size(); /* XXX hm */
	}
	vreg->pregs[0] = NULL; /* No physical register yet */
	if (vreg->type != NULL) {
		vreg->is_multi_reg_obj
			= backend->is_multi_reg_obj(vreg->type);
	}

	return vreg;
}


/*
 * 08/11/08: Basic routine to actually anonymify a vreg which is already
 * disconnected and register-resident, but may still be backed by something
 * else as well.
 *
 * vreg_disconnect() + vreg_do_anonymify() is probably what we had intended
 * for vreg_anonymify();
 */
void
vreg_do_anonymify(struct vreg *vr) {
	vr->var_backed = NULL;
	vr->from_const = NULL;
	vr->from_ptr = NULL;
	vr->parent = NULL;
	vr->stack_addr = NULL;
}


void
vreg_anonymify(struct vreg **vr, struct reg *r,
		struct reg *r2, struct icode_list *il) {
	struct vreg	*tmp;


	if (is_x87_trash(*vr)) {
	}


	if ((*vr)->var_backed
		|| (*vr)->from_const
		|| (*vr)->from_ptr
		|| (*vr)->parent
		|| (*vr)->stack_addr
		) {
#if VREG_SEQNO
		static int	saved_seqno;
#endif

		vreg_faultin_x87(r, r2, *vr, il, 0);

		tmp = vreg_alloc(NULL, NULL, NULL, (*vr)->type);



#if VREG_SEQNO
		saved_seqno = tmp->seqno;
#endif
		*tmp = **vr;
#if VREG_SEQNO
		tmp->seqno = saved_seqno;
#endif

		/*
		 * 11/02/07: The stuff below was missing. When
		 * anonymifying an array, it must decay into a
		 * pointer. Otherwise reloading it will go wrong.
		 * This is a VERY fundamental bug which I'm
		 * surprised to find, and which needs verification
		 * and testing
		 */
		if (tmp->type->tlist != NULL
			&& (tmp->type->tlist->type == TN_ARRAY_OF
			|| tmp->type->tlist->type == TN_VARARRAY_OF)) {
			int	is_vla;

			is_vla = tmp->type->tlist->type == TN_VARARRAY_OF;

			tmp->type = n_xmemdup(tmp->type,
				 sizeof *tmp->type);
			copy_tlist(&tmp->type->tlist, tmp->type->tlist);
			tmp->type->tlist->type = TN_POINTER_TO;

			if (is_vla) {
				/*
				 * 05/20/11: XXX
				 */
				tmp->size = backend->get_ptr_size();
			} else {
				tmp->size = backend->get_sizeof_type(
					tmp->type, NULL);
			}
		} else if (IS_THREAD(tmp->type->flags)) {
			tmp->type = n_xmemdup(tmp->type, sizeof *tmp->type);
			tmp->type->flags &= ~FLAGS_THREAD;
		}

		vreg_map_preg(tmp, tmp->pregs[0]);
		if (tmp->pregs[1] != NULL) {
			vreg_map_preg2(tmp, tmp->pregs[1]);
		}
		*vr = tmp;
#if 0
		tmp->var_backed = NULL;
		tmp->from_ptr = NULL;
		tmp->from_const = NULL;
		tmp->parent = NULL;
		tmp->stack_addr = NULL;
#endif
		vreg_do_anonymify(tmp);
		if (is_x87_trash(*vr)) {
			free_preg((*vr)->pregs[0], il, 1, 1);
			(*vr)->pregs[0] = NULL;
			tmp->pregs[0] = NULL;
		}
#if 0
	} else if ((*vr)->preg == NULL || (*vr)->preg->vreg != *vr) {
		/* Already anonymous but saved on stack */
		vreg_faultin(NULL, NULL, *vr, il, 0);
#endif
	}
}

/*
 * XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
 * does not set var_backed, etc, to null! so it does not really
 * disconnect much at all
 */
struct vreg *
vreg_disconnect(struct vreg *vr) {
	struct vreg	*ret;
#if VREG_SEQNO
	int		saved_seqno = vr->seqno;
#endif


	/*
	 * 12/23/08: Is there a good reason not to use
	 * dup_vreg()? That would have done the seqno copying...
	 */
#if USE_ZONE_ALLOCATOR
	ret = alloc_vreg();
	*ret = *vr;
#else
	ret = n_xmemdup(vr, sizeof *vr);
#endif

#if VREG_SEQNO
	ret->seqno = saved_seqno;
#endif

	if (ret->pregs[0] && vr->pregs[0]->vreg == vr) {
		vreg_map_preg(ret, ret->pregs[0]);
	}
	if (ret->is_multi_reg_obj && vr->pregs[1]->vreg == vr) {
		vreg_map_preg2(ret, ret->pregs[1]);
	}
	return ret;
}

static int	yes_really = 0;


struct reg *
vreg_faultin_x87(
	struct reg *r0,
	struct reg *r0_2,
	struct vreg *vr,
	struct icode_list *il,
	int whatfor) {

	struct reg	*ret;

	yes_really = 1;
	/*
	 * XXX sometimes x87 registers wrongly remain associated with
	 * data items, which is never allowed. We probably aren't using
	 * free_preg() rigorously enough
	 */
	if (is_x87_trash(vr)) {
		vr->pregs[0] = NULL;
	}

	ret = vreg_faultin(r0, r0_2, vr, il, whatfor);
	yes_really = 0;
	return ret;
}

static int	doing_dedicated_mapping;



/*
 * Faults virtual register ``vr'' into a physical register, if necessary,
 * and returns a pointer to that register.
 * If a physical register needs to be saved before we can use it, it will
 * be saved on curfunc's stack.
 * The caller can supply physical register r0 to use instead of calling
 * alloc_gpr()
 *
 * XXX this assumes that every item will fit into two registers, which
 * is not true for all CPUs :(. In particular, SPARC supports quad-
 * precision floating point by combining four FPRs. Perhaps change to
 * ``struct reg **r0''.
 * I also note that only one GPR is returned, but that does not seem to
 * be a problem because currently the return value is only used for pointer
 * faultins (and pointers should occupy only one GPR on most every arch
 * that will ever be supported.)
 */
struct reg *
vreg_faultin(
	struct reg *r0,
	struct reg *r0_2,
	struct vreg *vr,
	struct icode_list *il,
	int whatfor) {

	struct reg		*r = NULL;
	struct reg		*r2 = NULL;
	struct vreg		*vr2 = NULL;


	if (is_x87_trash(vr) && !yes_really) {
		puts("BUG: Attempt to treat x87 register like something");
		puts("     that is actually useful.");
		abort();
	}
	if (r0 != NULL && !doing_dedicated_mapping) {
		if (!r0->allocatable && !r0->used && r0->dedicated) {
			(void) fprintf(stderr, "BUG: Attempt to map vreg \n"
			"to dedicated register. Use vreg_faultin_dedicated()\n"
			"instead\n");
			abort();
		}
	}

	if (vr->pregs[0] != NULL && vr->pregs[0]->vreg == vr) {
		/* XXX needs work for long long .... */
		if (vr->is_multi_reg_obj) {
			if (vr->pregs[1] == NULL
				|| vr->pregs[1]->vreg != vr) {
				/* Second register isn't mapped! */
				free_preg(vr->pregs[0], il, 1, 1);
				goto dofault;
			}

			debug_log_regstuff(vr->pregs[1], vr, DEBUG_LOG_REVIVE);
		}
		debug_log_regstuff(vr->pregs[0], vr, DEBUG_LOG_REVIVE);
		if (r0 == NULL) {
			/* Already loaded */
			vreg_map_preg(vr, vr->pregs[0]);
			if (vr->is_multi_reg_obj) {
				vreg_map_preg2( vr, vr->pregs[1]);
			}
			r = vr->pregs[0];
			goto out;
		} else {
			/*
			 * The item is already loaded, but the caller wishes
			 * it to be stored in a specific register, so it
			 * may have to be relocated. Note that this is tricky
			 * for multi-register objects. Consider a ``long long''
			 * loaded into ebx,eax that shall be relocated to
			 * eax,edx; We need to ensure that the copy from ebx
			 * to eax does not trash the other part of the object!
			 */
			if (vr->pregs[0] != r0) {
				if (vr->is_multi_reg_obj
					&& r0 == vr->pregs[1]) {
					icode_make_xchg(r0, vr->pregs[0], il);
					vr->pregs[1] = vr->pregs[0];
				} else {

					icode_make_copyreg(r0, vr->pregs[0],
						vr->type, vr->type, il);
					/* XXX shouldn't caller do this? */
					vr->pregs[0]->used = 0;
					/* Record new preg */
				}
			}
			if (vr->is_multi_reg_obj) {
				/* XXX very broken if the regs overlap */
				if (vr->pregs[1] != r0_2) {
					icode_make_copyreg(r0_2, vr->pregs[1],
						vr->type, vr->type, il);
					/* XXX shouldn't caller do this? */
					vr->pregs[1]->used = 0;
				}
				vreg_map_preg2(vr, r0_2);
			}
			r = r0;
			vreg_map_preg(vr, r);
			goto out;
		}
	} else {
		/* Register is owned by someone else ... */
		;
	}
dofault:

	if (vr->from_ptr) {
		/*
		 * We may need to fault in the pointer itself first before
		 * we can indirect through it
		 */
		if (r2 != NULL) {
			/*
			 * 11/01/07:
			 * This was missing... for a multi-reg
			 * item, the pointer may not be trashed
			 * when loading the first part, so we
			 * better separate pointer reg clearly
			 * from data regs
			 */
			reg_set_unallocatable(r);
			reg_set_unallocatable(r2);
		}

		if (vr->from_ptr->pregs[0] == NULL
			|| vr->from_ptr->pregs[0]->vreg != vr->from_ptr) {
			vreg_faultin(NULL, NULL, vr->from_ptr, il, 0);
		}

		if (r2 != NULL) {
			reg_set_allocatable(r);
			reg_set_allocatable(r2);
		}

		if ((vr->from_ptr->type->tlist->type == TN_ARRAY_OF
			|| vr->from_ptr->type->tlist->type == TN_VARARRAY_OF)
			&& vr->type->tlist
			&& (vr->type->tlist->type == TN_ARRAY_OF
			|| vr->type->tlist->type == TN_VARARRAY_OF)) {
			/*
			 * The ``pointer'' is really an array, i.e.
			 * the address of the first element, so there
			 * is no need to indirect
			 */
			vr->pregs[0] = r = vr->from_ptr->pregs[0];
			vr->pregs[0]->vreg = vr;
			goto out;
		}
	} else if (vr->parent) {
		vr2 = get_parent_struct(vr);
		if (vr2->from_ptr) {
			vreg_faultin(NULL, NULL, vr2->from_ptr, il, 0);
		}
	}

	if (r0 == NULL) {
		int	size = 0;
		int	is_floating;

		is_floating = is_floating_type(vr->type);

		if (vr->type->tlist != NULL
			&& (vr->type->tlist->type == TN_ARRAY_OF
			|| vr->type->tlist->type == TN_VARARRAY_OF)) { /* 05/26/11 */
			/* Array decays into pointer */
			size = backend->get_ptr_size();
			/*size = 4;*/
#if 0
		} else if (vr->size > 4
			&& !is_floating
			&& !IS_LLONG(vr->type->code)) {
			printf("BAD REGISTER LOAD OF SIZE %d\n", vr->size);
			printf("hm type is %d\n", vr->type->code);
			abort();
#endif
		} else {
			size = vr->size;
		}

		/* XXX pass whatfor to alloc_gpr() */
		(void) whatfor;
		if (vr->from_ptr) {
			reg_set_unallocatable(vr->from_ptr->pregs[0]);
		}
		if (is_floating) {
			/* XXX backend->multi_fpr_object ??? */
			r = backend->alloc_fpr(curfunc, size, il, NULL);
			if (vr->is_multi_reg_obj) {
				/* 11/18/08: New for PPC long double */
				r2 = backend->alloc_fpr(curfunc, size, il, NULL);
			}
		} else {
			r = ALLOC_GPR(curfunc, size, il, NULL);

			/*
			 * 06/20/08: This used to check the backend flag
			 * multi_gpr_object, which indicates whether the last
			 * ALLOC_GPR() requested a multi-gpr-sized item (size
			 * 8). Then it set vr->is_multi_reg_obj.
			 *
			 * The first one is bad because the state stuff is
			 * dangerous and easy to mess up (by omitting the
			 * second alloc, which did in fact happen. The second
			 * part - setting vr->is_multi_reg_obj - is also bad
			 * because it should be set at this point already
			 */
			if (backend->multi_gpr_object
				&& !vr->is_multi_reg_obj) {
				warningfl(NULL, "Compiler bug? Multi-GPR "
					"settings do not agree with each "
					"other");
			}

			if (vr->is_multi_reg_obj /*backend->multi_gpr_object*/) {
				/* XXX hardcoded x86 */
/*				vr->is_multi_reg_obj = 2;*/
				r2 = ALLOC_GPR(curfunc, size, il, NULL);
			}
		}
		if (vr->from_ptr) {
			reg_set_allocatable(vr->from_ptr->pregs[0]);
		}
	} else {
		r = r0;
		r2 = r0_2;
	}

	debug_log_regstuff(r, vr, DEBUG_LOG_FAULTIN);
	vreg_map_preg(vr, r);
	/*
	 * 07/12/08: icode_make_load() may perform a register invalidation.
	 * This means that, if we are handling a multi-register object, then
	 * the vreg_map_preg() above may leave an inconsistent state (the
	 * vreg being mapped to only one register). So map both here already
	 *
	 * Also, set both registers to unused so they are not saved to the
	 * stack
	 */
	if (r2 != NULL) {
		vreg_map_preg2(vr, r2);
/*		r2->used = 0;*/
	}
/*	r->used = 0;*/

	/*
	 * 12/07/07: Protect r2 because icode_make_load() may have to
	 * allocate a register
	 */
	if (r2 != NULL) reg_set_unallocatable(r2);
	icode_make_load(r, vr2, 0, il);
	if (r2 != NULL) reg_set_allocatable(r2);

	if (r2 != NULL) {
		vreg_map_preg2(vr, r2);
		icode_make_load(r2, vr2, 1, il);
	}

	if (vr->type->tbit != NULL) {
	/*	extract_bitfield(vr);*/
	}
#if 0
	if (vr->from_ptr) {
		free_preg(vr->from_ptr->preg, il, 0, 0);
	} else if (vr->parent && vr2->from_ptr) {
		free_preg(vr2->from_ptr->preg, il, 0, 0);
	}
#endif

out:
	return r;
}

/*
 * 12/27/08: New function to fault to ``dedicated'' registers
 * which are not generally allocable, and for which the ``used''
 * flag may not be set (e.g. temp GPRS)
 */
struct reg *
vreg_faultin_dedicated(
	struct reg *r0,
	struct reg *r0_2,
	struct vreg *vr,
	struct icode_list *il,
	int whatfor) {

	struct reg	*ret;

	if (r0 == NULL || !r0->dedicated /*r0->allocatable || r0->used*/) {
		if (r0_2 != NULL && r0_2->dedicated) {
			/* OK other register is dedicated */
			;
		} else {
			(void) fprintf(stderr,
				"BUG: vreg_faultin_dedicated() used for\n"
				"non-dedicated register\n");
			abort();
		}
	}
	doing_dedicated_mapping = 1;
	ret = vreg_faultin(r0, r0_2, vr, il, whatfor);
	if (r0 != NULL) {
		r0->used = 0;
		r0->allocatable = 0;
	}
	doing_dedicated_mapping = 0;
	return ret;
}


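/*
 * Like vreg_faultin(), but first marks the register(s) backing
 * ``protectme'' (or the pointer it is accessed through) unallocatable,
 * so that loading vr cannot trash them, and restores allocatability
 * afterwards
 */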
struct reg *
vreg_faultin_protected(
	struct vreg *protectme,
	struct reg *r0,
	struct reg *r0_2,
	struct vreg *vr,
	struct icode_list *il,
	int whatfor) {

	struct reg	*preg = NULL;
	struct reg	*preg2 = NULL;
	struct reg	*ret;

	if (protectme->pregs[0] != NULL
		&& protectme->pregs[0]->vreg == protectme) {
		preg = protectme->pregs[0];
	} else if (protectme->from_ptr != NULL
		&& protectme->from_ptr->pregs[0] != NULL
		&& protectme->from_ptr->pregs[0]->vreg == protectme->from_ptr) {
		preg = protectme->from_ptr->pregs[0];
	}

	if (preg != NULL) {
		reg_set_unallocatable(preg);
		if (protectme->is_multi_reg_obj) {
			if (protectme->pregs[1]
				&& protectme->pregs[1]->vreg == protectme) {
				/*
				 * 04/15/08: This additional check above was
				 * needed for long long accessed through
				 * pointers, where the pointers need
				 * protection but the multi-reg values them-
				 * selves don't
				 */
				reg_set_unallocatable(preg2 = protectme->pregs[1]);
			}
		}
	}

	ret = vreg_faultin(r0, r0_2, vr, il, whatfor);

	if (preg != NULL) {
		reg_set_allocatable(preg);
		if (preg2 != NULL) {
			reg_set_allocatable(preg2);
		}
	}
	return ret;
}


/*
 * If the vreg comes from a pointer, load that pointer into a register. The
 * interface is kept simple because it is not likely to be used in many
 * places. Extend as necessary.
 */
struct reg *
vreg_faultin_ptr(struct vreg *vr, struct icode_list *il) {
	struct vreg	*faultme = NULL;

	if (vr->from_ptr) {
		faultme = vr->from_ptr;
	} else if (vr->parent != NULL) {
		struct vreg	*vr2 = get_parent_struct(vr);

		if (vr2->from_ptr) {
			faultme = vr2->from_ptr;
		}
	} else if (vr->type->tlist != NULL
		&& (vr->type->tlist->type == TN_ARRAY_OF
			|| vr->type->tlist->type == TN_VARARRAY_OF
			|| vr->type->tlist->type == TN_FUNCTION)) {
		/*
		 * 07/01/07: Extended for arrays! Loading an array or
		 * a function ``into a register'' loads its address
		 *
		 * 10/31/07: This appears to be nonsense. It was
		 * introduced for variable-length arrays, so at the
		 * very least the tlist->type check was missing. I
		 * don't know whether this ever makes any sense for
		 * functions
		 *
		 * 01/05/10: This
		 */
		faultme = vr;
	}
	if (faultme != NULL) {
		vreg_faultin(NULL, NULL, faultme, il, 0);
		return faultme->pregs[0];
	}
	return NULL;
}


/*
 * Variable and functions to forbid vreg_map_preg() - See
 * comment on backend_vreg_map_preg()
 */
static int	vreg_map_preg_forbidden = 1;

void
forbid_vreg_map_preg(void) {
	vreg_map_preg_forbidden = 1;
}

void
allow_vreg_map_preg(void) {
	vreg_map_preg_forbidden = 0;
}

static struct reg *trapreg;

int
trap_vreg_map_preg(struct reg *r) {
	trapreg = r;
	return 0;
}

/*
 * Use this instead of manual assignments to better track
 * the creation of mappings for debugging
 */
void
vreg_map_preg(struct vreg *vr, struct reg *preg) {
	if (preg == trapreg) abort();
#if XLATE_IMMEDIATELY
	if (vreg_map_preg_forbidden) {
		(void) fprintf(stderr,
			"BUG: vreg_map_preg() called from backend!\n\n"
			"Please use backend_vreg_map_preg() with a\n"
			"corresponding backend_vreg_unmap_preg()\n"
			"instead. Calling abort() ...\n");
		abort();
	}
#endif
	if (!doing_dedicated_mapping) {
		/* 12/27/08: Added this */
		if (!preg->allocatable && !preg->used && preg->dedicated) {
			(void) fprintf(stderr,
				"BUG: vreg_map_preg() used for dedicated \n"
				"(reserved) register. Use vreg_map_preg_dedicated \n"
				"if that was really intended (does not set used\n"
				"flag of register)\n");
			abort();
		}
	}
	debug_log_regstuff(preg, vr, DEBUG_LOG_MAP);

	vr->pregs[0] = preg;
	if (!doing_dedicated_mapping) {
		/*
		 * 27/12/08: Don't set used flag for dedicated
		 * register
		 */
		preg->used = 1;
	}
	if (preg != NULL) {
		vr->pregs[0]->vreg = vr;
	}
}

/*
 * 12/27/08: New function to map a temp GPR to a vreg. This is
 * forbidden for vreg_map_preg() now because it sets the ``used''
 * flag of the register (which in turn trashes the ``dedicated''
 * property and causes the next invalidate_gprs() to make the
 * register allocatable, which will cause all sorts of conflicts)
 */
void
vreg_map_preg_dedicated(struct vreg *vr, struct reg *preg) {
	if (preg->allocatable || preg->used) {
		(void) fprintf(stderr,
			"BUG: vreg_map_preg_dedicated() applied to\n"
			"register which is not dedicated. What are\n"
			"you really trying to do?\n");
		abort();
	}
	doing_dedicated_mapping = 1;
	vreg_map_preg(vr, preg);
	doing_dedicated_mapping = 0;
}

/*
 * 03/24/08: New functions to map/unmap registers in emission-related
 * parts of backend.
 *
 * Background: The old design (before XLATE_IMMEDIATELY was introduced)
 * worked such that all code was first translated, and then emitted.
 *
 * With the new design, however, one function at a time is translated
 * and emitted. This has raised the problem that EVEN the emission-
 * related backend functions (i.e. those functions that are indirectly
 * called by the old gen_program()) used vreg_map_preg()!
 *
 * This caused the following problem:
 *
 * The backend only calls vreg_map_preg() but not free_preg(), so the
 * registers end up being ``untrusted'' because a SINGLE register may
 * end up being mapped to a vreg that has the multi-GPR flag set, so
 * the invalidation routine will be misguided by this
 */
void
backend_vreg_map_preg(struct vreg *vr, struct reg *preg) {
	vr->pregs[0] = preg;
	if (preg != NULL) {
		vr->pregs[0]->vreg = vr;
	}
}

void
backend_vreg_unmap_preg(struct reg *r) {
	if (r->vreg) {
		r->vreg->pregs[0] = NULL;
		r->vreg = NULL;
	}
}

void
backend_vreg_map_preg2(struct vreg *vr, struct reg *preg) {
	vr->pregs[1] = preg;
	if (preg != NULL) {
		vr->pregs[1]->vreg = vr;
	}
}

void
backend_vreg_unmap_preg2(struct reg *r) {
	if (r->vreg) {
		r->vreg->pregs[1] = NULL;
		r->vreg = NULL;
	}
}


void
reg_set_unused(struct reg *r) {
	r->used = 0;
}

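/*
 * Map the second physical register of a multi-register object to vr;
 * counterpart of vreg_map_preg() for pregs[1]
 */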
void
vreg_map_preg2(struct vreg *vr, struct reg *preg) {
	vr->pregs[1] = preg;
	preg->used = 1;
	if (preg != NULL) {
		vr->pregs[1]->vreg = vr;
	}
}


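/*
 * Clear the vreg association of r and of the sub-registers it is
 * composed of (two levels deep, e.g. eax -> ax -> al/ah on x86)
 */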
static void
invalidate_subregs(struct reg *r) {
	r->vreg = NULL;
	if (r->size == 8 && r->composed_of) {
		/* XXX kludge */
		invalidate_subregs(r->composed_of[0]);
		return;
	}

	if (r->composed_of) {
		if (r->composed_of[0]) {
			r->composed_of[0]->vreg = NULL;
		}
		if (r->composed_of[1]) {
			r->composed_of[1]->vreg = NULL;
		}
		if (r->composed_of[0]->composed_of) {
			r->composed_of[0]->composed_of[0]->vreg = NULL;
			if (r->composed_of[0]->composed_of[1]) {
				r->composed_of[0]->composed_of[1]->vreg = NULL;
			}
		}
	}
}

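/*
 * Helper for free_preg(): if r is in use, optionally store the
 * associated vreg to the stack (unless it is backed by a variable,
 * constant, pointer or parent struct), then mark the register unused
 * and, if requested, invalidate the mapping. Returns 1 if the register
 * was actually in use, 0 otherwise
 */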
static int
do_free_preg(struct reg *r, struct icode_list *il,
	int invalidate, int savereg) {
	struct vreg	*vr;

	if (r == NULL) return 0;
	vr = r->vreg;


	reg_set_allocatable(r);
	if (!r->used) {
		if (invalidate) r->vreg = NULL;
		return 0;
	}
	debug_log_regstuff(r, vr, DEBUG_LOG_FREEGPR);

	if (savereg
		&& vr && !vr->var_backed && !vr->from_const && !vr->from_ptr
		&& !vr->parent) {
		/*
		 * Must be saved. This doesn't interact well with
		 * multi-register objects
		 * XXX for some reason, is_multi_reg_obj sometimes
		 * isn't set even for a long long x86 object :(
		 * this is triggered in change_preg_size(), where
		 * a long long-associated register is freed
		 * hence the workaround below
		 */
		if (backend->arch == ARCH_X86
			&& IS_LLONG(vr->type->code)
			&& vr->type->tlist == NULL) {
			struct vreg	*copyvr;

			if (vr->pregs[1] == NULL) {
				/*
				 * This apparently happens when freeing
				 * a preg holding part of multi-gpr object.
				 */
				vr->pregs[1] = r;
			}

#if 0
			copyvr = n_xmemdup(vr, sizeof *vr);
#endif
			copyvr = copy_vreg(vr);
			copyvr->is_multi_reg_obj = 2;
			copyvr->size = 8;
			icode_make_store(curfunc, copyvr,
				copyvr, il);
			if (copyvr->stack_addr != NULL) {
				vr->stack_addr = copyvr->stack_addr;
			}
#if 0
			icode_make_store(curfunc, copyvr,
				copyvr, il);
#endif
		} else {
			icode_make_store(curfunc, vr, vr, il);
		}
	}

	if (backend->free_preg != NULL) {
		backend->free_preg(r, il);
	}

	r->used = 0;
	if (invalidate) r->vreg = NULL;

	return 1;
}


/*
 * 07/25/09: Allow freeing dedicated registers (freeing generally resets
 * special properties like allocatability, so we reset it here)
 */
void
free_preg_dedicated(struct reg *r, struct icode_list *il, int invalidate, int savereg) {
	assert(r->dedicated);

	r->dedicated = 0;
	free_preg(r, il, invalidate, savereg);
	reg_set_dedicated(r);
}

/*
 * Note that if savereg is specified and the register is subsequently used,
 * invalidate also needs to be set in order to avoid bad stale references
 */
void
free_preg(struct reg *r, struct icode_list *il, int invalidate, int savereg) {
	static int	level;

	if (r->dedicated) {
		(void) fprintf(stderr, "BUG: freeing dedicated register %s\n", r->name);
		abort();
	}

	/*
	 * XXX The stuff below ensures that if a GPR holding a ``long long''
	 * is ever freed, the GPR for the other part will also be freed. This
	 * makes it easier to deal with long longs because that way a long
	 * long is always either resident or non-resident, never half-resident.
	 * Of course this approach is less than optimal speedwise, so it
	 * should be changed some time
	 */
	if (r->vreg && r->vreg->is_multi_reg_obj && level == 0) {
		struct reg	*r2;

		invalidate = 1; /* cached multi-reg objects cause problems */
		if (r->vreg->pregs[0] == r) {
			r2 = r;
			r = r->vreg->pregs[1];
		} else {
			r2 = r->vreg->pregs[0];
		}
		++level;
		/*
		 * 04/18/08: Don't call free_preg() recursively! The problem
		 * here was that do_free_preg() uses icode_make_store() to
		 * save the register, and this will write both registers, so
		 * we save the entire multi-reg object twice.
		 * However, only the invalidation is needed
		 */
#if ! AVOID_DUPED_MULTI_REG_SAVES
		free_preg(r2, il, invalidate, savereg);
#else
		free_preg(r2, il, invalidate, 0); /* Don't save! */
#endif
		--level;
	}

	if (do_free_preg(r, il, invalidate, savereg)) {
		if (invalidate) {
			invalidate_subregs(r);
		}
		return;
	}
	if (r->size == 8 && r->composed_of) {
		/* XXX ... */
		free_preg(r->composed_of[0], il, invalidate, savereg);
		return;
	}

	if (r->composed_of) {
		int	rc = 0;
		if (r->composed_of[0]) {
			rc |= do_free_preg(r->composed_of[0], il,
				invalidate, savereg);
		}
		if (r->composed_of[1]) {
			rc |= do_free_preg(r->composed_of[1], il,
				invalidate, savereg);
		}
		if (rc) {
			if (invalidate) {
				invalidate_subregs(r);
			}
			return;
		}
		if (r->composed_of[0]->composed_of) {
			(void) do_free_preg(r->composed_of[0]->
				composed_of[0], il, invalidate, savereg);
			(void) do_free_preg(r->composed_of[0]->
				composed_of[1], il, invalidate, savereg);
		}
	}
}

/*
 * Free all pregs assigned to a vreg. This should be used for all vregs so
 * that it is transparent whether we are dealing with a vreg that occupies
 * more than one preg (case in point: ``long long'' on x86 and objects that
 * were loaded through a pointer)
 */
void
free_pregs_vreg(
	struct vreg *vr,
	struct icode_list *il,
	int invalidate,
	int savereg) {

	if (vr->from_ptr != NULL) {
		if (vr->from_ptr->pregs[0] != NULL
			&& vr->from_ptr->pregs[0]->vreg == vr->from_ptr) {
			free_preg(vr->from_ptr->pregs[0], il,
				invalidate, savereg);
		}
	}
	if (vr->pregs[0] != NULL && vr->pregs[0]->vreg == vr) {
		free_preg(vr->pregs[0], il, invalidate, savereg);
	}
	if (vr->pregs[1] != NULL && vr->pregs[1]->vreg == vr) {
		free_preg(vr->pregs[1], il, invalidate, savereg);
	}
}

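/*
 * Returns 1 if ``member'' belongs to composite register r: r itself
 * or one of the sub-registers it is composed of (up to two levels
 * deep). A register without component registers always yields 0
 */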
int
is_member_of_reg(struct reg *r, struct reg *member) {
	if (r->composed_of == NULL) return 0;
	if (member == r) {
		return 1;
	}
	if (r->size == 8) { /* XXX */
		return is_member_of_reg(r->composed_of[0], member);
	}
	if (r->composed_of[0] == member
		|| (r->composed_of[0]->composed_of
		&& (r->composed_of[0]->composed_of[0] == member
		|| r->composed_of[0]->composed_of[1] == member))) {
		return 1;
	}
	return 0;
}

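/*
 * Give vr a new type and recompute its size and multi-register
 * property accordingly (immediate VLA types get size 0)
 */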
void
vreg_set_new_type(struct vreg *vr, struct type *ty) {
	vr->type = ty;
	if (IS_VLA(ty->flags) && is_immediate_vla_type(ty)) {
		vr->size = 0;
		vr->is_multi_reg_obj = 0;
	} else {
		vr->size = backend->get_sizeof_type(ty, NULL);
		vr->is_multi_reg_obj = backend->is_multi_reg_obj(ty);
	}
}

/*
 * Save a scalar item to the stack, loading it into a register
 * to do so if necessary.
 * The vreg is copied, and *vr is set to point to the copy
 * (because in most cases we want to save a vreg that has already
 * been used in other icode instructions.)
 */
void
vreg_save_to_stack(struct vreg **vr0, struct type *ty, struct icode_list *il) {
	struct vreg	*vr = *vr0;

	vr = vreg_disconnect(vr);
	vr->type = ty;
	vr->size = backend->get_sizeof_type(ty, NULL);
	vreg_faultin(NULL, NULL, vr, il, 0);

	/*
	 * 07/14/09: Anonymify was missing! Otherwise, if we have an FP
	 * constant which we want to reinterpret as an integer, then if
	 * from_const still points to the constant (which it did without
	 * anonymify), then the load may bypass our stack indirection and
	 * just load the constant directly, which is wrong
	 */
	vreg_anonymify(&vr, NULL, NULL, il);
	free_preg(vr->pregs[0], il, 1, 1);
	*vr0 = vr;
}


void
vreg_reinterpret_as(struct vreg **vr0, struct type *from, struct type *to,
	struct icode_list *il) {

	struct vreg	*vr;

	vreg_save_to_stack(vr0, from, il);
	vr = vreg_disconnect(*vr0);
	vr->type = to;
	vr->size = backend->get_sizeof_type(to, NULL);
	vreg_faultin(NULL, NULL, vr, il, 0);
	*vr0 = vr;
}


struct vreg *
copy_vreg(struct vreg *vr) {
	struct vreg	*ret = alloc_vreg();
	*ret = *vr;
	return ret;
}

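/*
 * Return a copy of vr that is no longer backed by a variable, constant
 * or stack address, but is instead accessed through the address held in
 * ptrreg (wrapped in a new pointer vreg). is_backend selects whether the
 * backend mapping routine or the regular one is used to map ptrreg
 */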
struct vreg *
vreg_back_by_ptr(struct vreg *vr, struct reg *ptrreg, int is_backend) {
	struct vreg	*ptrvr;

	ptrvr = vreg_alloc(NULL, NULL, NULL, addrofify_type(vr->type));
	if (is_backend) {
		backend_vreg_map_preg(ptrvr, ptrreg);
	} else {
		vreg_map_preg(ptrvr, ptrreg);
	}
#if 0
	vr = /*vreg_disconnect(vr)*/ n_xmemdup(vr, sizeof *vr);
#endif
	vr = copy_vreg(vr);

	vr->parent = NULL;
	vr->from_ptr = ptrvr;
	vr->stack_addr = NULL;  /* 12/29/07: This was missing!!!! */
	vr->from_const = NULL; /* 02/03/08: was missing.. */
	if (vr->parent != NULL) {
		vr->parent = ptrvr;
	}
	if (vr->var_backed != NULL) {
		vr->var_backed = NULL;
	}
	return vr;
}


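/*
 * Returns 1 if accessing the item requires a PIC relocation when
 * generating position-independent code: function designators,
 * static/extern variables, string literals and floating point constants
 */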
int
vreg_needs_pic_reloc(struct vreg *vr) {
	/*
	 * 02/08/09: Wow this didn't take static parent structs
	 * into account
	 */
	if (vr->parent != NULL) {
		vr = get_parent_struct(vr);
	}
	if (vr->var_backed != NULL) {
		struct decl	*d = vr->var_backed;

		/*
		 * 02/10/09: XXXXXXXX: This fragment:
		 *   printf("%Lf\n", getlongdouble());
		 * ... causes the printf() argument to get the is_func flag
		 * set. Presumably because it is duplicated from the function
		 * declaration, and the is_func flag is not reset
		 * Thus the extra tlist != NULL check. However, we should
		 * investigate the cause and implications of this event.
		 */
		if (d->dtype->is_func
			&& d->dtype->tlist != NULL
			&& d->dtype->tlist->type == TN_FUNCTION) {
			return 1;
		} else if (vr->var_backed->dtype->storage == TOK_KEY_STATIC
			|| vr->var_backed->dtype->storage == TOK_KEY_EXTERN) {
			return 1;
		}
	} else if (vr->from_const) {
		if (vr->from_const->type == TOK_STRING_LITERAL) {
			return 1;
		} else if (IS_FLOATING(vr->from_const->type)) {
			return 1;
		}
	}
	return 0;
}
