1 /*
2 * Copyright (c) 2005 - 2010, Nils R. Weller
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
19 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
20 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
21 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
23 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
24 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25 * POSSIBILITY OF SUCH DAMAGE.
26 *
 * A grab bag of functions for managing the backend, and also for backend
 * stuff that is generic across two or more platforms
29 */
30 #include "backend.h"
31 #include <stdlib.h>
32 #include <limits.h>
33 #include <assert.h>
34 #include "error.h"
35 #include "icode.h"
36 #include "decl.h"
37 #include "functions.h"
38 #include "expr.h"
39 #include "attribute.h"
40 #include "debug.h"
41 #include "token.h"
42 #include "scope.h"
43 #include "misc.h"
44 #include "type.h"
45 #include "features.h"
46 #include "cc1_main.h"
47 #include "typemap.h"
48 #include "symlist.h"
49 #include "n_libc.h"
50
51 struct backend *backend;
52 struct emitter *emit;
53 struct reg *tmpgpr;
54 struct reg *tmpgpr2;
55 struct reg *tmpfpr;
56
57 /*
58 * 02/15/08: New: Register for PIC pointer. This is optional and currently
59 * only used by SPARC
60 * 02/02/09: ... and now for PPC as well
61 */
62 struct reg *pic_reg;
63 int host_endianness;
64 char *tunit_name;
65 size_t tunit_size;
66 struct init_with_name *init_list_head;
67 struct init_with_name *init_list_tail;
68 extern int archflag;
69 extern int abiflag;
70
71 extern int osx_call_renamed; /* XXX botch */
72
73 int backend_warn_inv;
74
75 struct stupidtrace_entry *stupidtrace_list_head;
76 struct stupidtrace_entry *stupidtrace_list_tail;
77
78 int
init_backend(FILE * fd,struct scope * s)79 init_backend(FILE *fd, struct scope *s) {
80 unsigned int foo = 123;
81
82 if (*(unsigned char *)&foo == 123) {
83 host_endianness = ENDIAN_LITTLE;
84 } else {
85 /* Naive - doesn't work with PDP endianness */
86 host_endianness = ENDIAN_BIG;
87 }
88 /*backend = &x86_backend;*/
89
90 #if 0
91 #ifdef __i386__
92 backend = &x86_backend;
93 #elif defined __amd64__
94 backend = &amd64_backend;
95 #elif defined __sgi
96 backend = &mips_backend;
97 #elif defined _AIX
98 backend = &power_backend;
99 #else
100 #error "Unsupported target architecture"
101 #endif
102 #endif
103 switch (archflag) {
104 case ARCH_X86:
105 backend = &x86_backend;
106 break;
107 case ARCH_AMD64:
108 backend = &amd64_backend;
109 break;
110 case ARCH_POWER:
111 backend = &power_backend;
112 break;
113 case ARCH_MIPS:
114 backend = &mips_backend;
115 break;
116 case ARCH_SPARC:
117 backend = &sparc_backend;
118 break;
119 default:
120 puts("UNKNOWN ARCHITECTURE!!!!!!!!!!");
121 abort();
122 }
123 backend->abi = abiflag;
124
125 /*backend = &power_backend;*/
126
127 return backend->init(fd, s);
128 }
129
130
131 struct vreg *
get_parent_struct(struct vreg * vr)132 get_parent_struct(struct vreg *vr) {
133 struct vreg *vr2 = vr;
134
135 while (vr2->parent != NULL) {
136 if (vr2->from_ptr) break;
137 vr2 = vr2->parent;
138 }
139 return vr2;
140 }
141
142
/*
 * Check whether a load/store instruction accesses a stack slot whose
 * displacement exceeds the backend's maximum addressable displacement.
 * If so, compute the address into the instruction's support register
 * (ip->dat) and return a new pointer-backed vreg through which the
 * access must be performed. Returns NULL when direct access is fine
 * (or the operand is not stack-resident at all).
 */
struct vreg *
check_need_indirect_loadstore(struct icode_instr *ip) {
	struct stack_block	*sb = NULL;
	unsigned long		offset = 0;

	/*
	 * The instruction has a support register... Is it
	 * needed?
	 */
	if (ip->src_vreg->stack_addr) {
		/* The operand itself lives on the stack */
		sb = ip->src_vreg->stack_addr;
	} else if (ip->src_vreg->var_backed
		&& ip->src_vreg->var_backed->stack_addr) {
		/* The operand is backed by a stack-resident variable */
		sb = ip->src_vreg->var_backed->stack_addr;
	} else if (ip->src_parent_struct
		&& ip->src_parent_struct->var_backed
		&& ip->src_parent_struct->var_backed->stack_addr) {
		/* Member of a stack-resident struct: add the member offset */
		sb = ip->src_parent_struct->var_backed->stack_addr;
		offset = calc_offsets(ip->src_vreg);
	}

	if (sb != NULL) {
		if (sb->offset + offset > (unsigned long)backend->max_displacement) {
			struct vreg *ptr;
			struct vreg *parent = NULL;

			if (ip->src_vreg->parent != NULL) {
				parent = get_parent_struct(
					ip->src_vreg);
			}

			/*
			 * Maximum offset exceeded! We have to
			 * indirect
			 */
			emit->addrof(ip->dat, ip->src_vreg,
				parent);
			ptr = vreg_back_by_ptr(ip->src_vreg,
				ip->dat, 1);
			return ptr;
		}
	}
	return NULL;
}
187
188
189 size_t
calc_align_bytes(size_t offset,struct type * curtype,struct type * nexttype,int struct_member)190 calc_align_bytes(size_t offset, struct type *curtype, struct type *nexttype, int struct_member) {
191 size_t cursize;
192 size_t alignto;
193 size_t oldoffset;
194
195 /* 10/08/08: Caller handles bitfield size addition! */
196 if (curtype->tbit == NULL) {
197 cursize = backend->get_sizeof_type(curtype, NULL);
198 offset += cursize;
199 }
200
201 if (struct_member && botch_x86_alignment(nexttype)) {
202 alignto = 4;
203 } else {
204 alignto = backend->get_align_type(nexttype);
205 }
206
207 oldoffset = offset;
208
209 while (offset % alignto) {
210 ++offset;
211 }
212 return offset - oldoffset;
213 }
214
215 static void
map_pregs(struct vreg * vr,struct reg ** pregs)216 map_pregs(struct vreg *vr, struct reg **pregs) {
217 if (vr != NULL) {
218 if (pregs && pregs[0]) {
219 backend_vreg_map_preg(vr, pregs[0]);
220 if (vr->is_multi_reg_obj && pregs[1]) {
221 backend_vreg_map_preg2(vr, pregs[1]);
222 } else {
223 vr->pregs[1] = NULL;
224 }
225 } else {
226 vr->pregs[0] = NULL;
227 }
228 }
229 }
230
231 static void
unmap_pregs(struct vreg * vr,struct reg ** pregs)232 unmap_pregs(struct vreg *vr, struct reg **pregs) {
233 if (vr != NULL) {
234 if (pregs && pregs[0]) {
235 backend_vreg_unmap_preg(pregs[0]);
236 if (vr->is_multi_reg_obj && pregs[1]) {
237 backend_vreg_unmap_preg2(pregs[1]);
238 }
239 }
240 }
241 }
242
/*
 * Translate a single icode instruction to target assembly by
 * dispatching to the appropriate emitter function.
 *
 * First maps all vregs involved (source, destination, parent-struct
 * pointers) to their physical registers, then dispatches on ip->type;
 * architecture-specific instructions are handled in the default case.
 * With XLATE_IMMEDIATELY, the mappings are undone again afterwards.
 *
 * Returns 0 on success, -1 for an unknown instruction.
 */
int
do_xlate(
	struct function *f,
	struct icode_instr **ipp) {

	struct icode_instr	*ip = *ipp;
	struct stack_block	*sb;
	struct allocstack	*as;
	int			found;

	/* Establish vreg <-> physical register mappings for the emitters */
	map_pregs(ip->src_vreg, ip->src_pregs);
	map_pregs(ip->dest_vreg, ip->dest_pregs);
	if (ip->src_parent_struct) {
		if (ip->src_ptr_preg) {
			/*
			 * 08/05/07: UNBELIEVABLE! This used to map the
			 * register to the parent struct vreg, but not to
			 * the parent struct from_ptr vreg, where it would
			 * have belonged! That one is used by the emitters,
			 * so stuff broke
			 */
			backend_vreg_map_preg(ip->src_parent_struct->from_ptr,
				ip->src_ptr_preg);
		}
	} else if (ip->src_ptr_preg) {
		backend_vreg_map_preg(ip->src_vreg->from_ptr, ip->src_ptr_preg);
	}
	if (ip->dest_parent_struct) {
		if (ip->dest_ptr_preg) {
			/*
			 * 08/05/07: See comment above about source parent
			 * pointer
			 */
			backend_vreg_map_preg(ip->dest_parent_struct->from_ptr,
				ip->dest_ptr_preg);
		}
	} else if (ip->dest_ptr_preg) {
		backend_vreg_map_preg(ip->dest_vreg->from_ptr, ip->dest_ptr_preg);
	}

	switch (ip->type) {
	case INSTR_CALL:
		if (ip->hints & HINT_INSTR_RENAMED) {
			/* OSX symbol renaming hack - see osx_call_renamed */
			osx_call_renamed = 1;
		}
		emit->call(ip->dat);
		osx_call_renamed = 0;
		break;
	case INSTR_CALLINDIR:
		emit->callindir(ip->dat);
		break;
	case INSTR_PUSH:
		emit->push(f, ip);
		break;
	case INSTR_ALLOCSTACK:
		/*
		 * XXX freestack updates total_allocated but allocstack
		 * doesn't. Is this good?
		 */
		as = ip->dat;
		emit->allocstack(f, as->nbytes);
		f->total_allocated += as->nbytes;
		if (as->patchme != NULL) {
			/* The space is actually used! */
			sb = make_stack_block(f->total_allocated, as->nbytes);
			as->patchme->var_backed->stack_addr = sb;
		}
		break;
	case INSTR_FREESTACK:
		emit->freestack(f, ip->dat);
		break;
	case INSTR_ADJ_ALLOCATED:
		emit->adj_allocated(f, ip->dat);
		break;
	case INSTR_INDIR:
		/* No code emitted for indirection markers */
		break;
	case INSTR_MOV:
		emit->mov(ip->dat);
		break;
	case INSTR_SETREG:
		emit->setreg(ip->src_pregs[0], (int *)ip->dat);
		break;
	case INSTR_ADDROF: {
		struct vreg *vr = NULL;

		/* For struct members, the emitter needs the parent struct */
		if (ip->src_vreg != NULL
			&& ip->src_vreg->parent != NULL) {
			vr = get_parent_struct(ip->src_vreg);
		}

		emit->addrof(ip->dat, ip->src_vreg, vr);
	}
		break;
	case INSTR_INITIALIZE_PIC:
		emit->initialize_pic(ip->dat);
		break;
	case INSTR_INC:
		emit->inc(ip);
		break;
	case INSTR_DEC:
		emit->dec(ip);
		break;
	case INSTR_LABEL:
		/* generated labels are always local */
		emit->label(ip->dat, 0);
		break;
	case INSTR_NEG:
		emit->neg(ip->src_pregs, ip);
		break;
	case INSTR_SUB:
		emit->sub(ip->dest_pregs, ip);
		break;
	case INSTR_ADD:
		emit->add(ip->dest_pregs, ip);
		break;
	case INSTR_MUL:
		emit->mul(ip->dest_pregs, ip);
		break;
	case INSTR_DIV:
		emit->div(ip->dest_pregs, ip, 0);
		break;
	case INSTR_MOD:
		emit->mod(ip->dest_pregs, ip);
		break;
	case INSTR_SHL:
		emit->shl(ip->dest_pregs, ip);
		break;
	case INSTR_SHR:
		emit->shr(ip->dest_pregs, ip);
		break;
	case INSTR_OR:
		emit->or(ip->dest_pregs, ip);
		break;

	case INSTR_PREG_OR:
		emit->preg_or(ip->dest_pregs, ip);
		break;

	case INSTR_AND:
		emit->and(ip->dest_pregs, ip);
		break;
	case INSTR_XOR:
		emit->xor(ip->dest_pregs, ip);
		break;
	case INSTR_NOT:
		emit->not(ip->src_pregs, ip);
		break;
	case INSTR_RET:
		backend->do_ret(f, ip);
		break;
	case INSTR_SEQPOINT:
		/* Sequence points produce no code */
		break;
	case INSTR_STORE:
		/* XXX confusingly messed up order of args */
#if FEAT_DEBUG_DUMP_BOGUS_STORES
		/*
		 * Debug aid: scan forward to see whether the stored value
		 * is ever loaded again; comment suspicious stores
		 */
		if (ip->dest_vreg->stack_addr != NULL) {
			struct icode_instr *tmp;
			struct stack_block *sb = ip->dest_vreg->stack_addr;

			for (tmp = ip->next; tmp != NULL; tmp = tmp->next) {
				if (tmp->src_vreg
					&& tmp->src_vreg->stack_addr == sb) {
					if (tmp->type == INSTR_LOAD) {
						break;
					} else {
						emit->comment("used by ? as src");
					}
				} else if (tmp->dest_vreg
					&& tmp->dest_vreg->stack_addr == sb) {
					emit->comment("used by ? as dest");
				}
			}
			if (tmp == NULL) {
				emit->comment("seems unneeded");
			}
		}
#endif
		if (ip->dat != NULL) {
			struct vreg *ptrvr;

			/* Support register present - may need to indirect */
			ptrvr = check_need_indirect_loadstore(ip);
			if (ptrvr != NULL) {
				emit->store(ptrvr, ip->dest_vreg);
				if (ip->src_vreg->is_multi_reg_obj) {
					/*
					 * Second store covers the second
					 * register of a multi-register
					 * object; presumably the emitter
					 * tracks which half is next -
					 * TODO confirm
					 */
					emit->store(ptrvr, ip->dest_vreg);
				}
				break;
			}
		}
		emit->store(ip->src_vreg, ip->dest_vreg);
		if (ip->src_vreg->is_multi_reg_obj) {
			/* Second register of a multi-register object */
			emit->store(ip->src_vreg, ip->dest_vreg);
		}
		break;
	case INSTR_WRITEBACK:
		emit->store(ip->src_vreg, ip->src_vreg);
		break;
	case INSTR_LOAD:
		if (ip->dat != NULL) {
			struct vreg *ptrvr;

			/* Support register present - may need to indirect */
			ptrvr = check_need_indirect_loadstore(ip);
			if (ptrvr != NULL) {
				emit->load(ip->src_pregs[0], ptrvr);
				break;
			}
		}
		emit->load(ip->src_pregs[0], ip->src_vreg);
		break;
	case INSTR_LOAD_ADDRLABEL:
		emit->load_addrlabel(ip->dest_pregs[0], ip->dat);
		break;
	case INSTR_COMP_GOTO:
		emit->comp_goto(ip->dat);
		break;
	case INSTR_DEBUG:
		emit->comment(ip->dat);
		break;
	case INSTR_DBGINFO_LINE:
		emit->dwarf2_line(ip->dat);
		break;
	case INSTR_UNIMPL:
		emit->genunimpl();
		break;
	case INSTR_CMP:
		emit->cmp(ip->dest_pregs, ip);
		break;
	case INSTR_EXTEND_SIGN:
		emit->extend_sign(ip);
		break;
	case INSTR_CONV_FP:
		emit->conv_fp(ip);
		break;
	case INSTR_CONV_FROM_LDOUBLE:
		emit->conv_from_ldouble(ip);
		break;
	case INSTR_CONV_TO_LDOUBLE:
		emit->conv_to_ldouble(ip);
		break;
	case INSTR_COPYINIT:
		emit->copyinit(ip->dat);
		break;
	case INSTR_PUTSTRUCTREGS:
		emit->putstructregs(ip->dat);
		break;
	case INSTR_COPYSTRUCT:
		emit->copystruct(ip->dat);
		break;
	case INSTR_INTRINSIC_MEMCPY:
		emit->intrinsic_memcpy(ip->dat);
		break;
	case INSTR_ALLOCA: {
		struct allocadata *ad = ip->dat;
		static struct vreg vr;

		/*
		 * XXXXXXX 08/27/07 This sucks:
		 *  - alloca_ should be renamed alloca
		 *  - The store below saves some code
		 *    duplication (which is why it's here),
		 *    but makes it difficult to find!!! This
		 *    should go into icode_make_alloca()
		 *
		 * Was wrong because:
		 *
		 *  - Making the register anonymous and
		 *    type-less breaks on systems where the
		 *    ABI uses pointers of different size
		 *    than GPRs; e.g. MIPS/N32. Now we use
		 *    make_void_ptr_type() instead
		 */
		emit->alloca_(ad);

		/* Now save the result pointer */
		vr.stack_addr = ad->addr;
		vr.type = make_void_ptr_type();
		vr.size = backend->get_sizeof_type(vr.type, NULL);
		backend_vreg_map_preg(&vr, ad->result_reg);
		emit->store(&vr, &vr);
		backend_vreg_unmap_preg(ad->result_reg);
	}
		break;
	case INSTR_DEALLOCA:
		emit->dealloca(ip->dat, ip->src_pregs[0]);
		break;
	case INSTR_ALLOC_VLA:
		emit->alloc_vla(ip->dat);
		break;
	case INSTR_DEALLOC_VLA:
		emit->dealloc_vla(ip->dat, NULL);
		break;
	case INSTR_PUT_VLA_SIZE:
		emit->put_vla_size(ip->dat);
		break;
	case INSTR_RETR_VLA_SIZE:
		emit->retr_vla_size(ip->dat);
		break;
	case INSTR_LOAD_VLA:
		emit->load_vla(ip->dest_pregs[0],
			((struct type *)ip->dat)->vla_addr);
		break;
	case INSTR_BUILTIN_FRAME_ADDRESS:
		emit->frame_address(ip->dat);
		break;
	case INSTR_ASM:
		emit->inlineasm(ip->dat);
		break;
	case INSTR_BR_EQUAL:
	case INSTR_BR_NEQUAL:
	case INSTR_BR_GREATER:
	case INSTR_BR_SMALLER:
	case INSTR_BR_GREATEREQ:
	case INSTR_BR_SMALLEREQ:
	case INSTR_JUMP:
		emit->branch(ip);
		break;
	case INSTR_XCHG:
		emit->xchg(ip->src_pregs[0], ip->dest_pregs[0]);
		break;
	default:
		/*
		 * Must be machine specific. NOTE(review): if backend->arch
		 * matches none of the branches below, found stays 1 and the
		 * unknown instruction is silently dropped - presumably that
		 * cannot happen in practice, but see the XXX
		 */
		found = 1; /* XXX uh-huh should this not be 0? :( */
		if (backend->arch == ARCH_X86
			|| backend->arch == ARCH_AMD64) {
			switch (ip->type) {
			case INSTR_X86_FXCH:
				emit_x86->fxch(ip->dest_pregs[0],
					ip->src_pregs[0]);
				break;
			case INSTR_X86_FFREE:
				emit_x86->ffree(ip->src_pregs[0]);
				break;
			case INSTR_X86_FNSTCW:
				emit_x86->fnstcw(ip->src_vreg);
				break;
			case INSTR_X86_FLDCW:
				emit_x86->fldcw(ip->src_vreg);
				break;
			case INSTR_X86_CDQ:
				emit_x86->cdq();
				break;
			case INSTR_X86_FIST:
				emit_x86->fist((struct fistdata *)ip->dat);
				break;
			case INSTR_X86_FILD:
				emit_x86->fild((struct filddata *)ip->dat);
				break;
			case INSTR_AMD64_CVTTSS2SI:
				emit_amd64->cvttss2si(ip);
				break;
			case INSTR_AMD64_CVTTSS2SIQ:
				emit_amd64->cvttss2siq(ip);
				break;
			case INSTR_AMD64_CVTTSD2SI:
				emit_amd64->cvttsd2si(ip);
				break;
			case INSTR_AMD64_CVTTSD2SIQ:
				emit_amd64->cvttsd2siq(ip);
				break;
			case INSTR_AMD64_CVTSI2SD:
				emit_amd64->cvtsi2sd(ip);
				break;
			case INSTR_AMD64_CVTSI2SS:
				emit_amd64->cvtsi2ss(ip);
				break;
			case INSTR_AMD64_CVTSI2SDQ:
				emit_amd64->cvtsi2sdq(ip);
				break;
			case INSTR_AMD64_CVTSI2SSQ:
				emit_amd64->cvtsi2ssq(ip);
				break;
			case INSTR_AMD64_CVTSD2SS:
				emit_amd64->cvtsd2ss(ip);
				break;
			case INSTR_AMD64_CVTSS2SD:
				emit_amd64->cvtss2sd(ip);
				break;
			case INSTR_AMD64_LOAD_NEGMASK:
				emit_amd64->load_negmask(ip);
				break;
			case INSTR_AMD64_XORPS:
				emit_amd64->xorps(ip);
				break;
			case INSTR_AMD64_XORPD:
				emit_amd64->xorpd(ip);
				break;
			case INSTR_AMD64_ULONG_TO_FLOAT:
				/* Shared by both x86 and amd64 backends */
				if (backend->arch == ARCH_AMD64) {
					emit_amd64->ulong_to_float(ip);
				} else {
					emit_x86->ulong_to_float(ip);
				}
				break;
			default:
				found = 0;
			}
		} else if (backend->arch == ARCH_POWER) {
			switch (ip->type) {
			case INSTR_POWER_SRAWI:
				emit_power->srawi(ip);
				break;
			case INSTR_POWER_RLDICL:
				emit_power->rldicl(ip);
				break;
			case INSTR_POWER_FCFID:
				emit_power->fcfid(ip);
				break;
			case INSTR_POWER_FRSP:
				emit_power->frsp(ip);
				break;
			case INSTR_POWER_RLWINM:
				emit_power->rlwinm(ip);
				break;
			case INSTR_POWER_SLWI:
				emit_power->slwi(ip);
				break;
			case INSTR_POWER_EXTSB:
				emit_power->extsb(ip);
				break;
			case INSTR_POWER_EXTSH:
				emit_power->extsh(ip);
				break;
			case INSTR_POWER_EXTSW:
				emit_power->extsw(ip);
				break;
			case INSTR_POWER_XORIS:
				emit_power->xoris(ip);
				break;
			case INSTR_POWER_LIS:
				emit_power->lis(ip);
				break;
			case INSTR_POWER_LOADUP4:
				emit_power->loadup4(ip);
				break;
			case INSTR_POWER_FCTIWZ:
				emit_power->fctiwz(ip);
				break;
			default:
				found = 0;
			}
		} else if (backend->arch == ARCH_MIPS) {
			switch (ip->type) {
			case INSTR_MIPS_MFC1:
				emit_mips->mfc1(ip);
				break;
			case INSTR_MIPS_MTC1:
				emit_mips->mtc1(ip);
				break;
			case INSTR_MIPS_CVT:
				emit_mips->cvt(ip);
				break;
			case INSTR_MIPS_TRUNC:
				emit_mips->trunc(ip);
				break;
			case INSTR_MIPS_MAKE_32BIT_MASK:
				emit_mips->make_32bit_mask(ip);
				break;
			default:
				found = 0;
			}
		} else if (backend->arch == ARCH_SPARC) {
			switch (ip->type) {
			case INSTR_SPARC_LOAD_INT_FROM_LDOUBLE:
				emit_sparc->load_int_from_ldouble(ip);
				break;
			default:
				found = 0;
				break;
			}
		}
		if (found) {
			break;
		}
		printf("Unknown instruction - %d\n", ip->type);
		return -1;
	}

#if XLATE_IMMEDIATELY
	/* Undo the register mappings established at the top */
	unmap_pregs(ip->src_vreg, ip->src_pregs);
	unmap_pregs(ip->dest_vreg, ip->dest_pregs);
	if (ip->src_parent_struct) {
		if (ip->src_ptr_preg) {
			/*
			 * 08/05/07: UNBELIEVABLE! This used to map the
			 * register to the parent struct vreg, but not to
			 * the parent struct from_ptr vreg, where it would
			 * have belonged! That one is used by the emitters,
			 * so stuff broke
			 */
			backend_vreg_unmap_preg(
				ip->src_ptr_preg);
		}
	} else if (ip->src_ptr_preg) {
		backend_vreg_unmap_preg(ip->src_ptr_preg);
	}
	if (ip->dest_parent_struct) {
		if (ip->dest_ptr_preg) {
			/*
			 * 08/05/07: See comment above about source parent
			 * pointer
			 */
			backend_vreg_unmap_preg(
				ip->dest_ptr_preg);
		}
	} else if (ip->dest_ptr_preg) {
		backend_vreg_unmap_preg(ip->dest_ptr_preg);
	}
#endif /* XLATE_IMMEDIATELY */
	return 0;
}
753
754 int
xlate_icode(struct function * f,struct icode_list * ilp,struct icode_instr ** lastret)755 xlate_icode(
756 struct function *f,
757 struct icode_list *ilp,
758 struct icode_instr **lastret) {
759 struct icode_instr *ip;
760
761 if (ilp == NULL) {
762 /* Empty function */
763 return 0;
764 }
765 for (ip = ilp->head; ip != NULL; ip = ip->next) {
766 if (do_xlate(f, &ip) != 0) {
767 return -1;
768 }
769 if (ip->type == INSTR_RET) {
770 *lastret = ip;
771 }
772 }
773 return 0;
774 }
775
776 /*
777 * For designated initializers: Allocate static designated initializer
778 * and initialize it with init
779 */
780 struct vreg *
vreg_static_alloc(struct type * ty,struct initializer * init)781 vreg_static_alloc(struct type *ty, struct initializer *init) {
782 struct vreg *ret;
783 static struct decl dec;
784 struct decl *decp;
785 struct decl *dummy[2];
786
787 (void) backend->get_sizeof_type(ty, NULL); /* XXX 12/07/24: needed? */
788 dec.dtype = ty;
789
790 decp = n_xmemdup(&dec, sizeof dec);
791 decp->init = init;
792
793 ret = vreg_alloc(decp, NULL, NULL, NULL);
794
795 decp->dtype = n_xmemdup(decp->dtype, sizeof *decp->dtype);
796 decp->dtype->is_func = 0;
797 decp->dtype->storage = 0;
798 decp->dtype->name = NULL;
799 dummy[0] = decp;
800 dummy[1] = NULL;
801 store_decl_scope(curscope, dummy);
802
803 return ret;
804 }
805
/*
 * Allocate stack storage of type ``ty'' and return a vreg for it.
 *
 * If ``on_frame'' is zero the space is allocated at the current stack
 * pointer position (icode_make_allocstack); otherwise it is reserved
 * as part of the stack frame by linking a dummy declaration into the
 * current scope. If ``init'' is non-NULL (compound literals), the
 * initializer icode is generated here, in strict source order - see
 * the comment below on why the ordering matters.
 */
struct vreg *
vreg_stack_alloc(struct type *ty, struct icode_list *il, int on_frame,
		struct initializer *init) {

	struct vreg *ret;
	static struct decl dec;	/* zero template; dtype set per call */
	struct decl *decp;
	size_t size;

	size = backend->get_sizeof_type(ty, NULL);
	dec.dtype = ty;

	decp = n_xmemdup(&dec, sizeof dec);
	ret = vreg_alloc(decp, NULL, NULL, NULL);
	/*
	 * 08/18/07: The old icode_make_allocstack() solution was
	 * only suitable for allocating stack at the current stack
	 * pointer position. This is inadequate for allocating
	 * temporary storage like anonymous structs
	 */
	if (!on_frame) {
		/*
		 * Allocate at current stack pointer position
		 */
		icode_make_allocstack(ret, size, il);
	} else {
		/*
		 * Allocate when the stack frame is created. We can
		 * easily do this by linking the declaration on the
		 * scope list of declarations!
		 */
		struct decl *dummy[2];

		/* Copy type before clearing flags - see vreg_static_alloc */
		decp->dtype = n_xmemdup(decp->dtype, sizeof *decp->dtype);
		decp->dtype->is_func = 0;
		decp->dtype->storage = 0;
		decp->dtype->name = NULL;
		dummy[0] = decp;
		dummy[1] = NULL;

		/*
		 * 08/09/08: Mark this as a declaration which was not
		 * explicitly requested (since this function is usually
		 * called for anonymous struct return buffers and
		 * compound literals). This is needed at least for
		 * statements as expressions, where such a dummy
		 * declaration shouldn't be used as value;
		 *
		 *    struct s foo();
		 *    ({ foo(); expr; })
		 *            ^__ will append declaration for
		 *                foo's return value here
		 */
		decp->is_unrequested_decl = 1;
		store_decl_scope(curscope, dummy);
	}
	/*
	 * Careful now if we have an initializer. Since this function is
	 * only called with initializer for compound literals, we have
	 * to generate the initializer exactly here! The slightest
	 * reordering can cause garbage results because the literal is
	 * probably part of a larger expression such as:
	 *
	 *    printf("%d\n", (struct foo){ .bar = rand() }.bar);
	 *
	 * ... here reordering may cause the call to be performed before
	 * initialization. Likewise doing the initialization too early
	 * may cause non-constant initializers to misbehave
	 */
	if (init != NULL) {
		/*
		 * Only set init member to initializer for call to
		 * init_to_icode(). We have to remove it again afterwards
		 * because otherwise it will be initialized again when
		 * the variable is created
		 */
		decp->init = init;

		/* Initializer code may clobber registers - flush them */
		backend->invalidate_gprs(il, 1, 0);

		/*
		 * 09/14/07: Forgot to create a vreg for the declaration!
		 * This must be done here because the declaration is not
		 * linked on the declaration list, where it would be done
		 * automatically
		 */
#if 0
		decp->vreg = vreg_alloc(decp, NULL, NULL,NULL);
		vreg_set_new_type(decp->vreg, decp->dtype);
#endif
		init_to_icode(decp, il);
		decp->init = NULL;
	}
	return ret;
}
901
/*
 * Build a zero-filled (INIT_NULL) initializer covering the remaining,
 * uninitialized part of an aggregate.
 *
 * Three cases, selected by the arguments:
 *  - union (struct_ty->code == TY_UNION): caller passes the remaining
 *    byte count directly
 *  - struct (se != NULL): sum sizes + padding of the members from
 *    ``se'' onward, honoring bitfield storage units
 *  - array (se == NULL, struct_ty == NULL): ``remaining'' elements of
 *    type ``ty''
 *
 * Returns NULL if no padding is needed, otherwise a new initializer
 * whose data points at the byte count.
 */
struct initializer *
make_null_block(struct sym_entry *se,
	struct type *ty,
	struct type *struct_ty,
	int remaining) {

	struct initializer	*ret;
	size_t			size = 0;
	size_t			msize;
	size_t			align;
	struct sym_entry	*startse = se;
	int			struct_align = 1;
	int			start_offset = 0;

	if (struct_ty != NULL) {
		struct_align = backend->get_align_type(struct_ty);
		if (se != NULL) {
			/* Padding is computed relative to this member's offset */
			start_offset = se->dec->offset;
		}
	}

	/*
	 * 07/19/08: Handle unions correctly! The caller then has to
	 * supply the remaining byte count which is needed to pad
	 * the initializer size to the whole union size (e.g. if an
	 * int union member is initialized and the union contains a
	 * long long, 4 bytes of padding are needed).
	 */
	if (struct_ty != NULL && struct_ty->code == TY_UNION) {
		size = remaining;
	} else {
		if (se != NULL) {
			/* Struct */
			struct decl	*last_storage_unit = NULL;

			for (; se != NULL; se = se->next) {
				/*
				 * 08/07/07: Changed this stuff, hope it's correct
				 * now
				 *
				 * 10/12/08: Handle bitfields
				 */
				if (se->has_initializer) {
					/*
					 * 10/12/08: Can (for now) only be an already initialized
					 * bitfield - skip it
					 */
					;
				} else if (se->dec->dtype->tbit != NULL) {
					/* Bitfield case - don't align */
					if (se->dec->dtype->tbit->numbits == 0) {
						/* Storage unit terminator - ignore */
						;
					} else if (se->dec->dtype->tbit->bitfield_storage_unit
						!= last_storage_unit) {
						/* Create storage unit */
						last_storage_unit = se->dec->dtype->tbit->
							bitfield_storage_unit;
						size += backend->get_sizeof_type(
							last_storage_unit->dtype, NULL);
					} else {
						/* Already handled */
						;
					}
				} else {
					align = get_struct_align_type(se->dec->dtype);

					/*
					 * 10/12/08: This was apparently completely
					 * broken, in that it incremented ``align'',
					 * not ``size''
					 */
					while ((struct_align+start_offset+size) % align) {
						/* ++align;*/
						++size;
					}
					/* size += align - orig_align;   10/12/08: Replaced with above*/
					size += backend->get_sizeof_type(se->dec->dtype, NULL);
				}
			}
		} else {
			/* Array */
			align = backend->get_align_type(ty);
			msize = backend->get_sizeof_type(ty, NULL);

			if (align > msize) {
				/* Element size rounds up to its alignment */
				size = align * remaining;
			} else {
				size = msize * remaining;
			}
		}
	}
	/* XXX this is BORKORORENORK */

	/* 08/07/07: Removed line below, should not be necessary anymore */
#if 0
	while (size % struct_align /*backend->struct_align*/) ++size;
#endif
	if (size == 0) {
		/* 10/12/08: False alarm - No null initializer needed! */
		return NULL;
	}

	ret = alloc_initializer();
	ret->type = INIT_NULL;

	/* Data payload is just the byte count, heap-copied */
	ret->data = n_xmemdup(&size, sizeof size);

	if (struct_ty != NULL) {
		ret->left_alignment = struct_align;
		ret->left_type = startse->dec->dtype;
	} else {
		/*
		 * NOTE(review): ``align'' is only assigned on the array
		 * path and the struct non-bitfield path; an all-bitfield
		 * struct with struct_ty == NULL would read it unset -
		 * presumably that combination cannot occur; verify callers
		 */
		ret->left_alignment = align;
	}

	return ret;
}
1019
1020 struct init_with_name *
make_init_name(struct initializer * init)1021 make_init_name(struct initializer *init) {
1022 static unsigned long count;
1023 char name[128];
1024 struct init_with_name *ret;
1025
1026 sprintf(name, "_Agginit%lu", count++);
1027 ret = n_xmalloc(sizeof *ret);
1028 ret->name = n_xstrdup(name);
1029 ret->init = init;
1030 ret->next = NULL;
1031
1032 if (init_list_head == NULL) {
1033 init_list_head = init_list_tail = ret;
1034 } else {
1035 init_list_tail->next = ret;
1036 init_list_tail = init_list_tail->next;
1037 }
1038 return ret;
1039 }
1040
1041 size_t
get_sizeof_const(struct token * constant)1042 get_sizeof_const(struct token *constant) {
1043 if (constant->type == TOK_STRING_LITERAL) {
1044 struct ty_string *str;
1045
1046 str = constant->data;
1047 return str->size;
1048 } else {
1049 return backend->get_sizeof_basic(constant->type);
1050 }
1051 }
1052
1053 size_t
get_sizeof_elem_type(struct type * t)1054 get_sizeof_elem_type(struct type *t) {
1055 struct type_node *head = t->tlist;
1056 size_t ret;
1057
1058 t->tlist = t->tlist->next;
1059 ret = backend->get_sizeof_type(t, NULL);
1060 t->tlist = head;
1061 return ret;
1062 }
1063
1064 size_t
get_sizeof_decl(struct decl * d,struct token * tok)1065 get_sizeof_decl(struct decl *d, struct token *tok) {
1066 if (d->size == 0) {
1067 d->size = backend->get_sizeof_type(d->dtype, tok);
1068 }
1069 return d->size;
1070 }
1071
1072 int
botch_x86_alignment(struct type * ty)1073 botch_x86_alignment(struct type *ty) {
1074 if (backend->arch == ARCH_X86) {
1075 if (ty->tlist == NULL
1076 && (IS_LLONG(ty->code)
1077 || ty->code == TY_DOUBLE
1078 || ty->code == TY_LDOUBLE)) {
1079 return 1; /* XXX Support gcc command line flags... */
1080 }
1081 }
1082 return 0;
1083 }
1084
1085 static size_t
get_union_align(struct type * ty)1086 get_union_align(struct type *ty) {
1087 struct sym_entry *se;
1088 size_t maxalign = 0;
1089
1090 for (se = ty->tstruc->scope->slist;
1091 se != NULL;
1092 se = se->next) {
1093 size_t align;
1094
1095 if (botch_x86_alignment(se->dec->dtype)) {
1096 align = 4;
1097 } else {
1098 align = backend->
1099 get_align_type(se->dec->dtype);
1100 }
1101
1102 if (align > maxalign) {
1103 maxalign = align;
1104 }
1105 }
1106 return maxalign;
1107 }
1108
1109 size_t
get_struct_align_type(struct type * ty)1110 get_struct_align_type(struct type *ty) {
1111 if ((ty->fastattr & CATTR_ALIGNED) == 0) {
1112 /*
1113 * 08/06/09: Silly gcc behavior: A long long is usually 8-byte aligned,
1114 * but ISN'T if it's a struct member! This will cause problems with
1115 * 64bit ``struct stat'' members and other such library interfaces.
1116 */
1117 if (botch_x86_alignment(ty)) {
1118 return 4;
1119 }
1120 }
1121 return backend->get_align_type(ty);
1122 }
1123
1124 /*
1125 * XXX this isn't as platform-independent and invariable as the author of
1126 * this stuff would have you believe. For starters, consider ``long double''
1127 * on x86 and x86-64.
1128 */
1129 size_t
get_align_type(struct type * ty)1130 get_align_type(struct type *ty) {
1131 int ret;
1132
1133 if (ty == NULL) {
1134 /*
1135 * 05/27/08: Get highest alignment for platform
1136 */
1137 switch (backend->arch) {
1138 case ARCH_X86:
1139 /*
1140 * XXXXXXXXXXXXXXXXXXXX
1141 * This is messed up! ``Standard'' Intel ABI says
1142 * no type aligns higher than 4, but that's what
1143 * gcc does by default for double and long long,
1144 * but it can be turned off too and does not
1145 * behave consistently in structs and arrays!
1146 */
1147 /*return get_align_type(make_basic_type(TY_DOUBLE));*/
1148 return 16;
1149 case ARCH_AMD64:
1150 return get_align_type(make_basic_type(TY_LDOUBLE));
1151 case ARCH_POWER:
1152 /* XXX is this correct? How about long long on 32bit? */
1153 if (sysflag == OS_AIX) {
1154 /*
1155 * 01/29/08: The highest alignment used by gcc is 16.
1156 * I don't know why or which types (if any) are affected
1157 * yet, but we'll just do the same thing
1158 */
1159 return 16;
1160 #if 0
1161 return get_align_type(make_basic_type(TY_LONG));
1162 #endif
1163 } else {
1164 /*
1165 * 11/24/08: 128bit long double for Linux/PPC64
1166 * XXX what about PPC32?
1167 */
1168 return get_align_type(make_basic_type(TY_LDOUBLE));
1169 }
1170 case ARCH_MIPS:
1171 return get_align_type(make_basic_type(TY_LDOUBLE));
1172 case ARCH_SPARC:
1173 return get_align_type(make_basic_type(TY_LDOUBLE));
1174 default:
1175 unimpl();
1176 }
1177 }
1178
1179
1180 /*
1181 * 05/29/08: CANOFWORMS: Use higher alignment on x86
1182 * (8 for double and long long, 16 for unqualified
1183 * __attribute__((aligned))), like gcc does by default
1184 *
1185 * XXX We should use command line options to configure
1186 * this instead of macros
1187 */
1188 #if ALIGN_X86_LIKE_GCC
1189 # define X86IFY(val) /* nothing - keep original value */
1190 #else
1191 # define X86IFY(val) if (backend->arch == ARCH_X86 && val > 4) val = 4
1192 #endif
1193 if (ty->fastattr & CATTR_ALIGNED) {
1194 return lookup_attr(ty->attributes, ATTRS_ALIGNED)->iarg;
1195 }
1196
1197
1198
1199 if (ty->tbit != NULL) {
1200 /*
1201 * 08/17/08: Bitfield alignment!
1202 */
1203
1204 ret = backend->get_align_type(
1205 cross_get_bitfield_promoted_type(ty));
1206 X86IFY(ret);
1207 if (backend->arch == ARCH_X86) {
1208 /*
1209 * unsigned long long bitfields have 4-byte alignment
1210 * in gcc even when a plain long long doesn't! This
1211 * is independent of the size of the bitfield, which
1212 * may also be 64bit and still yield 4-byte-alignment
1213 */
1214 ret = 4;
1215 }
1216 return ret;
1217 } else if (ty->tlist == NULL
1218 || ty->tlist->type != TN_ARRAY_OF) {
1219
1220 if (ty->tlist == NULL) {
1221 if (ty->code == TY_UNION) {
1222 ret = get_union_align(ty);
1223 X86IFY(ret);
1224 return ret;
1225 } else if (ty->code == TY_STRUCT) {
1226 struct attrib *a;
1227
1228 a = lookup_attr(ty->tstruc->attrib, ATTRS_ALIGNED);
1229 if (ty->tstruc->alignment && a == NULL) {
1230 ret = ty->tstruc->alignment;
1231 X86IFY(ret);
1232 return ret;
1233 }
1234
1235 if (a != NULL) {
1236 ty->tstruc->alignment = a->iarg;
1237 #if 0
1238 } else if (ty->tstruc->scope->slist->next == NULL) {
1239 /* Only one member */
1240 if (botch_x86_alignment(ty)) {
1241 ty->tstruc->alignment = 4;
1242 } else {
1243 ty->tstruc->alignment = backend->
1244 get_align_type(
1245 ty->tstruc->scope->slist->dec->dtype);
1246 }
1247 #endif
1248 } else {
1249 ty->tstruc->alignment =
1250 get_union_align(ty);
1251 }
1252
1253 ret = ty->tstruc->alignment;
1254 if (a == NULL) {
1255 X86IFY(ret);
1256 }
1257 return ret;
1258 } else if (ty->code == TY_LDOUBLE) {
1259 if (backend->arch == ARCH_AMD64
1260 || sysflag == OS_OSX) {
1261 return 16;
1262 } else if (backend->arch == ARCH_X86) {
1263 return 4;
1264 }
1265 }
1266 }
1267 ret = backend->get_sizeof_type(ty, NULL);
1268 /* XXX */
1269 X86IFY(ret);
1270 return ret;
1271 } else {
1272 struct type tmp = *ty;
1273 tmp.tlist = tmp.tlist->next;
1274 return get_align_type(&tmp);
1275 }
1276 }
1277
1278 unsigned long
calc_offsets(struct vreg * vr)1279 calc_offsets(struct vreg *vr) {
1280 size_t ret = 0;
1281
1282 do {
1283 if (vr->parent->type->code == TY_STRUCT) {
1284 ret += vr->memberdecl->offset;
1285 }
1286 if (vr->from_ptr) {
1287 break;
1288 }
1289 vr = vr->parent;
1290 } while (vr->parent != NULL);
1291 return ret;
1292 }
1293
int
calc_slot_rightadjust_bytes(int size, int total_size) {
    /*
     * Number of padding bytes needed to right-adjust an item of
     * "size" bytes within a slot of "total_size" bytes. Yields 0
     * if the item already fills (or exceeds) the slot.
     */
    return size < total_size? total_size - size: 0;
}
1305
1306
1307 void
as_align_for_type(FILE * out,struct type * ty,int struct_member)1308 as_align_for_type(FILE *out, struct type *ty, int struct_member) {
1309 unsigned long align;
1310 unsigned long alignbits;
1311
1312 (void) struct_member;
1313 assert(backend->arch != ARCH_X86);
1314
1315 align = backend->get_align_type(ty);
1316
1317 if (backend->arch == ARCH_SPARC) {
1318 alignbits = align;
1319 } else {
1320 alignbits = 0;
1321 while (align >>= 1) {
1322 ++alignbits;
1323 }
1324 }
1325
1326 /* Make low-order bits of location counter zero */
1327 x_fprintf(out, "\t.align %lu\n", alignbits);
1328 }
1329
/*
 * Emit the assembler data directives that initialize a string
 * constant "str", padded with a trailing 0 when the destination
 * has room for it (howmany >= str->size). Handles both narrow
 * (.byte) and wide-character strings; for wide strings the
 * per-architecture 4-byte directive is chosen below.
 */
void
as_print_string_init(FILE *o, size_t howmany, struct ty_string *str) {
    char *p;
    char *wchar_type = NULL;    /* directive name for one wchar_t unit */
    int wchar_size = 0;         /* sizeof(wchar_t) on the target */
    size_t i;

    if (str->is_wide_char) {
        /* XXX This assumes the size will always be 4 */
        switch (backend->arch) {
        case ARCH_X86: wchar_type = "long"; break;
        case ARCH_AMD64: wchar_type = "long"; break;
        case ARCH_MIPS: wchar_type = "word"; break;
        case ARCH_POWER: wchar_type = "long"; break;
        case ARCH_SPARC: wchar_type = "word"; break;
        default: unimpl();
        }
        x_fprintf(o, ".%s\t", wchar_type);
        wchar_size = backend->get_sizeof_type(
            backend->get_wchar_t(), NULL);

        /* The directives above only cover the 4-byte case */
        assert(wchar_size == 4);
    } else {
        x_fprintf(o, ".byte\t");
    }

    /* str->size includes the terminating NUL; emit size-1 payload chars */
    for (i = 0, p = str->str; i < str->size-1; ++p, ++i) {
        if (str->is_wide_char) {
            /* Zero-extend each source char to wchar_t width */
            if (wchar_size == 2) {
                x_fprintf(o, "0x00%02x", (unsigned char)*p);
            } else if (wchar_size == 4) {
                x_fprintf(o, "0x000000%02x", (unsigned char)*p);
            } else {
                unimpl();
            }
        } else {
            x_fprintf(o, "0x%x", (unsigned char)*p);
        }

        if (i+1 < str->size-1) {
            /* Break the line every 10 items, else separate with comma */
            if (i > 0 && (i % 10) == 0) {
                if (str->is_wide_char) {
                    x_fprintf(o, "\n.%s\t", wchar_type);
                } else {
                    x_fprintf(o, "\n.byte\t");
                }
            } else {
                x_fputc(',', o);
            }
        }
    }

    /* Append the NUL terminator if the target buffer has room for it */
    if (howmany >= str->size) {
        if (str->size > 1) {
            (void) fprintf(o, ", ");
        }
        (void) fprintf(o, "0");
    }
    x_fputc('\n', o);
}
1390
1391 struct reg *
generic_alloc_gpr(struct function * f,int size,struct icode_list * il,struct reg * dontwipe,struct reg * regset,int nregs,int * csave_map,int line)1392 generic_alloc_gpr(
1393 struct function *f,
1394 int size,
1395 struct icode_list *il,
1396 struct reg *dontwipe,
1397 struct reg *regset,
1398 int nregs,
1399 int *csave_map,
1400 int line) {
1401
1402 int i;
1403 int save = 0;
1404 int least_idx = -1;
1405 int regno;
1406 static int last_alloc;
1407 struct reg *ret = NULL;
1408
1409 (void) size;
1410 (void) line;
1411 (void) dontwipe;
1412 (void) f;
1413 for (i = 0; i < nregs; ++i) {
1414 if (reg_unused(®set[i])
1415 && reg_allocatable(®set[i])) {
1416 ret = &/*mips_gprs*/regset[i];
1417 last_alloc = i;
1418 break;
1419 } else {
1420 if (!optimizing /* || !reg_allocatable(...)*/) {
1421 continue;
1422 }
1423 }
1424 }
1425 if (ret == NULL) {
1426 /*
1427 * Save and hand out register with least
1428 * references
1429 */
1430 save = 1;
1431 if (!optimizing) {
1432 static int cur;
1433 int iterations = 0;
1434
1435 if (cur == last_alloc) {
1436 /*
1437 * Ensure two successive allocs always
1438 * use different registers
1439 */
1440 cur = (cur + 1) % nregs;
1441 }
1442
1443 do {
1444 if (cur == nregs) cur = 0;
1445 ret = ®set[cur++];
1446 if (++iterations == nregs) {
1447 /*
1448 * Ouch, no register can be allocated.
1449 * This will probably only ever happen
1450 * with inline asm statements using too
1451 * many registers .... HOPEFULLY!!
1452 */
1453 return NULL;
1454 }
1455
1456 /*
1457 * 10/18/07: Wow, the check below didn't
1458 * use reg_allocatable(). Thus the AMD64
1459 * sub-registers for r8-r15 were not
1460 * considered when determining
1461 * allocatability
1462 */
1463 } while ((dontwipe != NULL && ret == dontwipe)
1464 /*|| !ret->allocatable*/
1465 || !reg_allocatable(ret));
1466 last_alloc = cur - 1;
1467 } else {
1468 int idx;
1469
1470 idx = least_idx == -1? 0: least_idx;
1471 if (idx == last_alloc) {
1472 idx = (idx + 1) % nregs;
1473 }
1474 ret = &/*mips_gprs*/regset[idx];
1475 last_alloc = idx;
1476 }
1477 }
1478
1479 regno = ret - /*mips_gprs*/regset;
1480 if (csave_map != NULL) {
1481 f->callee_save_used |= csave_map[regno] << regno;
1482 }
1483
1484 if (save) {
1485 struct reg *top_reg = NULL;
1486
1487 /*
1488 * IMPORTANT: It is assumed that an allocatable register
1489 * has a vreg, hence no ret->vreg != NULL check here.
1490 * Reusing a preg without a vreg is obviously a bug
1491 * because without a vreg, it cannot be saved anywhere.
1492 * See reg_set_unallocatable()/vreg_faultin_protected()
1493 *
1494 * 10/30/07: This didn't work for AMD64 sub-registers.
1495 * Example: We are allocating r10, but only r10d is used.
1496 * In that case we can't insist on ret being mapped to a
1497 * vreg. That's a bug. Another thing is that smaller-
1498 * than-GPR size requests weren't honored, which is not
1499 * used on AMD64 anyway, but could be used at some point.
1500 * This is also implemented (but untested) now.
1501 *
1502 * XXX This currently only works with one sub-register
1503 * per register (e.g. ah/al for ax wouldn't work)
1504 */
1505 if (!ret->used) {
1506 /*
1507 * Find register to free (there must be one because
1508 * the ``save'' flag is set)
1509 */
1510 top_reg = ret;
1511 do {
1512 ret = ret->composed_of[0];
1513 } while (!ret->used);
1514 }
1515
1516 if (ret->vreg->from_const == NULL
1517 && ret->vreg->var_backed == NULL
1518 && ret->vreg->from_ptr == NULL
1519 && ret->vreg->parent == NULL) {
1520 /* Anonymous register - must be saved */
1521 free_preg(ret, il, 1, 1);
1522 }
1523 ret->vreg = NULL;
1524
1525 if (size == 0) {
1526 /* Request to allocate top register */
1527 if (top_reg != NULL) {
1528 /* Change ret back to top */
1529 ret = top_reg;
1530 }
1531 } else if (ret->size != (unsigned)size) {
1532 /*
1533 * Freed sub-register does not match
1534 * the desired size
1535 */
1536 if (top_reg != NULL) {
1537 ret = top_reg;
1538 }
1539
1540 /*
1541 * Check for composed_of != NULL because some
1542 * callers supply size info even when there is
1543 * no need to distinguish between sizes. E.g.
1544 * FPRs are usually 8 bytes and have no sub
1545 * registers, but generic_alloc_gpr() may be
1546 * called with a 4 byte size argument for a
1547 * float
1548 */
1549 while (ret->composed_of && ret->size != (unsigned)size) {
1550 ret = ret->composed_of[0];
1551 }
1552 }
1553 }
1554
1555 ret->used = ret->allocatable = 1;
1556 if (ret == NULL) {
1557 debug_log_regstuff(ret, NULL, DEBUG_LOG_FAILEDALLOC);
1558 } else {
1559 debug_log_regstuff(ret, NULL, DEBUG_LOG_ALLOCGPR);
1560 }
1561 #ifdef DEBUG6
1562 if (ret != NULL) {
1563 ret->line = line;
1564 ++ret->nallocs;
1565 }
1566 #endif
1567 return ret;
1568 }
1569
1570 /*
1571 * This function is used to tell us whether a tlist really does include
1572 * a VLA component. This is sometimes necessary to know when we modify
1573 * typelists
1574 */
1575 int
vla_type_has_constant_size(struct type_node * tn)1576 vla_type_has_constant_size(struct type_node *tn) {
1577 for (; tn != NULL; tn = tn->next) {
1578 if (tn->type == TN_VARARRAY_OF) {
1579 /* Not constant */
1580 return 0;
1581 }
1582 }
1583 /* Constant! */
1584 return 1;
1585 }
1586
1587 int
is_immediate_vla_type(struct type * ty)1588 is_immediate_vla_type(struct type *ty) {
1589 if (!IS_VLA(ty->flags)) {
1590 return 0;
1591 }
1592 if (ty->tlist == NULL) {
1593 return 0;
1594 }
1595 if (ty->tlist->type == TN_POINTER_TO) {
1596 return 0;
1597 }
1598
1599 /* XXX */
1600 return 1;
1601 }
1602
1603
/*
 * Compute sizeof() for type "t". "tok" is non-NULL when the user
 * wrote an explicit `sizeof' (used for error reporting); NULL when
 * called internally by the compiler. Must not be called for VLAs
 * with non-constant size - use get_sizeof_vla_type() for those.
 */
size_t
get_sizeof_type(struct type *t, struct token *tok) {
    if (IS_VLA(t->flags) && !vla_type_has_constant_size(t->tlist)) {
        if (t->tlist != NULL && t->tlist->type == TN_POINTER_TO) {
            /*
             * 05/22/11: This is a pointer to a VLA of some sort.
             * We can determine the pointer size without knowing
             * anything about the VLA because all pointers are
             * the same size for all backends, so we generously
             * provide it to the caller here instead of throwing
             * an error. This is currently needed at least by
             * expr_to_icode(), where assignments of the type
             *
             *     char (*gnu)[i], (*foo)[j];
             *     gnu = foo;
             *
             * ... set the size of the result expression to
             * sizeof(char(*)[i])
             */
            ;
        } else {
            puts("BUG: get_sizeof_type() applied to VLA, should use "
            "get_sizeof_vla_type() instead!!!!");
            abort();
        }
    }
    if (t->tlist != NULL) {
        /*
         * May not be called with function argument,
         * so this has to be a pointer or an array
         */
        if (t->tlist->type == TN_ARRAY_OF) {
            size_t elem_size;
            struct type_node *tmp;

#if REMOVE_ARRARG
            if (!t->tlist->have_array_size) {
#else
            if (t->tlist->arrarg
                && t->tlist->arrarg->const_value == NULL) {
#endif
                /*
                 * An unspecified array size is ok if this
                 * function was called internally because
                 * then it came from vreg_alloc() or the
                 * likes. This is to permit something like
                 *     extern struct foo bar[];
                 *     bar;
                 * XXX probably should never be called in
                 * contexts where the above is ok
                 */
                if (tok != NULL) {
                    /* Not called internally */
                    errorfl(tok,
                    "Cannot take size of incomplete type");
                }
                warningfl(tok, "(BUG?:) Cannot take size of incomplete type");
                return 0;
            }

#if ! REMOVE_ARRARG
            /* Lazily cache the constant dimension as a host size_t */
            if (t->tlist->arrarg_const == 0) {
                t->tlist->arrarg_const =
                    cross_to_host_size_t(
                        t->tlist->arrarg->const_value);
            }
#endif

            /*
             * Temporarily strip the array node to get the
             * element size, then restore the type list
             */
            tmp = t->tlist;
            t->tlist = t->tlist->next;
            elem_size = backend->get_sizeof_type(t, tok);
            t->tlist = tmp;

            return t->tlist->arrarg_const * elem_size;
        } else if (t->tlist->type == TN_POINTER_TO) {
            return backend->get_ptr_size();
        } else {
            /* TN_FUNCTION */
            /*
             * The error-handling code below is deliberately
             * disabled by the early return for now
             */
            return backend->get_ptr_size(); /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */
            if (tok != NULL) {
                /* User used ``sizeof'' explicitly */
                errorfl(tok,
                    "`sizeof' operator cannot be applied"
                    " to functions");
            } else {
                /* Function was called internally */
                puts("BUG: sizeof() applied to function");
                abort();
            }
            return 0;
        }
        return backend->get_ptr_size();
    }

    /* No type list: plain struct/union or basic type */
    if (t->code == TY_STRUCT) {
        return t->tstruc->size;
    } else if (t->code == TY_UNION) {
        return t->tstruc->size;
    } else {
        /* return backend->get_sizeof_basic(t->code);*/
        return cross_get_sizeof_type(t);
    }
}
1707
1708
1709
1710
1711 /*
1712 * 07/24/07: NEW! A sizeof for VLAs. This generates code to fetch
1713 * the hidden VLA sizes at runtimes, and to multiply them. Consequently
1714 * it returns a vreg with the result
1715 */
struct vreg *
get_sizeof_vla_type(struct type *ty, struct icode_list *il) {
    /*
     * Runtime sizeof for VLAs: emits icode that loads the hidden
     * per-dimension size values and multiplies them with the
     * constant base type size; returns the vreg holding the result.
     */
    size_t ulong_size = backend->get_sizeof_type(
        make_basic_type(TY_ULONG), NULL);
    int base_size;      /* constant sizeof() of the element/base type */
    /*struct reg *res_reg;*/
    struct type_node *tn;
    struct type_node *saved_tlist;
    struct token *tok;
    struct vreg *ret;
    struct stack_block *block_addr = ty->vla_addr;  /* hidden size storage */

    /*
     * 06/01/11: Nonsensical (unused) register allocation removed! This
     * register was never mapped to a vreg, and neither marked unallocatable,
     * such that subsequent register allocations stumbled over null pointer
     * errors.
    res_reg = ALLOC_GPR(curfunc, ulong_size, il, NULL);
     */

    if (ty->tlist != NULL && ty->tlist->type == TN_POINTER_TO) {
        /* Not much to be done */
        base_size = backend->get_ptr_size();
    } else {
        /*
         * Get base type size, e.g. sizeot(int), etc., if this is a
         * (possibly multi-dimensional) array.
         * ATTENTION: If this is an array of pointers, the base
         * type size is sizeof(ptr) instead of the genuine base
         * type size
         */
        base_size = 0;
        for (tn = ty->tlist; tn != NULL; tn = tn->next) {
            if (tn->type == TN_POINTER_TO) {
                /* Yes, base is pointer */
                base_size = backend->get_ptr_size();
                break;
            }
        }
        if (base_size == 0) {
            /*
             * This is an array of non-pointer types; strip the
             * type list and VLA flag temporarily so the plain
             * base type size can be computed, then restore them
             */
            saved_tlist = ty->tlist;
            ty->tlist = NULL;
            ty->flags &= ~FLAGS_VLA;
            base_size = backend->get_sizeof_type(ty, NULL);
            ty->flags |= FLAGS_VLA;

            ty->tlist = saved_tlist;
        }
    }

    /* Start the result with the constant base size */
    tok = const_from_value(&base_size, make_basic_type(TY_INT));
    ret = vreg_alloc(NULL, tok, NULL, make_basic_type(TY_INT));

    /*
     * Anonymify base size and make it a size_t
     * XXX const_from_value should be fixed instead!
     */
    ret = backend->icode_make_cast(ret, backend->get_size_t(), il);

    if (ty->tlist != NULL && ty->tlist->type != TN_POINTER_TO) {
        /*
         * Must be an array of VLAs, or of pointers to VLAs,
         * or a plain VLA. We don't attempt to fold constant
         * dimensions yet. Instead we just calculate it all
         * dynamically. (XXX)
         */
        struct vreg *factor_vreg;   /* current dimension factor */
        struct token *factor_tok;
        int factor_size;
        int vla_idx = 0;            /* index of hidden VLA size block */
        struct icode_instr *ii;

        for (tn = ty->tlist; tn != NULL; tn = tn->next) {
            /*
             * 05/22/11: Explictly store block index numbering
             * information in the type node. This fixes incorrect
             * block handling in constructs like
             *
             *     char buf[x][y];
             *     sizeof *x;   // used wrong block
             */
            vla_idx = tn->vla_block_no;

            if (tn->type == TN_POINTER_TO) {
                /*
                 * This already went into the ``base size''
                 * of this array - nothing left to do
                 */
                break;
            } else if (tn->type == TN_ARRAY_OF) {
#if ! REMOVE_ARRARG
                /* Lazily cache the constant dimension */
                if (tn->arrarg_const == 0) {
                    tn->arrarg_const =
                        cross_to_host_size_t(
                            tn->arrarg->const_value);
                }
#endif
                factor_size = tn->arrarg_const;
                factor_tok = const_from_value(&factor_size,
                    make_basic_type(TY_INT));
                factor_vreg = vreg_alloc(NULL, factor_tok,
                    NULL, make_basic_type(TY_INT));

                /* Protect the accumulator register during the cast */
                reg_set_unallocatable(ret->pregs[0]);
                factor_vreg = backend->icode_make_cast(
                    factor_vreg,
                    make_basic_type(TY_ULONG),
                    il);
                reg_set_allocatable(ret->pregs[0]);
            } else {
                /* Must be VLA */
                struct reg *dimsize;

                /* Protect the accumulator register during the load */
                reg_set_unallocatable(ret->pregs[0]);
                dimsize = ALLOC_GPR(curfunc, ulong_size, il, 0);
                reg_set_allocatable(ret->pregs[0]);

                factor_vreg = icode_make_retr_vla_size(
                    dimsize, block_addr, vla_idx, il);
                /* ++vla_idx;*/
            }

            /* Accumulate: ret *= factor */
            backend->icode_prepare_op(&ret, &factor_vreg,
                TOK_OP_MULTI, il);
            ii = icode_make_mul(ret, factor_vreg);
            append_icode_list(il, ii);
        }
    }
    return ret;
}
1847
1848
1849 /*
1850 * 05/22/11: Get element size of the passed pointer or array type
1851 * (i.e. what the pointer points or what the array contains - which
1852 * has previously been determined to be a VLA of some sort)
1853 */
1854 struct vreg *
1855 get_sizeof_elem_vla_type(struct type *ty, struct icode_list *il) {
1856 struct type_node *head = ty->tlist;
1857 struct vreg *ret;
1858
1859 ty->tlist = ty->tlist->next;
1860 ret = get_sizeof_vla_type(ty, il);
1861 ty->tlist = head;
1862 return ret;
1863 }
1864
1865
1866 /* XXX platform-independent?!?! used by amd64 */
1867 void
1868 store_preg_to_var(struct decl *d, size_t size, struct reg *r) {
1869 static struct vreg vr;
1870
1871 vr.type = d->dtype;
1872 vr.size = size;
1873 vr.var_backed = d;
1874
1875 /*
1876 * 01/28/09: This didn't handle multi-GPR items, which broke
1877 * with long long on PPC32!
1878 * XXX how about long double on SPARC? Is this ever encountered
1879 * here?
1880 * 01/31/09: Type can be NULL?! Is that ok?
1881 */
1882 if (d->dtype != NULL) {
1883 vr.is_multi_reg_obj = backend->is_multi_reg_obj(d->dtype);
1884 }
1885 backend_vreg_map_preg(&vr, r);
1886 emit->store(&vr, &vr);
1887 if (vr.is_multi_reg_obj) {
1888 backend_vreg_map_preg2(&vr, r+1 /* XXX */);
1889 emit->store(&vr, &vr);
1890 backend_vreg_unmap_preg2(r+1);
1891 }
1892 backend_vreg_unmap_preg(r);
1893 r->used = 0;
1894 }
1895
1896
/*
 * Store register "r" into variable "d" at byte offset "off",
 * using "temp" as scratch register to hold the computed address.
 * Only implemented for backends whose emit_addrof() supports
 * addr_offset (currently PPC and MIPS).
 */
void
store_preg_to_var_off(struct decl *d, size_t off, size_t size, struct reg *r,
    struct reg *temp) {

    static struct vreg vr;
    static struct vreg ptr;

    vr.type = d->dtype;
    vr.size = size;
    vr.var_backed = d;
    vr.from_ptr = NULL; /* needs reset because vr is static */
/*    vr.from_ptr = &ptr;*/

    /*
     * Load address of variable into register
     */
    backend_vreg_map_preg(&ptr, temp);

    vr.addr_offset = off;
    emit->addrof(temp, &vr, NULL);

    if (backend->arch != ARCH_POWER && backend->arch != ARCH_MIPS) {
        /*
         * 07/22/09: addr_offset must be implemented in emit_addrof()
         * for every architecture/emitter which uses this function!
         * It is optional and currently only used for this purpose
         */
        unimpl();
    }

    /*
     * Store register through pointer
     */
    vr.type = backend->get_size_t();
    vr.size = backend->get_sizeof_type(vr.type, NULL);
    vr.from_ptr = &ptr;
    vr.var_backed = NULL;
    backend_vreg_map_preg(&vr, r);
    emit->store(&vr, &vr);
}
1937
1938
1939
1940
1941
1942 /* XXX generic? */
/*
 * Place function argument "vr" into the next available argument
 * register(s) of "regset", starting at regset[startat + *index].
 * Advances *index by the number of registers consumed (1 or 2
 * for multi-register objects) and marks them unallocatable so
 * later argument setup cannot clobber them.
 * XXX generic?
 */
void
put_arg_into_reg(
    struct reg *regset,
    int *index, int startat,
    struct vreg *vr,
    struct icode_list *il) {

    struct reg *r;
    struct reg *r2 = NULL;  /* second register for multi-reg objects */

    r = &regset[startat + *index];
    if (vr->is_multi_reg_obj) {
        r2 = &regset[startat + *index + 1];
    }
    /*
     * If the value is not already resident in exactly the target
     * register(s), free the targets and fault the value in
     */
    if (vr->pregs[0] == NULL
        || vr->pregs[0] != r
        || r->vreg != vr
        || r2) {

        int is_dedicated = 0;

        /*
         * 07/24/09: On PowerPC, the temporary FPR f13 is also
         * a floating point argument register. So it is
         * generally dedicated (should not be used for other
         * things) with the special expection of using it as
         * argument register. So we distinguish for the temp
         * FPR (possibly also on for other architectures)
         * below. Note that the temp GPR should NOT be an
         * argument register because it may be needed to
         * compute pointer addresses in the very process of
         * passing arguments
         */
        if (r->dedicated && r == tmpfpr) {
            free_preg_dedicated(r, il, 1, 1);
            is_dedicated = 1;
        } else {
            free_preg(r, il, 1, 1);
        }
        if (r2 != NULL) {
            if (r2->dedicated && r2 == tmpfpr) {
                free_preg_dedicated(r2, il, 1, 1);
                is_dedicated = 1;
            } else {
                free_preg(r2, il, 1, 1);
            }
        }

        if (is_dedicated) {
            vreg_faultin_dedicated(r, r2, vr, il, 0);
        } else {
            vreg_faultin(r, r2, vr, il, 0);
        }
    } else {
        /* Already in place - just (re)establish the mapping */
        if (r == tmpfpr || (r2 != NULL && r2 == tmpfpr)) {
            vreg_map_preg_dedicated(vr, r);
        } else {
            vreg_map_preg(vr, r);
        }
    }
    /* Protect the argument registers until the call is emitted */
    reg_set_unallocatable(r);
    if (r2) {
        reg_set_unallocatable(r2);
        ++*index;
    }
    ++*index;
}
2010
2011 /*
2012 * XXX the alignment stuff is totally botched.. it should be done in
2013 * this routine and not in print_init_expR()..
2014 * also, this could probably be unified with gas/nasm print_init_list()
2015 */
/*
 * Recursively emit assembler data directives for the initializer
 * list "init" of declaration "dec" (may be a struct/union, an
 * array of structs, or scalars). "print_init_expr" emits one
 * scalar initializer in backend-specific syntax.
 *
 * XXX the alignment stuff is totally botched.. it should be done in
 * this routine and not in print_init_expR()..
 * also, this could probably be unified with gas/nasm print_init_list()
 */
void
generic_print_init_list(FILE *out, struct decl *dec, struct initializer *init,
    void (*print_init_expr)(struct type *, struct expr *)) {

    struct sym_entry *se = NULL;    /* current struct member, if any */
    int is_struct = 0;

    if (dec
        && (dec->dtype->code == TY_STRUCT
        || dec->dtype->code == TY_UNION)
        && dec->dtype->tlist == NULL) {
        se = dec->dtype->tstruc->scope->slist;
        is_struct = 1;
    }

    for (; init != NULL; init = init->next) {
        if (init->type == INIT_NESTED) {
            struct decl *nested_dec = NULL;
            struct type_node *saved_tlist = NULL;

            if (se == NULL) {
                /*
                 * May be an array of structs, in
                 * which case the struct declaration
                 * is needed for alignment
                 */
                if (dec && dec->dtype->code == TY_STRUCT) {
                    /* Build a temporary element declaration */
                    nested_dec = alloc_decl();
                    nested_dec->dtype = dec->dtype;
                    saved_tlist = dec->dtype->tlist;
                    dec->dtype->tlist = NULL;
                }
            } else {
                nested_dec = se->dec;
            }
            generic_print_init_list(out, nested_dec, init->data,
                print_init_expr);
            if (saved_tlist != NULL) {
                /* Restore type list and drop the temporary decl */
                dec->dtype->tlist = saved_tlist;
                free(nested_dec);
            }
        } else if (init->type == INIT_EXPR) {
            struct expr *ex;

            ex = init->data;
            print_init_expr(ex->const_value->type, ex);
        } else if (init->type == INIT_NULL) {
            /* Zero-fill: .space/.skip the given number of bytes */
            x_fprintf(out, "\t.%s %lu\n",
                backend->arch == ARCH_SPARC? "skip": "space",
                (unsigned long)*(size_t *)init->data);
            /*
             * 03/01/10: Don't do this for variable
             * initializers. See x86_emit_gas.c
             */
            if (init->varinit == NULL) {
                /* The fill covers all remaining members */
                for (; se != NULL && se->next != NULL; se = se->next) {
                    ;
                }
            }
        }
        if (se != NULL) {
            se = se->next;
        }
    }
    if (is_struct) {
        /* Pad/align at struct end for arrays of structs */
        int align = backend->get_align_type(dec->dtype);

        assert(backend->arch != ARCH_X86);

        if (backend->arch != ARCH_SPARC) {
            /* Convert byte alignment to log2 for .align */
            if (align == 2) align = 1;
            else if (align == 4) align = 2;
            else if (align == 8) align = 3;
        }

        /* XXX or use .space?! */
        x_fprintf(out, "\t.align %d\n", align);
    }
}
2095
2096 /*
2097 * This function relocates structure pointer and size values to
2098 * different registers if necessary, in preparation for a structure
2099 * assignment. The point is that on most architectures, like MIPS
2100 * and PPC, the memcpy() arguments go into GPRs, and we have to
2101 * ensure that moving one of these values does not trash another
2102 * value because it is resident in the destination GPR
2103 */
/*
 * This function relocates structure pointer and size values to
 * different registers if necessary, in preparation for a structure
 * assignment. The point is that on most architectures, like MIPS
 * and PPC, the memcpy() arguments go into GPRs, and we have to
 * ensure that moving one of these values does not trash another
 * value because it is resident in the destination GPR.
 * r0/r1/r2 are the registers that will receive the memcpy()
 * arguments.
 */
void
relocate_struct_regs(struct copystruct *cs,
    struct reg *r0, struct reg *r1, struct reg *r2,
    struct icode_list *il) {

    struct reg *curregs[4];     /* regs currently holding the values */
    struct reg *tmp;
    int i;

    curregs[0] = cs->src_from_ptr;
    curregs[1] = cs->dest_from_ptr;
    curregs[2] = cs->src_from_ptr_struct;
    curregs[3] = cs->dest_from_ptr_struct;

    /* Protect the target argument registers from allocation */
    reg_set_unallocatable(r0);
    reg_set_unallocatable(r1);
    reg_set_unallocatable(r2);

    /*
     * 11/01/07: This was missing
     */
    for (i = 0; i < 4; ++i) {
        if (curregs[i] != NULL) {
            reg_set_unallocatable(curregs[i]);
        }
    }

    for (i = 0; i < 4; ++i) {
        /*
         * 11/01/07: This was missing
         */
        if (curregs[i] == NULL) {
            continue;
        }
        if (curregs[i] == r0
            || curregs[i] == r1
            || curregs[i] == r2) {
            /* Move elsewhere */
            tmp = ALLOC_GPR(curfunc, curregs[i]->size, il, NULL);
            icode_make_copyreg(tmp, curregs[i],
                curregs[i]->vreg->type, /* XXX ok? */
                curregs[i]->vreg->type, /* XXX ok? */
                il);

            /*
             * 11/01/07: This was missing!!!!!!!! The
             * registers were relocated, but the register
             * information was not updated. Terrible!
             */
            switch (i) {
            case 0:
                cs->src_from_ptr = tmp;
                break;
            case 1:
                cs->dest_from_ptr = tmp;
                break;
            case 2:
                cs->src_from_ptr_struct = tmp;
                break;
            case 3:
                cs->dest_from_ptr_struct = tmp;
                break;
            }
        }
    }
    /* Release all involved registers again */
    free_preg(r0, il, 0, 0);
    free_preg(r1, il, 0, 0);
    free_preg(r2, il, 0, 0);
    for (i = 0; i < 4; ++i) {
        if (curregs[i] != NULL) {
            free_preg(curregs[i], il, 0, 0);
        }
    }
}
2178
2179
2180 /*
2181 * 12/29/07: Saves struct address register to stack - SPARC-specific?
2182 */
2183 void
2184 save_struct_ptr(struct decl *dec) {
2185 static struct vreg dest_vreg;
2186
2187 dest_vreg.var_backed = dec;
2188 dest_vreg.type = make_void_ptr_type();
2189 dest_vreg.size = backend->get_sizeof_type(dest_vreg.type, NULL);
2190 dest_vreg.pregs[0] = dec->stack_addr->from_reg;
2191 emit->store(&dest_vreg, &dest_vreg);
2192 }
2193
2194
2195 /*
2196 * 12/29/07: Reloads struct address from stack - SPARC-specific?
2197 */
2198 void
2199 reload_struct_ptr(struct decl *dec) {
2200 static struct vreg dest_vreg;
2201
2202 dest_vreg.var_backed = dec;
2203 dest_vreg.type = make_void_ptr_type();
2204 dest_vreg.size = backend->get_sizeof_type(dest_vreg.type, NULL);
2205 emit->load(dec->stack_addr->from_reg, &dest_vreg);
2206 }
2207
2208 /*
2209 * This function copies the structure pointed to by dec->stack_addr->
2210 * from_reg to the stack block designated by dec->stack_addr, by
2211 * calling emit->copystruct().
2212 * The purpose is just to set up the data structures required by
2213 * copystruct()
2214 * 12/29/07: SPARC-specific?
2215 */
2216 void
2217 copy_struct_regstack(struct decl *dec) {
2218 static struct copystruct cs;
2219 static struct vreg src_vreg;
2220 static struct vreg dest_vreg;
2221
2222 src_vreg.type = dest_vreg.type = dec->dtype;
2223 src_vreg.size = backend->get_sizeof_type(dec->dtype, NULL);
2224 dest_vreg.size = src_vreg.size;
2225 dest_vreg.var_backed = dec;
2226
2227 cs.src_from_ptr = dec->stack_addr->from_reg;
2228 cs.src_vreg = &src_vreg;
2229 cs.dest_vreg = &dest_vreg;
2230 emit->copystruct(&cs);
2231 }
2232
2233 /*
2234 * This is a new attempt at generic_print_init_list(), which uses gas/UNIX
2235 * as style syntax, but performs alignment in the same way as the x86 one
2236 */
/*
 * This is a new attempt at generic_print_init_list(), which uses gas/UNIX
 * as style syntax, but performs alignment in the same way as the x86 one.
 * Recursively emits assembler data directives for initializer list
 * "init" of declaration "dec"; "print_init_expr" emits one scalar
 * item. Tracks the current struct member (se) so that inter-member
 * and end-of-struct padding can be emitted, with special handling
 * for bitfield storage units.
 */
void
new_generic_print_init_list(FILE *out, struct decl *dec, struct initializer *init,
    void (*print_init_expr)(struct type *, struct expr *)) {

    struct sym_entry *se = NULL;        /* current struct member */
    struct sym_entry *startse = NULL;   /* member where a zero-fill began */

    if (dec
        && (dec->dtype->code == TY_STRUCT
        || dec->dtype->code == TY_UNION)
        && dec->dtype->tlist == NULL) {
        se = dec->dtype->tstruc->scope->slist;
    }
    for (; init != NULL; init = init->next) {
        if (init->type == INIT_NESTED) {
            struct decl *nested_dec = NULL;
            struct decl *storage_unit = NULL;
            struct type_node *saved_tlist = NULL;

            if (se == NULL) {
                /*
                 * May be an array of structs, in
                 * which case the struct declaration
                 * is needed for alignment
                 */
                if (dec && dec->dtype->code == TY_STRUCT) {
                    /* Build a temporary element declaration */
                    nested_dec = alloc_decl();
                    nested_dec->dtype = dec->dtype;
                    saved_tlist = dec->dtype->tlist;
                    dec->dtype->tlist = NULL;
                }
            } else {
                nested_dec = se->dec;
            }
            new_generic_print_init_list(out, nested_dec, init->data,
                print_init_expr);
            if (saved_tlist != NULL) {
                /* Restore type list and drop the temporary decl */
                dec->dtype->tlist = saved_tlist;
                free(nested_dec);
            }

            /*
             * 10/08/08: If this is a bitfield initializer, match
             * (skip) all affected bitfield declarations in this
             * struct. This is important for alignment
             */
            if (se != NULL && se->dec->dtype->tbit != NULL) {
                storage_unit = se->dec->dtype->tbit->bitfield_storage_unit;
                /*
                 * Skip all but last initialized bitfield, which is needed
                 * for alignment below
                 */
                if (se->next == NULL) {
                    /*
                     * This is already the last struct member, which
                     * also happens to be a bitfield
                     */
                    ;
                } else {
                    do {
                        se = se->next;
                    } while (se != NULL
                        && se->next != NULL
                        && se->dec->dtype->tbit != NULL
                        && se->dec->dtype->tbit->bitfield_storage_unit
                            == storage_unit);

                    if (se != NULL
                        && (se->dec->dtype->tbit == NULL
                        || se->dec->dtype->tbit->bitfield_storage_unit
                            != storage_unit)) {
                        /*
                         * Move back to last BF member -
                         * so we can align for next
                         * member
                         */
                        se = se->prev;
                    }
                }
            }
        } else if (init->type == INIT_EXPR) {
            struct expr *ex;

            ex = init->data;
            print_init_expr(ex->const_value->type, ex);
            if (se != NULL && se->dec->dtype->tbit != NULL) {
                /*
                 * Skip alignment stuff below, UNLESS
                 * we are dealing with the last member
                 * of the struct, in which case we may
                 * have to pad to align for the start
                 * of the struct
                 */
                if (se->next != NULL) {
                    continue;
                }
            }
        } else if (init->type == INIT_NULL) {
            if (init->varinit && init->left_type->tbit != NULL) {
                /* Bitfield variable initializer - nothing to emit */
                continue;
            } else {
                /* Zero-fill: .space/.skip the given number of bytes */
                x_fprintf(out, "\t.%s %lu\n",
                    backend->arch == ARCH_SPARC? "skip": "space",
                    (unsigned long)*(size_t *)init->data);
                startse = se;
                /*
                 * 03/01/10: Don't do this for variable
                 * initializers. See x86_emit_gas.c
                 */
                if (init->varinit == NULL) {
                    /* The fill covers all remaining members */
                    for (; se != NULL && se->next != NULL; se = se->next) {
                        ;
                    }
                }
            }
        }
        if (se != NULL) {
            /* May need alignment */
            struct decl *d = NULL;      /* member we align FOR */
            struct type *ty = NULL;
            size_t nbytes;

            if (se->next != NULL) {
                /* We may have to align for the next member */
                if (se->next->dec->dtype->tbit != NULL) {
                    /* Don't align bitfields! */
                    ;
                } else {
                    d = se->next->dec;
                    ty = d->dtype;
                }
            } else if (dec->dtype->tstruc->scope->slist->next) {
                /*
                 * We've reached the end of the struct and
                 * may have to pad the struct, such that if
                 * we have an array of structs, every element
                 * is properly aligned.
                 *
                 * Note that we have to use the whole struct
                 * alignment, not just first member alignment
                 */
                ty = dec->dtype;
                if (init->type == INIT_NULL) {
                    /*
                     * 08/08/07: Same fix as in x86 struct
                     * init functions
                     */
                    size_t curoff = startse->dec->offset +
                        *(size_t *)init->data;
                    size_t alignto = backend->get_align_type(ty);
                    size_t tmp = 0;

                    while ((curoff + tmp) % alignto) {
                        ++tmp;
                    }
                    if (tmp > 0) {
                        x_fprintf(out, "\t.%s %lu\n",
                            backend->arch == ARCH_SPARC?
                            "skip": "space",
                            tmp);
                    }
                } else {
                    d = dec->dtype->tstruc->scope->slist->dec;
                }
            }

            if (d != NULL) {
                unsigned long offset;


                /*
                 * 10/08/08: Handle bitfields
                 */
                if (se->dec->dtype->tbit != NULL) {
                    /*
                     * Align for next member. We are at
                     *
                     *    address_of_storage_unit + size_of_storage_unit
                     *
                     * We only get here if the last bitfield in the
                     * current unit is processed, so we have to account
                     * for the entire partial storage unit.
                     *
                     * Note that we're setting the offset AFTER the current
                     * item because calc_align_bytes() doesn't do this for
                     * us
                     */
                    offset = se->dec->dtype->tbit->bitfield_storage_unit->offset
                        + backend->get_sizeof_type(se->dec->dtype->tbit->
                            bitfield_storage_unit->dtype, NULL);
                } else {
                    offset = se->dec->offset;
                }



                nbytes = calc_align_bytes(/*se->dec->*/offset,
                    se->dec->dtype, ty, 1);
                if (nbytes) {
                    x_fprintf(out, "\t.%s %lu\n",
                        backend->arch == ARCH_SPARC?
                        "skip": "space",
                        nbytes);
                }
            }
            se = se->next;
        }
    }
}
2446
2447 size_t
2448 generic_print_init_var(FILE *out, struct decl *d, size_t segoff,
2449 void (*print_init_expr)(struct type *, struct expr *),
2450 int skip_is_space) {
2451
2452 struct type *dt = d->dtype;
2453 size_t size;
2454 size_t ret = 0;
2455
2456 if (DECL_UNUSED(d)) {
2457 return 0;
2458 }
2459
2460 /* Constant initializer expression */
2461 x_fprintf(out, "%s:\n", dt->name);
2462 new_generic_print_init_list(out, d, d->init, print_init_expr);
2463
2464 ret = size = backend->get_sizeof_decl(d, NULL);
2465 if (d->next != NULL) {
2466 unsigned long align;
2467 struct decl *tmpd;
2468
2469 /*
2470 * Now we have to check which of the next variables
2471 * is actually used. Because if it's not used, it's
2472 * not printed, and then we'd get wrong alignment
2473 */
2474 for (tmpd = d->next; tmpd != NULL; tmpd = tmpd->next) {
2475 if (!DECL_UNUSED(tmpd)) {
2476 break;
2477 }
2478 }
2479
2480 if (tmpd != NULL) {
2481 align = calc_align_bytes(segoff,
2482 d->dtype, tmpd->dtype, 0);
2483 if (align) {
2484 /*
2485 * XXX is this really needed?!?! doesn't the
2486 * SPARC assembler have "space" or why was
2487 * skip usedh ere???
2488 */
2489 x_fprintf(out, "\t.%s %lu\n",
2490 skip_is_space? "space": "skip", align);
2491 ret += align;
2492 }
2493 }
2494 }
2495 return ret;
2496 }
2497
2498 int
2499 generic_same_representation(struct type *dest, struct type *src) {
2500 size_t dest_size = backend->get_sizeof_type(dest, NULL);
2501 size_t src_size = backend->get_sizeof_type(src, NULL);
2502
2503 if ((is_integral_type(dest)
2504 || dest->tlist != NULL)
2505 && (is_integral_type(src)
2506 || src->tlist != NULL)
2507 && dest_size == src_size) {
2508 return 1;
2509 } else {
2510 return 0;
2511 }
2512 }
2513
/*
 * Store register r to the stack location described by sb, by building a
 * temporary void-pointer-sized vreg that addresses the stack block and
 * emitting a store through it.
 *
 * NOTE(review): vr is static rather than automatic - presumably so the
 * vreg outlives this call in case emitted icode/backend state retains a
 * pointer to it; confirm before changing. This also makes the function
 * non-reentrant.
 */
void
store_reg_to_stack_block(struct reg *r, struct stack_block *sb) {
	static struct vreg vr;

	/* Treat the slot as a void pointer, sized per target */
	vr.type = make_void_ptr_type();
	vr.size = backend->get_sizeof_type(vr.type, NULL);
	vr.stack_addr = sb;
	/* Temporarily bind vr to the physical register, store, unbind */
	backend_vreg_map_preg(&vr, r);
	emit->store(&vr, &vr);
	backend_vreg_unmap_preg(r);
}
2525
2526 unsigned long
2527 align_for_cur_auto_var(struct type *ty, unsigned long curoff) {
2528 unsigned long align = backend->get_align_type(ty);
2529 unsigned long origoff = curoff;
2530
2531 while (curoff % align) {
2532 ++curoff;
2533 }
2534 return curoff - origoff;
2535 }
2536
2537 int
2538 arch_without_offset_limit(void) {
2539 switch (backend->arch) {
2540 case ARCH_X86:
2541 case ARCH_AMD64:
2542 /*
2543 * x86 and AMD64 have variable-length encodings which
2544 * allow for ``unlimited'' (within GPR range) offsets
2545 */
2546 return 1;
2547 case ARCH_MIPS:
2548 case ARCH_POWER:
2549 case ARCH_SPARC:
2550 /* These archs are limited (especially SAPRC)! */
2551 return 0;
2552 default:
2553 unimpl();
2554 }
2555 /* NOTREACHED */
2556 return 0;
2557 }
2558
2559 char *
2560 generic_elf_section_name(int value) {
2561 switch (value) {
2562 case SECTION_INIT:
2563 return "data";
2564 case SECTION_UNINIT:
2565 return "bss";
2566 case SECTION_RODATA:
2567 return "rodata";
2568 case SECTION_TEXT:
2569 return "text";
2570 case SECTION_INIT_THREAD:
2571 return "tdata";
2572 case SECTION_UNINIT_THREAD:
2573 return "tbss";
2574 default:
2575 unimpl();
2576 }
2577 /* NOTREACHED */
2578 return "";
2579 }
2580
2581 char *
2582 generic_mach_o_section_name(int value) {
2583 if (value == SECTION_RODATA) {
2584 return "cstring";
2585 } else if (value == SECTION_UNINIT) {
2586 return "data";
2587 }
2588 return generic_elf_section_name(value);
2589 }
2590
2591
2592 /*
2593 * 04/05/08: Alignment across scope boundaries wasn't working, because the
2594 * approach we use is to align for the next variable while allocating the
2595 * current variable, and this was not done if the current scope ended.
2596 * Hence this new function to look up the next variable to align for. This
2597 * stuff sucks, and we should instead align for the CURRENT variable as it
2598 * is encountered
2599 */
2600 struct decl *
2601 get_next_auto_decl_in_scope(struct scope *s, int i) {
2602 struct decl **dec = s->automatic_decls.data;
2603
2604 for (;;) {
2605 if (i+1 < s->automatic_decls.ndecls) {
2606 /*
2607 * Currently VLAs are implemented in terms of
2608 * malloc() and free(), so do not consider them
2609 * for alignment
2610 */
2611 if (!IS_VLA(dec[i+1]->dtype->flags)) {
2612 return dec[i+1];
2613 }
2614 ++i;
2615 } else {
2616 /* Scope ends here, try next one */
2617 do {
2618 s = s->next;
2619 } while (s != NULL && s->type != SCOPE_CODE);
2620 if (s == NULL) {
2621 /* No more local variables */
2622 return NULL;
2623 }
2624 i = -1; /* i+1 = 0 */
2625 dec = s->automatic_decls.data;
2626 }
2627 }
2628 /* NOTREACHED */
2629 return NULL;
2630 }
2631
2632
2633 struct stupidtrace_entry *
2634 put_stupidtrace_list(struct function *f) {
2635 struct stupidtrace_entry *ent;
2636 static struct stupidtrace_entry nullent;
2637 char buf[128];
2638 static unsigned long count;
2639
2640 ent = n_xmalloc(sizeof *ent);
2641 *ent = nullent;
2642 ent->func = f;
2643 sprintf(buf, "_Strace%lu", count++);
2644 ent->bufname = n_xstrdup(buf);
2645 if (stupidtrace_list_head == NULL) {
2646 stupidtrace_list_head = stupidtrace_list_tail = ent;
2647 } else {
2648 stupidtrace_list_tail->next = ent;
2649 stupidtrace_list_tail = ent;
2650 }
2651 return ent;
2652 }
2653
2654
2655 struct reg * /*icode_instr **/
2656 make_addrof_structret(struct vreg *struct_lvalue, struct icode_list *il) {
2657 struct type *orig_type = struct_lvalue->type;
2658 struct type *temp_type = dup_type(struct_lvalue->type);
2659 struct reg *ret;
2660
2661 temp_type->tlist = NULL;
2662 struct_lvalue->type = temp_type;
2663
2664 /*ii =*/ ret = icode_make_addrof(NULL, struct_lvalue, il);
2665 /* append_icode_list(il, ii);*/
2666
2667 struct_lvalue->type = orig_type;
2668 return ret; /*ii->dat;*/
2669 }
2670
2671