1; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py 2; RUN: llc < %s -mtriple=i686-- -tailcallopt | FileCheck %s 3 4; With -tailcallopt, CodeGen guarantees a tail call optimization 5; for all of these. 6 7declare fastcc i32 @tailcallee(i32 %a1, i32 %a2, i32 %a3, i32 %a4) 8 9define dso_local fastcc i32 @tailcaller(i32 %in1, i32 %in2) nounwind { 10; CHECK-LABEL: tailcaller: 11; CHECK: # %bb.0: # %entry 12; CHECK-NEXT: subl $16, %esp 13; CHECK-NEXT: movl %ecx, {{[0-9]+}}(%esp) 14; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax 15; CHECK-NEXT: movl %edx, {{[0-9]+}}(%esp) 16; CHECK-NEXT: movl %eax, {{[0-9]+}}(%esp) 17; CHECK-NEXT: addl $8, %esp 18; CHECK-NEXT: jmp tailcallee@PLT # TAILCALL 19entry: 20 %tmp11 = tail call fastcc i32 @tailcallee(i32 %in1, i32 %in2, i32 %in1, i32 %in2) 21 ret i32 %tmp11 22} 23 24declare fastcc i8* @alias_callee() 25 26define fastcc noalias i8* @noalias_caller() nounwind { 27; CHECK-LABEL: noalias_caller: 28; CHECK: # %bb.0: 29; CHECK-NEXT: jmp alias_callee@PLT # TAILCALL 30 %p = tail call fastcc i8* @alias_callee() 31 ret i8* %p 32} 33 34declare fastcc noalias i8* @noalias_callee() 35 36define dso_local fastcc i8* @alias_caller() nounwind { 37; CHECK-LABEL: alias_caller: 38; CHECK: # %bb.0: 39; CHECK-NEXT: jmp noalias_callee@PLT # TAILCALL 40 %p = tail call fastcc noalias i8* @noalias_callee() 41 ret i8* %p 42} 43 44declare fastcc i32 @i32_callee() 45 46define dso_local fastcc i32 @ret_undef() nounwind { 47; CHECK-LABEL: ret_undef: 48; CHECK: # %bb.0: 49; CHECK-NEXT: jmp i32_callee@PLT # TAILCALL 50 %p = tail call fastcc i32 @i32_callee() 51 ret i32 undef 52} 53 54declare fastcc void @does_not_return() 55 56define dso_local fastcc i32 @noret() nounwind { 57; CHECK-LABEL: noret: 58; CHECK: # %bb.0: 59; CHECK-NEXT: jmp does_not_return@PLT # TAILCALL 60 tail call fastcc void @does_not_return() 61 unreachable 62} 63 64define dso_local fastcc void @void_test(i32, i32, i32, i32) { 65; CHECK-LABEL: void_test: 66; CHECK: # %bb.0: # %entry 67; CHECK-NEXT: pushl %esi 68; CHECK-NEXT: .cfi_def_cfa_offset 8 69; CHECK-NEXT: subl $8, %esp 70; CHECK-NEXT: .cfi_def_cfa_offset 16 71; CHECK-NEXT: .cfi_offset %esi, -8 72; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax 73; CHECK-NEXT: movl {{[0-9]+}}(%esp), %esi 74; CHECK-NEXT: movl %esi, {{[0-9]+}}(%esp) 75; CHECK-NEXT: movl %eax, {{[0-9]+}}(%esp) 76; CHECK-NEXT: addl $8, %esp 77; CHECK-NEXT: .cfi_def_cfa_offset 8 78; CHECK-NEXT: popl %esi 79; CHECK-NEXT: .cfi_def_cfa_offset 4 80; CHECK-NEXT: jmp void_test # TAILCALL 81 entry: 82 tail call fastcc void @void_test( i32 %0, i32 %1, i32 %2, i32 %3) 83 ret void 84} 85 86define dso_local fastcc i1 @i1test(i32, i32, i32, i32) { 87; CHECK-LABEL: i1test: 88; CHECK: # %bb.0: # %entry 89; CHECK-NEXT: pushl %esi 90; CHECK-NEXT: .cfi_def_cfa_offset 8 91; CHECK-NEXT: subl $8, %esp 92; CHECK-NEXT: .cfi_def_cfa_offset 16 93; CHECK-NEXT: .cfi_offset %esi, -8 94; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax 95; CHECK-NEXT: movl {{[0-9]+}}(%esp), %esi 96; CHECK-NEXT: movl %esi, {{[0-9]+}}(%esp) 97; CHECK-NEXT: movl %eax, {{[0-9]+}}(%esp) 98; CHECK-NEXT: addl $8, %esp 99; CHECK-NEXT: .cfi_def_cfa_offset 8 100; CHECK-NEXT: popl %esi 101; CHECK-NEXT: .cfi_def_cfa_offset 4 102; CHECK-NEXT: jmp i1test # TAILCALL 103 entry: 104 %4 = tail call fastcc i1 @i1test( i32 %0, i32 %1, i32 %2, i32 %3) 105 ret i1 %4 106} 107