; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=i686-unknown-linux-gnu < %s | FileCheck %s --check-prefix=X86
; RUN: llc -mtriple=x86_64-unknown-linux-gnu < %s | FileCheck %s --check-prefix=X64

; On X86, division is expensive. BuildRemEqFold should therefore run even
; when optimizing for size. Only optimizing for minimum size retains a plain div.
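; In outline, the fold rewrites X % 5 == 0 as (X * 0xCCCCCCCD) <u 0x33333334:
; 0xCCCCCCCD is the multiplicative inverse of 5 modulo 2^32 and 0x33333334 is
; UINT32_MAX / 5 + 1, so the multiply wraps into that range exactly when X is a
; multiple of 5. These are the imull/cmpl constants checked in test_optsize below.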

define i32 @test_minsize(i32 %X) optsize minsize nounwind readnone {
; X86-LABEL: test_minsize:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    pushl $5
; X86-NEXT:    popl %ecx
; X86-NEXT:    xorl %edx, %edx
; X86-NEXT:    divl %ecx
; X86-NEXT:    testl %edx, %edx
; X86-NEXT:    je .LBB0_1
; X86-NEXT:  # %bb.2:
; X86-NEXT:    pushl $-10
; X86-NEXT:    popl %eax
; X86-NEXT:    retl
; X86-NEXT:  .LBB0_1:
; X86-NEXT:    pushl $42
; X86-NEXT:    popl %eax
; X86-NEXT:    retl
;
; X64-LABEL: test_minsize:
; X64:       # %bb.0:
; X64-NEXT:    movl %edi, %eax
; X64-NEXT:    pushq $5
; X64-NEXT:    popq %rcx
; X64-NEXT:    xorl %edx, %edx
; X64-NEXT:    divl %ecx
; X64-NEXT:    testl %edx, %edx
; X64-NEXT:    pushq $42
; X64-NEXT:    popq %rcx
; X64-NEXT:    pushq $-10
; X64-NEXT:    popq %rax
; X64-NEXT:    cmovel %ecx, %eax
; X64-NEXT:    retq
  %rem = urem i32 %X, 5
  %cmp = icmp eq i32 %rem, 0
  %ret = select i1 %cmp, i32 42, i32 -10
  ret i32 %ret
}

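; With optsize alone the fold fires: no div is emitted and the urem is lowered to
; the multiply-by-inverse compare described above.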
define i32 @test_optsize(i32 %X) optsize nounwind readnone {
; X86-LABEL: test_optsize:
; X86:       # %bb.0:
; X86-NEXT:    imull $-858993459, {{[0-9]+}}(%esp), %eax # imm = 0xCCCCCCCD
; X86-NEXT:    cmpl $858993460, %eax # imm = 0x33333334
; X86-NEXT:    movl $42, %eax
; X86-NEXT:    jb .LBB1_2
; X86-NEXT:  # %bb.1:
; X86-NEXT:    movl $-10, %eax
; X86-NEXT:  .LBB1_2:
; X86-NEXT:    retl
;
; X64-LABEL: test_optsize:
; X64:       # %bb.0:
; X64-NEXT:    imull $-858993459, %edi, %eax # imm = 0xCCCCCCCD
; X64-NEXT:    cmpl $858993460, %eax # imm = 0x33333334
; X64-NEXT:    movl $42, %ecx
; X64-NEXT:    movl $-10, %eax
; X64-NEXT:    cmovbl %ecx, %eax
; X64-NEXT:    retq
  %rem = urem i32 %X, 5
  %cmp = icmp eq i32 %rem, 0
  %ret = select i1 %cmp, i32 42, i32 -10
  ret i32 %ret
}