; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -O0 -mtriple=x86_64-unknown-linux-gnu -fast-isel-abort=1 -o - %s | FileCheck %s -check-prefix=X64
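; Reduced test case (the var_NNN global names suggest a fuzzer-generated
; original) exercising -O0/fast-isel codegen for chains of i16/i32/i64
; xor/or and zext/sext/trunc operations on external globals.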

@var_825 = external dso_local global i16, align 2
@var_32 = external dso_local global i16, align 2
@var_901 = external dso_local global i16, align 2
@var_826 = external dso_local global i64, align 8
@var_57 = external dso_local global i64, align 8
@var_900 = external dso_local global i16, align 2
@var_28 = external dso_local constant i64, align 8
@var_827 = external dso_local global i16, align 2

define void @foo() {
; X64-LABEL: foo:
; X64:       # %bb.0: # %entry
; X64-NEXT:    movw $0, var_825
; X64-NEXT:    movzwl var_32, %ecx
; X64-NEXT:    movzwl var_901, %eax
; X64-NEXT:    movl %ecx, %edx
; X64-NEXT:    xorl %eax, %edx
; X64-NEXT:    movl %ecx, %eax
; X64-NEXT:    xorl %edx, %eax
; X64-NEXT:    addl %ecx, %eax
; X64-NEXT:    cltq
; X64-NEXT:    movq %rax, var_826
; X64-NEXT:    movzwl var_32, %eax
; X64-NEXT:    # kill: def $rax killed $eax
; X64-NEXT:    movzwl var_901, %ecx
; X64-NEXT:    xorl $51981, %ecx # imm = 0xCB0D
; X64-NEXT:    movslq %ecx, %rdx
; X64-NEXT:    movabsq $-1142377792914660288, %rcx # imm = 0xF02575732E06E440
; X64-NEXT:    xorq %rcx, %rdx
; X64-NEXT:    movq %rax, %rcx
; X64-NEXT:    xorq %rdx, %rcx
; X64-NEXT:    xorq $-1, %rcx
; X64-NEXT:    xorq %rcx, %rax
; X64-NEXT:    movq %rax, %rcx
; X64-NEXT:    orq var_57, %rcx
; X64-NEXT:    orq %rcx, %rax
; X64-NEXT:    # kill: def $ax killed $ax killed $rax
; X64-NEXT:    movw %ax, var_900
; X64-NEXT:    xorl %eax, %eax
; X64-NEXT:    # kill: def $rax killed $eax
; X64-NEXT:    cmpq var_28, %rax
; X64-NEXT:    setne %al
; X64-NEXT:    andb $1, %al
; X64-NEXT:    movzbl %al, %eax
; X64-NEXT:    # kill: def $ax killed $ax killed $eax
; X64-NEXT:    movw %ax, var_827
; X64-NEXT:    retq
entry:
  store i16 0, i16* @var_825, align 2
  %v0 = load i16, i16* @var_32, align 2
  %conv = zext i16 %v0 to i32
  %v2 = load i16, i16* @var_901, align 2
  %conv2 = zext i16 %v2 to i32
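  ; (a ^ (a ^ b)) + a folds to b + a, but at -O0 the redundant xors are
  ; expected to survive into the generated code (see the CHECK lines above).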
  %xor = xor i32 %conv, %conv2
  %xor3 = xor i32 %conv, %xor
  %add = add nsw i32 %xor3, %conv
  %conv5 = sext i32 %add to i64
  store i64 %conv5, i64* @var_826, align 8
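  ; Second chain: zext(@var_32) is xor'ed against a sign-extended constant
  ; mix, then negated via xor with -1.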
  %v4 = load i16, i16* @var_32, align 2
  %conv6 = zext i16 %v4 to i64
  %v6 = load i16, i16* @var_901, align 2
  %conv8 = zext i16 %v6 to i32
  %xor9 = xor i32 51981, %conv8
  %conv10 = sext i32 %xor9 to i64
  %xor11 = xor i64 -1142377792914660288, %conv10
  %xor12 = xor i64 %conv6, %xor11
  %neg = xor i64 %xor12, -1
  %xor13 = xor i64 %conv6, %neg
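  ; Since x ^ ~(x ^ y) == ~y, %xor13 is simply ~%xor11; the negation shows
  ; up as the literal xorq $-1 in the checks above.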
  %v9 = load i16, i16* @var_901, align 2
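  ; %v9 is a dead load left over from the reduction; it has no users.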
  %v10 = load i64, i64* @var_57, align 8
  %or = or i64 %xor13, %v10
  %or23 = or i64 %xor13, %or
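  ; %or23 == %xor13 | (%xor13 | %v10) == %or; only its low 16 bits are kept.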
  %conv24 = trunc i64 %or23 to i16
  store i16 %conv24, i16* @var_900, align 2
  %v11 = load i64, i64* @var_28, align 8
  %cmp = icmp ne i64 0, %v11
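  ; The constant stays on the LHS of the compare, so codegen materializes a
  ; zero (xorl %eax, %eax) and compares it against @var_28.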
  %conv25 = zext i1 %cmp to i16
  store i16 %conv25, i16* @var_827, align 2
  ret void
}