; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -o - %s -mtriple=x86_64--unknown-linux-gnu | FileCheck %s

@base = external dso_local local_unnamed_addr global i16, align 2

; We should be able to merge the two loads here
define i16 @unify_through_trivial_asm() {
; CHECK-LABEL: unify_through_trivial_asm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    movzwl base(%rip), %eax
; CHECK-NEXT:    #APP
; CHECK-NEXT:    nop
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    movzwl base(%rip), %eax
; CHECK-NEXT:    incl %eax
; CHECK-NEXT:    # kill: def $ax killed $ax killed $eax
; CHECK-NEXT:    retq
  %x = load i16, i16* @base, align 2
  tail call void asm sideeffect "nop", "r,~{dirflag},~{fpsr},~{flags}"(i16 %x)
  %x2 = load i16, i16* @base, align 2
  %v = add i16 %x2, 1
  ret i16 %v
}

; The memory clobber on the asm call prevents merging the loads here.
define i16 @unify_through_trivial_asm_w_memory_clobber() {
; CHECK-LABEL: unify_through_trivial_asm_w_memory_clobber:
; CHECK:       # %bb.0:
; CHECK-NEXT:    movzwl base(%rip), %eax
; CHECK-NEXT:    #APP
; CHECK-NEXT:    nop
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    movzwl base(%rip), %eax
; CHECK-NEXT:    incl %eax
; CHECK-NEXT:    # kill: def $ax killed $ax killed $eax
; CHECK-NEXT:    retq
  %x = load i16, i16* @base, align 2
  tail call void asm sideeffect "nop", "+r,~{dirflag},~{fpsr},~{flags},~{base},~{memory}"(i16 %x)
  %x2 = load i16, i16* @base, align 2
  %v = add i16 %x2, 1
  ret i16 %v
}

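; Every outb below reloads @base and re-adds 16; the loads are not merged
; across the asm calls (note the repeated movzwl/addl pairs in the CHECK lines).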
define dso_local void @fulltest() local_unnamed_addr {
; CHECK-LABEL: fulltest:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    movzwl base(%rip), %edx
; CHECK-NEXT:    addl $16, %edx
; CHECK-NEXT:    xorl %eax, %eax
; CHECK-NEXT:    # kill: def $dx killed $dx killed $edx
; CHECK-NEXT:    #APP
; CHECK-NEXT:    outb %al, %dx
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    movzwl base(%rip), %edx
; CHECK-NEXT:    addl $16, %edx
; CHECK-NEXT:    movb $1, %al
; CHECK-NEXT:    # kill: def $dx killed $dx killed $edx
; CHECK-NEXT:    #APP
; CHECK-NEXT:    outb %al, %dx
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    movzwl base(%rip), %edx
; CHECK-NEXT:    addl $16, %edx
; CHECK-NEXT:    movb $2, %al
; CHECK-NEXT:    # kill: def $dx killed $dx killed $edx
; CHECK-NEXT:    #APP
; CHECK-NEXT:    outb %al, %dx
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    movzwl base(%rip), %edx
; CHECK-NEXT:    addl $16, %edx
; CHECK-NEXT:    movb $3, %al
; CHECK-NEXT:    # kill: def $dx killed $dx killed $edx
; CHECK-NEXT:    #APP
; CHECK-NEXT:    outb %al, %dx
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    movzwl base(%rip), %edx
; CHECK-NEXT:    addl $16, %edx
; CHECK-NEXT:    movb $4, %al
; CHECK-NEXT:    # kill: def $dx killed $dx killed $edx
; CHECK-NEXT:    #APP
; CHECK-NEXT:    outb %al, %dx
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    movzwl base(%rip), %edx
; CHECK-NEXT:    addl $16, %edx
; CHECK-NEXT:    movb $5, %al
; CHECK-NEXT:    # kill: def $dx killed $dx killed $edx
; CHECK-NEXT:    #APP
; CHECK-NEXT:    outb %al, %dx
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    movzwl base(%rip), %edx
; CHECK-NEXT:    addl $16, %edx
; CHECK-NEXT:    movb $6, %al
; CHECK-NEXT:    # kill: def $dx killed $dx killed $edx
; CHECK-NEXT:    #APP
; CHECK-NEXT:    outb %al, %dx
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    movzwl base(%rip), %edx
; CHECK-NEXT:    addl $16, %edx
; CHECK-NEXT:    movb $7, %al
; CHECK-NEXT:    # kill: def $dx killed $dx killed $edx
; CHECK-NEXT:    #APP
; CHECK-NEXT:    outb %al, %dx
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    movzwl base(%rip), %edx
; CHECK-NEXT:    addl $16, %edx
; CHECK-NEXT:    movb $8, %al
; CHECK-NEXT:    # kill: def $dx killed $dx killed $edx
; CHECK-NEXT:    #APP
; CHECK-NEXT:    outb %al, %dx
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    retq
entry:
  %0 = load i16, i16* @base, align 2
  %add = add i16 %0, 16
  tail call void asm sideeffect "outb %al,${1:w}", "{ax},{dx},~{dirflag},~{fpsr},~{flags}"(i8 0, i16 %add)
  %1 = load i16, i16* @base, align 2
  %add3 = add i16 %1, 16
  tail call void asm sideeffect "outb %al,${1:w}", "{ax},{dx},~{dirflag},~{fpsr},~{flags}"(i8 1, i16 %add3)
  %2 = load i16, i16* @base, align 2
  %add6 = add i16 %2, 16
  tail call void asm sideeffect "outb %al,${1:w}", "{ax},{dx},~{dirflag},~{fpsr},~{flags}"(i8 2, i16 %add6)
  %3 = load i16, i16* @base, align 2
  %add9 = add i16 %3, 16
  tail call void asm sideeffect "outb %al,${1:w}", "{ax},{dx},~{dirflag},~{fpsr},~{flags}"(i8 3, i16 %add9)
  %4 = load i16, i16* @base, align 2
  %add12 = add i16 %4, 16
  tail call void asm sideeffect "outb %al,${1:w}", "{ax},{dx},~{dirflag},~{fpsr},~{flags}"(i8 4, i16 %add12)
  %5 = load i16, i16* @base, align 2
  %add15 = add i16 %5, 16
  tail call void asm sideeffect "outb %al,${1:w}", "{ax},{dx},~{dirflag},~{fpsr},~{flags}"(i8 5, i16 %add15)
  %6 = load i16, i16* @base, align 2
  %add18 = add i16 %6, 16
  tail call void asm sideeffect "outb %al,${1:w}", "{ax},{dx},~{dirflag},~{fpsr},~{flags}"(i8 6, i16 %add18)
  %7 = load i16, i16* @base, align 2
  %add21 = add i16 %7, 16
  tail call void asm sideeffect "outb %al,${1:w}", "{ax},{dx},~{dirflag},~{fpsr},~{flags}"(i8 7, i16 %add21)
  %8 = load i16, i16* @base, align 2
  %add24 = add i16 %8, 16
  tail call void asm sideeffect "outb %al,${1:w}", "{ax},{dx},~{dirflag},~{fpsr},~{flags}"(i8 8, i16 %add24)
  ret void
}