; RUN: llc < %s -march=mips -mcpu=mips2 | FileCheck %s \
; RUN:    -check-prefix=ALL -check-prefix=GP32 \
; RUN:    -check-prefix=M2 -check-prefix=NOT-R2-R6
; RUN: llc < %s -march=mips -mcpu=mips32 | FileCheck %s \
; RUN:    -check-prefix=ALL -check-prefix=GP32 -check-prefix=NOT-R2-R6 \
; RUN:    -check-prefix=32R1-R2
; RUN: llc < %s -march=mips -mcpu=mips32r2 | FileCheck %s \
; RUN:    -check-prefix=ALL -check-prefix=GP32 \
; RUN:    -check-prefix=32R1-R2 -check-prefix=R2-R6
; RUN: llc < %s -march=mips -mcpu=mips32r6 | FileCheck %s \
; RUN:    -check-prefix=ALL -check-prefix=GP32 \
; RUN:    -check-prefix=32R6 -check-prefix=R2-R6
; RUN: llc < %s -march=mips64 -mcpu=mips3 | FileCheck %s \
; RUN:    -check-prefix=ALL -check-prefix=GP64 \
; RUN:    -check-prefix=M3 -check-prefix=NOT-R2-R6
; RUN: llc < %s -march=mips64 -mcpu=mips4 | FileCheck %s \
; RUN:    -check-prefix=ALL -check-prefix=GP64 \
; RUN:    -check-prefix=GP64-NOT-R6 -check-prefix=NOT-R2-R6
; RUN: llc < %s -march=mips64 -mcpu=mips64 | FileCheck %s \
; RUN:    -check-prefix=ALL -check-prefix=GP64 \
; RUN:    -check-prefix=GP64-NOT-R6 -check-prefix=NOT-R2-R6
; RUN: llc < %s -march=mips64 -mcpu=mips64r2 | FileCheck %s \
; RUN:    -check-prefix=ALL -check-prefix=GP64 \
; RUN:    -check-prefix=GP64-NOT-R6 -check-prefix=R2-R6
; RUN: llc < %s -march=mips64 -mcpu=mips64r6 | FileCheck %s \
; RUN:    -check-prefix=ALL -check-prefix=GP64 \
; RUN:    -check-prefix=64R6 -check-prefix=R2-R6

; A shift of an i1 by an i1 amount can only be a shift by zero, so the input
; is returned unchanged (just a register move).
define signext i1 @shl_i1(i1 signext %a, i1 signext %b) {
entry:
; ALL-LABEL: shl_i1:

  ; ALL:        move    $2, $4

  %r = shl i1 %a, %b
  ret i1 %r
}

; i8 shift: the amount is masked to the type width, the shift is done in a
; 32-bit register, and the result is sign-extended back to i8 (shift+sra
; pair pre-R2, single seb on R2 and later).
define signext i8 @shl_i8(i8 signext %a, i8 signext %b) {
entry:
; ALL-LABEL: shl_i8:

  ; NOT-R2-R6:  andi    $[[T0:[0-9]+]], $5, 255
  ; NOT-R2-R6:  sllv    $[[T1:[0-9]+]], $4, $[[T0]]
  ; NOT-R2-R6:  sll     $[[T2:[0-9]+]], $[[T1]], 24
  ; NOT-R2-R6:  sra     $2, $[[T2]], 24

  ; R2-R6:      andi    $[[T0:[0-9]+]], $5, 255
  ; R2-R6:      sllv    $[[T1:[0-9]+]], $4, $[[T0]]
  ; R2-R6:      seb     $2, $[[T1]]

  %r = shl i8 %a, %b
  ret i8 %r
}

; i16 shift: same lowering shape as i8, but masking with 65535 and
; sign-extending with a 16-bit shift pair (or seh on R2 and later).
define signext i16 @shl_i16(i16 signext %a, i16 signext %b) {
entry:
; ALL-LABEL: shl_i16:

  ; NOT-R2-R6:  andi    $[[T0:[0-9]+]], $5, 65535
  ; NOT-R2-R6:  sllv    $[[T1:[0-9]+]], $4, $[[T0]]
  ; NOT-R2-R6:  sll     $[[T2:[0-9]+]], $[[T1]], 16
  ; NOT-R2-R6:  sra     $2, $[[T2]], 16

  ; R2-R6:      andi    $[[T0:[0-9]+]], $5, 65535
  ; R2-R6:      sllv    $[[T1:[0-9]+]], $4, $[[T0]]
  ; R2-R6:      seh     $2, $[[T1]]

  %r = shl i16 %a, %b
  ret i16 %r
}

; i32 is the native GPR width on all tested targets, so the shift lowers to a
; single sllv.
define signext i32 @shl_i32(i32 signext %a, i32 signext %b) {
entry:
; ALL-LABEL: shl_i32:

  ; ALL:        sllv    $2, $4, $5

  %r = shl i32 %a, %b
  ret i32 %r
}

; i64 shift. On 32-bit targets this is expanded to a two-register sequence
; that selects between the low-word and high-word shift depending on bit 5 of
; the amount (branches on MIPS II, movn on 32r1/r2, seleqz/selnez on 32r6).
; On 64-bit targets it is a single dsllv after truncating the amount.
define signext i64 @shl_i64(i64 signext %a, i64 signext %b) {
entry:
; ALL-LABEL: shl_i64:

  ; M2:         sllv      $[[T0:[0-9]+]], $5, $7
  ; M2:         andi      $[[T1:[0-9]+]], $7, 32
  ; M2:         bnez      $[[T1]], $[[BB0:BB[0-9_]+]]
  ; M2:         move      $2, $[[T0]]
  ; M2:         sllv      $[[T2:[0-9]+]], $4, $7
  ; M2:         not       $[[T3:[0-9]+]], $7
  ; M2:         srl       $[[T4:[0-9]+]], $5, 1
  ; M2:         srlv      $[[T5:[0-9]+]], $[[T4]], $[[T3]]
  ; M2:         or        $2, $[[T2]], $[[T5]]
  ; M2:         $[[BB0]]:
  ; M2:         bnez      $[[T1]], $[[BB1:BB[0-9_]+]]
  ; M2:         addiu     $3, $zero, 0
  ; M2:         move      $3, $[[T0]]
  ; M2:         $[[BB1]]:
  ; M2:         jr        $ra
  ; M2:         nop

  ; 32R1-R2:    sllv      $[[T0:[0-9]+]], $4, $7
  ; 32R1-R2:    not       $[[T1:[0-9]+]], $7
  ; 32R1-R2:    srl       $[[T2:[0-9]+]], $5, 1
  ; 32R1-R2:    srlv      $[[T3:[0-9]+]], $[[T2]], $[[T1]]
  ; 32R1-R2:    or        $2, $[[T0]], $[[T3]]
  ; 32R1-R2:    sllv      $[[T4:[0-9]+]], $5, $7
  ; 32R1-R2:    andi      $[[T5:[0-9]+]], $7, 32
  ; 32R1-R2:    movn      $2, $[[T4]], $[[T5]]
  ; 32R1-R2:    jr        $ra
  ; 32R1-R2:    movn      $3, $zero, $[[T5]]

  ; 32R6:       sllv      $[[T0:[0-9]+]], $4, $7
  ; 32R6:       not       $[[T1:[0-9]+]], $7
  ; 32R6:       srl       $[[T2:[0-9]+]], $5, 1
  ; 32R6:       srlv      $[[T3:[0-9]+]], $[[T2]], $[[T1]]
  ; 32R6:       or        $[[T4:[0-9]+]], $[[T0]], $[[T3]]
  ; 32R6:       andi      $[[T5:[0-9]+]], $7, 32
  ; 32R6:       seleqz    $[[T6:[0-9]+]], $[[T4]], $[[T5]]
  ; 32R6:       sllv      $[[T7:[0-9]+]], $5, $7
  ; 32R6:       selnez    $[[T8:[0-9]+]], $[[T7]], $[[T5]]
  ; 32R6:       or        $2, $[[T8]], $[[T6]]
  ; 32R6:       jr        $ra
  ; 32R6:       seleqz    $3, $[[T7]], $[[T5]]

  ; GP64:       sll       $[[T0:[0-9]+]], $5, 0
  ; GP64:       dsllv     $2, $4, $[[T0]]

  %r = shl i64 %a, %b
  ret i64 %r
}

; i128 shift. 32-bit targets call the compiler-rt helper __ashlti3; 64-bit
; targets expand it in-line with the same two-register select pattern as the
; 32-bit i64 case, using doubleword shifts and bit 6 of the amount.
define signext i128 @shl_i128(i128 signext %a, i128 signext %b) {
entry:
; ALL-LABEL: shl_i128:

  ; GP32:           lw        $25, %call16(__ashlti3)($gp)

  ; M3:             sll       $[[T0:[0-9]+]], $7, 0
  ; M3:             dsllv     $[[T1:[0-9]+]], $5, $[[T0]]
  ; M3:             andi      $[[T2:[0-9]+]], $[[T0]], 64
  ; M3:             bnez      $[[T3:[0-9]+]], $[[BB0:BB[0-9_]+]]
  ; M3:             move      $2, $[[T1]]
  ; M3:             dsllv     $[[T4:[0-9]+]], $4, $[[T0]]
  ; M3:             dsrl      $[[T5:[0-9]+]], $5, 1
  ; M3:             not       $[[T6:[0-9]+]], $[[T0]]
  ; M3:             dsrlv     $[[T7:[0-9]+]], $[[T5]], $[[T6]]
  ; M3:             or        $2, $[[T4]], $[[T7]]
  ; M3:             $[[BB0]]:
  ; M3:             bnez      $[[T3]], $[[BB1:BB[0-9_]+]]
  ; M3:             daddiu    $3, $zero, 0
  ; M3:             move      $3, $[[T1]]
  ; M3:             $[[BB1]]:
  ; M3:             jr        $ra
  ; M3:             nop

  ; GP64-NOT-R6:    sll       $[[T0:[0-9]+]], $7, 0
  ; GP64-NOT-R6:    dsllv     $[[T1:[0-9]+]], $4, $[[T0]]
  ; GP64-NOT-R6:    dsrl      $[[T2:[0-9]+]], $5, 1
  ; GP64-NOT-R6:    not       $[[T3:[0-9]+]], $[[T0]]
  ; GP64-NOT-R6:    dsrlv     $[[T4:[0-9]+]], $[[T2]], $[[T3]]
  ; GP64-NOT-R6:    or        $2, $[[T1]], $[[T4]]
  ; GP64-NOT-R6:    dsllv     $3, $5, $[[T0]]
  ; GP64-NOT-R6:    andi      $[[T5:[0-9]+]], $[[T0]], 64
  ; GP64-NOT-R6:    movn      $2, $3, $[[T5]]
  ; GP64-NOT-R6:    jr        $ra
  ; GP64-NOT-R6:    movn      $3, $zero, $[[T5]]

  ; 64R6:           sll       $[[T0:[0-9]+]], $7, 0
  ; 64R6:           dsllv     $[[T1:[0-9]+]], $4, $[[T0]]
  ; 64R6:           dsrl      $[[T2:[0-9]+]], $5, 1
  ; 64R6:           not       $[[T3:[0-9]+]], $[[T0]]
  ; 64R6:           dsrlv     $[[T4:[0-9]+]], $[[T2]], $[[T3]]
  ; 64R6:           or        $[[T5:[0-9]+]], $[[T1]], $[[T4]]
  ; 64R6:           andi      $[[T6:[0-9]+]], $[[T0]], 64
  ; 64R6:           sll       $[[T7:[0-9]+]], $[[T6]], 0
  ; 64R6:           seleqz    $[[T8:[0-9]+]], $[[T5]], $[[T7]]
  ; 64R6:           dsllv     $[[T9:[0-9]+]], $5, $[[T0]]
  ; 64R6:           selnez    $[[T10:[0-9]+]], $[[T9]], $[[T7]]
  ; 64R6:           or        $2, $[[T10]], $[[T8]]
  ; 64R6:           jr        $ra
  ; 64R6:           seleqz    $3, $[[T9]], $[[T7]]

  %r = shl i128 %a, %b
  ret i128 %r
}
