1 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
2 // RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-zbp -emit-llvm %s -o - \
3 // RUN:     | FileCheck %s  -check-prefix=RV32ZBP
4 
// RV32ZBP-LABEL: @grev(
// RV32ZBP-NEXT:  entry:
// RV32ZBP-NEXT:    [[RS1_ADDR:%.*]] = alloca i32, align 4
// RV32ZBP-NEXT:    [[RS2_ADDR:%.*]] = alloca i32, align 4
// RV32ZBP-NEXT:    store i32 [[RS1:%.*]], i32* [[RS1_ADDR]], align 4
// RV32ZBP-NEXT:    store i32 [[RS2:%.*]], i32* [[RS2_ADDR]], align 4
// RV32ZBP-NEXT:    [[TMP0:%.*]] = load i32, i32* [[RS1_ADDR]], align 4
// RV32ZBP-NEXT:    [[TMP1:%.*]] = load i32, i32* [[RS2_ADDR]], align 4
// RV32ZBP-NEXT:    [[TMP2:%.*]] = call i32 @llvm.riscv.grev.i32(i32 [[TMP0]], i32 [[TMP1]])
// RV32ZBP-NEXT:    ret i32 [[TMP2]]
//
// Checks that __builtin_riscv_grev_32 with a variable control operand
// lowers to a call to the @llvm.riscv.grev.i32 intrinsic.
long grev(long rs1, long rs2)
{
  return __builtin_riscv_grev_32(rs1, rs2);
}
20 
// RV32ZBP-LABEL: @grevi(
// RV32ZBP-NEXT:  entry:
// RV32ZBP-NEXT:    [[RS1_ADDR:%.*]] = alloca i32, align 4
// RV32ZBP-NEXT:    [[I:%.*]] = alloca i32, align 4
// RV32ZBP-NEXT:    store i32 [[RS1:%.*]], i32* [[RS1_ADDR]], align 4
// RV32ZBP-NEXT:    store i32 13, i32* [[I]], align 4
// RV32ZBP-NEXT:    [[TMP0:%.*]] = load i32, i32* [[RS1_ADDR]], align 4
// RV32ZBP-NEXT:    [[TMP1:%.*]] = call i32 @llvm.riscv.grev.i32(i32 [[TMP0]], i32 13)
// RV32ZBP-NEXT:    ret i32 [[TMP1]]
//
// Checks that __builtin_riscv_grev_32 with a local const control value
// lowers to @llvm.riscv.grev.i32 with the constant (13) folded directly
// into the intrinsic call rather than loaded from the alloca.
long grevi(long rs1)
{
  const int i = 13;
  return __builtin_riscv_grev_32(rs1, i);
}
36 
// RV32ZBP-LABEL: @gorc(
// RV32ZBP-NEXT:  entry:
// RV32ZBP-NEXT:    [[RS1_ADDR:%.*]] = alloca i32, align 4
// RV32ZBP-NEXT:    [[RS2_ADDR:%.*]] = alloca i32, align 4
// RV32ZBP-NEXT:    store i32 [[RS1:%.*]], i32* [[RS1_ADDR]], align 4
// RV32ZBP-NEXT:    store i32 [[RS2:%.*]], i32* [[RS2_ADDR]], align 4
// RV32ZBP-NEXT:    [[TMP0:%.*]] = load i32, i32* [[RS1_ADDR]], align 4
// RV32ZBP-NEXT:    [[TMP1:%.*]] = load i32, i32* [[RS2_ADDR]], align 4
// RV32ZBP-NEXT:    [[TMP2:%.*]] = call i32 @llvm.riscv.gorc.i32(i32 [[TMP0]], i32 [[TMP1]])
// RV32ZBP-NEXT:    ret i32 [[TMP2]]
//
// Checks that __builtin_riscv_gorc_32 with a variable control operand
// lowers to a call to the @llvm.riscv.gorc.i32 intrinsic.
long gorc(long rs1, long rs2)
{
  return __builtin_riscv_gorc_32(rs1, rs2);
}
52 
// RV32ZBP-LABEL: @gorci(
// RV32ZBP-NEXT:  entry:
// RV32ZBP-NEXT:    [[RS1_ADDR:%.*]] = alloca i32, align 4
// RV32ZBP-NEXT:    [[I:%.*]] = alloca i32, align 4
// RV32ZBP-NEXT:    store i32 [[RS1:%.*]], i32* [[RS1_ADDR]], align 4
// RV32ZBP-NEXT:    store i32 13, i32* [[I]], align 4
// RV32ZBP-NEXT:    [[TMP0:%.*]] = load i32, i32* [[RS1_ADDR]], align 4
// RV32ZBP-NEXT:    [[TMP1:%.*]] = call i32 @llvm.riscv.gorc.i32(i32 [[TMP0]], i32 13)
// RV32ZBP-NEXT:    ret i32 [[TMP1]]
//
// Checks that __builtin_riscv_gorc_32 with a local const control value
// lowers to @llvm.riscv.gorc.i32 with the constant (13) folded directly
// into the intrinsic call.
long gorci(long rs1)
{
  const int i = 13;
  return __builtin_riscv_gorc_32(rs1, i);
}
68 
// RV32ZBP-LABEL: @shfl(
// RV32ZBP-NEXT:  entry:
// RV32ZBP-NEXT:    [[RS1_ADDR:%.*]] = alloca i32, align 4
// RV32ZBP-NEXT:    [[RS2_ADDR:%.*]] = alloca i32, align 4
// RV32ZBP-NEXT:    store i32 [[RS1:%.*]], i32* [[RS1_ADDR]], align 4
// RV32ZBP-NEXT:    store i32 [[RS2:%.*]], i32* [[RS2_ADDR]], align 4
// RV32ZBP-NEXT:    [[TMP0:%.*]] = load i32, i32* [[RS1_ADDR]], align 4
// RV32ZBP-NEXT:    [[TMP1:%.*]] = load i32, i32* [[RS2_ADDR]], align 4
// RV32ZBP-NEXT:    [[TMP2:%.*]] = call i32 @llvm.riscv.shfl.i32(i32 [[TMP0]], i32 [[TMP1]])
// RV32ZBP-NEXT:    ret i32 [[TMP2]]
//
// Checks that __builtin_riscv_shfl_32 with a variable control operand
// lowers to a call to the @llvm.riscv.shfl.i32 intrinsic.
long shfl(long rs1, long rs2)
{
  return __builtin_riscv_shfl_32(rs1, rs2);
}
84 
// RV32ZBP-LABEL: @shfli(
// RV32ZBP-NEXT:  entry:
// RV32ZBP-NEXT:    [[RS1_ADDR:%.*]] = alloca i32, align 4
// RV32ZBP-NEXT:    [[I:%.*]] = alloca i32, align 4
// RV32ZBP-NEXT:    store i32 [[RS1:%.*]], i32* [[RS1_ADDR]], align 4
// RV32ZBP-NEXT:    store i32 13, i32* [[I]], align 4
// RV32ZBP-NEXT:    [[TMP0:%.*]] = load i32, i32* [[RS1_ADDR]], align 4
// RV32ZBP-NEXT:    [[TMP1:%.*]] = call i32 @llvm.riscv.shfl.i32(i32 [[TMP0]], i32 13)
// RV32ZBP-NEXT:    ret i32 [[TMP1]]
//
// Checks that __builtin_riscv_shfl_32 with a local const control value
// lowers to @llvm.riscv.shfl.i32 with the constant (13) folded directly
// into the intrinsic call.
long shfli(long rs1)
{
  const int i = 13;
  return __builtin_riscv_shfl_32(rs1, i);
}
100 
// RV32ZBP-LABEL: @unshfl(
// RV32ZBP-NEXT:  entry:
// RV32ZBP-NEXT:    [[RS1_ADDR:%.*]] = alloca i32, align 4
// RV32ZBP-NEXT:    [[RS2_ADDR:%.*]] = alloca i32, align 4
// RV32ZBP-NEXT:    store i32 [[RS1:%.*]], i32* [[RS1_ADDR]], align 4
// RV32ZBP-NEXT:    store i32 [[RS2:%.*]], i32* [[RS2_ADDR]], align 4
// RV32ZBP-NEXT:    [[TMP0:%.*]] = load i32, i32* [[RS1_ADDR]], align 4
// RV32ZBP-NEXT:    [[TMP1:%.*]] = load i32, i32* [[RS2_ADDR]], align 4
// RV32ZBP-NEXT:    [[TMP2:%.*]] = call i32 @llvm.riscv.unshfl.i32(i32 [[TMP0]], i32 [[TMP1]])
// RV32ZBP-NEXT:    ret i32 [[TMP2]]
//
// Checks that __builtin_riscv_unshfl_32 with a variable control operand
// lowers to a call to the @llvm.riscv.unshfl.i32 intrinsic.
long unshfl(long rs1, long rs2)
{
  return __builtin_riscv_unshfl_32(rs1, rs2);
}
116 
// RV32ZBP-LABEL: @unshfli(
// RV32ZBP-NEXT:  entry:
// RV32ZBP-NEXT:    [[RS1_ADDR:%.*]] = alloca i32, align 4
// RV32ZBP-NEXT:    [[I:%.*]] = alloca i32, align 4
// RV32ZBP-NEXT:    store i32 [[RS1:%.*]], i32* [[RS1_ADDR]], align 4
// RV32ZBP-NEXT:    store i32 13, i32* [[I]], align 4
// RV32ZBP-NEXT:    [[TMP0:%.*]] = load i32, i32* [[RS1_ADDR]], align 4
// RV32ZBP-NEXT:    [[TMP1:%.*]] = call i32 @llvm.riscv.unshfl.i32(i32 [[TMP0]], i32 13)
// RV32ZBP-NEXT:    ret i32 [[TMP1]]
//
// Checks that __builtin_riscv_unshfl_32 with a local const control value
// lowers to @llvm.riscv.unshfl.i32 with the constant (13) folded directly
// into the intrinsic call.
long unshfli(long rs1)
{
  const int i = 13;
  return __builtin_riscv_unshfl_32(rs1, i);
}
132 
// RV32ZBP-LABEL: @xperm_n(
// RV32ZBP-NEXT:  entry:
// RV32ZBP-NEXT:    [[RS1_ADDR:%.*]] = alloca i32, align 4
// RV32ZBP-NEXT:    [[RS2_ADDR:%.*]] = alloca i32, align 4
// RV32ZBP-NEXT:    store i32 [[RS1:%.*]], i32* [[RS1_ADDR]], align 4
// RV32ZBP-NEXT:    store i32 [[RS2:%.*]], i32* [[RS2_ADDR]], align 4
// RV32ZBP-NEXT:    [[TMP0:%.*]] = load i32, i32* [[RS1_ADDR]], align 4
// RV32ZBP-NEXT:    [[TMP1:%.*]] = load i32, i32* [[RS2_ADDR]], align 4
// RV32ZBP-NEXT:    [[TMP2:%.*]] = call i32 @llvm.riscv.xperm.n.i32(i32 [[TMP0]], i32 [[TMP1]])
// RV32ZBP-NEXT:    ret i32 [[TMP2]]
//
// Checks that __builtin_riscv_xperm_n lowers to the
// @llvm.riscv.xperm.n.i32 intrinsic.
long xperm_n(long rs1, long rs2)
{
  return __builtin_riscv_xperm_n(rs1, rs2);
}
148 
// RV32ZBP-LABEL: @xperm_b(
// RV32ZBP-NEXT:  entry:
// RV32ZBP-NEXT:    [[RS1_ADDR:%.*]] = alloca i32, align 4
// RV32ZBP-NEXT:    [[RS2_ADDR:%.*]] = alloca i32, align 4
// RV32ZBP-NEXT:    store i32 [[RS1:%.*]], i32* [[RS1_ADDR]], align 4
// RV32ZBP-NEXT:    store i32 [[RS2:%.*]], i32* [[RS2_ADDR]], align 4
// RV32ZBP-NEXT:    [[TMP0:%.*]] = load i32, i32* [[RS1_ADDR]], align 4
// RV32ZBP-NEXT:    [[TMP1:%.*]] = load i32, i32* [[RS2_ADDR]], align 4
// RV32ZBP-NEXT:    [[TMP2:%.*]] = call i32 @llvm.riscv.xperm.b.i32(i32 [[TMP0]], i32 [[TMP1]])
// RV32ZBP-NEXT:    ret i32 [[TMP2]]
//
// Checks that __builtin_riscv_xperm_b lowers to the
// @llvm.riscv.xperm.b.i32 intrinsic.
long xperm_b(long rs1, long rs2)
{
  return __builtin_riscv_xperm_b(rs1, rs2);
}
164 
// RV32ZBP-LABEL: @xperm_h(
// RV32ZBP-NEXT:  entry:
// RV32ZBP-NEXT:    [[RS1_ADDR:%.*]] = alloca i32, align 4
// RV32ZBP-NEXT:    [[RS2_ADDR:%.*]] = alloca i32, align 4
// RV32ZBP-NEXT:    store i32 [[RS1:%.*]], i32* [[RS1_ADDR]], align 4
// RV32ZBP-NEXT:    store i32 [[RS2:%.*]], i32* [[RS2_ADDR]], align 4
// RV32ZBP-NEXT:    [[TMP0:%.*]] = load i32, i32* [[RS1_ADDR]], align 4
// RV32ZBP-NEXT:    [[TMP1:%.*]] = load i32, i32* [[RS2_ADDR]], align 4
// RV32ZBP-NEXT:    [[TMP2:%.*]] = call i32 @llvm.riscv.xperm.h.i32(i32 [[TMP0]], i32 [[TMP1]])
// RV32ZBP-NEXT:    ret i32 [[TMP2]]
//
// Checks that __builtin_riscv_xperm_h lowers to the
// @llvm.riscv.xperm.h.i32 intrinsic.
long xperm_h(long rs1, long rs2)
{
  return __builtin_riscv_xperm_h(rs1, rs2);
}
180