; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f -verify-machineinstrs \
; RUN:   < %s | FileCheck %s
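; This file exercises the @llvm.riscv.vmxor mask-register intrinsic for every
; scalable mask type from <vscale x 1 x i1> through <vscale x 64 x i1>. Each
; variant is expected to lower to a single vmxor.mm, with vsetvli keeping SEW
; at e8 and scaling LMUL from mf8 up to m8 as the mask element count doubles.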
declare <vscale x 1 x i1> @llvm.riscv.vmxor.nxv1i1(
  <vscale x 1 x i1>,
  <vscale x 1 x i1>,
  i32);

define <vscale x 1 x i1> @intrinsic_vmxor_mm_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmxor_mm_nxv1i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT:    vmxor.mm v0, v0, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmxor.nxv1i1(
    <vscale x 1 x i1> %0,
    <vscale x 1 x i1> %1,
    i32 %2)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmxor.nxv2i1(
  <vscale x 2 x i1>,
  <vscale x 2 x i1>,
  i32);

define <vscale x 2 x i1> @intrinsic_vmxor_mm_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmxor_mm_nxv2i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT:    vmxor.mm v0, v0, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmxor.nxv2i1(
    <vscale x 2 x i1> %0,
    <vscale x 2 x i1> %1,
    i32 %2)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmxor.nxv4i1(
  <vscale x 4 x i1>,
  <vscale x 4 x i1>,
  i32);

define <vscale x 4 x i1> @intrinsic_vmxor_mm_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmxor_mm_nxv4i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT:    vmxor.mm v0, v0, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmxor.nxv4i1(
    <vscale x 4 x i1> %0,
    <vscale x 4 x i1> %1,
    i32 %2)

  ret <vscale x 4 x i1> %a
}

declare <vscale x 8 x i1> @llvm.riscv.vmxor.nxv8i1(
  <vscale x 8 x i1>,
  <vscale x 8 x i1>,
  i32);

define <vscale x 8 x i1> @intrinsic_vmxor_mm_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmxor_mm_nxv8i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT:    vmxor.mm v0, v0, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmxor.nxv8i1(
    <vscale x 8 x i1> %0,
    <vscale x 8 x i1> %1,
    i32 %2)

  ret <vscale x 8 x i1> %a
}

declare <vscale x 16 x i1> @llvm.riscv.vmxor.nxv16i1(
  <vscale x 16 x i1>,
  <vscale x 16 x i1>,
  i32);

define <vscale x 16 x i1> @intrinsic_vmxor_mm_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmxor_mm_nxv16i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT:    vmxor.mm v0, v0, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i1> @llvm.riscv.vmxor.nxv16i1(
    <vscale x 16 x i1> %0,
    <vscale x 16 x i1> %1,
    i32 %2)

  ret <vscale x 16 x i1> %a
}

declare <vscale x 32 x i1> @llvm.riscv.vmxor.nxv32i1(
  <vscale x 32 x i1>,
  <vscale x 32 x i1>,
  i32);

define <vscale x 32 x i1> @intrinsic_vmxor_mm_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmxor_mm_nxv32i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT:    vmxor.mm v0, v0, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i1> @llvm.riscv.vmxor.nxv32i1(
    <vscale x 32 x i1> %0,
    <vscale x 32 x i1> %1,
    i32 %2)

  ret <vscale x 32 x i1> %a
}

declare <vscale x 64 x i1> @llvm.riscv.vmxor.nxv64i1(
  <vscale x 64 x i1>,
  <vscale x 64 x i1>,
  i32);

define <vscale x 64 x i1> @intrinsic_vmxor_mm_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmxor_mm_nxv64i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, mu
; CHECK-NEXT:    vmxor.mm v0, v0, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i1> @llvm.riscv.vmxor.nxv64i1(
    <vscale x 64 x i1> %0,
    <vscale x 64 x i1> %1,
    i32 %2)

  ret <vscale x 64 x i1> %a
}